Synced with FreeRTOS.

This commit is contained in:
Carl Lundin 2020-01-23 17:44:28 -08:00
parent f6393c2dbe
commit 7ceed10420
7 changed files with 12061 additions and 11872 deletions

61
queue.c
View file

@ -203,7 +203,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer
* Checks to see if a queue is a member of a queue set, and if so, notifies * Checks to see if a queue is a member of a queue set, and if so, notifies
* the queue set that the queue contains data. * the queue set that the queue contains data.
*/ */
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif #endif
/* /*
@ -373,17 +373,10 @@ Queue_t * const pxQueue = xQueue;
configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
if( uxItemSize == ( UBaseType_t ) 0 ) /* Allocate enough space to hold the maximum number of items that
{ can be in the queue at any time. It is valid for uxItemSize to be
/* There is not going to be a queue storage area. */ zero in the case the queue is used as a semaphore. */
xQueueSizeInBytes = ( size_t ) 0; xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
}
else
{
/* Allocate enough space to hold the maximum number of items that
can be in the queue at any time. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
}
/* Allocate the queue and storage area. Justification for MISRA /* Allocate the queue and storage area. Justification for MISRA
deviation as follows: pvPortMalloc() always ensures returned memory deviation as follows: pvPortMalloc() always ensures returned memory
@ -777,7 +770,7 @@ Queue_t * const pxQueue = xQueue;
#if ( configUSE_QUEUE_SETS == 1 ) #if ( configUSE_QUEUE_SETS == 1 )
{ {
UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
@ -790,7 +783,7 @@ Queue_t * const pxQueue = xQueue;
in the queue has not changed. */ in the queue has not changed. */
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{ {
/* The queue is a member of a queue set, and posting /* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to to the queue set caused a higher priority task to
@ -993,22 +986,31 @@ Queue_t * const pxQueue = xQueue;
traceQUEUE_SEND_FROM_ISR( pxQueue ); traceQUEUE_SEND_FROM_ISR( pxQueue );
/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
semaphore or mutex. That means prvCopyDataToQueue() cannot result
in a task disinheriting a priority and prvCopyDataToQueue() can be
called here even though the disinherit function does not check if
the scheduler is suspended before accessing the ready lists. */
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* The event list is not altered if the queue is locked. This will /* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */ be done when the queue is unlocked later. */
if( cTxLock == queueUNLOCKED ) if( cTxLock == queueUNLOCKED )
{ {
#if ( configUSE_QUEUE_SETS == 1 ) #if ( configUSE_QUEUE_SETS == 1 )
{ {
const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
semaphore or mutex. That means prvCopyDataToQueue() cannot result
in a task disinheriting a priority and prvCopyDataToQueue() can be
called here even though the disinherit function does not check if
the scheduler is suspended before accessing the ready lists. */
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
if( pxQueue->pxQueueSetContainer != NULL ) if( pxQueue->pxQueueSetContainer != NULL )
{ {
if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
{
/* Do not notify the queue set as an existing item
was overwritten in the queue so the number of items
in the queue has not changed. */
mtCOVERAGE_TEST_MARKER();
}
else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{ {
/* The queue is a member of a queue set, and posting /* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to to the queue set caused a higher priority task to
@ -1057,6 +1059,13 @@ Queue_t * const pxQueue = xQueue;
} }
#else /* configUSE_QUEUE_SETS */ #else /* configUSE_QUEUE_SETS */
{ {
/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
semaphore or mutex. That means prvCopyDataToQueue() cannot result
in a task disinheriting a priority and prvCopyDataToQueue() can be
called here even though the disinherit function does not check if
the scheduler is suspended before accessing the ready lists. */
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
@ -1173,7 +1182,7 @@ Queue_t * const pxQueue = xQueue;
{ {
if( pxQueue->pxQueueSetContainer != NULL ) if( pxQueue->pxQueueSetContainer != NULL )
{ {
if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{ {
/* The semaphore is a member of a queue set, and /* The semaphore is a member of a queue set, and
posting to the queue set caused a higher priority posting to the queue set caused a higher priority
@ -2185,7 +2194,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
{ {
if( pxQueue->pxQueueSetContainer != NULL ) if( pxQueue->pxQueueSetContainer != NULL )
{ {
if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{ {
/* The queue is a member of a queue set, and posting to /* The queue is a member of a queue set, and posting to
the queue set caused a higher priority task to unblock. the queue set caused a higher priority task to unblock.
@ -2875,7 +2884,7 @@ Queue_t * const pxQueue = xQueue;
#if ( configUSE_QUEUE_SETS == 1 ) #if ( configUSE_QUEUE_SETS == 1 )
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
{ {
Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer; Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
BaseType_t xReturn = pdFALSE; BaseType_t xReturn = pdFALSE;
@ -2892,7 +2901,7 @@ Queue_t * const pxQueue = xQueue;
traceQUEUE_SEND( pxQueueSetContainer ); traceQUEUE_SEND( pxQueueSetContainer );
/* The data copied is the handle of the queue that contains data. */ /* The data copied is the handle of the queue that contains data. */
xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition ); xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
if( cTxLock == queueUNLOCKED ) if( cTxLock == queueUNLOCKED )
{ {

199
tasks.c
View file

@ -300,7 +300,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to pr
responsible for resulting newlib operation. User must be familiar with responsible for resulting newlib operation. User must be familiar with
newlib and must provide system-wide implementations of the necessary newlib and must provide system-wide implementations of the necessary
stubs. Be warned that (at the time of writing) the current newlib design stubs. Be warned that (at the time of writing) the current newlib design
implements a system-wide malloc() that must be provided with locks. */ implements a system-wide malloc() that must be provided with locks.
See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
for additional information. */
struct _reent xNewLib_reent; struct _reent xNewLib_reent;
#endif #endif
@ -993,7 +996,9 @@ UBaseType_t x;
#if ( configUSE_NEWLIB_REENTRANT == 1 ) #if ( configUSE_NEWLIB_REENTRANT == 1 )
{ {
/* Initialise this task's Newlib reent structure. */ /* Initialise this task's Newlib reent structure.
See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
for additional information. */
_REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
} }
#endif #endif
@ -1204,6 +1209,10 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
check the xTasksWaitingTermination list. */ check the xTasksWaitingTermination list. */
++uxDeletedTasksWaitingCleanUp; ++uxDeletedTasksWaitingCleanUp;
/* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
traceTASK_DELETE( pxTCB );
/* The pre-delete hook is primarily for the Windows simulator, /* The pre-delete hook is primarily for the Windows simulator,
in which Windows specific clean up operations are performed, in which Windows specific clean up operations are performed,
after which it is not possible to yield away from this task - after which it is not possible to yield away from this task -
@ -1214,14 +1223,13 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
else else
{ {
--uxCurrentNumberOfTasks; --uxCurrentNumberOfTasks;
traceTASK_DELETE( pxTCB );
prvDeleteTCB( pxTCB ); prvDeleteTCB( pxTCB );
/* Reset the next expected unblock time in case it referred to /* Reset the next expected unblock time in case it referred to
the task that has just been deleted. */ the task that has just been deleted. */
prvResetNextTaskUnblockTime(); prvResetNextTaskUnblockTime();
} }
traceTASK_DELETE( pxTCB );
} }
taskEXIT_CRITICAL(); taskEXIT_CRITICAL();
@ -2041,7 +2049,9 @@ BaseType_t xReturn;
#if ( configUSE_NEWLIB_REENTRANT == 1 ) #if ( configUSE_NEWLIB_REENTRANT == 1 )
{ {
/* Switch Newlib's _impure_ptr variable to point to the _reent /* Switch Newlib's _impure_ptr variable to point to the _reent
structure specific to the task that will run first. */ structure specific to the task that will run first.
See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
for additional information. */
_impure_ptr = &( pxCurrentTCB->xNewLib_reent ); _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
} }
#endif /* configUSE_NEWLIB_REENTRANT */ #endif /* configUSE_NEWLIB_REENTRANT */
@ -2103,7 +2113,17 @@ void vTaskSuspendAll( void )
BaseType_t. Please read Richard Barry's reply in the following link to a BaseType_t. Please read Richard Barry's reply in the following link to a
post in the FreeRTOS support forum before reporting this as a bug! - post in the FreeRTOS support forum before reporting this as a bug! -
http://goo.gl/wu4acr */ http://goo.gl/wu4acr */
/* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
do not otherwise exhibit real time behaviour. */
portSOFTWARE_BARRIER();
/* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
is used to allow calls to vTaskSuspendAll() to nest. */
++uxSchedulerSuspended; ++uxSchedulerSuspended;
/* Enforces ordering for ports and optimised compilers that may otherwise place
the above increment elsewhere. */
portMEMORY_BARRIER(); portMEMORY_BARRIER();
} }
/*----------------------------------------------------------*/ /*----------------------------------------------------------*/
@ -2616,7 +2636,7 @@ BaseType_t xYieldRequired = pdFALSE;
relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */ relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
configASSERT( uxSchedulerSuspended == 0 ); configASSERT( uxSchedulerSuspended == 0 );
/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occuring when /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
vTaskSuspendAll(); vTaskSuspendAll();
xPendedTicks += xTicksToCatchUp; xPendedTicks += xTicksToCatchUp;
@ -2626,6 +2646,91 @@ BaseType_t xYieldRequired = pdFALSE;
} }
/*----------------------------------------------------------*/ /*----------------------------------------------------------*/
#if ( INCLUDE_xTaskAbortDelay == 1 )

	BaseType_t xTaskAbortDelayFromISR( TaskHandle_t xTask, BaseType_t * const pxHigherPriorityTaskWoken )
	{
	TCB_t *pxTCB = xTask;
	BaseType_t xReturn;
	UBaseType_t uxSavedInterruptStatus;

		configASSERT( pxTCB );

		/* RTOS ports that support interrupt nesting have the concept of a maximum
		system call (or maximum API call) interrupt priority.  Interrupts that are
		above the maximum system call priority are kept permanently enabled, even
		when the RTOS kernel is in a critical section, but cannot make any calls to
		FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
		then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
		failure if a FreeRTOS API function is called from an interrupt that has been
		assigned a priority above the configured maximum system call priority.
		Only FreeRTOS functions that end in FromISR can be called from interrupts
		that have been assigned a priority at or (logically) below the maximum
		system call	interrupt priority.  FreeRTOS maintains a separate interrupt
		safe API to ensure interrupt entry is as fast and as simple as possible.
		More information (albeit Cortex-M specific) is provided on the following
		link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
		portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

		uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
		{
			/* Only a task that is actually in the Blocked state can have its
			delay prematurely aborted. */
			if( eTaskGetState( xTask ) != eBlocked )
			{
				xReturn = pdFAIL;
			}
			else
			{
				xReturn = pdPASS;

				/* Remove the reference to the task from the blocked list.  A
				higher priority interrupt won't touch the xStateListItem because
				of the critical section. */
				( void ) uxListRemove( &( pxTCB->xStateListItem ) );

				/* If the task was also waiting on an event then remove it from
				the event list too. */
				if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
				{
					mtCOVERAGE_TEST_MARKER();
				}
				else
				{
					( void ) uxListRemove( &( pxTCB->xEventListItem ) );

					/* This lets the task know it was forcibly removed from the
					blocked state so it should not re-evaluate its block time and
					then block again. */
					pxTCB->ucDelayAborted = pdTRUE;
				}

				/* Place the unblocked task into the appropriate ready list. */
				prvAddTaskToReadyList( pxTCB );

				if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
				{
					if( pxHigherPriorityTaskWoken != NULL )
					{
						/* Pend the yield to be performed when the scheduler
						is unsuspended. */
						*pxHigherPriorityTaskWoken = pdTRUE;
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

		return xReturn;
	}

#endif
/*----------------------------------------------------------*/
#if ( INCLUDE_xTaskAbortDelay == 1 ) #if ( INCLUDE_xTaskAbortDelay == 1 )
BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
@ -2657,6 +2762,10 @@ BaseType_t xYieldRequired = pdFALSE;
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{ {
( void ) uxListRemove( &( pxTCB->xEventListItem ) ); ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
/* This lets the task know it was forcibly removed from the
blocked state so it should not re-evaluate its block time and
then block again. */
pxTCB->ucDelayAborted = pdTRUE; pxTCB->ucDelayAborted = pdTRUE;
} }
else else
@ -2843,6 +2952,19 @@ BaseType_t xSwitchRequired = pdFALSE;
} }
} }
#endif /* configUSE_TICK_HOOK */ #endif /* configUSE_TICK_HOOK */
#if ( configUSE_PREEMPTION == 1 )
{
if( xYieldPending != pdFALSE )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
} }
else else
{ {
@ -2857,19 +2979,6 @@ BaseType_t xSwitchRequired = pdFALSE;
#endif #endif
} }
#if ( configUSE_PREEMPTION == 1 )
{
if( xYieldPending != pdFALSE )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
return xSwitchRequired; return xSwitchRequired;
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -3049,7 +3158,9 @@ void vTaskSwitchContext( void )
#if ( configUSE_NEWLIB_REENTRANT == 1 ) #if ( configUSE_NEWLIB_REENTRANT == 1 )
{ {
/* Switch Newlib's _impure_ptr variable to point to the _reent /* Switch Newlib's _impure_ptr variable to point to the _reent
structure specific to this task. */ structure specific to this task.
See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
for additional information. */
_impure_ptr = &( pxCurrentTCB->xNewLib_reent ); _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
} }
#endif /* configUSE_NEWLIB_REENTRANT */ #endif /* configUSE_NEWLIB_REENTRANT */
@ -3216,6 +3327,20 @@ TCB_t *pxUnblockedTCB;
configASSERT( pxUnblockedTCB ); configASSERT( pxUnblockedTCB );
( void ) uxListRemove( pxEventListItem ); ( void ) uxListRemove( pxEventListItem );
#if( configUSE_TICKLESS_IDLE != 0 )
{
/* If a task is blocked on a kernel object then xNextTaskUnblockTime
might be set to the blocked task's time out time. If the task is
unblocked for a reason other than a timeout xNextTaskUnblockTime is
normally left unchanged, because it is automatically reset to a new
value when the tick count equals xNextTaskUnblockTime. However if
tickless idling is used it might be more important to enter sleep mode
at the earliest possible time - so reset xNextTaskUnblockTime here to
ensure it is updated at the earliest possible time. */
prvResetNextTaskUnblockTime();
}
#endif
/* Remove the task from the delayed list and add it to the ready list. The /* Remove the task from the delayed list and add it to the ready list. The
scheduler is suspended so interrupts will not be accessing the ready scheduler is suspended so interrupts will not be accessing the ready
lists. */ lists. */
@ -3496,6 +3621,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
const UBaseType_t uxNonApplicationTasks = 1; const UBaseType_t uxNonApplicationTasks = 1;
eSleepModeStatus eReturn = eStandardSleep; eSleepModeStatus eReturn = eStandardSleep;
/* This function must be called from a critical section. */
if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
{ {
/* A task was made ready while the scheduler was suspended. */ /* A task was made ready while the scheduler was suspended. */
@ -3537,6 +3664,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
{ {
pxTCB = prvGetTCBFromHandle( xTaskToSet ); pxTCB = prvGetTCBFromHandle( xTaskToSet );
configASSERT( pxTCB != NULL );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
} }
} }
@ -3871,7 +3999,9 @@ static void prvCheckTasksWaitingTermination( void )
portCLEAN_UP_TCB( pxTCB ); portCLEAN_UP_TCB( pxTCB );
/* Free up the memory allocated by the scheduler for the task. It is up /* Free up the memory allocated by the scheduler for the task. It is up
to the task to free any memory allocated at the application level. */ to the task to free any memory allocated at the application level.
See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
for additional information. */
#if ( configUSE_NEWLIB_REENTRANT == 1 ) #if ( configUSE_NEWLIB_REENTRANT == 1 )
{ {
_reclaim_reent( &( pxTCB->xNewLib_reent ) ); _reclaim_reent( &( pxTCB->xNewLib_reent ) );
@ -5082,7 +5212,6 @@ TickType_t uxReturn;
} }
#endif /* configUSE_TASK_NOTIFICATIONS */ #endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if( configUSE_TASK_NOTIFICATIONS == 1 ) #if( configUSE_TASK_NOTIFICATIONS == 1 )
@ -5116,6 +5245,32 @@ TickType_t uxReturn;
#endif /* configUSE_TASK_NOTIFICATIONS */ #endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear )
	{
	TCB_t *pxTCB;
	uint32_t ulReturn;

		/* If null is passed in here then it is the calling task that is having
		its notification state cleared. */
		pxTCB = prvGetTCBFromHandle( xTask );

		taskENTER_CRITICAL();
		{
			/* Return the notification value as it was before the bits were
			cleared, then clear the bit mask.  Note the value must be read from
			pxTCB (the target task), not pxCurrentTCB, otherwise the calling
			task's value is returned even when a different task's handle was
			passed in. */
			ulReturn = pxTCB->ulNotifiedValue;
			pxTCB->ulNotifiedValue &= ~ulBitsToClear;
		}
		taskEXIT_CRITICAL();

		return ulReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) #if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
uint32_t ulTaskGetIdleRunTimeCounter( void ) uint32_t ulTaskGetIdleRunTimeCounter( void )

View file

@ -182,7 +182,7 @@ static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const Tic
/* /*
* An active timer has reached its expire time. Reload the timer if it is an * An active timer has reached its expire time. Reload the timer if it is an
* auto reload timer, then call its callback. * auto-reload timer, then call its callback.
*/ */
static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) PRIVILEGED_FUNCTION; static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) PRIVILEGED_FUNCTION;
@ -292,7 +292,7 @@ BaseType_t xReturn = pdFAIL;
if( pxNewTimer != NULL ) if( pxNewTimer != NULL )
{ {
/* Status is thus far zero as the timer is not created statically /* Status is thus far zero as the timer is not created statically
and has not been started. The autoreload bit may get set in and has not been started. The auto-reload bit may get set in
prvInitialiseNewTimer. */ prvInitialiseNewTimer. */
pxNewTimer->ucStatus = 0x00; pxNewTimer->ucStatus = 0x00;
prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer );
@ -334,7 +334,7 @@ BaseType_t xReturn = pdFAIL;
{ {
/* Timers can be created statically or dynamically so note this /* Timers can be created statically or dynamically so note this
timer was created statically in case it is later deleted. The timer was created statically in case it is later deleted. The
autoreload bit may get set in prvInitialiseNewTimer(). */ auto-reload bit may get set in prvInitialiseNewTimer(). */
pxNewTimer->ucStatus = tmrSTATUS_IS_STATICALLY_ALLOCATED; pxNewTimer->ucStatus = tmrSTATUS_IS_STATICALLY_ALLOCATED;
prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer );
@ -459,6 +459,31 @@ Timer_t * pxTimer = xTimer;
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
UBaseType_t uxTimerGetReloadMode( TimerHandle_t xTimer )
{
Timer_t * pxTimer = xTimer;
UBaseType_t uxReturn;

	configASSERT( xTimer );
	taskENTER_CRITICAL();
	{
		/* Report pdTRUE when the timer's auto-reload status bit is set,
		otherwise pdFALSE (the timer is a one-shot timer). */
		if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 )
		{
			uxReturn = ( UBaseType_t ) pdTRUE;
		}
		else
		{
			uxReturn = ( UBaseType_t ) pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return uxReturn;
}
/*-----------------------------------------------------------*/
TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer )
{ {
Timer_t * pxTimer = xTimer; Timer_t * pxTimer = xTimer;
@ -489,7 +514,7 @@ Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTi
( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); ( void ) uxListRemove( &( pxTimer->xTimerListItem ) );
traceTIMER_EXPIRED( pxTimer ); traceTIMER_EXPIRED( pxTimer );
/* If the timer is an auto reload timer then calculate the next /* If the timer is an auto-reload timer then calculate the next
expiry time and re-insert the timer in the list of active timers. */ expiry time and re-insert the timer in the list of active timers. */
if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 )
{ {