Compare commits


6 commits

Author SHA1 Message Date
Darian Leung 26655c7e54 feat(freertos/smp): Add Granular Locking V4 proposal documents
Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung 95fa53c1ee change(freertos/smp): Update timers.c locking
Updated timers.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL() with tmrENTER/EXIT_CRITICAL().
- Added vTimerEnterCritical() and vTimerExitCritical() to map to the
  data group critical section macros (see the sketch below).
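For illustration, a minimal sketch of how the timers data group critical section could be wired up, with interrupt-state saving and critical nesting omitted. The spinlock port hooks used here (portSPINLOCK_TYPE, portSPINLOCK_STATIC_INIT, portGET_SPINLOCK(), portRELEASE_SPINLOCK()) are assumed names for this sketch, not confirmed macro names from the tree:

    /* Sketch only: the timers data group owns two spinlocks. The port hooks
     * below are placeholder names, and critical nesting is omitted. */
    #if ( portUSING_GRANULAR_LOCKS == 1 )

        PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portSPINLOCK_STATIC_INIT;
        PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portSPINLOCK_STATIC_INIT;

        static void vTimerEnterCritical( void )
        {
            /* Mask interrupts on this core, then take both spinlocks so that
             * neither tasks nor ISRs on other cores can touch the timer lists. */
            portDISABLE_INTERRUPTS();
            portGET_SPINLOCK( portGET_CORE_ID(), &xTaskSpinlock );
            portGET_SPINLOCK( portGET_CORE_ID(), &xISRSpinlock );
        }

        static void vTimerExitCritical( void )
        {
            /* Release in reverse order, then unmask interrupts. */
            portRELEASE_SPINLOCK( portGET_CORE_ID(), &xISRSpinlock );
            portRELEASE_SPINLOCK( portGET_CORE_ID(), &xTaskSpinlock );
            portENABLE_INTERRUPTS();
        }

        /* The data group critical section macros map to the helpers above. */
        #define tmrENTER_CRITICAL()    vTimerEnterCritical()
        #define tmrEXIT_CRITICAL()     vTimerExitCritical()

    #else /* portUSING_GRANULAR_LOCKS */

        /* Without granular locks, fall back to the kernel-wide critical section. */
        #define tmrENTER_CRITICAL()    taskENTER_CRITICAL()
        #define tmrEXIT_CRITICAL()     taskEXIT_CRITICAL()

    #endif /* portUSING_GRANULAR_LOCKS */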

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung e1f69a6763 change(freertos/smp): Update stream_buffer.c locking
Updated stream_buffer.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL/_FROM_ISR() with sbENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vStreambuffersEnterCritical/FromISR() and
  vStreambuffersExitCritical/FromISR() to map to the data group critical
  section macros.
- Added prvLockStreamBufferForTasks() and prvUnlockStreamBufferForTasks() to suspend
  the stream buffer when executing non-deterministic code (see the sketch below).
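A minimal sketch of the task-level lock/unlock pair described above, assuming the xTaskSpinlock member added to the stream buffer structure and placeholder portGET_SPINLOCK()/portRELEASE_SPINLOCK() port hooks:

    /* Sketch only: lock the stream buffer against tasks on other cores while
     * leaving interrupts enabled, so that long (non-deterministic) operations
     * do not hold off ISRs. ISR access remains serialised by the shorter
     * critical sections that take xISRSpinlock. */
    static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
    {
        #if ( portUSING_GRANULAR_LOCKS == 1 )
            portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
        #else
            /* Without granular locks, suspend the scheduler instead. */
            ( void ) pxStreamBuffer;
            vTaskSuspendAll();
        #endif
    }

    static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
    {
        #if ( portUSING_GRANULAR_LOCKS == 1 )
            portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
        #else
            ( void ) pxStreamBuffer;
            ( void ) xTaskResumeAll();
        #endif
    }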

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung cba76ba77d change(freertos/smp): Update event_groups.c locking
Updated event_groups.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL/_FROM_ISR() with event_groupsENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vEventGroupsEnterCritical/FromISR() and
  vEventGroupsExitCritical/FromISR() functions that map to the data group
  critical section macros.
- Added prvLockEventGroupForTasks() and prvUnlockEventGroupForTasks() to suspend
  the event group when executing non-deterministic code.
- xEventGroupSetBits() and vEventGroupDelete() access the kernel data group
  directly, so vTaskSuspendAll()/xTaskResumeAll() were added to these functions
  (see the sketch below).
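A hedged sketch of the resulting ordering in xEventGroupSetBits(): the added vTaskSuspendAll()/xTaskResumeAll() pair covers the walk over the waiting-task list, which touches kernel-owned ready lists, while the event group's own critical section protects the bits. The critical section parameterisation and the elided body are assumptions for illustration:

    /* Sketch only, not the actual function body. */
    EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
                                    const EventBits_t uxBitsToSet )
    {
        EventGroup_t * pxEventBits = xEventGroup;
        EventBits_t uxReturnBits;

        /* Unblocking waiting tasks accesses the kernel data group (ready
         * lists), so prevent other tasks from running while the list is
         * walked. */
        vTaskSuspendAll();

        /* The event group's own critical section protects the bits and the
         * list of tasks waiting on this event group. */
        event_groupsENTER_CRITICAL( pxEventBits );
        {
            pxEventBits->uxEventBits |= uxBitsToSet;

            /* ... walk xTasksWaitingForBits and unblock any task whose wait
             * condition is now met ... */

            uxReturnBits = pxEventBits->uxEventBits;
        }
        event_groupsEXIT_CRITICAL( pxEventBits );

        ( void ) xTaskResumeAll();

        return uxReturnBits;
    }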

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung b256ca88b1 change(freertos/smp): Update queue.c locking
Updated queue.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL/_FROM_ISR() with queueENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vQueueEnterCritical/FromISR() and vQueueExitCritical/FromISR(),
  which map to the data group critical section macros.
- Added prvLockQueueForTasks() and prvUnlockQueueForTasks() as the granular locking
  equivalents of prvLockQueue() and prvUnlockQueue() respectively (see the sketch below).
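A hedged sketch of the granular equivalent of prvLockQueue(); the xTaskSpinlock member, the queueENTER/EXIT_CRITICAL() parameterisation and the portGET_SPINLOCK() hook are assumptions, and the spinlock is assumed to allow nested acquisition from the same core:

    /* Sketch only: keep tasks on other cores off this queue while interrupts
     * stay enabled, and, as in prvLockQueue(), mark the queue locked so that
     * ISRs which post meanwhile record their work in cTxLock/cRxLock for later
     * processing in prvUnlockQueueForTasks(). */
    static void prvLockQueueForTasks( Queue_t * const pxQueue )
    {
        portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) );

        queueENTER_CRITICAL( pxQueue );
        {
            if( pxQueue->cRxLock == queueUNLOCKED )
            {
                pxQueue->cRxLock = queueLOCKED_UNMODIFIED;
            }

            if( pxQueue->cTxLock == queueUNLOCKED )
            {
                pxQueue->cTxLock = queueLOCKED_UNMODIFIED;
            }
        }
        queueEXIT_CRITICAL( pxQueue );
    }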

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung 25c74e3d4b change(freertos/smp): Update tasks.c locking
Updated critical sections to use granular locks.

Some tasks.c APIs relied on their callers to enter a critical section. This
assumption no longer holds under granular locking, so critical sections were
added to the following functions (see the sketch after this list):

- `vTaskInternalSetTimeOutState()`
- `xTaskIncrementTick()`
- `vTaskSwitchContext()`
- `xTaskRemoveFromEventList()`
- `eTaskConfirmSleepModeStatus()`
- `xTaskPriorityDisinherit()`
- `pvTaskIncrementMutexHeldCount()`
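A hedged sketch of the pattern, using xTaskPriorityDisinherit() as the example: the caller (queue.c) no longer provides a kernel critical section, so the function takes the kernel data group critical section itself (kernelENTER/EXIT_CRITICAL(), as used throughout the diff below). The disinheritance logic is unchanged and elided:

    BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
    {
        BaseType_t xReturn = pdFALSE;

        if( pxMutexHolder != NULL )
        {
            /* Previously the caller held taskENTER_CRITICAL(), which also
             * covered the kernel data; under granular locking the caller only
             * holds the queue data group's lock, so enter the kernel critical
             * section here. */
            kernelENTER_CRITICAL();
            {
                /* ... existing priority disinheritance logic over the holder's
                 * TCB and the kernel ready lists ... */
            }
            kernelEXIT_CRITICAL();
        }

        return xReturn;
    }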

Added missing suspensions to the following functions:

- `vTaskPlaceOnEventList()`
- `vTaskPlaceOnUnorderedEventList()`
- `vTaskPlaceOnEventListRestricted()`

Fixed the locking in vTaskSwitchContext()

vTaskSwitchContext() must acquire both kernel locks, viz., the task lock and
the ISR lock, because it can be called from either task context or ISR
context. It must also not alter the interrupt state prematurely (see the
sketch below).
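A hedged sketch of the shape this gives vTaskSwitchContext(): both kernel spinlocks are taken directly via the kernelGET_TASK_LOCK()/kernelGET_ISR_LOCK() macros that appear in the diff below rather than through vTaskEnterCritical(), since the caller (a task-level yield or an ISR exit path) has already masked interrupts and the interrupt state must be left untouched. The selection logic is elided:

    void vTaskSwitchContext( BaseType_t xCoreID )
    {
        /* Interrupts are already disabled by the caller and are not altered
         * here. Take both kernel locks so the switch is safe against tasks
         * and ISRs running on other cores, whichever context we were called
         * from. */
        kernelGET_TASK_LOCK( xCoreID );
        kernelGET_ISR_LOCK( xCoreID );
        {
            /* ... check uxSchedulerSuspended, select the highest priority
             * ready task for this core and update pxCurrentTCBs[ xCoreID ] ... */
        }
        kernelRELEASE_ISR_LOCK( xCoreID );
        kernelRELEASE_TASK_LOCK( xCoreID );
    }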

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:10:55 +02:00

tasks.c (669 changed lines)

@@ -360,7 +360,7 @@
} \
else \
{ \
if( pxCurrentTCBs[ ( xCoreID ) ]->xPreemptionDisable == 0U ) \
if( pxCurrentTCBs[ ( xCoreID ) ]->uxPreemptionDisable == 0U ) \
{ \
/* Request other core to yield if it is not requested before. */ \
if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
@@ -457,7 +457,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
UBaseType_t uxPreemptionDisable; /**< Used to prevent the task from being preempted. */
#endif
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
@@ -1015,7 +1015,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
#endif
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif
{
xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
@@ -1321,7 +1321,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
( xYieldPendings[ uxCore ] == pdFALSE ) )
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == 0U )
if( pxCurrentTCBs[ uxCore ]->uxPreemptionDisable == 0U )
#endif
{
xLowestPriority = xTaskPriority;
@@ -2290,6 +2290,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
BaseType_t xDeleteTCBInIdleTask = pdFALSE;
BaseType_t xTaskIsRunningOrYielding;
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDeferredDeletion = pdFALSE;
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
traceENTER_vTaskDelete( xTaskToDelete );
kernelENTER_CRITICAL();
@@ -2303,12 +2307,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
/* If the task has disabled preemption, we need to defer the deletion until the
* task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */
if( pxTCB->xPreemptionDisable > 0U )
if( pxTCB->uxPreemptionDisable > 0U )
{
pxTCB->xDeferredStateChange |= tskDEFERRED_DELETION;
kernelEXIT_CRITICAL();
traceRETURN_vTaskDelete();
return;
xDeferredDeletion = pdTRUE;
}
else
{
@@ -2316,135 +2318,145 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
#endif /* configUSE_TASK_PREEMPTION_DISABLE */
/* Remove task from the ready/delayed list. */
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredDeletion == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Increment the uxTaskNumber also so kernel aware debuggers can
* detect that the task lists need re-generating. This is done before
* portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
* not return. */
uxTaskNumber++;
/* Use temp variable as distinct sequence points for reading volatile
* variables prior to a logical operator to ensure compliance with
* MISRA C 2012 Rule 13.5. */
xTaskIsRunningOrYielding = taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB );
/* If the task is running (or yielding), we must add it to the
* termination list so that an idle task can delete it when it is
* no longer running. */
if( ( xSchedulerRunning != pdFALSE ) && ( xTaskIsRunningOrYielding != pdFALSE ) )
{
/* A running task or a task which is scheduled to yield is being
* deleted. This cannot complete when the task is still running
* on a core, as a context switch to another task is required.
* Place the task in the termination list. The idle task will check
* the termination list and free up any memory allocated by the
* scheduler for the TCB and stack of the deleted task. */
vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
/* Increment the ucTasksDeleted variable so the idle task knows
* there is a task that has been deleted and that it should therefore
* check the xTasksWaitingTermination list. */
++uxDeletedTasksWaitingCleanUp;
/* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
* portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
traceTASK_DELETE( pxTCB );
/* Delete the task TCB in idle task. */
xDeleteTCBInIdleTask = pdTRUE;
/* The pre-delete hook is primarily for the Windows simulator,
* in which Windows specific clean up operations are performed,
* after which it is not possible to yield away from this task -
* hence xYieldPending is used to latch that a context switch is
* required. */
#if ( configNUMBER_OF_CORES == 1 )
portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) );
#else
portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
#endif
/* In the case of SMP, it is possible that the task being deleted
* is running on another core. We must evict the task before
* exiting the critical section to ensure that the task cannot
* take an action which puts it back on ready/state/event list,
* thereby nullifying the delete operation. Once evicted, the
* task won't be scheduled ever as it will no longer be on the
* ready list. */
#if ( configNUMBER_OF_CORES > 1 )
/* Remove task from the ready/delayed list. */
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
{
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
{
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
{
configASSERT( uxSchedulerSuspended == 0 );
taskYIELD_WITHIN_API();
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
}
}
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
}
else
{
--uxCurrentNumberOfTasks;
traceTASK_DELETE( pxTCB );
/* Reset the next expected unblock time in case it referred to
* the task that has just been deleted. */
prvResetNextTaskUnblockTime();
}
}
kernelEXIT_CRITICAL();
/* If the task is not deleting itself, call prvDeleteTCB from outside of
* critical section. If a task deletes itself, prvDeleteTCB is called
* from prvCheckTasksWaitingTermination which is called from Idle task. */
if( xDeleteTCBInIdleTask != pdTRUE )
{
prvDeleteTCB( pxTCB );
}
/* Force a reschedule if it is the currently running task that has just
* been deleted. */
#if ( configNUMBER_OF_CORES == 1 )
{
if( xSchedulerRunning != pdFALSE )
{
if( pxTCB == pxCurrentTCB )
{
configASSERT( uxSchedulerSuspended == 0 );
taskYIELD_WITHIN_API();
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Increment the uxTaskNumber also so kernel aware debuggers can
* detect that the task lists need re-generating. This is done before
* portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
* not return. */
uxTaskNumber++;
/* Use temp variable as distinct sequence points for reading volatile
* variables prior to a logical operator to ensure compliance with
* MISRA C 2012 Rule 13.5. */
xTaskIsRunningOrYielding = taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB );
/* If the task is running (or yielding), we must add it to the
* termination list so that an idle task can delete it when it is
* no longer running. */
if( ( xSchedulerRunning != pdFALSE ) && ( xTaskIsRunningOrYielding != pdFALSE ) )
{
/* A running task or a task which is scheduled to yield is being
* deleted. This cannot complete when the task is still running
* on a core, as a context switch to another task is required.
* Place the task in the termination list. The idle task will check
* the termination list and free up any memory allocated by the
* scheduler for the TCB and stack of the deleted task. */
vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
/* Increment the ucTasksDeleted variable so the idle task knows
* there is a task that has been deleted and that it should therefore
* check the xTasksWaitingTermination list. */
++uxDeletedTasksWaitingCleanUp;
/* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
* portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
traceTASK_DELETE( pxTCB );
/* Delete the task TCB in idle task. */
xDeleteTCBInIdleTask = pdTRUE;
/* The pre-delete hook is primarily for the Windows simulator,
* in which Windows specific clean up operations are performed,
* after which it is not possible to yield away from this task -
* hence xYieldPending is used to latch that a context switch is
* required. */
#if ( configNUMBER_OF_CORES == 1 )
portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) );
#else
portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
#endif
/* In the case of SMP, it is possible that the task being deleted
* is running on another core. We must evict the task before
* exiting the critical section to ensure that the task cannot
* take an action which puts it back on ready/state/event list,
* thereby nullifying the delete operation. Once evicted, the
* task won't be scheduled ever as it will no longer be on the
* ready list. */
#if ( configNUMBER_OF_CORES > 1 )
{
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
{
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
{
configASSERT( uxSchedulerSuspended == 0 );
taskYIELD_WITHIN_API();
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
}
}
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
}
else
{
--uxCurrentNumberOfTasks;
traceTASK_DELETE( pxTCB );
/* Reset the next expected unblock time in case it referred to
* the task that has just been deleted. */
prvResetNextTaskUnblockTime();
}
}
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
kernelEXIT_CRITICAL();
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredDeletion == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
/* If the task is not deleting itself, call prvDeleteTCB from outside of
* critical section. If a task deletes itself, prvDeleteTCB is called
* from prvCheckTasksWaitingTermination which is called from Idle task. */
if( xDeleteTCBInIdleTask != pdTRUE )
{
prvDeleteTCB( pxTCB );
}
/* Force a reschedule if it is the currently running task that has just
* been deleted. */
#if ( configNUMBER_OF_CORES == 1 )
{
if( xSchedulerRunning != pdFALSE )
{
if( pxTCB == pxCurrentTCB )
{
configASSERT( uxSchedulerSuspended == 0 );
taskYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
}
traceRETURN_vTaskDelete();
}
@@ -2971,7 +2983,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
* there may now be another task of higher priority that
* is ready to execute. */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxTCB->xPreemptionDisable == 0U )
if( pxTCB->uxPreemptionDisable == 0U )
#endif
{
xYieldRequired = pdTRUE;
@@ -3194,7 +3206,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
pxTCB = prvGetTCBFromHandle( xTask );
configASSERT( pxTCB != NULL );
pxTCB->xPreemptionDisable++;
pxTCB->uxPreemptionDisable++;
}
else
{
@@ -3223,11 +3235,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
pxTCB = prvGetTCBFromHandle( xTask );
configASSERT( pxTCB != NULL );
configASSERT( pxTCB->xPreemptionDisable > 0U );
configASSERT( pxTCB->uxPreemptionDisable > 0U );
pxTCB->xPreemptionDisable--;
pxTCB->uxPreemptionDisable--;
if( pxTCB->xPreemptionDisable == 0U )
if( pxTCB->uxPreemptionDisable == 0U )
{
/* Process deferred state changes which were inflicted while
* preemption was disabled. */
@@ -3247,9 +3259,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
pxTCB->xDeferredStateChange = 0U;
kernelEXIT_CRITICAL();
traceRETURN_vTaskPreemptionEnable();
return;
}
else
{
@@ -3287,6 +3296,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
TCB_t * pxTCB;
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDeferredSuspension = pdFALSE;
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
traceENTER_vTaskSuspend( xTaskToSuspend );
kernelENTER_CRITICAL();
@@ -3300,12 +3313,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
/* If the task has disabled preemption, we need to defer the suspension until the
* task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */
if( pxTCB->xPreemptionDisable > 0U )
if( pxTCB->uxPreemptionDisable > 0U )
{
pxTCB->xDeferredStateChange |= tskDEFERRED_SUSPENSION;
kernelEXIT_CRITICAL();
traceRETURN_vTaskSuspend();
return;
xDeferredSuspension = pdTRUE;
}
else
{
@@ -3313,73 +3324,82 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
#endif /* configUSE_TASK_PREEMPTION_DISABLE */
traceTASK_SUSPEND( pxTCB );
/* Remove task from the ready/delayed list and place in the
* suspended list. */
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredSuspension == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceTASK_SUSPEND( pxTCB );
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
{
BaseType_t x;
for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
/* Remove task from the ready/delayed list and place in the
* suspended list. */
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
{
if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
{
BaseType_t x;
for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
{
/* The task was blocked to wait for a notification, but is
* now suspended, so no notification was received. */
pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
{
/* The task was blocked to wait for a notification, but is
* now suspended, so no notification was received. */
pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
}
}
}
}
#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
/* In the case of SMP, it is possible that the task being suspended
* is running on another core. We must evict the task before
* exiting the critical section to ensure that the task cannot
* take an action which puts it back on ready/state/event list,
* thereby nullifying the suspend operation. Once evicted, the
* task won't be scheduled before it is resumed as it will no longer
* be on the ready list. */
#if ( configNUMBER_OF_CORES > 1 )
{
if( xSchedulerRunning != pdFALSE )
/* In the case of SMP, it is possible that the task being suspended
* is running on another core. We must evict the task before
* exiting the critical section to ensure that the task cannot
* take an action which puts it back on ready/state/event list,
* thereby nullifying the suspend operation. Once evicted, the
* task won't be scheduled before it is resumed as it will no longer
* be on the ready list. */
#if ( configNUMBER_OF_CORES > 1 )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
prvResetNextTaskUnblockTime();
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
if( xSchedulerRunning != pdFALSE )
{
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
prvResetNextTaskUnblockTime();
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
{
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended == 0 );
vTaskYieldWithinAPI();
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
{
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended == 0 );
vTaskYieldWithinAPI();
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
}
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
mtCOVERAGE_TEST_MARKER();
}
}
else
@@ -3387,73 +3407,74 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
}
}
kernelEXIT_CRITICAL();
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredSuspension == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
#if ( configNUMBER_OF_CORES == 1 )
{
UBaseType_t uxCurrentListLength;
if( xSchedulerRunning != pdFALSE )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
kernelENTER_CRITICAL();
{
prvResetNextTaskUnblockTime();
}
kernelEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( pxTCB == pxCurrentTCB )
{
if( xSchedulerRunning != pdFALSE )
{
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended == 0 );
portYIELD_WITHIN_API();
}
else
{
/* The scheduler is not running, but the task that was pointed
* to by pxCurrentTCB has just been suspended and pxCurrentTCB
* must be adjusted to point to a different task. */
/* Use a temp variable as a distinct sequence point for reading
* volatile variables prior to a comparison to ensure compliance
* with MISRA C 2012 Rule 13.2. */
uxCurrentListLength = listCURRENT_LIST_LENGTH( &xSuspendedTaskList );
if( uxCurrentListLength == uxCurrentNumberOfTasks )
{
/* No other tasks are ready, so set pxCurrentTCB back to
* NULL so when the next task is created pxCurrentTCB will
* be set to point to it no matter what its relative priority
* is. */
pxCurrentTCB = NULL;
}
else
{
vTaskSwitchContext();
}
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
}
kernelEXIT_CRITICAL();
#if ( configNUMBER_OF_CORES == 1 )
{
UBaseType_t uxCurrentListLength;
if( xSchedulerRunning != pdFALSE )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
kernelENTER_CRITICAL();
{
prvResetNextTaskUnblockTime();
}
kernelEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( pxTCB == pxCurrentTCB )
{
if( xSchedulerRunning != pdFALSE )
{
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended == 0 );
portYIELD_WITHIN_API();
}
else
{
/* The scheduler is not running, but the task that was pointed
* to by pxCurrentTCB has just been suspended and pxCurrentTCB
* must be adjusted to point to a different task. */
/* Use a temp variable as a distinct sequence point for reading
* volatile variables prior to a comparison to ensure compliance
* with MISRA C 2012 Rule 13.2. */
uxCurrentListLength = listCURRENT_LIST_LENGTH( &xSuspendedTaskList );
if( uxCurrentListLength == uxCurrentNumberOfTasks )
{
/* No other tasks are ready, so set pxCurrentTCB back to
* NULL so when the next task is created pxCurrentTCB will
* be set to point to it no matter what its relative priority
* is. */
pxCurrentTCB = NULL;
}
else
{
vTaskSwitchContext();
}
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
traceRETURN_vTaskSuspend();
}
@@ -4289,7 +4310,7 @@ BaseType_t xTaskResumeAll( void )
if( xYieldPendings[ xCoreID ] != pdFALSE
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
&& ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif
)
{
@@ -5036,7 +5057,7 @@ BaseType_t xTaskIncrementTick( void )
#if ( configNUMBER_OF_CORES == 1 )
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCB->xPreemptionDisable != 0U )
if( pxCurrentTCB->uxPreemptionDisable != 0U )
{
mtCOVERAGE_TEST_MARKER();
}
@@ -5119,7 +5140,7 @@ BaseType_t xTaskIncrementTick( void )
for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif
{
if( xYieldPendings[ xCoreID ] != pdFALSE )
@@ -5409,7 +5430,7 @@ BaseType_t xTaskIncrementTick( void )
/* vTaskSwitchContext() must not be called with a task that has
* preemption disabled. */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
configASSERT( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U );
configASSERT( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U );
#endif
if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
@@ -7343,7 +7364,7 @@ static void prvResetNextTaskUnblockTime( void )
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
void vTaskEnterCritical( void )
{
@@ -7392,7 +7413,56 @@ static void prvResetNextTaskUnblockTime( void )
traceRETURN_vTaskEnterCritical();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
void vTaskEnterCritical( void )
{
traceENTER_vTaskEnterCritical();
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
if( xSchedulerRunning != pdFALSE )
{
kernelGET_TASK_LOCK( xCoreID );
kernelGET_ISR_LOCK( xCoreID );
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* This is not the interrupt safe version of the enter critical
* function so assert() if it is being called from an interrupt
* context. Only API functions that end in "FromISR" can be used in an
* interrupt. Only assert if the critical nesting count is 1 to
* protect against recursive calls if the assert function also uses a
* critical section. */
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
{
portASSERT_IF_IN_ISR();
if( uxSchedulerSuspended == 0U )
{
/* The only time there would be a problem is if this is called
* before a context switch and vTaskExitCritical() is called
* after pxCurrentTCB changes. Therefore this should not be
* used within vTaskSwitchContext(). */
prvCheckForRunStateChange();
}
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
traceRETURN_vTaskEnterCritical();
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
@@ -7473,7 +7543,7 @@ static void prvResetNextTaskUnblockTime( void )
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
void vTaskExitCritical( void )
{
@@ -7500,11 +7570,7 @@ static void prvResetNextTaskUnblockTime( void )
BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */
#if ( portUSING_GRANULAR_LOCKS == 1 )
xYieldCurrentTask = xTaskUnlockCanYield();
#else
xYieldCurrentTask = xYieldPendings[ xCoreID ];
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
xYieldCurrentTask = xYieldPendings[ xCoreID ];
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
@@ -7537,7 +7603,72 @@ static void prvResetNextTaskUnblockTime( void )
traceRETURN_vTaskExitCritical();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
void vTaskExitCritical( void )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
traceENTER_vTaskExitCritical();
if( xSchedulerRunning != pdFALSE )
{
/* If critical nesting count is zero then this function
* does not match a previous call to vTaskEnterCritical(). */
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* This function should not be called in ISR. Use vTaskExitCriticalFromISR
* to exit critical section from ISR. */
portASSERT_IF_IN_ISR();
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
{
/* Release the ISR and task locks */
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* If the critical nesting count is 0, enable interrupts */
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{
BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */
xYieldCurrentTask = xTaskUnlockCanYield();
portENABLE_INTERRUPTS();
/* When a task yields in a critical section it just sets
* xYieldPending to true. So now that we have exited the
* critical section check if xYieldPending is true, and
* if so yield. */
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceRETURN_vTaskExitCritical();
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
@@ -7595,7 +7726,7 @@ static void prvResetNextTaskUnblockTime( void )
if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
&& ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
)
{