Compare commits


7 commits

Author SHA1 Message Date
Sudeep Mohanty 7e95253277
Merge 26655c7e54 into 62bd622ffc 2025-06-12 08:11:12 +00:00
Darian Leung 26655c7e54 feat(freertos/smp): Add Granular Locking V4 proposal documents
Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung 95fa53c1ee change(freertos/smp): Update timers.c locking
Updated timers.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros such as taskENTER/EXIT_CRITICAL() with
  data group critical section macros such as tmrENTER/EXIT_CRITICAL().
- Added vTimerEnterCritical() and vTimerExitCritical() to map to the
  data group critical section macros (see the sketch below).
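
A minimal sketch of that mapping. The portGET_SPINLOCK()/portRELEASE_SPINLOCK()
macros, their signatures, and portSPINLOCK_TYPE are assumed from the granular
locking proposal, not quoted from the commit:

    /* Spinlocks added to the timer data group by this commit
     * (type and initialisation elided; names from the bullet above). */
    static portSPINLOCK_TYPE xTaskSpinlock;
    static portSPINLOCK_TYPE xISRSpinlock;

    #define tmrENTER_CRITICAL()    vTimerEnterCritical()
    #define tmrEXIT_CRITICAL()     vTimerExitCritical()

    static void vTimerEnterCritical( void )
    {
        portDISABLE_INTERRUPTS();

        /* Take both of the timer data group's spinlocks: xTaskSpinlock keeps
         * tasks on other cores out, xISRSpinlock additionally keeps ISRs out. */
        portGET_SPINLOCK( portGET_CORE_ID(), &xTaskSpinlock );
        portGET_SPINLOCK( portGET_CORE_ID(), &xISRSpinlock );
    }

    static void vTimerExitCritical( void )
    {
        /* Release in the reverse order of acquisition, then re-enable interrupts. */
        portRELEASE_SPINLOCK( portGET_CORE_ID(), &xISRSpinlock );
        portRELEASE_SPINLOCK( portGET_CORE_ID(), &xTaskSpinlock );
        portENABLE_INTERRUPTS();
    }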

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung e1f69a6763 change(freertos/smp): Update stream_buffer.c locking
Updated stream_buffer.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros such as taskENTER/EXIT_CRITICAL/_FROM_ISR()
  with data group critical section macros such as
  sbENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vStreambuffersEnterCritical/FromISR() and
  vStreambuffersExitCritical/FromISR() to map to the data group critical
  section macros.
- Added prvLockStreamBufferForTasks() and prvUnlockStreamBufferForTasks() to
  suspend the stream buffer when executing non-deterministic code (see the
  sketch below).
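
A hedged sketch of that lock/unlock pair: holding only the task spinlock keeps
other tasks out of the buffer while ISRs can still access it, which is what
"suspending" the buffer around non-deterministic code means here. The
portGET_SPINLOCK()/portRELEASE_SPINLOCK() signatures are assumed:

    /* Sketch: xTaskSpinlock is the stream buffer's task-level spinlock
     * added by this commit; the port macro signatures are assumptions. */
    static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
    {
        /* Block task-level access from other cores, but leave the buffer
         * accessible to ISRs. */
        portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
    }

    static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
    {
        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
    }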

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung cba76ba77d change(freertos/smp): Update event_groups.c locking
Updated event_groups.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros such as taskENTER/EXIT_CRITICAL/_FROM_ISR()
  with data group critical section macros such as
  event_groupsENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vEventGroupsEnterCritical/FromISR() and
  vEventGroupsExitCritical/FromISR() functions that map to the data group
  critical section macros.
- Added prvLockEventGroupForTasks() and prvUnlockEventGroupForTasks() to
  suspend the event group when executing non-deterministic code.
- xEventGroupSetBits() and vEventGroupDelete() access the kernel data group
  directly, so vTaskSuspendAll()/xTaskResumeAll() were added to these
  functions (see the sketch below).
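
A simplified sketch of that last pattern; the real xEventGroupSetBits() also
walks the list of waiting tasks and clears bits on exit, which is elided here:

    /* Sketch: scheduler suspension replaces a kernel critical section because
     * unblocking waiting tasks touches the kernel data group directly. */
    EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
                                    const EventBits_t uxBitsToSet )
    {
        EventGroup_t * pxEventBits = xEventGroup;
        EventBits_t uxReturnBits;

        vTaskSuspendAll();
        {
            pxEventBits->uxEventBits |= uxBitsToSet;

            /* ... unblock any tasks whose wait condition is now satisfied ... */

            uxReturnBits = pxEventBits->uxEventBits;
        }
        ( void ) xTaskResumeAll();

        return uxReturnBits;
    }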

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung b256ca88b1 change(freertos/smp): Update queue.c locking
Updated queue.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros such as taskENTER/EXIT_CRITICAL/_FROM_ISR()
  with data group critical section macros such as
  queueENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vQueueEnterCritical/FromISR() and vQueueExitCritical/FromISR(),
  which map to the data group critical section macros.
- Added prvLockQueueForTasks() and prvUnlockQueueForTasks() as the granular
  locking equivalents of prvLockQueue() and prvUnlockQueue() respectively
  (see the sketch below).
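
A sketch of how the granular equivalents can differ from the classic pair:
prvLockQueue() marks the queue locked (cRxLock/cTxLock) inside a kernel
critical section so that ISRs defer their event-list processing, whereas the
granular version can simply hold the queue's task spinlock. The port macro
signatures and the xTaskSpinlock field access are assumptions:

    /* Sketch: block task-level access from other cores; ISRs may still use
     * the queue's FromISR paths while this lock is held. */
    static void prvLockQueueForTasks( Queue_t * const pxQueue )
    {
        portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) );
    }

    static void prvUnlockQueueForTasks( Queue_t * const pxQueue )
    {
        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) );
    }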

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:11:04 +02:00
Darian Leung 25c74e3d4b change(freertos/smp): Update tasks.c locking
Updated critical section macros to use granular locks.

Some tasks.c APIs relied on their callers to enter critical sections. That
assumption no longer holds under granular locking, so critical sections were
added to the following functions:

- `vTaskInternalSetTimeOutState()`
- `xTaskIncrementTick()`
- `vTaskSwitchContext()`
- `xTaskRemoveFromEventList()`
- `eTaskConfirmSleepModeStatus()`
- `xTaskPriorityDisinherit()`
- `pvTaskIncrementMutexHeldCount()`

Added missing suspensions to the following functions:

- `vTaskPlaceOnEventList()`
- `vTaskPlaceOnUnorderedEventList()`
- `vTaskPlaceOnEventListRestricted()`

Fixed the locking in vTaskSwitchContext()

vTaskSwitchContext() must acquire both kernel locks, viz., the task lock and
the ISR lock, because it can be called from either task context or ISR
context. It also must not alter the interrupt state prematurely (see the
sketch below).
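
A minimal sketch of that acquisition order; the kernelGET/RELEASE_*_LOCK()
macros appear in the diff below, while the body of the function is elided:

    /* Sketch: both kernel locks are taken, and the interrupt state is left
     * alone because the caller (task-level yield or ISR) already set it. */
    void vTaskSwitchContext( BaseType_t xCoreID )
    {
        kernelGET_TASK_LOCK( xCoreID );
        kernelGET_ISR_LOCK( xCoreID );
        {
            /* ... select the highest priority ready task for xCoreID ... */
        }
        kernelRELEASE_ISR_LOCK( xCoreID );
        kernelRELEASE_TASK_LOCK( xCoreID );
    }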

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-12 10:10:55 +02:00

tasks.c

@@ -360,7 +360,7 @@
} \
else \
{ \
if( pxCurrentTCBs[ ( xCoreID ) ]->xPreemptionDisable == 0U ) \
if( pxCurrentTCBs[ ( xCoreID ) ]->uxPreemptionDisable == 0U ) \
{ \
/* Request other core to yield if it is not requested before. */ \
if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
@@ -457,7 +457,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
UBaseType_t uxPreemptionDisable; /**< Used to prevent the task from being preempted. */
#endif
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
@@ -1015,7 +1015,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
#endif
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif
{
xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
@@ -1321,7 +1321,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
( xYieldPendings[ uxCore ] == pdFALSE ) )
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == 0U )
if( pxCurrentTCBs[ uxCore ]->uxPreemptionDisable == 0U )
#endif
{
xLowestPriority = xTaskPriority;
@@ -2290,6 +2290,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
BaseType_t xDeleteTCBInIdleTask = pdFALSE;
BaseType_t xTaskIsRunningOrYielding;
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDeferredDeletion = pdFALSE;
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
traceENTER_vTaskDelete( xTaskToDelete );
kernelENTER_CRITICAL();
@@ -2303,12 +2307,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
/* If the task has disabled preemption, we need to defer the deletion until the
* task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */
if( pxTCB->xPreemptionDisable > 0U )
if( pxTCB->uxPreemptionDisable > 0U )
{
pxTCB->xDeferredStateChange |= tskDEFERRED_DELETION;
kernelEXIT_CRITICAL();
traceRETURN_vTaskDelete();
return;
xDeferredDeletion = pdTRUE;
}
else
{
@@ -2316,6 +2318,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
#endif /* configUSE_TASK_PREEMPTION_DISABLE */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredDeletion == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
/* Remove task from the ready/delayed list. */
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
{
@@ -2417,8 +2423,13 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
prvResetNextTaskUnblockTime();
}
}
}
kernelEXIT_CRITICAL();
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredDeletion == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
/* If the task is not deleting itself, call prvDeleteTCB from outside of
* critical section. If a task deletes itself, prvDeleteTCB is called
* from prvCheckTasksWaitingTermination which is called from Idle task. */
@@ -2445,6 +2456,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
}
traceRETURN_vTaskDelete();
}
@@ -2971,7 +2983,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
* there may now be another task of higher priority that
* is ready to execute. */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxTCB->xPreemptionDisable == 0U )
if( pxTCB->uxPreemptionDisable == 0U )
#endif
{
xYieldRequired = pdTRUE;
@@ -3194,7 +3206,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
pxTCB = prvGetTCBFromHandle( xTask );
configASSERT( pxTCB != NULL );
pxTCB->xPreemptionDisable++;
pxTCB->uxPreemptionDisable++;
}
else
{
@@ -3223,11 +3235,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
pxTCB = prvGetTCBFromHandle( xTask );
configASSERT( pxTCB != NULL );
configASSERT( pxTCB->xPreemptionDisable > 0U );
configASSERT( pxTCB->uxPreemptionDisable > 0U );
pxTCB->xPreemptionDisable--;
pxTCB->uxPreemptionDisable--;
if( pxTCB->xPreemptionDisable == 0U )
if( pxTCB->uxPreemptionDisable == 0U )
{
/* Process deferred state changes which were inflicted while
* preemption was disabled. */
@@ -3247,9 +3259,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
pxTCB->xDeferredStateChange = 0U;
kernelEXIT_CRITICAL();
traceRETURN_vTaskPreemptionEnable();
return;
}
else
{
@@ -3287,6 +3296,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
TCB_t * pxTCB;
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDeferredSuspension = pdFALSE;
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
traceENTER_vTaskSuspend( xTaskToSuspend );
kernelENTER_CRITICAL();
@@ -3300,12 +3313,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
/* If the task has disabled preemption, we need to defer the suspension until the
* task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */
if( pxTCB->xPreemptionDisable > 0U )
if( pxTCB->uxPreemptionDisable > 0U )
{
pxTCB->xDeferredStateChange |= tskDEFERRED_SUSPENSION;
kernelEXIT_CRITICAL();
traceRETURN_vTaskSuspend();
return;
xDeferredSuspension = pdTRUE;
}
else
{
@@ -3313,6 +3324,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
#endif /* configUSE_TASK_PREEMPTION_DISABLE */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredSuspension == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
traceTASK_SUSPEND( pxTCB );
/* Remove task from the ready/delayed list and place in the
@@ -3394,8 +3409,13 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
}
}
kernelEXIT_CRITICAL();
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( xDeferredSuspension == pdFALSE )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{
#if ( configNUMBER_OF_CORES == 1 )
{
UBaseType_t uxCurrentListLength;
@@ -3454,6 +3474,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
}
traceRETURN_vTaskSuspend();
}
@@ -4289,7 +4310,7 @@ BaseType_t xTaskResumeAll( void )
if( xYieldPendings[ xCoreID ] != pdFALSE
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
&& ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif
)
{
@@ -5036,7 +5057,7 @@ BaseType_t xTaskIncrementTick( void )
#if ( configNUMBER_OF_CORES == 1 )
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCB->xPreemptionDisable != 0U )
if( pxCurrentTCB->uxPreemptionDisable != 0U )
{
mtCOVERAGE_TEST_MARKER();
}
@@ -5119,7 +5140,7 @@ BaseType_t xTaskIncrementTick( void )
for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif
{
if( xYieldPendings[ xCoreID ] != pdFALSE )
@@ -5409,7 +5430,7 @@ BaseType_t xTaskIncrementTick( void )
/* vTaskSwitchContext() must not be called with a task that has
* preemption disabled. */
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
configASSERT( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U );
configASSERT( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U );
#endif
if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
@@ -7343,7 +7364,7 @@ static void prvResetNextTaskUnblockTime( void )
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
void vTaskEnterCritical( void )
{
@@ -7392,7 +7413,56 @@ static void prvResetNextTaskUnblockTime( void )
traceRETURN_vTaskEnterCritical();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
void vTaskEnterCritical( void )
{
traceENTER_vTaskEnterCritical();
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
if( xSchedulerRunning != pdFALSE )
{
kernelGET_TASK_LOCK( xCoreID );
kernelGET_ISR_LOCK( xCoreID );
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* This is not the interrupt safe version of the enter critical
* function so assert() if it is being called from an interrupt
* context. Only API functions that end in "FromISR" can be used in an
* interrupt. Only assert if the critical nesting count is 1 to
* protect against recursive calls if the assert function also uses a
* critical section. */
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
{
portASSERT_IF_IN_ISR();
if( uxSchedulerSuspended == 0U )
{
/* The only time there would be a problem is if this is called
* before a context switch and vTaskExitCritical() is called
* after pxCurrentTCB changes. Therefore this should not be
* used within vTaskSwitchContext(). */
prvCheckForRunStateChange();
}
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
traceRETURN_vTaskEnterCritical();
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
@@ -7473,7 +7543,7 @@ static void prvResetNextTaskUnblockTime( void )
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
void vTaskExitCritical( void )
{
@@ -7500,11 +7570,7 @@ static void prvResetNextTaskUnblockTime( void )
BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */
#if ( portUSING_GRANULAR_LOCKS == 1 )
xYieldCurrentTask = xTaskUnlockCanYield();
#else
xYieldCurrentTask = xYieldPendings[ xCoreID ];
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
@@ -7537,7 +7603,72 @@ static void prvResetNextTaskUnblockTime( void )
traceRETURN_vTaskExitCritical();
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
void vTaskExitCritical( void )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
traceENTER_vTaskExitCritical();
if( xSchedulerRunning != pdFALSE )
{
/* If critical nesting count is zero then this function
* does not match a previous call to vTaskEnterCritical(). */
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* This function should not be called in ISR. Use vTaskExitCriticalFromISR
* to exit critical section from ISR. */
portASSERT_IF_IN_ISR();
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
{
/* Release the ISR and task locks */
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* If the critical nesting count is 0, enable interrupts */
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{
BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */
xYieldCurrentTask = xTaskUnlockCanYield();
portENABLE_INTERRUPTS();
/* When a task yields in a critical section it just sets
* xYieldPending to true. So now that we have exited the
* critical section check if xYieldPending is true, and
* if so yield. */
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceRETURN_vTaskExitCritical();
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
@@ -7595,7 +7726,7 @@ static void prvResetNextTaskUnblockTime( void )
if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
&& ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
)
{