Compare commits


6 commits

Author SHA1 Message Date
Darian Leung 8c2dec097b feat(freertos/smp): Add Granular Locking V4 proposal documents
Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-11 17:38:36 +02:00
Darian Leung 42fce69365 change(freertos/smp): Update timers.c locking
Updated timers.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock.
- Replaced critical section macros with data group critical section macros,
  e.g. taskENTER/EXIT_CRITICAL() with tmrENTER/EXIT_CRITICAL().
- Added vTimerEnterCritical() and vTimerExitCritical(), which map to the
  data group critical section macros.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-11 17:38:36 +02:00
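
A minimal sketch of the macro mapping this commit describes. The spinlock type and the take/give helpers below are hypothetical stand-ins built on C11 atomics, not the FreeRTOS port layer; only the tmrENTER/EXIT_CRITICAL() names and the xTaskSpinlock/xISRSpinlock members come from the commit message.

```c
#include <stdatomic.h>

/* Hypothetical per-data-group lock pair; a real port supplies its own
 * spinlock type and take/give routines. */
typedef struct TimerDataGroupLocks
{
    atomic_flag xTaskSpinlock; /* Serialises tasks accessing the data group. */
    atomic_flag xISRSpinlock;  /* Additionally serialises ISR accessors.     */
} TimerDataGroupLocks_t;

static TimerDataGroupLocks_t xTimerLocks = { ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT };

static void vSpinlockTake( atomic_flag * pxLock )
{
    /* Busy-wait; a real port would also disable interrupts and track
     * nesting before spinning. */
    while( atomic_flag_test_and_set_explicit( pxLock, memory_order_acquire ) )
    {
    }
}

static void vSpinlockGive( atomic_flag * pxLock )
{
    atomic_flag_clear_explicit( pxLock, memory_order_release );
}

/* The data group critical section pair: where the old code said
 * taskENTER_CRITICAL() (one global kernel lock), the timer module now says
 * tmrENTER_CRITICAL(), which takes only the timer data group's locks. */
#define tmrENTER_CRITICAL()                          \
    do {                                             \
        vSpinlockTake( &xTimerLocks.xTaskSpinlock ); \
        vSpinlockTake( &xTimerLocks.xISRSpinlock );  \
    } while( 0 )

#define tmrEXIT_CRITICAL()                           \
    do {                                             \
        vSpinlockGive( &xTimerLocks.xISRSpinlock );  \
        vSpinlockGive( &xTimerLocks.xTaskSpinlock ); \
    } while( 0 )
```

The point of the mapping is that a timer-module critical section no longer contends with, say, a queue operation on another core; only accesses to the same data group serialise.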
Darian Leung 7f2c159dee change(freertos/smp): Update stream_buffer.c locking
Updated stream_buffer.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock.
- Replaced critical section macros with data group critical section macros,
  e.g. taskENTER/EXIT_CRITICAL/_FROM_ISR() with sbENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vStreambuffersEnterCritical/FromISR() and
  vStreambuffersExitCritical/FromISR(), which map to the data group critical
  section macros.
- Added prvLockStreamBufferForTasks() and prvUnlockStreamBufferForTasks() to
  suspend the stream buffer when executing non-deterministic code.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-11 17:38:36 +02:00
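
The task-only lock pair is the interesting part: it fences off other tasks for the whole unbounded copy while leaving interrupts unmasked. A minimal self-contained sketch of that idea; apart from the prvLock/UnlockStreamBufferForTasks() names, everything here is invented for illustration.

```c
#include <stdatomic.h>
#include <stddef.h>
#include <string.h>

typedef struct StreamBufferSketch
{
    atomic_flag xTaskSpinlock; /* Excludes other tasks for long operations. */
    atomic_flag xISRSpinlock;  /* Unused here; short deterministic paths would take it too. */
    unsigned char ucStorage[ 128 ];
    size_t uxBytesUsed;
} StreamBufferSketch_t;

static void prvLockStreamBufferForTasks( StreamBufferSketch_t * pxBuf )
{
    /* Take only the task-level lock: other tasks are excluded for the whole
     * (non-deterministic) copy, but ISRs stay unblocked because interrupts
     * are never masked and the ISR spinlock is never held this long. */
    while( atomic_flag_test_and_set( &pxBuf->xTaskSpinlock ) )
    {
    }
}

static void prvUnlockStreamBufferForTasks( StreamBufferSketch_t * pxBuf )
{
    atomic_flag_clear( &pxBuf->xTaskSpinlock );
}

size_t xStreamBufferSendSketch( StreamBufferSketch_t * pxBuf,
                                const void * pvData,
                                size_t xLength )
{
    size_t xSpace;

    prvLockStreamBufferForTasks( pxBuf );
    {
        /* Non-deterministic part: duration depends on caller input. */
        xSpace = sizeof( pxBuf->ucStorage ) - pxBuf->uxBytesUsed;

        if( xLength > xSpace )
        {
            xLength = xSpace;
        }

        memcpy( &pxBuf->ucStorage[ pxBuf->uxBytesUsed ], pvData, xLength );
        pxBuf->uxBytesUsed += xLength;
    }
    prvUnlockStreamBufferForTasks( pxBuf );

    return xLength;
}
```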
Darian Leung 1b6b659c68 change(freertos/smp): Update event_groups.c locking
Updated event_groups.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock.
- Replaced critical section macros with data group critical section macros,
  e.g. taskENTER/EXIT_CRITICAL/_FROM_ISR() with event_groupsENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vEventGroupsEnterCritical/FromISR() and
  vEventGroupsExitCritical/FromISR() functions, which map to the data group
  critical section macros.
- Added prvLockEventGroupForTasks() and prvUnlockEventGroupForTasks() to
  suspend the event group when executing non-deterministic code.
- xEventGroupSetBits() and vEventGroupDelete() access the kernel data group
  directly, so vTaskSuspendAll()/xTaskResumeAll() calls were added to these
  functions.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-11 17:38:36 +02:00
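
The last bullet is the notable layering rule: an object's own spinlocks protect only its own data group, so the moment an API touches kernel-owned structures (the task lists), it must use the kernel's mechanism instead. A sketch of that shape, with no-op stubs standing in for the real primitives so it compiles on its own:

```c
typedef unsigned int EventBits_t;

typedef struct EventGroupSketch
{
    EventBits_t uxEventBits;
} EventGroupSketch_t;

/* Stand-ins for the real kernel and data-group primitives. */
static void vTaskSuspendAllStub( void ) { }
static void xTaskResumeAllStub( void ) { }
static void vEventGroupsEnterCriticalStub( EventGroupSketch_t * pxGroup ) { ( void ) pxGroup; }
static void vEventGroupsExitCriticalStub( EventGroupSketch_t * pxGroup )  { ( void ) pxGroup; }
static void prvWakeWaitersStub( EventGroupSketch_t * pxGroup )            { ( void ) pxGroup; }

EventBits_t xEventGroupSetBitsSketch( EventGroupSketch_t * pxGroup,
                                      EventBits_t uxBitsToSet )
{
    EventBits_t uxBits;

    /* Waking waiters walks kernel-owned task lists, which live in the
     * kernel data group, not the event group's, so the scheduler is
     * suspended for that part. */
    vTaskSuspendAllStub();
    {
        vEventGroupsEnterCriticalStub( pxGroup ); /* Group-local protection. */
        {
            pxGroup->uxEventBits |= uxBitsToSet;
            uxBits = pxGroup->uxEventBits;
        }
        vEventGroupsExitCriticalStub( pxGroup );

        prvWakeWaitersStub( pxGroup ); /* Kernel data group access. */
    }
    xTaskResumeAllStub();

    return uxBits;
}
```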
Darian Leung 9ad6c81b31 change(freertos/smp): Update queue.c locking
Updated queue.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock.
- Replaced critical section macros with data group critical section macros,
  e.g. taskENTER/EXIT_CRITICAL/_FROM_ISR() with queueENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vQueueEnterCritical/FromISR() and vQueueExitCritical/FromISR(),
  which map to the data group critical section macros.
- Added prvLockQueueForTasks() and prvUnlockQueueForTasks() as the granular
  locking equivalents of prvLockQueue() and prvUnlockQueue() respectively.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-11 17:38:36 +02:00
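
For context, the classic prvLockQueue()/prvUnlockQueue() pair marks the queue "locked" so that ISRs record wakeups in lock counters instead of touching the event lists, and the unlock replays them. A sketch of that counter pattern under the assumption that the granular variants keep the same idea but guard it with the queue's own critical section; only the prvLock/UnlockQueueForTasks() names come from the commit, the rest is illustrative:

```c
#include <stdint.h>

#define queueUNLOCKED    ( ( int8_t ) -1 )

typedef struct QueueSketch
{
    int8_t cRxLock; /* Counts ISR receives while the queue is locked. */
    int8_t cTxLock; /* Counts ISR sends while the queue is locked.    */
} QueueSketch_t;

/* Hypothetical queue-local critical section (would map to the queue's
 * xTaskSpinlock/xISRSpinlock pair under granular locking). */
static void vQueueEnterCriticalStub( QueueSketch_t * pxQueue ) { ( void ) pxQueue; }
static void vQueueExitCriticalStub( QueueSketch_t * pxQueue )  { ( void ) pxQueue; }

static void prvLockQueueForTasksSketch( QueueSketch_t * pxQueue )
{
    vQueueEnterCriticalStub( pxQueue );
    {
        /* Switch the counters from "unlocked" to "counting". */
        if( pxQueue->cRxLock == queueUNLOCKED ) { pxQueue->cRxLock = 0; }
        if( pxQueue->cTxLock == queueUNLOCKED ) { pxQueue->cTxLock = 0; }
    }
    vQueueExitCriticalStub( pxQueue );
}

static void prvUnlockQueueForTasksSketch( QueueSketch_t * pxQueue )
{
    vQueueEnterCriticalStub( pxQueue );
    {
        /* Replay sends/receives an ISR performed while the queue was
         * locked, then mark the queue unlocked again. */
        while( pxQueue->cTxLock > 0 )
        {
            /* ...move one task waiting to receive onto the ready list... */
            pxQueue->cTxLock--;
        }
        pxQueue->cTxLock = queueUNLOCKED;

        while( pxQueue->cRxLock > 0 )
        {
            /* ...move one task waiting to send onto the ready list... */
            pxQueue->cRxLock--;
        }
        pxQueue->cRxLock = queueUNLOCKED;
    }
    vQueueExitCriticalStub( pxQueue );
}
```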
Darian Leung fd47eba3c7 change(freertos/smp): Update tasks.c locking
Updated critical section macros with granular locks.

Some tasks.c APIs relied on their callers to enter critical sections. This
assumption no longer holds under granular locking. Critical sections were
added to the following functions:

- `vTaskInternalSetTimeOutState()`
- `xTaskIncrementTick()`
- `vTaskSwitchContext()`
- `xTaskRemoveFromEventList()`
- `eTaskConfirmSleepModeStatus()`
- `xTaskPriorityDisinherit()`
- `pvTaskIncrementMutexHeldCount()`

Added missing suspensions to the following functions:

- `vTaskPlaceOnEventList()`
- `vTaskPlaceOnUnorderedEventList()`
- `vTaskPlaceOnEventListRestricted()`

Fixed the locking in vTaskSwitchContext():

vTaskSwitchContext() must acquire both kernel locks, viz. the task lock and
the ISR lock, because it can be called from either task context or ISR
context. It must also not alter the interrupt state prematurely.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-06-11 17:38:36 +02:00
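
The vTaskSwitchContext() rule is the core of this commit: the function can be reached from a task-level yield or from an ISR, so it must take both kernel locks itself and leave the interrupt mask exactly as the caller set it. A sketch of that shape, with stub functions standing in for the real kernelGET/RELEASE macros and scheduler internals:

```c
/* Hypothetical stand-ins for the kernel data group's lock macros. */
static void kernelGetTaskLockStub( int xCoreID )     { ( void ) xCoreID; }
static void kernelGetIsrLockStub( int xCoreID )      { ( void ) xCoreID; }
static void kernelReleaseIsrLockStub( int xCoreID )  { ( void ) xCoreID; }
static void kernelReleaseTaskLockStub( int xCoreID ) { ( void ) xCoreID; }
static void prvSelectHighestPriorityTaskStub( int xCoreID ) { ( void ) xCoreID; }

void vTaskSwitchContextSketch( int xCoreID )
{
    /* Take both kernel locks: the task lock excludes task-level callers,
     * the ISR lock excludes interrupt-level callers, so the switch is safe
     * regardless of which context invoked it. */
    kernelGetTaskLockStub( xCoreID );
    kernelGetIsrLockStub( xCoreID );
    {
        /* Note: no portDISABLE/ENABLE_INTERRUPTS() here. Interrupt state
         * belongs to the caller (yield trap or tick ISR) and must not be
         * altered prematurely. */
        prvSelectHighestPriorityTaskStub( xCoreID );
    }
    kernelReleaseIsrLockStub( xCoreID );
    kernelReleaseTaskLockStub( xCoreID );
}
```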

tasks.c (201 changed lines)

@@ -360,7 +360,7 @@
 } \
 else \
 { \
-if( pxCurrentTCBs[ ( xCoreID ) ]->uxPreemptionDisable == 0U ) \
+if( pxCurrentTCBs[ ( xCoreID ) ]->xPreemptionDisable == 0U ) \
 { \
 /* Request other core to yield if it is not requested before. */ \
 if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
@@ -457,7 +457,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
 char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-UBaseType_t uxPreemptionDisable; /**< Used to prevent the task from being preempted. */
+UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
 #endif
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
@@ -1015,7 +1015,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 #endif
 {
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
+if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
 #endif
 {
 xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
@@ -1321,7 +1321,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 ( xYieldPendings[ uxCore ] == pdFALSE ) )
 {
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( pxCurrentTCBs[ uxCore ]->uxPreemptionDisable == 0U )
+if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == 0U )
 #endif
 {
 xLowestPriority = xTaskPriority;
@@ -2290,10 +2290,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 BaseType_t xDeleteTCBInIdleTask = pdFALSE;
 BaseType_t xTaskIsRunningOrYielding;
-#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-BaseType_t xDeferredDeletion = pdFALSE;
-#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
 traceENTER_vTaskDelete( xTaskToDelete );
 kernelENTER_CRITICAL();
@@ -2307,10 +2303,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 /* If the task has disabled preemption, we need to defer the deletion until the
 * task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */
-if( pxTCB->uxPreemptionDisable > 0U )
+if( pxTCB->xPreemptionDisable > 0U )
 {
 pxTCB->xDeferredStateChange |= tskDEFERRED_DELETION;
-xDeferredDeletion = pdTRUE;
+kernelEXIT_CRITICAL();
+traceRETURN_vTaskDelete();
+return;
 }
 else
 {
@@ -2318,10 +2316,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 }
 #endif /* configUSE_TASK_PREEMPTION_DISABLE */
-#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( xDeferredDeletion == pdFALSE )
-#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
-{
 /* Remove task from the ready/delayed list. */
 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
 {
@@ -2423,13 +2417,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 prvResetNextTaskUnblockTime();
 }
 }
-}
 kernelEXIT_CRITICAL();
-#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( xDeferredDeletion == pdFALSE )
-#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
-{
 /* If the task is not deleting itself, call prvDeleteTCB from outside of
 * critical section. If a task deletes itself, prvDeleteTCB is called
 * from prvCheckTasksWaitingTermination which is called from Idle task. */
@@ -2456,7 +2445,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 }
 }
 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
-}
 traceRETURN_vTaskDelete();
 }
@@ -2983,7 +2971,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 * there may now be another task of higher priority that
 * is ready to execute. */
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( pxTCB->uxPreemptionDisable == 0U )
+if( pxTCB->xPreemptionDisable == 0U )
 #endif
 {
 xYieldRequired = pdTRUE;
@@ -3206,7 +3194,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 pxTCB = prvGetTCBFromHandle( xTask );
 configASSERT( pxTCB != NULL );
-pxTCB->uxPreemptionDisable++;
+pxTCB->xPreemptionDisable++;
 }
 else
 {
@@ -3235,11 +3223,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 {
 pxTCB = prvGetTCBFromHandle( xTask );
 configASSERT( pxTCB != NULL );
-configASSERT( pxTCB->uxPreemptionDisable > 0U );
-pxTCB->uxPreemptionDisable--;
-if( pxTCB->uxPreemptionDisable == 0U )
+configASSERT( pxTCB->xPreemptionDisable > 0U );
+pxTCB->xPreemptionDisable--;
+if( pxTCB->xPreemptionDisable == 0U )
 {
 /* Process deferred state changes which were inflicted while
 * preemption was disabled. */
@@ -3259,6 +3247,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 }
 pxTCB->xDeferredStateChange = 0U;
+kernelEXIT_CRITICAL();
+traceRETURN_vTaskPreemptionEnable();
+return;
 }
 else
 {
@@ -3296,10 +3287,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 {
 TCB_t * pxTCB;
-#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-BaseType_t xDeferredSuspension = pdFALSE;
-#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
 traceENTER_vTaskSuspend( xTaskToSuspend );
 kernelENTER_CRITICAL();
@@ -3313,10 +3300,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 /* If the task has disabled preemption, we need to defer the suspension until the
 * task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */
-if( pxTCB->uxPreemptionDisable > 0U )
+if( pxTCB->xPreemptionDisable > 0U )
 {
 pxTCB->xDeferredStateChange |= tskDEFERRED_SUSPENSION;
-xDeferredSuspension = pdTRUE;
+kernelEXIT_CRITICAL();
+traceRETURN_vTaskSuspend();
+return;
 }
 else
 {
@@ -3324,10 +3313,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 }
 #endif /* configUSE_TASK_PREEMPTION_DISABLE */
-#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( xDeferredSuspension == pdFALSE )
-#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
-{
 traceTASK_SUSPEND( pxTCB );
 /* Remove task from the ready/delayed list and place in the
@@ -3409,13 +3394,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 }
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 }
-}
 kernelEXIT_CRITICAL();
-#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( xDeferredSuspension == pdFALSE )
-#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
-{
 #if ( configNUMBER_OF_CORES == 1 )
 {
 UBaseType_t uxCurrentListLength;
@@ -3474,7 +3454,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 }
 }
 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
-}
 traceRETURN_vTaskSuspend();
 }
@@ -4310,7 +4289,7 @@ BaseType_t xTaskResumeAll( void )
 if( xYieldPendings[ xCoreID ] != pdFALSE
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
+&& ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
 #endif
 )
 {
@@ -5057,7 +5036,7 @@ BaseType_t xTaskIncrementTick( void )
 #if ( configNUMBER_OF_CORES == 1 )
 {
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( pxCurrentTCB->uxPreemptionDisable != 0U )
+if( pxCurrentTCB->xPreemptionDisable != 0U )
 {
 mtCOVERAGE_TEST_MARKER();
 }
@@ -5140,7 +5119,7 @@ BaseType_t xTaskIncrementTick( void )
 for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
 {
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
+if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
 #endif
 {
 if( xYieldPendings[ xCoreID ] != pdFALSE )
@@ -5430,7 +5409,7 @@ BaseType_t xTaskIncrementTick( void )
 /* vTaskSwitchContext() must not be called with a task that has
 * preemption disabled. */
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-configASSERT( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U );
+configASSERT( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U );
 #endif
 if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
@@ -7364,7 +7343,7 @@ static void prvResetNextTaskUnblockTime( void )
 #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
 /*-----------------------------------------------------------*/
-#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
+#if ( configNUMBER_OF_CORES > 1 )
 void vTaskEnterCritical( void )
 {
@@ -7413,56 +7392,7 @@ static void prvResetNextTaskUnblockTime( void )
 traceRETURN_vTaskEnterCritical();
 }
-#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
-/*-----------------------------------------------------------*/
-#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
-void vTaskEnterCritical( void )
-{
-traceENTER_vTaskEnterCritical();
-portDISABLE_INTERRUPTS();
-{
-const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-if( xSchedulerRunning != pdFALSE )
-{
-kernelGET_TASK_LOCK( xCoreID );
-kernelGET_ISR_LOCK( xCoreID );
-portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-/* This is not the interrupt safe version of the enter critical
-* function so assert() if it is being called from an interrupt
-* context. Only API functions that end in "FromISR" can be used in an
-* interrupt. Only assert if the critical nesting count is 1 to
-* protect against recursive calls if the assert function also uses a
-* critical section. */
-if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
-{
-portASSERT_IF_IN_ISR();
-if( uxSchedulerSuspended == 0U )
-{
-/* The only time there would be a problem is if this is called
-* before a context switch and vTaskExitCritical() is called
-* after pxCurrentTCB changes. Therefore this should not be
-* used within vTaskSwitchContext(). */
-prvCheckForRunStateChange();
-}
-}
-}
-else
-{
-mtCOVERAGE_TEST_MARKER();
-}
-}
-traceRETURN_vTaskEnterCritical();
-}
-#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
 #if ( configNUMBER_OF_CORES > 1 )
@@ -7543,7 +7473,7 @@ static void prvResetNextTaskUnblockTime( void )
 #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
 /*-----------------------------------------------------------*/
-#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
+#if ( configNUMBER_OF_CORES > 1 )
 void vTaskExitCritical( void )
 {
@@ -7570,75 +7500,14 @@ static void prvResetNextTaskUnblockTime( void )
 BaseType_t xYieldCurrentTask;
 /* Get the xYieldPending stats inside the critical section. */
-xYieldCurrentTask = xYieldPendings[ xCoreID ];
-kernelRELEASE_ISR_LOCK( xCoreID );
-kernelRELEASE_TASK_LOCK( xCoreID );
-portENABLE_INTERRUPTS();
-/* When a task yields in a critical section it just sets
-* xYieldPending to true. So now that we have exited the
-* critical section check if xYieldPending is true, and
-* if so yield. */
-if( xYieldCurrentTask != pdFALSE )
-{
-portYIELD();
-}
-}
-else
-{
-mtCOVERAGE_TEST_MARKER();
-}
-}
-else
-{
-mtCOVERAGE_TEST_MARKER();
-}
-}
-else
-{
-mtCOVERAGE_TEST_MARKER();
-}
-traceRETURN_vTaskExitCritical();
-}
-#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
-/*-----------------------------------------------------------*/
-#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
-void vTaskExitCritical( void )
-{
-const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-traceENTER_vTaskExitCritical();
-if( xSchedulerRunning != pdFALSE )
-{
-/* If critical nesting count is zero then this function
-* does not match a previous call to vTaskEnterCritical(). */
-configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-/* This function should not be called in ISR. Use vTaskExitCriticalFromISR
-* to exit critical section from ISR. */
-portASSERT_IF_IN_ISR();
-if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
-{
-/* Release the ISR and task locks */
-kernelRELEASE_ISR_LOCK( xCoreID );
-kernelRELEASE_TASK_LOCK( xCoreID );
-portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-/* If the critical nesting count is 0, enable interrupts */
-if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
-{
-BaseType_t xYieldCurrentTask;
-/* Get the xYieldPending stats inside the critical section. */
-xYieldCurrentTask = xTaskUnlockCanYield();
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+xYieldCurrentTask = xTaskUnlockCanYield();
+#else
+xYieldCurrentTask = xYieldPendings[ xCoreID ];
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+kernelRELEASE_ISR_LOCK( xCoreID );
+kernelRELEASE_TASK_LOCK( xCoreID );
 portENABLE_INTERRUPTS();
 /* When a task yields in a critical section it just sets
@@ -7668,7 +7537,7 @@ static void prvResetNextTaskUnblockTime( void )
 traceRETURN_vTaskExitCritical();
 }
-#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
 #if ( configNUMBER_OF_CORES > 1 )
@@ -7726,7 +7595,7 @@ static void prvResetNextTaskUnblockTime( void )
 if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
+&& ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
 #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
 )
 {