Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git (synced 2025-08-20 01:58:32 -04:00)
feat(freertos-smp): Light Weight Preemption Disable Locks
Commit 40a991d8eb (parent ec3c41e444)
4 changed files with 223 additions and 22 deletions
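For context, a minimal usage sketch of the preemption disable/enable API that this change optimises. It is illustrative only: it assumes an SMP build with configUSE_TASK_PREEMPTION_DISABLE set to 1, and vUpdateSharedState() is a hypothetical application function. With configLIGHTWEIGHT_CRITICAL_SECTION == 1 (added below), the kernel implements these calls with a lighter-weight critical section that takes only the kernel ISR lock.

    /* Illustrative only: protect a short, non-atomic sequence from being
     * preempted on this core. */
    void vUpdateSharedState( void )
    {
        vTaskPreemptionDisable( NULL );    /* NULL == the calling task */
        {
            /* ... short work that must not be preempted on this core ... */
        }
        vTaskPreemptionEnable( NULL );     /* may yield if a switch was requested meanwhile */
    }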
event_groups.c
@@ -883,14 +883,29 @@
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
     static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
     {
+        BaseType_t xReturn = pdFALSE;
+
         /* Release the previously held task spinlock */
         portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );

         /* Re-enable preemption */
         vTaskPreemptionEnable( NULL );

-        /* We assume that the task was preempted when preemption was enabled */
-        return pdTRUE;
+        /* Yield if preemption was re-enabled */
+        if( xTaskUnlockCanYield() == pdTRUE )
+        {
+            taskYIELD_WITHIN_API();
+
+            /* Return true as the task was preempted */
+            xReturn = pdTRUE;
+        }
+        else
+        {
+            /* Return false as the task was not preempted */
+            xReturn = pdFALSE;
+        }
+
+        return xReturn;
     }
 #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 /*-----------------------------------------------------------*/
FreeRTOS.h
@@ -2958,6 +2958,10 @@
     #error configUSE_PORT_OPTIMISED_TASK_SELECTION is not supported in SMP FreeRTOS
 #endif

+#ifndef configLIGHTWEIGHT_CRITICAL_SECTION
+    #define configLIGHTWEIGHT_CRITICAL_SECTION    0
+#endif
+
 #ifndef configINITIAL_TICK_COUNT
     #define configINITIAL_TICK_COUNT    0
 #endif
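A sketch of the related FreeRTOSConfig.h settings. The new option defaults to 0 (above), so the lightweight critical section is opt-in; only the macro names below are taken from this diff, the values are illustrative.

    /* FreeRTOSConfig.h (illustrative values only) */
    #define configNUMBER_OF_CORES                 2    /* SMP build */
    #define configUSE_TASK_PREEMPTION_DISABLE     1    /* enables vTaskPreemptionDisable()/Enable() */
    #define configLIGHTWEIGHT_CRITICAL_SECTION    1    /* opt in to the lightweight kernel critical section */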
task.h
@@ -3859,6 +3859,22 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC
     void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );
 #endif

+/*
+ * This function is only intended for use when disabling or enabling preemption of a task.
+ * This function takes only the kernel ISR lock, not the task lock.
+ */
+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+    void vKernelLightWeightEnterCritical( void );
+#endif
+
+/*
+ * This function is only intended for use when disabling or enabling preemption of a task.
+ * This function releases only the kernel ISR lock, not the task lock.
+ */
+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+    void vKernelLightWeightExitCritical( void );
+#endif
+
 /*
  * Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns.
  * To be called while data group is locked.
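The intended call pattern for the two new kernel-internal functions, as used later in this diff by vTaskPreemptionDisable() and vTaskPreemptionEnable(); shown here only as a sketch of the pairing:

    /* Kernel-internal pairing (sketch). The lightweight variant takes and
     * releases only the kernel ISR lock, not the task lock, so it is cheaper
     * than a full kernel critical section. */
    #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
        vKernelLightWeightEnterCritical();
    #else
        kernelENTER_CRITICAL();
    #endif
    {
        /* ... access state protected by the kernel ISR lock ... */
    }
    #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
        vKernelLightWeightExitCritical();
    #else
        kernelEXIT_CRITICAL();
    #endif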
tasks.c (206 changed lines)
@@ -629,6 +629,14 @@ static BaseType_t prvCreateIdleTasks( void );
     static void prvCheckForRunStateChange( void );
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */

+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+/*
+ * Checks to see if another task moved the current task out of the ready
+ * list while it was waiting to enter a lightweight critical section and yields, if so.
+ */
+    static void prvLightWeightCheckForRunStateChange( void );
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+
 #if ( configNUMBER_OF_CORES > 1 )

 /*
@@ -960,6 +968,68 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;

 /*-----------------------------------------------------------*/

+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+    static void prvLightWeightCheckForRunStateChange( void )
+    {
+
+        const TCB_t * pxThisTCB;
+        BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        /* This must only be called from within a task. */
+        portASSERT_IF_IN_ISR();
+
+        /* This function is always called with interrupts disabled
+         * so this is safe. */
+        pxThisTCB = pxCurrentTCBs[ xCoreID ];
+
+        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
+        {
+            UBaseType_t uxPrevCriticalNesting;
+
+            /* We are only here if we just entered a critical section
+             * or if we just suspended the scheduler, and another task
+             * has requested that we yield.
+             *
+             * This is slightly complicated since we need to save and restore
+             * the suspension and critical nesting counts, as well as release
+             * and reacquire the correct locks. And then, do it all over again
+             * if our state changed again during the reacquisition. */
+            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT( xCoreID );
+
+            if( uxPrevCriticalNesting > 0U )
+            {
+                portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U );
+                kernelRELEASE_ISR_LOCK( xCoreID );
+            }
+            else
+            {
+                /* The scheduler is suspended. uxSchedulerSuspended is updated
+                 * only when the task is not requested to yield. */
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            portMEMORY_BARRIER();
+
+            portENABLE_INTERRUPTS();
+
+            /* Enabling interrupts should cause this core to immediately service
+             * the pending interrupt and yield. After servicing the pending interrupt,
+             * the task needs to re-evaluate its run state within this loop, as
+             * other cores may have requested this task to yield, potentially altering
+             * its run state. */
+
+            portDISABLE_INTERRUPTS();
+
+            xCoreID = ( BaseType_t ) portGET_CORE_ID();
+            kernelGET_ISR_LOCK( xCoreID );
+
+            portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting );
+        }
+    }
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+
+/*-----------------------------------------------------------*/
+
 #if ( configNUMBER_OF_CORES > 1 )
     static void prvYieldForTask( const TCB_t * pxTCB )
     {
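The kernelGET_ISR_LOCK()/kernelRELEASE_ISR_LOCK() macros used above refer to the kernel's ISR lock. As a rough, hypothetical sketch (not part of this diff), a granular-locks-capable port might map them onto its spinlock primitives; portRELEASE_SPINLOCK() appears in the event_groups.c hunk above, and the "get" form is assumed to be its counterpart. xKernelIsrSpinlock is an assumed name for the kernel's ISR spinlock instance.

    /* Hypothetical port-layer mapping, for illustration only. */
    #define kernelGET_ISR_LOCK( xCoreID )        portGET_SPINLOCK( ( xCoreID ), &xKernelIsrSpinlock )
    #define kernelRELEASE_ISR_LOCK( xCoreID )    portRELEASE_SPINLOCK( ( xCoreID ), &xKernelIsrSpinlock )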
@@ -2314,7 +2384,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             }
             else
             {
-                mtCOVERAGE_TEST_MARKER();
+                /* Reset the deferred state change flags */
+                pxTCB->uxDeferredStateChange &= ~tskDEFERRED_DELETION;
             }
         #endif /* configUSE_TASK_PREEMPTION_DISABLE */

@@ -3199,7 +3270,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,

     traceENTER_vTaskPreemptionDisable( xTask );

-    kernelENTER_CRITICAL();
+    #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+        vKernelLightWeightEnterCritical();
+    #else
+        kernelENTER_CRITICAL();
+    #endif
     {
         if( xSchedulerRunning != pdFALSE )
         {
@@ -3213,7 +3288,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             mtCOVERAGE_TEST_MARKER();
         }
     }
-    kernelEXIT_CRITICAL();
+    #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+        vKernelLightWeightExitCritical();
+    #else
+        kernelEXIT_CRITICAL();
+    #endif

     traceRETURN_vTaskPreemptionDisable();
 }
@@ -3226,10 +3305,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 void vTaskPreemptionEnable( const TaskHandle_t xTask )
 {
     TCB_t * pxTCB;
+    UBaseType_t uxDeferredAction = 0U;

     traceENTER_vTaskPreemptionEnable( xTask );

-    kernelENTER_CRITICAL();
+    #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+        vKernelLightWeightEnterCritical();
+    #else
+        kernelENTER_CRITICAL();
+    #endif
     {
         if( xSchedulerRunning != pdFALSE )
         {
@@ -3245,20 +3329,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             * preemption was disabled. */
            if( pxTCB->uxDeferredStateChange != 0U )
            {
-                if( pxTCB->uxDeferredStateChange & tskDEFERRED_DELETION )
-                {
-                    vTaskDelete( xTask );
-                }
-                else if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION )
-                {
-                    vTaskSuspend( xTask );
-                }
-                else
-                {
-                    mtCOVERAGE_TEST_MARKER();
-                }
-
-                pxTCB->uxDeferredStateChange = 0U;
+                /* Capture the deferred action to perform outside critical section */
+                uxDeferredAction = pxTCB->uxDeferredStateChange;
            }
            else
            {
@@ -3282,7 +3354,28 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-    kernelEXIT_CRITICAL();
+    #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+        vKernelLightWeightExitCritical();
+    #else
+        kernelEXIT_CRITICAL();
+    #endif
+
+    /* Handle deferred actions outside critical section */
+    if( uxDeferredAction != 0U )
+    {
+        if( uxDeferredAction & tskDEFERRED_DELETION )
+        {
+            vTaskDelete( xTask );
+        }
+        else if( uxDeferredAction & tskDEFERRED_SUSPENSION )
+        {
+            vTaskSuspend( xTask );
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }

     traceRETURN_vTaskPreemptionEnable();
 }
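For illustration, the deferred-state-change behaviour this refactor preserves, with the delete or suspend now carried out after the critical section has been exited. Sketch only, assuming configUSE_TASK_PREEMPTION_DISABLE == 1; the deferral flags tskDEFERRED_DELETION/tskDEFERRED_SUSPENSION are those used in the hunks above.

    /* Sketch: a suspend requested while this task has preemption disabled is
     * recorded as tskDEFERRED_SUSPENSION and applied by vTaskPreemptionEnable(),
     * which after this change calls vTaskSuspend() outside the critical section. */
    vTaskPreemptionDisable( NULL );
    /* ... another core requests vTaskSuspend() on this task; the request is deferred ... */
    vTaskPreemptionEnable( NULL );    /* deferred suspension applied here */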
@@ -3320,7 +3413,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             }
             else
             {
-                mtCOVERAGE_TEST_MARKER();
+                /* Reset the deferred state change flags */
+                pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION;
             }
         #endif /* configUSE_TASK_PREEMPTION_DISABLE */

@@ -7741,6 +7835,78 @@ static void prvResetNextTaskUnblockTime( void )
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/

+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+
+    void vKernelLightWeightEnterCritical( void )
+    {
+        if( xSchedulerRunning != pdFALSE )
+        {
+            portDISABLE_INTERRUPTS();
+            {
+                const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+                /* Get only the ISR lock, not the task lock */
+                kernelGET_ISR_LOCK( xCoreID );
+
+                portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U
+                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                        /* Check for the run state change of the task only if a deferred state change is not pending */
+                        && pxCurrentTCB->uxDeferredStateChange == 0U
+                    #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+                    )
+                {
+                    prvLightWeightCheckForRunStateChange();
+                }
+            }
+        }
+    }
+
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+
+    void vKernelLightWeightExitCritical( void )
+    {
+        if( xSchedulerRunning != pdFALSE )
+        {
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
+            {
+                /* Release the ISR lock */
+                kernelRELEASE_ISR_LOCK( xCoreID );
+
+                portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                BaseType_t xYieldCurrentTask;
+
+                xYieldCurrentTask = xTaskUnlockCanYield();
+
+                /* If the critical nesting count is 0, enable interrupts */
+                if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
+                {
+                    portENABLE_INTERRUPTS();
+
+                    if( xYieldCurrentTask != pdFALSE
+                        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                            /* Yield only if no deferred state change is pending */
+                            && pxCurrentTCB->uxDeferredStateChange == 0U
+                        #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+                        )
+                    {
+                        portYIELD();
+                    }
+                }
+            }
+        }
+    }
+
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+/*-----------------------------------------------------------*/
+
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

     BaseType_t xTaskUnlockCanYield( void )