diff --git a/include/task.h b/include/task.h index 7d6000d3a..0d843f0c2 100644 --- a/include/task.h +++ b/include/task.h @@ -213,7 +213,7 @@ typedef enum * \defgroup taskYIELD taskYIELD * \ingroup SchedulerControl */ -#define taskYIELD() portYIELD() +#define taskYIELD() portYIELD() /** * task. h @@ -227,19 +227,12 @@ typedef enum * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL * \ingroup SchedulerControl */ -#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL() portENTER_CRITICAL() #if ( configNUMBER_OF_CORES == 1 ) - #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() #else - #define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() + #define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() #endif -#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define taskLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) portLOCK_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock ) - #define taskLOCK_DATA_GROUP_FROM_ISR( pxISRSpinlock ) portLOCK_DATA_GROUP_FROM_ISR( pxISRSpinlock ) -#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - #define taskLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) taskENTER_CRITICAL() - #define taskLOCK_DATA_GROUP_FROM_ISR( pxISRSpinlock ) taskENTER_CRITICAL_FROM_ISR() -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /** * task. h @@ -253,19 +246,12 @@ typedef enum * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL * \ingroup SchedulerControl */ -#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() #if ( configNUMBER_OF_CORES == 1 ) - #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) + #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) #else - #define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) + #define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) #endif -#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define taskUNLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) portUNLOCK_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock ) - #define taskUNLOCK_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) portUNLOCK_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) -#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - #define taskUNLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) taskEXIT_CRITICAL() - #define taskUNLOCK_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) taskEXIT_CRITICAL_FROM_ISR( x ) -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /** * task. h @@ -3733,7 +3719,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * It should be used in the implementation of portENTER_CRITICAL if port is running a * multiple core FreeRTOS. */ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) || ( configNUMBER_OF_CORES > 1 ) ) void vTaskEnterCritical( void ); #endif @@ -3745,7 +3731,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * It should be used in the implementation of portEXIT_CRITICAL if port is running a * multiple core FreeRTOS. 
*/ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) || ( configNUMBER_OF_CORES > 1 ) ) void vTaskExitCritical( void ); #endif @@ -3755,7 +3741,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * should be used in the implementation of portENTER_CRITICAL_FROM_ISR if port is * running a multiple core FreeRTOS. */ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) UBaseType_t vTaskEnterCriticalFromISR( void ); #endif @@ -3765,12 +3751,12 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * should be used in the implementation of portEXIT_CRITICAL_FROM_ISR if port is * running a multiple core FreeRTOS. */ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); #endif /* - * Checks whether a yield is required after taskUNLOCK_DATA_GROUP() returns. + * Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns. * To be called while data group is locked. */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) diff --git a/tasks.c b/tasks.c index 6a393d027..99c0b9079 100644 --- a/tasks.c +++ b/tasks.c @@ -366,6 +366,38 @@ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ +/* Macros to take and release kernel spinlocks */ +#if ( configNUMBER_OF_CORES > 1 ) + #if ( portUSING_GRANULAR_LOCKS == 1 ) + #define kernelGET_TASK_LOCK( xCoreID ) portGET_SPINLOCK( xCoreID, &xTaskSpinlock ) + #define kernelRELEASE_TASK_LOCK( xCoreID ) portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock ) + #define kernelGET_ISR_LOCK( xCoreID ) portGET_SPINLOCK( xCoreID, &xISRSpinlock ) + #define kernelRELEASE_ISR_LOCK( xCoreID ) portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock ) + #else + #define kernelGET_TASK_LOCK( xCoreID ) portGET_TASK_LOCK( xCoreID ) + #define kernelRELEASE_TASK_LOCK( xCoreID ) portRELEASE_TASK_LOCK( xCoreID ) + #define kernelGET_ISR_LOCK( xCoreID ) portGET_ISR_LOCK( xCoreID ) + #define kernelRELEASE_ISR_LOCK( xCoreID ) portRELEASE_ISR_LOCK( xCoreID ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +/* + * Macros to mark the start and end of a critical code region. + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define kernelENTER_CRITICAL() vTaskEnterCritical() + #define kernelENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR() + #define kernelEXIT_CRITICAL() vTaskExitCritical() + #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) vTaskExitCriticalFromISR( uxSavedInterruptStatus ) +#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + #define kernelENTER_CRITICAL() taskENTER_CRITICAL() + #define kernelENTER_CRITICAL_FROM_ISR() taskENTER_CRITICAL_FROM_ISR() + #define kernelEXIT_CRITICAL() taskEXIT_CRITICAL() + #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/*-----------------------------------------------------------*/ + /* * Task control block. 
A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -504,7 +536,7 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINI PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; -PRIVILEGED_DATA volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; +PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ @@ -538,9 +570,10 @@ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ conf #endif +/* Kernel spinlock variables when using granular locking */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_KERNEL_TASK_SPINLOCK_STATIC; - PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_KERNEL_ISR_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ @@ -552,14 +585,14 @@ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ conf */ static BaseType_t prvCreateIdleTasks( void ); -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) /* * Checks to see if another task moved the current task out of the ready * list while it was waiting to enter a critical section and yields, if so. */ static void prvCheckForRunStateChange( void ); -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ #if ( configNUMBER_OF_CORES > 1 ) @@ -823,10 +856,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) static void prvCheckForRunStateChange( void ) { - UBaseType_t uxPrevCriticalNesting; const TCB_t * pxThisTCB; BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); @@ -839,6 +871,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) { + UBaseType_t uxPrevCriticalNesting; + /* We are only here if we just entered a critical section * or if we just suspended the scheduler, and another task * has requested that we yield. 
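The kernelGET_TASK_LOCK()/kernelGET_ISR_LOCK() family defined above gives tasks.c a single spelling for the kernel data group's locks: with portUSING_GRANULAR_LOCKS == 1 they expand to portGET_SPINLOCK()/portRELEASE_SPINLOCK() on the kernel's own spinlock pair, and otherwise to the pre-existing portGET_TASK_LOCK()/portGET_ISR_LOCK() macros. Below is a minimal standalone sketch of the granular expansion; the C11 atomic_flag spinlock body is an assumption for illustration only, as a real port supplies its own portSPINLOCK_TYPE, portINIT_SPINLOCK_STATIC, portGET_SPINLOCK() and portRELEASE_SPINLOCK().

/* Standalone sketch, not part of the patch. The spinlock body below is
 * illustrative; a real port provides these primitives itself. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct
{
    atomic_flag xLocked;
} portSPINLOCK_TYPE;

#define portINIT_SPINLOCK_STATIC    { ATOMIC_FLAG_INIT }

static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC;
static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC;

static void portGET_SPINLOCK( int xCoreID,
                              portSPINLOCK_TYPE * pxSpinlock )
{
    ( void ) xCoreID;

    /* Spin until the lock is free. Acquire ordering pairs with the
     * release in portRELEASE_SPINLOCK(). */
    while( atomic_flag_test_and_set_explicit( &( pxSpinlock->xLocked ), memory_order_acquire ) )
    {
    }
}

static void portRELEASE_SPINLOCK( int xCoreID,
                                  portSPINLOCK_TYPE * pxSpinlock )
{
    ( void ) xCoreID;
    atomic_flag_clear_explicit( &( pxSpinlock->xLocked ), memory_order_release );
}

/* With portUSING_GRANULAR_LOCKS == 1 the kernel macros expand straight
 * to the spinlock primitives on the kernel's two locks. */
#define kernelGET_TASK_LOCK( xCoreID )        portGET_SPINLOCK( ( xCoreID ), &xTaskSpinlock )
#define kernelGET_ISR_LOCK( xCoreID )         portGET_SPINLOCK( ( xCoreID ), &xISRSpinlock )
#define kernelRELEASE_ISR_LOCK( xCoreID )     portRELEASE_SPINLOCK( ( xCoreID ), &xISRSpinlock )
#define kernelRELEASE_TASK_LOCK( xCoreID )    portRELEASE_SPINLOCK( ( xCoreID ), &xTaskSpinlock )

int main( void )
{
    /* Task lock first, then ISR lock; released in reverse order. */
    kernelGET_TASK_LOCK( 0 );
    kernelGET_ISR_LOCK( 0 );
    printf( "kernel data group locked\n" );
    kernelRELEASE_ISR_LOCK( 0 );
    kernelRELEASE_TASK_LOCK( 0 );

    return 0;
}

The fixed task-lock-then-ISR-lock acquisition order in main() mirrors what prvCheckForRunStateChange() and vTaskEnterCritical() do in the hunks that follow, and is what prevents two cores from deadlocking on the pair.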
@@ -852,7 +886,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( uxPrevCriticalNesting > 0U ) { portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U ); - portRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_ISR_LOCK( xCoreID ); } else { @@ -861,7 +895,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; mtCOVERAGE_TEST_MARKER(); } - portRELEASE_TASK_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); portMEMORY_BARRIER(); configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ); @@ -876,18 +910,18 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; portDISABLE_INTERRUPTS(); xCoreID = ( BaseType_t ) portGET_CORE_ID(); - portGET_TASK_LOCK( xCoreID ); - portGET_ISR_LOCK( xCoreID ); + kernelGET_TASK_LOCK( xCoreID ); + kernelGET_ISR_LOCK( xCoreID ); portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting ); if( uxPrevCriticalNesting == 0U ) { - portRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_ISR_LOCK( xCoreID ); } } } -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -2047,7 +2081,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { uxCurrentNumberOfTasks = ( UBaseType_t ) ( uxCurrentNumberOfTasks + 1U ); @@ -2105,7 +2139,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, portSETUP_TCB( pxNewTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( xSchedulerRunning != pdFALSE ) { @@ -2125,7 +2159,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { uxCurrentNumberOfTasks++; @@ -2174,7 +2208,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ @@ -2223,7 +2257,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskDelete( xTaskToDelete ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the calling task that is * being deleted. */ @@ -2331,7 +2365,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, prvResetNextTaskUnblockTime(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* If the task is not deleting itself, call prvDeleteTCB from outside of * critical section. If a task deletes itself, prvDeleteTCB is called @@ -2532,14 +2566,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, else #endif { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ); pxDelayedList = pxDelayedTaskList; pxOverflowedDelayedList = pxOverflowDelayedTaskList; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( pxEventList == &xPendingReadyList ) { @@ -2649,7 +2683,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskPriorityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. 
*/ @@ -2658,7 +2692,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_uxTaskPriorityGet( uxReturn ); @@ -2699,7 +2733,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ @@ -2708,7 +2742,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_uxTaskPriorityGetFromISR( uxReturn ); @@ -2727,7 +2761,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskBasePriorityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the base priority of the task * that called uxTaskBasePriorityGet() that is being queried. */ @@ -2736,7 +2770,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_uxTaskBasePriorityGet( uxReturn ); @@ -2777,7 +2811,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the base priority of the calling * task that is being queried. */ @@ -2786,7 +2820,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_uxTaskBasePriorityGetFromISR( uxReturn ); @@ -2823,7 +2857,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the priority of the calling * task that is being changed. 
*/ @@ -3003,7 +3037,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, ( void ) uxPriorityUsedOnEntry; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPrioritySet(); } @@ -3020,7 +3054,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); @@ -3061,7 +3095,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskCoreAffinitySet(); } @@ -3076,14 +3110,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask ); @@ -3101,14 +3135,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskPreemptionDisable( xTask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); pxTCB->xPreemptionDisable++; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionDisable(); } @@ -3125,7 +3159,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskPreemptionEnable( xTask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); @@ -3142,7 +3176,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionEnable(); } @@ -3158,7 +3192,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskSuspend( xTaskToSuspend ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the running task that is * being suspended. */ @@ -3246,7 +3280,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); #if ( configNUMBER_OF_CORES == 1 ) { @@ -3256,11 +3290,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Reset the next expected unblock time in case it referred to the * task that is now in the Suspended state. 
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { prvResetNextTaskUnblockTime(); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } else { @@ -3409,7 +3443,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, if( pxTCB != NULL ) #endif { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3430,7 +3464,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } else { @@ -3477,7 +3511,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3533,7 +3567,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_xTaskResumeFromISR( xYieldRequired ); @@ -3910,45 +3944,39 @@ void vTaskSuspendAll( void ) * do not otherwise exhibit real time behaviour. */ portSOFTWARE_BARRIER(); - #if ( portUSING_GRANULAR_LOCKS == 1 ) + kernelGET_TASK_LOCK( xCoreID ); + + /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The + * purpose is to prevent altering the variable when fromISR APIs are reading + * it. */ + if( ( uxSchedulerSuspended == 0U ) + + /* If the scheduler is being suspended after taking a granular lock (in any data group) + * then we do not check for the run state of a task and we let it run through until + * the granular lock is released. */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + && ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + ) { - portGET_SPINLOCK( &xTaskSpinlock ); - portGET_SPINLOCK( &xISRSpinlock ); - - /* Increment xPreemptionDisable to prevent preemption and also - * track whether to yield in xTaskResumeAll(). */ - pxCurrentTCBs[ portGET_CORE_ID() ]->xPreemptionDisable++; - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment - * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; - - portRELEASE_SPINLOCK( &xISRSpinlock ); + prvCheckForRunStateChange(); } - #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + else { - portGET_TASK_LOCK(); - - /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The - * purpose is to prevent altering the variable when fromISR APIs are readying - * it. */ - if( uxSchedulerSuspended == 0U ) - { - prvCheckForRunStateChange(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - portGET_ISR_LOCK(); - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment - * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; - portRELEASE_ISR_LOCK(); + mtCOVERAGE_TEST_MARKER(); } - #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + + /* Query the coreID again as prvCheckForRunStateChange may have + * caused the task to get scheduled on a different core. The correct + * task lock for the core is acquired in prvCheckForRunStateChange. 
*/ + xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + kernelGET_ISR_LOCK( xCoreID ); + + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment + * is used to allow calls to vTaskSuspendAll() to nest. */ + ++uxSchedulerSuspended; + kernelRELEASE_ISR_LOCK( xCoreID ); portCLEAR_INTERRUPT_MASK( ulState ); } @@ -4044,7 +4072,7 @@ BaseType_t xTaskResumeAll( void ) * removed task will have been added to the xPendingReadyList. Once the * scheduler has been resumed it is safe to move all the pending ready * tasks from this list into their appropriate ready list. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); @@ -4053,24 +4081,9 @@ BaseType_t xTaskResumeAll( void ) configASSERT( uxSchedulerSuspended != 0U ); uxSchedulerSuspended = ( UBaseType_t ) ( uxSchedulerSuspended - 1U ); - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - { - configASSERT( pxCurrentTCBs[ portGET_CORE_ID() ]->xPreemptionDisable > 0 ); - - /* Decrement xPreemptionDisable. If 0, it means this we are not - * in a nested suspension scenario, thus this function and yield - * if necessary. */ - pxCurrentTCBs[ portGET_CORE_ID() ]->xPreemptionDisable--; - - /* Release the kernel's task spinlock that was held throughout - * the kernel suspension. */ - portRELEASE_SPINLOCK( &xTaskSpinlock ); - } - #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - { - portRELEASE_TASK_LOCK(); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #if ( configNUMBER_OF_CORES > 1 ) + kernelRELEASE_TASK_LOCK( xCoreID ); + #endif if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { @@ -4160,7 +4173,11 @@ BaseType_t xTaskResumeAll( void ) } } - if( xYieldPendings[ xCoreID ] != pdFALSE ) + if( xYieldPendings[ xCoreID ] != pdFALSE + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) + #endif + ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -4185,7 +4202,7 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } traceRETURN_xTaskResumeAll( xAlreadyYielded ); @@ -4614,11 +4631,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) configASSERT( xTicksToJump != ( TickType_t ) 0 ); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xPendedTicks++; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); xTicksToJump--; } else @@ -4650,11 +4667,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) vTaskSuspendAll(); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xPendedTicks += xTicksToCatchUp; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); xYieldOccurred = xTaskResumeAll(); traceRETURN_xTaskCatchUpTicks( xYieldOccurred ); @@ -4691,7 +4708,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * the event list too. Interrupts can touch the event list item, * even though the scheduler is suspended, so a critical section * is used. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { @@ -4707,7 +4724,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* Place the unblocked task into the appropriate ready list. 
*/ prvAddTaskToReadyList( pxTCB ); @@ -4734,11 +4751,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { prvYieldForTask( pxTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } @@ -4767,6 +4784,10 @@ BaseType_t xTaskIncrementTick( void ) traceENTER_xTaskIncrementTick(); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* Called by the portable layer each time a tick interrupt occurs. * Increments the tick then checks to see if the new tick value will cause any * tasks to be unblocked. */ @@ -4867,7 +4888,8 @@ BaseType_t xTaskIncrementTick( void ) /* Preemption is on, but a context switch should * only be performed if the unblocked task's * priority is higher than the currently executing - * task. + * task and the currently executing task does not + * have preemption disabled. * The case of equal priority tasks sharing * processing time (which happens when both * preemption and time slicing are on) is @@ -4899,14 +4921,32 @@ BaseType_t xTaskIncrementTick( void ) { #if ( configNUMBER_OF_CORES == 1 ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCB->xPreemptionDisable != 0U ) + { + mtCOVERAGE_TEST_MARKER(); + } + else + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { @@ -5003,6 +5043,10 @@ BaseType_t xTaskIncrementTick( void ) #endif } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_xTaskIncrementTick( xSwitchRequired ); return xSwitchRequired; @@ -5031,11 +5075,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xTCB->pxTaskTag = pxHookFunction; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskSetApplicationTaskTag(); } @@ -5058,11 +5102,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. 
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGetApplicationTaskTag( xReturn ); @@ -5091,11 +5135,11 @@ BaseType_t xTaskIncrementTick( void ) /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn ); @@ -5240,18 +5284,23 @@ BaseType_t xTaskIncrementTick( void ) * and move on if another core suspended the scheduler. We should only * do that if the current core has suspended the scheduler. */ - portGET_TASK_LOCK( xCoreID ); /* Must always acquire the task lock first. */ - portGET_ISR_LOCK( xCoreID ); + kernelGET_TASK_LOCK( xCoreID ); /* Must always acquire the task lock first. */ + kernelGET_ISR_LOCK( xCoreID ); { /* vTaskSwitchContext() must never be called from within a critical section. * This is not necessarily true for single core FreeRTOS, but it is for this * SMP port. */ configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ); - if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) + if( uxSchedulerSuspended != ( UBaseType_t ) 0U + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + || ( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable > 0U ) ) + #endif + ) { - /* The scheduler is currently suspended - do not allow a context - * switch. */ + /* The scheduler is currently suspended or the task + * has requested to not be preempted - do not allow + * a context switch. */ xYieldPendings[ xCoreID ] = pdTRUE; } else @@ -5322,8 +5371,8 @@ BaseType_t xTaskIncrementTick( void ) #endif } } - portRELEASE_ISR_LOCK( xCoreID ); - portRELEASE_TASK_LOCK( xCoreID ); + kernelRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); traceRETURN_vTaskSwitchContext(); } @@ -5337,8 +5386,15 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH THE - * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Suspend the kernel data group as we are about to access its members */ + vTaskSuspendAll(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + + /* THIS FUNCTION MUST BE CALLED WITH THE + * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Place the event list item of the TCB in the appropriate event list. * This is placed in the list in priority order so the highest priority task @@ -5355,6 +5411,11 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Resume it. 
*/ + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskPlaceOnEventList(); } /*-----------------------------------------------------------*/ @@ -5367,9 +5428,16 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by - * the event groups implementation. */ - configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Suspend the kernel data group as we are about to access its members */ + vTaskSuspendAll(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + * the event groups implementation. */ + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Store the item value in the event list item. It is safe to access the * event list item here as interrupts won't access the event list item of a @@ -5385,6 +5453,11 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Resume it. */ + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskPlaceOnUnorderedEventList(); } /*-----------------------------------------------------------*/ @@ -5399,11 +5472,17 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, configASSERT( pxEventList ); - /* This function should not be called by application code hence the - * 'Restricted' in its name. It is not part of the public API. It is - * designed for use by kernel code, and has special calling requirements - - * it should be called with the scheduler suspended. */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Suspend the kernel data group as we are about to access its members */ + vTaskSuspendAll(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* This function should not be called by application code hence the + * 'Restricted' in its name. It is not part of the public API. It is + * designed for use by kernel code, and has special calling requirements - + * it should be called with the scheduler suspended. */ + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Place the event list item of the TCB in the appropriate event list. * In this case it is assume that this is the only task that is going to @@ -5422,6 +5501,11 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Resume it. 
*/ + ( void ) xTaskResumeAll(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskPlaceOnEventListRestricted(); } @@ -5435,8 +5519,30 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) traceENTER_xTaskRemoveFromEventList( pxEventList ); - /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be - * called from a critical section within an ISR. */ + #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + * called from a critical section within an ISR. */ + #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ + /* Lock the kernel data group as we are about to access its members */ + UBaseType_t uxSavedInterruptStatus; + + if( portCHECK_IF_IN_ISR() == pdTRUE ) + { + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + } + else + { + uxSavedInterruptStatus = 0; + kernelENTER_CRITICAL(); + } + + /* Before taking the kernel lock, another task/ISR could have already + * emptied the pxEventList. So we insert a check here to see if + * pxEventList is empty before attempting to remove an item from it. */ + if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + { + #endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ /* The event list is sorted in priority order, so the first in the list can * be removed as it is known to be the highest priority. Remove the TCB from @@ -5516,6 +5622,26 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +} +else +{ + /* The pxEventList was emptied before we entered the critical + * section. Nothing to do except return pdFALSE. */ + xReturn = pdFALSE; +} + +/* We are done accessing the kernel data group. Unlock it. */ +if( portCHECK_IF_IN_ISR() == pdTRUE ) +{ + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); +} +else +{ + kernelEXIT_CRITICAL(); +} + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_xTaskRemoveFromEventList( xReturn ); return xReturn; } @@ -5579,13 +5705,13 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, { #if ( configUSE_PREEMPTION == 1 ) { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { prvYieldForTask( pxUnblockedTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } - #endif + #endif /* if ( configUSE_PREEMPTION == 1 ) */ } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ @@ -5598,12 +5724,12 @@ void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) traceENTER_vTaskSetTimeOutState( pxTimeOut ); configASSERT( pxTimeOut ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xTimeOnEntering = xTickCount; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskSetTimeOutState(); } @@ -5613,10 +5739,20 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) { traceENTER_vTaskInternalSetTimeOutState( pxTimeOut ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* For internal use only as it does not use a critical section. 
*/ pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xTimeOnEntering = xTickCount; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. */ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_vTaskInternalSetTimeOutState(); } /*-----------------------------------------------------------*/ @@ -5631,7 +5767,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, configASSERT( pxTimeOut ); configASSERT( pxTicksToWait ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Minor optimisation. The tick count cannot change in this block. */ const TickType_t xConstTickCount = xTickCount; @@ -5682,7 +5818,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskCheckForTimeOut( xReturn ); @@ -5982,7 +6118,12 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) traceENTER_eTaskConfirmSleepModeStatus(); - /* This function must be called from a critical section. */ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* This function must be called from a critical section. */ + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0U ) { @@ -6016,6 +6157,11 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) mtCOVERAGE_TEST_MARKER(); } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. */ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_eTaskConfirmSleepModeStatus( eReturn ); return eReturn; @@ -6147,7 +6293,7 @@ static void prvCheckTasksWaitingTermination( void ) { #if ( configNUMBER_OF_CORES == 1 ) { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { { /* MISRA Ref 11.5.3 [Void pointer assignment] */ @@ -6159,7 +6305,7 @@ static void prvCheckTasksWaitingTermination( void ) --uxDeletedTasksWaitingCleanUp; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); prvDeleteTCB( pxTCB ); } @@ -6167,7 +6313,7 @@ static void prvCheckTasksWaitingTermination( void ) { pxTCB = NULL; - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* For SMP, multiple idles can be running simultaneously * and we need to check that other idles did not cleanup while we were @@ -6190,12 +6336,12 @@ static void prvCheckTasksWaitingTermination( void ) /* The TCB to be deleted still has not yet been switched out * by the scheduler, so we will just exit this loop early and * try again next time. */ - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); break; } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( pxTCB != NULL ) { @@ -6317,14 +6463,14 @@ static void prvCheckTasksWaitingTermination( void ) /* Tasks can be in pending ready list and other state list at the * same time. These tasks are in ready state no matter what state * list the task is in. 
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE ) { pxTaskStatus->eCurrentState = eReady; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } } else @@ -6646,7 +6792,7 @@ static void prvResetNextTaskUnblockTime( void ) else { #if ( configNUMBER_OF_CORES > 1 ) - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); #endif { if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) @@ -6659,7 +6805,7 @@ static void prvResetNextTaskUnblockTime( void ) } } #if ( configNUMBER_OF_CORES > 1 ) - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); #endif } @@ -6680,7 +6826,9 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_xTaskPriorityInherit( pxMutexHolder ); - taskENTER_CRITICAL(); + #if ( portUSING_GRANULAR_LOCKS == 1 ) + kernelENTER_CRITICAL(); + #endif { /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority * inheritance is not applied in this scenario. */ @@ -6768,7 +6916,9 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + #if ( portUSING_GRANULAR_LOCKS == 1 ) + kernelEXIT_CRITICAL(); + #endif traceRETURN_xTaskPriorityInherit( xReturn ); @@ -6787,6 +6937,11 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_xTaskPriorityDisinherit( pxMutexHolder ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + if( pxMutexHolder != NULL ) { /* A task can only have an inherited priority if it holds the mutex. @@ -6864,6 +7019,11 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. */ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_xTaskPriorityDisinherit( xReturn ); return xReturn; @@ -6883,88 +7043,97 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask ); - if( pxMutexHolder != NULL ) + #if ( portUSING_GRANULAR_LOCKS == 1 ) + kernelENTER_CRITICAL(); + #endif { - /* If pxMutexHolder is not NULL then the holder must hold at least - * one mutex. */ - configASSERT( pxTCB->uxMutexesHeld ); + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + * one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); - /* Determine the priority to which the priority of the task that - * holds the mutex should be set. This will be the greater of the - * holding task's base priority and the priority of the highest - * priority task that is waiting to obtain the mutex. */ - if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) - { - uxPriorityToUse = uxHighestPriorityWaitingTask; - } - else - { - uxPriorityToUse = pxTCB->uxBasePriority; - } - - /* Does the priority need to change? */ - if( pxTCB->uxPriority != uxPriorityToUse ) - { - /* Only disinherit if no other mutexes are held. This is a - * simplification in the priority inheritance implementation. If - * the task that holds the mutex is also holding other mutexes then - * the other mutexes may have caused the priority inheritance. 
*/ - if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + /* Determine the priority to which the priority of the task that + * holds the mutex should be set. This will be the greater of the + * holding task's base priority and the priority of the highest + * priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) { - /* If a task has timed out because it already holds the - * mutex it was trying to obtain then it cannot of inherited - * its own priority. */ - configASSERT( pxTCB != pxCurrentTCB ); + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } - /* Disinherit the priority, remembering the previous - * priority to facilitate determining the subject task's - * state. */ - traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); - uxPriorityUsedOnEntry = pxTCB->uxPriority; - pxTCB->uxPriority = uxPriorityToUse; + /* Does the priority need to change? */ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + * simplification in the priority inheritance implementation. If + * the task that holds the mutex is also holding other mutexes then + * the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + { + /* If a task has timed out because it already holds the + * mutex it was trying to obtain then it cannot have inherited + * its own priority. */ + configASSERT( pxTCB != pxCurrentTCB ); - /* Only reset the event list item value if the value is not - * being used for anything else. */ - if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) ) - { - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Disinherit the priority, remembering the previous + * priority to facilitate determining the subject task's + * state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; - /* If the running task is not the task that holds the mutex - * then the task that holds the mutex could be in either the - * Ready, Blocked or Suspended states. Only remove the task - * from its current state list if it is in the Ready state as - * the task's priority is going to change and there is one - * Ready list per priority. */ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) - { - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Only reset the event list item value if the value is not + * being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. 
*/ - portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); } else { mtCOVERAGE_TEST_MARKER(); } - prvAddTaskToReadyList( pxTCB ); - #if ( configNUMBER_OF_CORES > 1 ) + /* If the running task is not the task that holds the mutex + * then the task that holds the mutex could be in either the + * Ready, Blocked or Suspended states. Only remove the task + * from its current state list if it is in the Ready state as + * the task's priority is going to change and there is one + * Ready list per priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) { - /* The priority of the task is dropped. Yield the core on - * which the task is running. */ - if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { - prvYieldCore( pxTCB->xTaskRunState ); + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is dropped. Yield the core on + * which the task is running. */ + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); } - #endif /* if ( configNUMBER_OF_CORES > 1 ) */ } else { @@ -6981,10 +7150,9 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( portUSING_GRANULAR_LOCKS == 1 ) + kernelEXIT_CRITICAL(); + #endif traceRETURN_vTaskPriorityDisinheritAfterTimeout(); } @@ -7059,7 +7227,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskEnterCritical( void ) { @@ -7073,8 +7241,8 @@ static void prvResetNextTaskUnblockTime( void ) { if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - portGET_TASK_LOCK( xCoreID ); - portGET_ISR_LOCK( xCoreID ); + kernelGET_TASK_LOCK( xCoreID ); + kernelGET_ISR_LOCK( xCoreID ); } portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); @@ -7108,10 +7276,10 @@ static void prvResetNextTaskUnblockTime( void ) traceRETURN_vTaskEnterCritical(); } -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) UBaseType_t vTaskEnterCriticalFromISR( void ) { @@ -7126,7 +7294,7 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - portGET_ISR_LOCK( xCoreID ); + kernelGET_ISR_LOCK( xCoreID ); } portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); @@ -7141,7 +7309,7 @@ static void prvResetNextTaskUnblockTime( void ) return uxSavedInterruptStatus; } -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 
1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) @@ -7189,7 +7357,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCritical( void ) { @@ -7218,8 +7386,8 @@ static void prvResetNextTaskUnblockTime( void ) /* Get the xYieldPending stats inside the critical section. */ xYieldCurrentTask = xYieldPendings[ xCoreID ]; - portRELEASE_ISR_LOCK( xCoreID ); - portRELEASE_TASK_LOCK( xCoreID ); + kernelRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); portENABLE_INTERRUPTS(); /* When a task yields in a critical section it just sets @@ -7249,10 +7417,10 @@ static void prvResetNextTaskUnblockTime( void ) traceRETURN_vTaskExitCritical(); } -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) { @@ -7274,7 +7442,7 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - portRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_ISR_LOCK( xCoreID ); portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } else @@ -7295,7 +7463,7 @@ static void prvResetNextTaskUnblockTime( void ) traceRETURN_vTaskExitCriticalFromISR(); } -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) @@ -7305,7 +7473,7 @@ static void prvResetNextTaskUnblockTime( void ) BaseType_t xReturn; BaseType_t xCoreID = portGET_CORE_ID(); - if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) ) + if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) ) { xReturn = pdTRUE; } @@ -7731,6 +7899,11 @@ TickType_t uxTaskResetEventItemValue( void ) traceENTER_pvTaskIncrementMutexHeldCount(); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* Lock the kernel data group as we are about to access its members */ + kernelENTER_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + pxTCB = pxCurrentTCB; /* If xSemaphoreCreateMutex() is called before any tasks have been created @@ -7740,6 +7913,11 @@ TickType_t uxTaskResetEventItemValue( void ) ( pxTCB->uxMutexesHeld )++; } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + /* We are done accessing the kernel data group. Unlock it. 
*/ + kernelEXIT_CRITICAL(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_pvTaskIncrementMutexHeldCount( pxTCB ); return pxTCB; @@ -7773,7 +7951,7 @@ TickType_t uxTaskResetEventItemValue( void ) * has occurred and set the flag to indicate that we are waiting for * a notification. If we do not do so, a notification sent from an ISR * will get lost. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Only block if the notification count is not already non-zero. */ if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0U ) @@ -7789,7 +7967,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* We are now out of the critical section but the scheduler is still * suspended, so we are safe to do non-deterministic operations such @@ -7817,7 +7995,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { traceTASK_NOTIFY_TAKE( uxIndexToWaitOn ); ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ]; @@ -7840,7 +8018,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_ulTaskGenericNotifyTake( ulReturn ); @@ -7875,7 +8053,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* We MUST enter a critical section to atomically check and update the * task notification value. If we do not do so, a notification from * an ISR will get lost. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Only block if a notification is not already pending. */ if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED ) @@ -7896,7 +8074,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* We are now out of the critical section but the scheduler is still * suspended, so we are safe to do non-deterministic operations such @@ -7924,7 +8102,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { traceTASK_NOTIFY_WAIT( uxIndexToWaitOn ); @@ -7954,7 +8132,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGenericNotifyWait( xReturn ); @@ -7982,7 +8160,7 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( xTaskToNotify ); pxTCB = xTaskToNotify; - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( pulPreviousNotificationValue != NULL ) { @@ -8074,7 +8252,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGenericNotify( xReturn ); @@ -8126,7 +8304,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -8256,7 +8434,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( 
uxSavedInterruptStatus ); traceRETURN_xTaskGenericNotifyFromISR( xReturn ); @@ -8304,7 +8482,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -8390,7 +8568,7 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_vTaskGenericNotifyGiveFromISR(); } @@ -8415,7 +8593,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED ) { @@ -8427,7 +8605,7 @@ TickType_t uxTaskResetEventItemValue( void ) xReturn = pdFAIL; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGenericNotifyStateClear( xReturn ); @@ -8455,14 +8633,14 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* Return the notification as it was before the bits were cleared, * then clear the bit mask. */ ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ]; pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_ulTaskGenericNotifyValueClear( ulReturn );
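A pattern worth noting from the xTaskRemoveFromEventList() hunk above: under granular locking the function can race with another core, so it takes the kernel lock with the primitive that matches its calling context and then re-tests the list once the lock is held. The standalone sketch below isolates that shape; the stub bodies and the integer stand-in for pxEventList are assumptions for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the port/kernel hooks; a real build gets these from the
 * port layer and the macros defined at the top of tasks.c. */
static bool xInsideISR = false;
static unsigned int uxEventListLength = 1U; /* Stand-in for pxEventList. */

static bool portCHECK_IF_IN_ISR( void )
{
    return xInsideISR;
}
static unsigned int kernelENTER_CRITICAL_FROM_ISR( void )
{
    return 0x1FU; /* Saved interrupt mask. */
}
static void kernelEXIT_CRITICAL_FROM_ISR( unsigned int uxSavedInterruptStatus )
{
    ( void ) uxSavedInterruptStatus;
}
static void kernelENTER_CRITICAL( void )
{
}
static void kernelEXIT_CRITICAL( void )
{
}

static bool prvRemoveFromEventListSketch( void )
{
    unsigned int uxSavedInterruptStatus = 0U;
    bool xReturn = false;

    /* Take the kernel lock with the primitive that matches the calling
     * context, as the patched xTaskRemoveFromEventList() does. */
    if( portCHECK_IF_IN_ISR() == true )
    {
        uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
    }
    else
    {
        kernelENTER_CRITICAL();
    }

    /* Re-check under the lock: another task or ISR may have emptied the
     * list before the lock was granted. */
    if( uxEventListLength != 0U )
    {
        uxEventListLength--; /* Remove the highest priority waiter. */
        xReturn = true;
    }

    if( portCHECK_IF_IN_ISR() == true )
    {
        kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
    }
    else
    {
        kernelEXIT_CRITICAL();
    }

    return xReturn;
}

int main( void )
{
    printf( "removed: %d\n", ( int ) prvRemoveFromEventListSketch() );
    printf( "removed: %d\n", ( int ) prvRemoveFromEventListSketch() ); /* List now empty. */

    return 0;
}

The re-check is the essential part: without one big kernel lock serialising all callers, emptiness observed before entering the critical section can be stale by the time the lock is held.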
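Similarly, the configUSE_TASK_PREEMPTION_DISABLE checks added in xTaskResumeAll(), xTaskIncrementTick() and vTaskSwitchContext() all gate the same decision: a pending yield or time-slice switch is honoured only while the running task's xPreemptionDisable count is zero. A standalone sketch of the time-slice branch, with plain variables standing in for the TCB field and the ready-list length used in xTaskIncrementTick():

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for pxCurrentTCB->xPreemptionDisable and
 * listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxPriority ] ) ). */
static unsigned int uxPreemptionDisable = 0U;
static unsigned int uxReadyTasksAtPriority = 2U;

static bool prvTimeSliceSwitchRequired( void )
{
    bool xSwitchRequired = false;

    /* A task that has called vTaskPreemptionDisable() keeps the core even
     * when equal priority peers are ready; otherwise time slicing applies
     * whenever more than one task shares the running priority. */
    if( uxPreemptionDisable == 0U )
    {
        if( uxReadyTasksAtPriority > 1U )
        {
            xSwitchRequired = true;
        }
    }

    return xSwitchRequired;
}

int main( void )
{
    printf( "switch: %d\n", ( int ) prvTimeSliceSwitchRequired() ); /* 1 */

    uxPreemptionDisable++; /* As vTaskPreemptionDisable() would do. */
    printf( "switch: %d\n", ( int ) prvTimeSliceSwitchRequired() ); /* 0 */

    return 0;
}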