diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 9b5e8ac43..dd9f59bf7 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -3238,6 +3238,9 @@ typedef struct xSTATIC_TCB #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) UBaseType_t xDummy25; #endif + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xDummy26; + #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; #endif diff --git a/include/task.h b/include/task.h index 410766890..b3040c643 100644 --- a/include/task.h +++ b/include/task.h @@ -213,7 +213,7 @@ typedef enum * \defgroup taskYIELD taskYIELD * \ingroup SchedulerControl */ -#define taskYIELD() portYIELD() +#define taskYIELD() portYIELD() /** * task. h @@ -227,19 +227,12 @@ typedef enum * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL * \ingroup SchedulerControl */ -#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL() portENTER_CRITICAL() #if ( configNUMBER_OF_CORES == 1 ) - #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() #else - #define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() + #define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() #endif -#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define taskLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) portLOCK_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock ) - #define taskLOCK_DATA_GROUP_FROM_ISR( pxISRSpinlock ) portLOCK_DATA_GROUP_FROM_ISR( pxISRSpinlock ) -#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - #define taskLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) taskENTER_CRITICAL() - #define taskLOCK_DATA_GROUP_FROM_ISR( pxISRSpinlock ) taskENTER_CRITICAL_FROM_ISR() -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /** * task. h @@ -253,19 +246,12 @@ typedef enum * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL * \ingroup SchedulerControl */ -#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() #if ( configNUMBER_OF_CORES == 1 ) - #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) + #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) #else - #define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) + #define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) #endif -#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define taskUNLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) portUNLOCK_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock ) - #define taskUNLOCK_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) portUNLOCK_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) -#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - #define taskUNLOCK_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) taskEXIT_CRITICAL() - #define taskUNLOCK_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) taskEXIT_CRITICAL_FROM_ISR( x ) -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /** * task. h @@ -3837,7 +3823,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * It should be used in the implementation of portENTER_CRITICAL if port is running a * multiple core FreeRTOS. 
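 * A port would typically map its critical section entry macro onto this
 * function, for example (illustrative only):
 *
 *     #define portENTER_CRITICAL()    vTaskEnterCritical()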
*/ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) || ( configNUMBER_OF_CORES > 1 ) ) void vTaskEnterCritical( void ); #endif @@ -3849,7 +3835,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * It should be used in the implementation of portEXIT_CRITICAL if port is running a * multiple core FreeRTOS. */ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) || ( configNUMBER_OF_CORES > 1 ) ) void vTaskExitCritical( void ); #endif @@ -3859,7 +3845,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * should be used in the implementation of portENTER_CRITICAL_FROM_ISR if port is * running a multiple core FreeRTOS. */ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) UBaseType_t vTaskEnterCriticalFromISR( void ); #endif @@ -3869,12 +3855,12 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC * should be used in the implementation of portEXIT_CRITICAL_FROM_ISR if port is * running a multiple core FreeRTOS. */ -#if !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); #endif /* - * Checks whether a yield is required after taskUNLOCK_DATA_GROUP() returns. + * Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns. * To be called while data group is locked. */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) diff --git a/tasks.c b/tasks.c index 6a393d027..80ad29497 100644 --- a/tasks.c +++ b/tasks.c @@ -136,11 +136,15 @@ /* * Macros used by vListTask to indicate which state a task is in. */ -#define tskRUNNING_CHAR ( 'X' ) -#define tskBLOCKED_CHAR ( 'B' ) -#define tskREADY_CHAR ( 'R' ) -#define tskDELETED_CHAR ( 'D' ) -#define tskSUSPENDED_CHAR ( 'S' ) +#define tskRUNNING_CHAR ( 'X' ) +#define tskBLOCKED_CHAR ( 'B' ) +#define tskREADY_CHAR ( 'R' ) +#define tskDELETED_CHAR ( 'D' ) +#define tskSUSPENDED_CHAR ( 'S' ) + +/* Bits used to record a deferred state change of a task. */ +#define tskDEFERRED_DELETION ( UBaseType_t ) ( 1U << 0U ) +#define tskDEFERRED_SUSPENSION ( UBaseType_t ) ( 1U << 1U ) /* * Some kernel aware debuggers require the data the debugger needs access to be @@ -346,7 +350,33 @@ /* Yields the given core. This must be called from a critical section and xCoreID * must be valid. This macro is not required in single core since there is only * one core to yield. */ - #define prvYieldCore( xCoreID ) \ + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + #define prvYieldCore( xCoreID ) \ + do { \ + if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \ + { \ + /* Pending a yield for this core since it is in the critical section. */ \ + xYieldPendings[ ( xCoreID ) ] = pdTRUE; \ + } \ + else \ + { \ + if( pxCurrentTCBs[ ( xCoreID ) ]->uxPreemptionDisable == 0U ) \ + { \ + /* Request other core to yield if it is not requested before. 
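+                         * A repeated portYIELD_CORE() request would be redundant  \
+                         * while an earlier one is still pending on that core.     \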
*/ \ + if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \ + { \ + portYIELD_CORE( xCoreID ); \ + pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD; \ + } \ + } \ + else \ + { \ + xYieldPendings[ ( xCoreID ) ] = pdTRUE; \ + } \ + } \ + } while( 0 ) + #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + #define prvYieldCore( xCoreID ) \ do { \ if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \ { \ @@ -363,9 +393,42 @@ } \ } \ } while( 0 ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ +/* Macros to take and release kernel spinlocks */ +#if ( configNUMBER_OF_CORES > 1 ) + #if ( portUSING_GRANULAR_LOCKS == 1 ) + #define kernelGET_TASK_LOCK( xCoreID ) portGET_SPINLOCK( xCoreID, &xTaskSpinlock ) + #define kernelRELEASE_TASK_LOCK( xCoreID ) portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock ) + #define kernelGET_ISR_LOCK( xCoreID ) portGET_SPINLOCK( xCoreID, &xISRSpinlock ) + #define kernelRELEASE_ISR_LOCK( xCoreID ) portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock ) + #else + #define kernelGET_TASK_LOCK( xCoreID ) portGET_TASK_LOCK( xCoreID ) + #define kernelRELEASE_TASK_LOCK( xCoreID ) portRELEASE_TASK_LOCK( xCoreID ) + #define kernelGET_ISR_LOCK( xCoreID ) portGET_ISR_LOCK( xCoreID ) + #define kernelRELEASE_ISR_LOCK( xCoreID ) portRELEASE_ISR_LOCK( xCoreID ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +/* + * Macros to mark the start and end of a critical code region. + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define kernelENTER_CRITICAL() vTaskEnterCritical() + #define kernelENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR() + #define kernelEXIT_CRITICAL() vTaskExitCritical() + #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) vTaskExitCriticalFromISR( uxSavedInterruptStatus ) +#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + #define kernelENTER_CRITICAL() taskENTER_CRITICAL() + #define kernelENTER_CRITICAL_FROM_ISR() taskENTER_CRITICAL_FROM_ISR() + #define kernelEXIT_CRITICAL() taskEXIT_CRITICAL() + #define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/*-----------------------------------------------------------*/ + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -394,7 +457,11 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */ + UBaseType_t uxPreemptionDisable; /**< Used to prevent the task from being preempted. */ + #endif + + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + UBaseType_t uxDeferredStateChange; /**< Used to indicate if the task's state change is deferred. 
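+                                                      * Holds the tskDEFERRED_DELETION and tskDEFERRED_SUSPENSION
+                                                      * bits; the deferred change is applied in vTaskPreemptionEnable().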
*/ #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) @@ -504,7 +571,7 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINI PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; -PRIVILEGED_DATA volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; +PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ @@ -538,9 +605,10 @@ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ conf #endif +/* Kernel spinlock variables when using granular locking */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_KERNEL_TASK_SPINLOCK_STATIC; - PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_KERNEL_ISR_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ @@ -552,14 +620,14 @@ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ conf */ static BaseType_t prvCreateIdleTasks( void ); -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) /* * Checks to see if another task moved the current task out of the ready * list while it was waiting to enter a critical section and yields, if so. */ static void prvCheckForRunStateChange( void ); -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ #if ( configNUMBER_OF_CORES > 1 ) @@ -823,10 +891,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) static void prvCheckForRunStateChange( void ) { - UBaseType_t uxPrevCriticalNesting; const TCB_t * pxThisTCB; BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); @@ -839,6 +906,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) { + UBaseType_t uxPrevCriticalNesting; + /* We are only here if we just entered a critical section * or if we just suspended the scheduler, and another task * has requested that we yield. 
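         * The code below drops the kernel locks and the critical nesting
         * count so that this core can be switched away from, then
         * re-acquires them once the run state has changed.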
@@ -852,7 +921,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( uxPrevCriticalNesting > 0U ) { portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U ); - portRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_ISR_LOCK( xCoreID ); } else { @@ -861,7 +930,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; mtCOVERAGE_TEST_MARKER(); } - portRELEASE_TASK_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); portMEMORY_BARRIER(); configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ); @@ -876,18 +945,18 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; portDISABLE_INTERRUPTS(); xCoreID = ( BaseType_t ) portGET_CORE_ID(); - portGET_TASK_LOCK( xCoreID ); - portGET_ISR_LOCK( xCoreID ); + kernelGET_TASK_LOCK( xCoreID ); + kernelGET_ISR_LOCK( xCoreID ); portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting ); if( uxPrevCriticalNesting == 0U ) { - portRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_ISR_LOCK( xCoreID ); } } } -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -946,7 +1015,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) + if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) #endif { xLowestPriorityToPreempt = xCurrentCoreTaskPriority; @@ -1252,7 +1321,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; ( xYieldPendings[ uxCore ] == pdFALSE ) ) { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == 0U ) + if( pxCurrentTCBs[ uxCore ]->uxPreemptionDisable == 0U ) #endif { xLowestPriority = xTaskPriority; @@ -2047,7 +2116,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { uxCurrentNumberOfTasks = ( UBaseType_t ) ( uxCurrentNumberOfTasks + 1U ); @@ -2105,7 +2174,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, portSETUP_TCB( pxNewTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( xSchedulerRunning != pdFALSE ) { @@ -2125,7 +2194,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { uxCurrentNumberOfTasks++; @@ -2174,7 +2243,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ @@ -2221,144 +2290,173 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, BaseType_t xDeleteTCBInIdleTask = pdFALSE; BaseType_t xTaskIsRunningOrYielding; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xDeferredDeletion = pdFALSE; + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + traceENTER_vTaskDelete( xTaskToDelete ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the calling task that is * being deleted. */ pxTCB = prvGetTCBFromHandle( xTaskToDelete ); configASSERT( pxTCB != NULL ); - /* Remove task from the ready/delayed list. 
*/ - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) - { - taskRESET_READY_PRIORITY( pxTCB->uxPriority ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - /* Is the task waiting on an event also? */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Increment the uxTaskNumber also so kernel aware debuggers can - * detect that the task lists need re-generating. This is done before - * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will - * not return. */ - uxTaskNumber++; - - /* Use temp variable as distinct sequence points for reading volatile - * variables prior to a logical operator to ensure compliance with - * MISRA C 2012 Rule 13.5. */ - xTaskIsRunningOrYielding = taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ); - - /* If the task is running (or yielding), we must add it to the - * termination list so that an idle task can delete it when it is - * no longer running. */ - if( ( xSchedulerRunning != pdFALSE ) && ( xTaskIsRunningOrYielding != pdFALSE ) ) - { - /* A running task or a task which is scheduled to yield is being - * deleted. This cannot complete when the task is still running - * on a core, as a context switch to another task is required. - * Place the task in the termination list. The idle task will check - * the termination list and free up any memory allocated by the - * scheduler for the TCB and stack of the deleted task. */ - vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) ); - - /* Increment the ucTasksDeleted variable so the idle task knows - * there is a task that has been deleted and that it should therefore - * check the xTasksWaitingTermination list. */ - ++uxDeletedTasksWaitingCleanUp; - - /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as - * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */ - traceTASK_DELETE( pxTCB ); - - /* Delete the task TCB in idle task. */ - xDeleteTCBInIdleTask = pdTRUE; - - /* The pre-delete hook is primarily for the Windows simulator, - * in which Windows specific clean up operations are performed, - * after which it is not possible to yield away from this task - - * hence xYieldPending is used to latch that a context switch is - * required. */ - #if ( configNUMBER_OF_CORES == 1 ) - portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) ); - #else - portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) ); - #endif - - /* In the case of SMP, it is possible that the task being deleted - * is running on another core. We must evict the task before - * exiting the critical section to ensure that the task cannot - * take an action which puts it back on ready/state/event list, - * thereby nullifying the delete operation. Once evicted, the - * task won't be scheduled ever as it will no longer be on the - * ready list. */ - #if ( configNUMBER_OF_CORES > 1 ) + /* If the task has disabled preemption, we need to defer the deletion until the + * task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). 
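+                 * For example (hypothetical application code):
+                 *
+                 *     vTaskPreemptionDisable( NULL );
+                 *     ...                              any vTaskDelete() call targeting this task is deferred
+                 *     vTaskPreemptionEnable( NULL );   the deferred deletion takes effect here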
*/ + if( pxTCB->uxPreemptionDisable > 0U ) { - if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) - { - if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() ) - { - configASSERT( uxSchedulerSuspended == 0 ); - taskYIELD_WITHIN_API(); - } - else - { - prvYieldCore( pxTCB->xTaskRunState ); - } - } - } - #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ - } - else - { - --uxCurrentNumberOfTasks; - traceTASK_DELETE( pxTCB ); - - /* Reset the next expected unblock time in case it referred to - * the task that has just been deleted. */ - prvResetNextTaskUnblockTime(); - } - } - taskEXIT_CRITICAL(); - - /* If the task is not deleting itself, call prvDeleteTCB from outside of - * critical section. If a task deletes itself, prvDeleteTCB is called - * from prvCheckTasksWaitingTermination which is called from Idle task. */ - if( xDeleteTCBInIdleTask != pdTRUE ) - { - prvDeleteTCB( pxTCB ); - } - - /* Force a reschedule if it is the currently running task that has just - * been deleted. */ - #if ( configNUMBER_OF_CORES == 1 ) - { - if( xSchedulerRunning != pdFALSE ) - { - if( pxTCB == pxCurrentTCB ) - { - configASSERT( uxSchedulerSuspended == 0 ); - taskYIELD_WITHIN_API(); + pxTCB->uxDeferredStateChange |= tskDEFERRED_DELETION; + xDeferredDeletion = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } + #endif /* configUSE_TASK_PREEMPTION_DISABLE */ + + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( xDeferredDeletion == pdFALSE ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + { + /* Remove task from the ready/delayed list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Increment the uxTaskNumber also so kernel aware debuggers can + * detect that the task lists need re-generating. This is done before + * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will + * not return. */ + uxTaskNumber++; + + /* Use temp variable as distinct sequence points for reading volatile + * variables prior to a logical operator to ensure compliance with + * MISRA C 2012 Rule 13.5. */ + xTaskIsRunningOrYielding = taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ); + + /* If the task is running (or yielding), we must add it to the + * termination list so that an idle task can delete it when it is + * no longer running. */ + if( ( xSchedulerRunning != pdFALSE ) && ( xTaskIsRunningOrYielding != pdFALSE ) ) + { + /* A running task or a task which is scheduled to yield is being + * deleted. This cannot complete when the task is still running + * on a core, as a context switch to another task is required. + * Place the task in the termination list. The idle task will check + * the termination list and free up any memory allocated by the + * scheduler for the TCB and stack of the deleted task. */ + vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) ); + + /* Increment the ucTasksDeleted variable so the idle task knows + * there is a task that has been deleted and that it should therefore + * check the xTasksWaitingTermination list. */ + ++uxDeletedTasksWaitingCleanUp; + + /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as + * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. 
*/ + traceTASK_DELETE( pxTCB ); + + /* Delete the task TCB in idle task. */ + xDeleteTCBInIdleTask = pdTRUE; + + /* The pre-delete hook is primarily for the Windows simulator, + * in which Windows specific clean up operations are performed, + * after which it is not possible to yield away from this task - + * hence xYieldPending is used to latch that a context switch is + * required. */ + #if ( configNUMBER_OF_CORES == 1 ) + portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) ); + #else + portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) ); + #endif + + /* In the case of SMP, it is possible that the task being deleted + * is running on another core. We must evict the task before + * exiting the critical section to ensure that the task cannot + * take an action which puts it back on ready/state/event list, + * thereby nullifying the delete operation. Once evicted, the + * task won't be scheduled ever as it will no longer be on the + * ready list. */ + #if ( configNUMBER_OF_CORES > 1 ) + { + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() ) + { + configASSERT( uxSchedulerSuspended == 0 ); + taskYIELD_WITHIN_API(); + } + else + { + prvYieldCore( pxTCB->xTaskRunState ); + } + } + } + #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + } + else + { + --uxCurrentNumberOfTasks; + traceTASK_DELETE( pxTCB ); + + /* Reset the next expected unblock time in case it referred to + * the task that has just been deleted. */ + prvResetNextTaskUnblockTime(); + } } } - #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + kernelEXIT_CRITICAL(); + + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( xDeferredDeletion == pdFALSE ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + { + /* If the task is not deleting itself, call prvDeleteTCB from outside of + * critical section. If a task deletes itself, prvDeleteTCB is called + * from prvCheckTasksWaitingTermination which is called from Idle task. */ + if( xDeleteTCBInIdleTask != pdTRUE ) + { + prvDeleteTCB( pxTCB ); + } + + /* Force a reschedule if it is the currently running task that has just + * been deleted. */ + #if ( configNUMBER_OF_CORES == 1 ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( pxTCB == pxCurrentTCB ) + { + configASSERT( uxSchedulerSuspended == 0 ); + taskYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + } traceRETURN_vTaskDelete(); } @@ -2532,14 +2630,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, else #endif { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ); pxDelayedList = pxDelayedTaskList; pxOverflowedDelayedList = pxOverflowDelayedTaskList; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); if( pxEventList == &xPendingReadyList ) { @@ -2649,7 +2747,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskPriorityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. 
*/ @@ -2658,7 +2756,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_uxTaskPriorityGet( uxReturn ); @@ -2699,7 +2797,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ @@ -2708,7 +2806,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_uxTaskPriorityGetFromISR( uxReturn ); @@ -2727,7 +2825,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskBasePriorityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the base priority of the task * that called uxTaskBasePriorityGet() that is being queried. */ @@ -2736,7 +2834,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_uxTaskBasePriorityGet( uxReturn ); @@ -2777,7 +2875,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the base priority of the calling * task that is being queried. */ @@ -2786,7 +2884,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_uxTaskBasePriorityGetFromISR( uxReturn ); @@ -2823,7 +2921,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the priority of the calling * task that is being changed. */ @@ -2885,7 +2983,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, * there may now be another task of higher priority that * is ready to execute. 
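             * If the task currently has preemption disabled, the yield is
             * skipped here; vTaskPreemptionEnable() will trigger the
             * reschedule once its disable count returns to zero.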
*/ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxTCB->xPreemptionDisable == 0U ) + if( pxTCB->uxPreemptionDisable == 0U ) #endif { xYieldRequired = pdTRUE; @@ -3003,7 +3101,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, ( void ) uxPriorityUsedOnEntry; } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPrioritySet(); } @@ -3020,7 +3118,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); @@ -3061,7 +3159,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskCoreAffinitySet(); } @@ -3076,14 +3174,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinityGet( xTask ); - portBASE_TYPE_ENTER_CRITICAL(); + kernelENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } - portBASE_TYPE_EXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask ); @@ -3101,14 +3199,21 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskPreemptionDisable( xTask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { - pxTCB = prvGetTCBFromHandle( xTask ); - configASSERT( pxTCB != NULL ); + if( xSchedulerRunning != pdFALSE ) + { + pxTCB = prvGetTCBFromHandle( xTask ); + configASSERT( pxTCB != NULL ); - pxTCB->xPreemptionDisable++; + pxTCB->uxPreemptionDisable++; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionDisable(); } @@ -3121,28 +3226,63 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, void vTaskPreemptionEnable( const TaskHandle_t xTask ) { TCB_t * pxTCB; - BaseType_t xCoreID; traceENTER_vTaskPreemptionEnable( xTask ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { - pxTCB = prvGetTCBFromHandle( xTask ); - configASSERT( pxTCB != NULL ); - configASSERT( pxTCB->xPreemptionDisable > 0U ); - - pxTCB->xPreemptionDisable--; - if( xSchedulerRunning != pdFALSE ) { - if( ( pxTCB->xPreemptionDisable == 0U ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) ) + pxTCB = prvGetTCBFromHandle( xTask ); + configASSERT( pxTCB != NULL ); + configASSERT( pxTCB->uxPreemptionDisable > 0U ); + + pxTCB->uxPreemptionDisable--; + + if( pxTCB->uxPreemptionDisable == 0U ) { - xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; - prvYieldCore( xCoreID ); + /* Process deferred state changes which were inflicted while + * preemption was disabled. 
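+                     * Deletion takes precedence over suspension, and the
+                     * deferred flags are cleared once the change is applied.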
*/ + if( pxTCB->uxDeferredStateChange != 0U ) + { + if( pxTCB->uxDeferredStateChange & tskDEFERRED_DELETION ) + { + vTaskDelete( xTask ); + } + else if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION ) + { + vTaskSuspend( xTask ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxTCB->uxDeferredStateChange = 0U; + } + else + { + if( ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionEnable(); } @@ -3156,82 +3296,110 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { TCB_t * pxTCB; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xDeferredSuspension = pdFALSE; + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + traceENTER_vTaskSuspend( xTaskToSuspend ); - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { /* If null is passed in here then it is the running task that is * being suspended. */ pxTCB = prvGetTCBFromHandle( xTaskToSuspend ); configASSERT( pxTCB != NULL ); - traceTASK_SUSPEND( pxTCB ); + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - /* Remove task from the ready/delayed list and place in the - * suspended list. */ - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) - { - taskRESET_READY_PRIORITY( pxTCB->uxPriority ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Is the task waiting on an event also? */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ); - - #if ( configUSE_TASK_NOTIFICATIONS == 1 ) - { - BaseType_t x; - - for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + /* If the task has disabled preemption, we need to defer the suspension until the + * task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */ + if( pxTCB->uxPreemptionDisable > 0U ) { - if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) + pxTCB->uxDeferredStateChange |= tskDEFERRED_SUSPENSION; + xDeferredSuspension = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* configUSE_TASK_PREEMPTION_DISABLE */ + + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( xDeferredSuspension == pdFALSE ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + { + traceTASK_SUSPEND( pxTCB ); + + /* Remove task from the ready/delayed list and place in the + * suspended list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ); + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + BaseType_t x; + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) { - /* The task was blocked to wait for a notification, but is - * now suspended, so no notification was received. 
*/ - pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION; + if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) + { + /* The task was blocked to wait for a notification, but is + * now suspended, so no notification was received. */ + pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION; + } } } - } - #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ - /* In the case of SMP, it is possible that the task being suspended - * is running on another core. We must evict the task before - * exiting the critical section to ensure that the task cannot - * take an action which puts it back on ready/state/event list, - * thereby nullifying the suspend operation. Once evicted, the - * task won't be scheduled before it is resumed as it will no longer - * be on the ready list. */ - #if ( configNUMBER_OF_CORES > 1 ) - { - if( xSchedulerRunning != pdFALSE ) + /* In the case of SMP, it is possible that the task being suspended + * is running on another core. We must evict the task before + * exiting the critical section to ensure that the task cannot + * take an action which puts it back on ready/state/event list, + * thereby nullifying the suspend operation. Once evicted, the + * task won't be scheduled before it is resumed as it will no longer + * be on the ready list. */ + #if ( configNUMBER_OF_CORES > 1 ) { - /* Reset the next expected unblock time in case it referred to the - * task that is now in the Suspended state. */ - prvResetNextTaskUnblockTime(); - - if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + if( xSchedulerRunning != pdFALSE ) { - if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() ) + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. */ + prvResetNextTaskUnblockTime(); + + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { - /* The current task has just been suspended. */ - configASSERT( uxSchedulerSuspended == 0 ); - vTaskYieldWithinAPI(); + if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( pxTCB->xTaskRunState ); + } } else { - prvYieldCore( pxTCB->xTaskRunState ); + mtCOVERAGE_TEST_MARKER(); } } else @@ -3239,73 +3407,74 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } + #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + } + } + kernelEXIT_CRITICAL(); + + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( xDeferredSuspension == pdFALSE ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + { + #if ( configNUMBER_OF_CORES == 1 ) + { + UBaseType_t uxCurrentListLength; + + if( xSchedulerRunning != pdFALSE ) + { + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. */ + kernelENTER_CRITICAL(); + { + prvResetNextTaskUnblockTime(); + } + kernelEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( pxTCB == pxCurrentTCB ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + /* The scheduler is not running, but the task that was pointed + * to by pxCurrentTCB has just been suspended and pxCurrentTCB + * must be adjusted to point to a different task. 
*/ + + /* Use a temp variable as a distinct sequence point for reading + * volatile variables prior to a comparison to ensure compliance + * with MISRA C 2012 Rule 13.2. */ + uxCurrentListLength = listCURRENT_LIST_LENGTH( &xSuspendedTaskList ); + + if( uxCurrentListLength == uxCurrentNumberOfTasks ) + { + /* No other tasks are ready, so set pxCurrentTCB back to + * NULL so when the next task is created pxCurrentTCB will + * be set to point to it no matter what its relative priority + * is. */ + pxCurrentTCB = NULL; + } + else + { + vTaskSwitchContext(); + } + } + } else { mtCOVERAGE_TEST_MARKER(); } } - #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - taskEXIT_CRITICAL(); - - #if ( configNUMBER_OF_CORES == 1 ) - { - UBaseType_t uxCurrentListLength; - - if( xSchedulerRunning != pdFALSE ) - { - /* Reset the next expected unblock time in case it referred to the - * task that is now in the Suspended state. */ - taskENTER_CRITICAL(); - { - prvResetNextTaskUnblockTime(); - } - taskEXIT_CRITICAL(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - if( pxTCB == pxCurrentTCB ) - { - if( xSchedulerRunning != pdFALSE ) - { - /* The current task has just been suspended. */ - configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); - } - else - { - /* The scheduler is not running, but the task that was pointed - * to by pxCurrentTCB has just been suspended and pxCurrentTCB - * must be adjusted to point to a different task. */ - - /* Use a temp variable as a distinct sequence point for reading - * volatile variables prior to a comparison to ensure compliance - * with MISRA C 2012 Rule 13.2. */ - uxCurrentListLength = listCURRENT_LIST_LENGTH( &xSuspendedTaskList ); - - if( uxCurrentListLength == uxCurrentNumberOfTasks ) - { - /* No other tasks are ready, so set pxCurrentTCB back to - * NULL so when the next task is created pxCurrentTCB will - * be set to point to it no matter what its relative priority - * is. */ - pxCurrentTCB = NULL; - } - else - { - vTaskSwitchContext(); - } - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ traceRETURN_vTaskSuspend(); } @@ -3389,6 +3558,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { TCB_t * const pxTCB = xTaskToResume; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xTaskResumed = pdFALSE; + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + traceENTER_vTaskResume( xTaskToResume ); /* It does not make sense to resume the calling task. */ @@ -3409,28 +3582,48 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, if( pxTCB != NULL ) #endif { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { - if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) - { - traceTASK_RESUME( pxTCB ); + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - /* The ready list can be accessed even if the scheduler is - * suspended because this is inside a critical section. */ - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxTCB ); + /* If the task being resumed is in a deferred suspension state, + * we simply clear the deferred suspension state and return. 
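+                     * The suspension was never carried out, so the task is
+                     * still on a ready list and nothing more needs to be done.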
*/ + if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION ) + { + pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION; + xTaskResumed = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* configUSE_TASK_PREEMPTION_DISABLE */ - /* This yield may not cause the task just resumed to run, - * but will leave the lists in the correct state for the - * next yield. */ - taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB ); - } - else + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( xTaskResumed == pdFALSE ) + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME( pxTCB ); + + /* The ready list can be accessed even if the scheduler is + * suspended because this is inside a critical section. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* This yield may not cause the task just resumed to run, + * but will leave the lists in the correct state for the + * next yield. */ + taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } else { @@ -3477,7 +3670,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3533,7 +3726,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_xTaskResumeFromISR( xYieldRequired ); @@ -3910,45 +4103,39 @@ void vTaskSuspendAll( void ) * do not otherwise exhibit real time behaviour. */ portSOFTWARE_BARRIER(); - #if ( portUSING_GRANULAR_LOCKS == 1 ) + kernelGET_TASK_LOCK( xCoreID ); + + /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The + * purpose is to prevent altering the variable when fromISR APIs are readying + * it. */ + if( ( uxSchedulerSuspended == 0U ) + + /* If the scheduler is being suspended after taking a granular lock (in any data group) + * then we do not check for the run state of a task and we let it run through until + * the granular lock is released. */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + && ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + ) { - portGET_SPINLOCK( &xTaskSpinlock ); - portGET_SPINLOCK( &xISRSpinlock ); - - /* Increment xPreemptionDisable to prevent preemption and also - * track whether to yield in xTaskResumeAll(). */ - pxCurrentTCBs[ portGET_CORE_ID() ]->xPreemptionDisable++; - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment - * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; - - portRELEASE_SPINLOCK( &xISRSpinlock ); + prvCheckForRunStateChange(); } - #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + else { - portGET_TASK_LOCK(); - - /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The - * purpose is to prevent altering the variable when fromISR APIs are readying - * it. 
*/ - if( uxSchedulerSuspended == 0U ) - { - prvCheckForRunStateChange(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - portGET_ISR_LOCK(); - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment - * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; - portRELEASE_ISR_LOCK(); + mtCOVERAGE_TEST_MARKER(); } - #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + + /* Query the coreID again as prvCheckForRunStateChange may have + * caused the task to get scheduled on a different core. The correct + * task lock for the core is acquired in prvCheckForRunStateChange. */ + xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + kernelGET_ISR_LOCK( xCoreID ); + + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment + * is used to allow calls to vTaskSuspendAll() to nest. */ + ++uxSchedulerSuspended; + kernelRELEASE_ISR_LOCK( xCoreID ); portCLEAR_INTERRUPT_MASK( ulState ); } @@ -4044,7 +4231,7 @@ BaseType_t xTaskResumeAll( void ) * removed task will have been added to the xPendingReadyList. Once the * scheduler has been resumed it is safe to move all the pending ready * tasks from this list into their appropriate ready list. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); @@ -4053,24 +4240,9 @@ BaseType_t xTaskResumeAll( void ) configASSERT( uxSchedulerSuspended != 0U ); uxSchedulerSuspended = ( UBaseType_t ) ( uxSchedulerSuspended - 1U ); - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - { - configASSERT( pxCurrentTCBs[ portGET_CORE_ID() ]->xPreemptionDisable > 0 ); - - /* Decrement xPreemptionDisable. If 0, it means this we are not - * in a nested suspension scenario, thus this function and yield - * if necessary. */ - pxCurrentTCBs[ portGET_CORE_ID() ]->xPreemptionDisable--; - - /* Release the kernel's task spinlock that was held throughout - * the kernel suspension. */ - portRELEASE_SPINLOCK( &xTaskSpinlock ); - } - #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - { - portRELEASE_TASK_LOCK(); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #if ( configNUMBER_OF_CORES > 1 ) + kernelRELEASE_TASK_LOCK( xCoreID ); + #endif if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { @@ -4160,7 +4332,11 @@ BaseType_t xTaskResumeAll( void ) } } - if( xYieldPendings[ xCoreID ] != pdFALSE ) + if( xYieldPendings[ xCoreID ] != pdFALSE + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) + #endif + ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -4185,7 +4361,7 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } traceRETURN_xTaskResumeAll( xAlreadyYielded ); @@ -4614,11 +4790,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) configASSERT( xTicksToJump != ( TickType_t ) 0 ); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xPendedTicks++; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); xTicksToJump--; } else @@ -4650,11 +4826,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) vTaskSuspendAll(); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. 
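     * The pended ticks are consumed when xTaskResumeAll() is called below.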
*/ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xPendedTicks += xTicksToCatchUp; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); xYieldOccurred = xTaskResumeAll(); traceRETURN_xTaskCatchUpTicks( xYieldOccurred ); @@ -4691,7 +4867,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * the event list too. Interrupts can touch the event list item, * even though the scheduler is suspended, so a critical section * is used. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { @@ -4707,7 +4883,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); /* Place the unblocked task into the appropriate ready list. */ prvAddTaskToReadyList( pxTCB ); @@ -4734,11 +4910,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { prvYieldForTask( pxTCB ); } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } @@ -4767,6 +4943,10 @@ BaseType_t xTaskIncrementTick( void ) traceENTER_xTaskIncrementTick(); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + /* Called by the portable layer each time a tick interrupt occurs. * Increments the tick then checks to see if the new tick value will cause any * tasks to be unblocked. */ @@ -4867,7 +5047,8 @@ BaseType_t xTaskIncrementTick( void ) /* Preemption is on, but a context switch should * only be performed if the unblocked task's * priority is higher than the currently executing - * task. + * task and the currently executing task does not + * have preemption disabled. 
* The case of equal priority tasks sharing * processing time (which happens when both * preemption and time slicing are on) is @@ -4899,14 +5080,32 @@ BaseType_t xTaskIncrementTick( void ) { #if ( configNUMBER_OF_CORES == 1 ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCB->uxPreemptionDisable != 0U ) + { + mtCOVERAGE_TEST_MARKER(); + } + else + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { @@ -4965,7 +5164,7 @@ BaseType_t xTaskIncrementTick( void ) for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) + if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) #endif { if( xYieldPendings[ xCoreID ] != pdFALSE ) @@ -5003,6 +5202,10 @@ BaseType_t xTaskIncrementTick( void ) #endif } + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceRETURN_xTaskIncrementTick( xSwitchRequired ); return xSwitchRequired; @@ -5031,11 +5234,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xTCB->pxTaskTag = pxHookFunction; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_vTaskSetApplicationTaskTag(); } @@ -5058,11 +5261,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + kernelENTER_CRITICAL(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL(); + kernelEXIT_CRITICAL(); traceRETURN_xTaskGetApplicationTaskTag( xReturn ); @@ -5091,11 +5294,11 @@ BaseType_t xTaskIncrementTick( void ) /* MISRA Ref 4.7.1 [Return value shall be checked] */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* coverity[misra_c_2012_directive_4_7_violation] */ - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn ); @@ -5240,18 +5443,25 @@ BaseType_t xTaskIncrementTick( void ) * and move on if another core suspended the scheduler. We should only * do that if the current core has suspended the scheduler. */ - portGET_TASK_LOCK( xCoreID ); /* Must always acquire the task lock first. */ - portGET_ISR_LOCK( xCoreID ); + kernelGET_TASK_LOCK( xCoreID ); /* Must always acquire the task lock first. 
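+                                         * Taking both locks in the same task-then-ISR
+                                         * order everywhere prevents deadlock between
+                                         * cores acquiring them concurrently.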
*/
+        kernelGET_ISR_LOCK( xCoreID );
 
     {
         /* vTaskSwitchContext() must never be called from within a critical section.
          * This is not necessarily true for single core FreeRTOS, but it is for this
          * SMP port. */
         configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 );
 
+        /* vTaskSwitchContext() must not be called with a task that has
+         * preemption disabled. */
+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+            configASSERT( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U );
+        #endif
+
         if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
         {
-            /* The scheduler is currently suspended - do not allow a context
-             * switch. */
+            /* The scheduler is currently suspended - do not allow a context
+             * switch. Latch the yield in xYieldPendings so that it is acted
+             * upon when the scheduler is resumed. */
             xYieldPendings[ xCoreID ] = pdTRUE;
         }
         else
@@ -5322,8 +5532,8 @@ BaseType_t xTaskIncrementTick( void )
             #endif
         }
     }
-    portRELEASE_ISR_LOCK( xCoreID );
-    portRELEASE_TASK_LOCK( xCoreID );
+    kernelRELEASE_ISR_LOCK( xCoreID );
+    kernelRELEASE_TASK_LOCK( xCoreID );
 
     traceRETURN_vTaskSwitchContext();
 }
@@ -5337,8 +5547,15 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
 
     configASSERT( pxEventList );
 
-    /* THIS FUNCTION MUST BE CALLED WITH THE
-     * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* Suspend the kernel data group as we are about to access its members */
+        vTaskSuspendAll();
+    #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
+        /* THIS FUNCTION MUST BE CALLED WITH THE
+         * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
+        configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
     /* Place the event list item of the TCB in the appropriate event list.
      * This is placed in the list in priority order so the highest priority task
      * is the first to be woken by the event. */
     vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
 
     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* We are done accessing the kernel data group. Resume it. */
+        ( void ) xTaskResumeAll();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceRETURN_vTaskPlaceOnEventList();
 }
 /*-----------------------------------------------------------*/
 
@@ -5367,9 +5589,16 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
     configASSERT( pxEventList );
 
-    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
-     * the event groups implementation. */
-    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
+
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* Suspend the kernel data group as we are about to access its members */
+        vTaskSuspendAll();
+    #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
+        /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
+         * the event groups implementation. */
+        configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
     /* Store the item value in the event list item. It is safe to access the
@@ -5367,9 +5589,16 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
     configASSERT( pxEventList );
 
-    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
-     * the event groups implementation. */
-    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
+
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* Suspend the kernel data group as we are about to access its members */
+        vTaskSuspendAll();
+    #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
+        /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
+         * the event groups implementation. */
+        configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
     /* Store the item value in the event list item. It is safe to access the
      * event list item here as interrupts won't access the event list item of a
@@ -5385,6 +5614,11 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* We are done accessing the kernel data group. Resume it. */
+        ( void ) xTaskResumeAll();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceRETURN_vTaskPlaceOnUnorderedEventList();
 }
 /*-----------------------------------------------------------*/
@@ -5399,11 +5633,17 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
     configASSERT( pxEventList );
 
-    /* This function should not be called by application code hence the
-     * 'Restricted' in its name. It is not part of the public API. It is
-     * designed for use by kernel code, and has special calling requirements -
-     * it should be called with the scheduler suspended. */
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* Suspend the kernel data group as we are about to access its members */
+        vTaskSuspendAll();
+    #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+        /* This function should not be called by application code hence the
+         * 'Restricted' in its name. It is not part of the public API. It is
+         * designed for use by kernel code, and has special calling requirements -
+         * it should be called with the scheduler suspended. */
+        configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
     /* Place the event list item of the TCB in the appropriate event list.
      * In this case it is assumed that this is the only task that is going to
@@ -5422,6 +5662,11 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
     traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
 
     prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* We are done accessing the kernel data group. Resume it. */
+        ( void ) xTaskResumeAll();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceRETURN_vTaskPlaceOnEventListRestricted();
 }
 
@@ -5435,8 +5680,30 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
 
     traceENTER_xTaskRemoveFromEventList( pxEventList );
 
-    /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
-     * called from a critical section within an ISR. */
+    #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
+
+        /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
+         * called from a critical section within an ISR. */
+    #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
+        /* Lock the kernel data group as we are about to access its members */
+        UBaseType_t uxSavedInterruptStatus;
+
+        if( portCHECK_IF_IN_ISR() == pdTRUE )
+        {
+            uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
+        }
+        else
+        {
+            uxSavedInterruptStatus = 0;
+            kernelENTER_CRITICAL();
+        }
+
+        /* Before taking the kernel lock, another task/ISR could have already
+         * emptied the pxEventList. So we insert a check here to see if
+         * pxEventList is empty before attempting to remove an item from it. */
+        if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
+        {
+    #endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
 
     /* The event list is sorted in priority order, so the first in the list can
      * be removed as it is known to be the highest priority. Remove the TCB from
@@ -5516,6 +5783,26 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
     }
     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+}
+else
+{
+    /* The pxEventList was emptied before we entered the critical
+     * section. Nothing to do except return pdFALSE. */
+    xReturn = pdFALSE;
+}
+
+/* We are done accessing the kernel data group. Unlock it. */
+if( portCHECK_IF_IN_ISR() == pdTRUE )
+{
+    kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+}
+else
+{
+    kernelEXIT_CRITICAL();
+}
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceRETURN_xTaskRemoveFromEventList( xReturn );
 
     return xReturn;
 }
@@ -5579,13 +5866,13 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
     {
         #if ( configUSE_PREEMPTION == 1 )
         {
-            taskENTER_CRITICAL();
+            kernelENTER_CRITICAL();
             {
                 prvYieldForTask( pxUnblockedTCB );
             }
-            taskEXIT_CRITICAL();
+            kernelEXIT_CRITICAL();
         }
-        #endif
+        #endif /* if ( configUSE_PREEMPTION == 1 ) */
     }
     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 
@@ -5598,12 +5885,12 @@ void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
     traceENTER_vTaskSetTimeOutState( pxTimeOut );
 
     configASSERT( pxTimeOut );
-    taskENTER_CRITICAL();
+    kernelENTER_CRITICAL();
     {
         pxTimeOut->xOverflowCount = xNumOfOverflows;
         pxTimeOut->xTimeOnEntering = xTickCount;
     }
-    taskEXIT_CRITICAL();
+    kernelEXIT_CRITICAL();
 
     traceRETURN_vTaskSetTimeOutState();
 }
@@ -5613,10 +5900,20 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
 {
     traceENTER_vTaskInternalSetTimeOutState( pxTimeOut );
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* Lock the kernel data group as we are about to access its members */
+        kernelENTER_CRITICAL();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     /* For internal use only as it does not use a critical section. */
     pxTimeOut->xOverflowCount = xNumOfOverflows;
     pxTimeOut->xTimeOnEntering = xTickCount;
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* We are done accessing the kernel data group. Unlock it. */
+        kernelEXIT_CRITICAL();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceRETURN_vTaskInternalSetTimeOutState();
 }
 /*-----------------------------------------------------------*/
@@ -5631,7 +5928,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
     configASSERT( pxTimeOut );
     configASSERT( pxTicksToWait );
 
-    taskENTER_CRITICAL();
+    kernelENTER_CRITICAL();
     {
         /* Minor optimisation. The tick count cannot change in this block. */
         const TickType_t xConstTickCount = xTickCount;
@@ -5682,7 +5979,7 @@
             xReturn = pdTRUE;
         }
     }
-    taskEXIT_CRITICAL();
+    kernelEXIT_CRITICAL();
 
     traceRETURN_xTaskCheckForTimeOut( xReturn );
 
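xTaskRemoveFromEventList above now tolerates being entered without a pre-held lock in granular-locks builds, which is why it re-tests listLIST_IS_EMPTY() after acquiring the kernel lock. A minimal, self-contained model of that check-under-the-lock pattern (all names hypothetical; an atomic flag stands in for the kernel spinlock, and a counter stands in for the event list):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag xLock = ATOMIC_FLAG_INIT; /* kernelENTER/EXIT_CRITICAL() stand-in. */
    static int xItemCount = 0;                   /* Stand-in for the event list. */

    static bool xRemoveHeadIfAny( void )
    {
        bool xRemoved = false;

        while( atomic_flag_test_and_set( &xLock ) )
        {
            /* Spin: another core holds the lock. */
        }

        /* Re-test under the lock: the list may have been emptied by another
         * task or ISR while this core was waiting for the lock. */
        if( xItemCount > 0 )
        {
            xItemCount--;
            xRemoved = true;
        }

        atomic_flag_clear( &xLock );

        return xRemoved;
    }

    int main( void )
    {
        printf( "removed: %d\n", ( int ) xRemoveHeadIfAny() ); /* 0 - list was empty. */
        xItemCount = 1;
        printf( "removed: %d\n", ( int ) xRemoveHeadIfAny() ); /* 1 - item removed.   */
        return 0;
    }
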
@@ -5982,7 +6279,12 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 
     traceENTER_eTaskConfirmSleepModeStatus();
 
-    /* This function must be called from a critical section. */
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* Lock the kernel data group as we are about to access its members */
+        kernelENTER_CRITICAL();
+    #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+        /* This function must be called from a critical section. */
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
     if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0U )
     {
@@ -6016,6 +6318,11 @@
         mtCOVERAGE_TEST_MARKER();
     }
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* We are done accessing the kernel data group. Unlock it. */
+        kernelEXIT_CRITICAL();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceRETURN_eTaskConfirmSleepModeStatus( eReturn );
 
     return eReturn;
@@ -6147,7 +6454,7 @@ static void prvCheckTasksWaitingTermination( void )
     {
         #if ( configNUMBER_OF_CORES == 1 )
         {
-            taskENTER_CRITICAL();
+            kernelENTER_CRITICAL();
             {
                 {
                     /* MISRA Ref 11.5.3 [Void pointer assignment] */
@@ -6159,7 +6466,7 @@
                     --uxDeletedTasksWaitingCleanUp;
                 }
             }
-            taskEXIT_CRITICAL();
+            kernelEXIT_CRITICAL();
 
             prvDeleteTCB( pxTCB );
         }
@@ -6167,7 +6474,7 @@
         {
             pxTCB = NULL;
 
-            taskENTER_CRITICAL();
+            kernelENTER_CRITICAL();
             {
                 /* For SMP, multiple idles can be running simultaneously
                  * and we need to check that other idles did not cleanup while we were
@@ -6190,12 +6497,12 @@
                         /* The TCB to be deleted still has not yet been switched out
                         * by the scheduler, so we will just exit this loop early and
                        * try again next time. */
-                        taskEXIT_CRITICAL();
+                        kernelEXIT_CRITICAL();
                        break;
                    }
                }
            }
-            taskEXIT_CRITICAL();
+            kernelEXIT_CRITICAL();
 
            if( pxTCB != NULL )
            {
@@ -6317,14 +6624,14 @@
                /* Tasks can be in pending ready list and other state list at the
                 * same time. These tasks are in ready state no matter what state
                 * list the task is in. */
-                taskENTER_CRITICAL();
+                kernelENTER_CRITICAL();
                {
                    if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE )
                    {
                        pxTaskStatus->eCurrentState = eReady;
                    }
                }
-                taskEXIT_CRITICAL();
+                kernelEXIT_CRITICAL();
            }
        }
        else
@@ -6646,7 +6953,7 @@ static void prvResetNextTaskUnblockTime( void )
    else
    {
        #if ( configNUMBER_OF_CORES > 1 )
-            taskENTER_CRITICAL();
+            kernelENTER_CRITICAL();
        #endif
        {
            if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
@@ -6659,7 +6966,7 @@
            }
        }
        #if ( configNUMBER_OF_CORES > 1 )
-            taskEXIT_CRITICAL();
+            kernelEXIT_CRITICAL();
        #endif
    }
 
@@ -6680,7 +6987,9 @@
 
        traceENTER_xTaskPriorityInherit( pxMutexHolder );
 
-        taskENTER_CRITICAL();
+        #if ( portUSING_GRANULAR_LOCKS == 1 )
+            kernelENTER_CRITICAL();
+        #endif
        {
            /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority
             * inheritance is not applied in this scenario. */
@@ -6768,7 +7077,9 @@
                mtCOVERAGE_TEST_MARKER();
            }
        }
-        taskEXIT_CRITICAL();
+        #if ( portUSING_GRANULAR_LOCKS == 1 )
+            kernelEXIT_CRITICAL();
+        #endif
 
        traceRETURN_xTaskPriorityInherit( xReturn );
 
@@ -6787,6 +7098,11 @@
 
        traceENTER_xTaskPriorityDisinherit( pxMutexHolder );
 
+        #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+            /* Lock the kernel data group as we are about to access its members */
+            kernelENTER_CRITICAL();
+        #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
        if( pxMutexHolder != NULL )
        {
            /* A task can only have an inherited priority if it holds the mutex.
@@ -6864,6 +7180,11 @@
            mtCOVERAGE_TEST_MARKER();
        }
 
+        #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+            /* We are done accessing the kernel data group. Unlock it. */
+            kernelEXIT_CRITICAL();
+        #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
        traceRETURN_xTaskPriorityDisinherit( xReturn );
 
        return xReturn;
@@ -6883,88 +7204,97 @@
 
        traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask );
 
-        if( pxMutexHolder != NULL )
+        #if ( portUSING_GRANULAR_LOCKS == 1 )
+            kernelENTER_CRITICAL();
+        #endif
        {
-            /* If pxMutexHolder is not NULL then the holder must hold at least
-             * one mutex. */
-            configASSERT( pxTCB->uxMutexesHeld );
+            if( pxMutexHolder != NULL )
+            {
+                /* If pxMutexHolder is not NULL then the holder must hold at least
+                 * one mutex. */
+                configASSERT( pxTCB->uxMutexesHeld );
 
-            /* Determine the priority to which the priority of the task that
-             * holds the mutex should be set. This will be the greater of the
-             * holding task's base priority and the priority of the highest
-             * priority task that is waiting to obtain the mutex. */
-            if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
-            {
-                uxPriorityToUse = uxHighestPriorityWaitingTask;
-            }
-            else
-            {
-                uxPriorityToUse = pxTCB->uxBasePriority;
-            }
-
-            /* Does the priority need to change? */
-            if( pxTCB->uxPriority != uxPriorityToUse )
-            {
-                /* Only disinherit if no other mutexes are held. This is a
-                 * simplification in the priority inheritance implementation. If
-                 * the task that holds the mutex is also holding other mutexes then
-                 * the other mutexes may have caused the priority inheritance. */
-                if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
+                /* Determine the priority to which the priority of the task that
+                 * holds the mutex should be set. This will be the greater of the
+                 * holding task's base priority and the priority of the highest
+                 * priority task that is waiting to obtain the mutex. */
+                if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
                {
-                    /* If a task has timed out because it already holds the
-                     * mutex it was trying to obtain then it cannot of inherited
-                     * its own priority. */
-                    configASSERT( pxTCB != pxCurrentTCB );
+                    uxPriorityToUse = uxHighestPriorityWaitingTask;
+                }
+                else
+                {
+                    uxPriorityToUse = pxTCB->uxBasePriority;
+                }
 
-                    /* Disinherit the priority, remembering the previous
-                     * priority to facilitate determining the subject task's
-                     * state. */
-                    traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
-                    uxPriorityUsedOnEntry = pxTCB->uxPriority;
-                    pxTCB->uxPriority = uxPriorityToUse;
+                /* Does the priority need to change? */
+                if( pxTCB->uxPriority != uxPriorityToUse )
+                {
+                    /* Only disinherit if no other mutexes are held. This is a
+                     * simplification in the priority inheritance implementation. If
+                     * the task that holds the mutex is also holding other mutexes then
+                     * the other mutexes may have caused the priority inheritance. */
+                    if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
+                    {
+                        /* If a task has timed out because it already holds the
+                         * mutex it was trying to obtain then it cannot have inherited
+                         * its own priority. */
+                        configASSERT( pxTCB != pxCurrentTCB );
 
-                    /* Only reset the event list item value if the value is not
-                     * being used for anything else. */
-                    if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) )
-                    {
-                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse );
-                    }
-                    else
-                    {
-                        mtCOVERAGE_TEST_MARKER();
-                    }
+                        /* Disinherit the priority, remembering the previous
+                         * priority to facilitate determining the subject task's
+                         * state. */
+                        traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
+                        uxPriorityUsedOnEntry = pxTCB->uxPriority;
+                        pxTCB->uxPriority = uxPriorityToUse;
 
-                    /* If the running task is not the task that holds the mutex
-                     * then the task that holds the mutex could be in either the
-                     * Ready, Blocked or Suspended states. Only remove the task
-                     * from its current state list if it is in the Ready state as
-                     * the task's priority is going to change and there is one
-                     * Ready list per priority. */
-                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
-                    {
-                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
+                        /* Only reset the event list item value if the value is not
+                         * being used for anything else. */
+                        if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0U ) )
                        {
-                            /* It is known that the task is in its ready list so
-                             * there is no need to check again and the port level
-                             * reset macro can be called directly. */
-                            portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
+                            listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
 
-                        prvAddTaskToReadyList( pxTCB );
-                        #if ( configNUMBER_OF_CORES > 1 )
+                        /* If the running task is not the task that holds the mutex
+                         * then the task that holds the mutex could be in either the
+                         * Ready, Blocked or Suspended states. Only remove the task
+                         * from its current state list if it is in the Ready state as
+                         * the task's priority is going to change and there is one
+                         * Ready list per priority. */
+                        if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                        {
-                            /* The priority of the task is dropped. Yield the core on
-                             * which the task is running. */
-                            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
+                            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                            {
-                                prvYieldCore( pxTCB->xTaskRunState );
+                                /* It is known that the task is in its ready list so
+                                 * there is no need to check again and the port level
+                                 * reset macro can be called directly. */
+                                portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                            }
+                            else
+                            {
+                                mtCOVERAGE_TEST_MARKER();
+                            }
+
+                            prvAddTaskToReadyList( pxTCB );
+                            #if ( configNUMBER_OF_CORES > 1 )
+                            {
+                                /* The priority of the task is dropped. Yield the core on
+                                 * which the task is running. */
+                                if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
+                                {
+                                    prvYieldCore( pxTCB->xTaskRunState );
+                                }
+                            }
+                            #endif /* if ( configNUMBER_OF_CORES > 1 ) */
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
                        }
-                        #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                    }
                    else
                    {
@@ -6981,10 +7311,9 @@ static void prvResetNextTaskUnblockTime( void )
                mtCOVERAGE_TEST_MARKER();
            }
        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
+        #if ( portUSING_GRANULAR_LOCKS == 1 )
+            kernelEXIT_CRITICAL();
+        #endif
 
        traceRETURN_vTaskPriorityDisinheritAfterTimeout();
    }
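The hunk above mostly re-indents the disinheritance logic into the new critical section, but the rule it wraps is worth restating: the holder's new priority is the greater of its base priority and the highest priority still waiting on the mutex. A self-contained worked example (UBaseType_t is typedef'd locally as a stand-in; values are hypothetical):

    #include <stdio.h>

    typedef unsigned int UBaseType_t;

    /* The disinheritance target computed in the hunk above: the greater of
     * the holder's base priority and the highest waiting priority. */
    static UBaseType_t uxDisinheritTarget( UBaseType_t uxBasePriority,
                                           UBaseType_t uxHighestWaiting )
    {
        return ( uxBasePriority < uxHighestWaiting ) ? uxHighestWaiting : uxBasePriority;
    }

    int main( void )
    {
        printf( "%u\n", uxDisinheritTarget( 1U, 3U ) ); /* 3 - keep an elevated level. */
        printf( "%u\n", uxDisinheritTarget( 4U, 2U ) ); /* 4 - drop to base priority.  */
        return 0;
    }
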
@@ -7059,7 +7388,7 @@ static void prvResetNextTaskUnblockTime( void )
 
 #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
 /*-----------------------------------------------------------*/
 
-#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) )
+#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
 
     void vTaskEnterCritical( void )
     {
@@ -7073,8 +7402,8 @@ static void prvResetNextTaskUnblockTime( void )
         {
             if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
             {
-                portGET_TASK_LOCK( xCoreID );
-                portGET_ISR_LOCK( xCoreID );
+                kernelGET_TASK_LOCK( xCoreID );
+                kernelGET_ISR_LOCK( xCoreID );
             }
 
             portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
@@ -7108,10 +7437,59 @@ static void prvResetNextTaskUnblockTime( void )
 
         traceRETURN_vTaskEnterCritical();
     }
 
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
 /*-----------------------------------------------------------*/
 
-#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) )
+#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
+
+    void vTaskEnterCritical( void )
+    {
+        traceENTER_vTaskEnterCritical();
+
+        portDISABLE_INTERRUPTS();
+        {
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+            if( xSchedulerRunning != pdFALSE )
+            {
+                kernelGET_TASK_LOCK( xCoreID );
+                kernelGET_ISR_LOCK( xCoreID );
+
+                portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                /* This is not the interrupt safe version of the enter critical
+                 * function so assert() if it is being called from an interrupt
+                 * context. Only API functions that end in "FromISR" can be used in an
+                 * interrupt. Only assert if the critical nesting count is 1 to
+                 * protect against recursive calls if the assert function also uses a
+                 * critical section. */
+                if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
+                {
+                    portASSERT_IF_IN_ISR();
+
+                    if( uxSchedulerSuspended == 0U )
+                    {
+                        /* The only time there would be a problem is if this is called
+                         * before a context switch and vTaskExitCritical() is called
+                         * after pxCurrentTCB changes. Therefore this should not be
+                         * used within vTaskSwitchContext(). */
+                        prvCheckForRunStateChange();
+                    }
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+
+        traceRETURN_vTaskEnterCritical();
+    }
+
+#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUMBER_OF_CORES > 1 )
 
     UBaseType_t vTaskEnterCriticalFromISR( void )
     {
@@ -7126,7 +7504,7 @@ static void prvResetNextTaskUnblockTime( void )
 
             if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
             {
-                portGET_ISR_LOCK( xCoreID );
+                kernelGET_ISR_LOCK( xCoreID );
             }
 
             portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
@@ -7141,7 +7519,7 @@ static void prvResetNextTaskUnblockTime( void )
 
         return uxSavedInterruptStatus;
     }
 
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
 
 #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )
@@ -7189,7 +7567,7 @@ static void prvResetNextTaskUnblockTime( void )
 
 #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
 /*-----------------------------------------------------------*/
 
-#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) )
+#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
 
     void vTaskExitCritical( void )
     {
@@ -7218,8 +7596,8 @@ static void prvResetNextTaskUnblockTime( void )
                 /* Get the xYieldPending stats inside the critical section. */
                 xYieldCurrentTask = xYieldPendings[ xCoreID ];
 
-                portRELEASE_ISR_LOCK( xCoreID );
-                portRELEASE_TASK_LOCK( xCoreID );
+                kernelRELEASE_ISR_LOCK( xCoreID );
+                kernelRELEASE_TASK_LOCK( xCoreID );
                 portENABLE_INTERRUPTS();
 
                 /* When a task yields in a critical section it just sets
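All of the enter/exit pairs in the surrounding hunks share one shape: on first entry take the task lock and then the ISR lock, bump a per-core nesting count on every entry, and release in reverse order when the count returns to zero. A self-contained toy model of that shape follows; every name in it is hypothetical, C11 atomic flags stand in for the port spinlocks, and interrupt masking and the per-core dimension are elided for brevity.

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct DataGroupLock
    {
        atomic_flag xTaskSpinlock;
        atomic_flag xISRSpinlock;
        unsigned int uxNesting; /* Critical nesting count, per core in the real kernel. */
    } DataGroupLock_t;

    static void prvSpin( atomic_flag * pxFlag )
    {
        while( atomic_flag_test_and_set_explicit( pxFlag, memory_order_acquire ) )
        {
            /* Busy-wait: another core holds the lock. */
        }
    }

    static void vDataGroupLock( DataGroupLock_t * pxLock )
    {
        if( pxLock->uxNesting == 0U )
        {
            /* Mirror the ordering rule asserted in the diff: always take
             * the task lock before the ISR lock. */
            prvSpin( &pxLock->xTaskSpinlock );
            prvSpin( &pxLock->xISRSpinlock );
        }

        pxLock->uxNesting++;
    }

    static void vDataGroupUnlock( DataGroupLock_t * pxLock )
    {
        pxLock->uxNesting--;

        if( pxLock->uxNesting == 0U )
        {
            /* Release in the reverse order of acquisition. */
            atomic_flag_clear_explicit( &pxLock->xISRSpinlock, memory_order_release );
            atomic_flag_clear_explicit( &pxLock->xTaskSpinlock, memory_order_release );
        }
    }

    int main( void )
    {
        DataGroupLock_t xKernelGroup = { ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT, 0U };

        vDataGroupLock( &xKernelGroup );   /* Outer section: takes both locks.   */
        vDataGroupLock( &xKernelGroup );   /* Nested section: only bumps count.  */
        vDataGroupUnlock( &xKernelGroup );
        vDataGroupUnlock( &xKernelGroup ); /* Both locks released here.          */

        printf( "nesting back to %u\n", xKernelGroup.uxNesting );
        return 0;
    }
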
@@ -7249,10 +7627,75 @@ static void prvResetNextTaskUnblockTime( void )
 
         traceRETURN_vTaskExitCritical();
     }
 
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
 /*-----------------------------------------------------------*/
 
-#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) )
+#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
+
+    void vTaskExitCritical( void )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        traceENTER_vTaskExitCritical();
+
+        if( xSchedulerRunning != pdFALSE )
+        {
+            /* If critical nesting count is zero then this function
+             * does not match a previous call to vTaskEnterCritical(). */
+            configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+
+            /* This function should not be called in ISR. Use vTaskExitCriticalFromISR
+             * to exit critical section from ISR. */
+            portASSERT_IF_IN_ISR();
+
+            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
+            {
+                /* Release the ISR and task locks */
+                kernelRELEASE_ISR_LOCK( xCoreID );
+                kernelRELEASE_TASK_LOCK( xCoreID );
+                portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                /* If the critical nesting count is 0, enable interrupts */
+                if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
+                {
+                    BaseType_t xYieldCurrentTask;
+
+                    /* Get the xYieldPending stats inside the critical section. */
+                    xYieldCurrentTask = xTaskUnlockCanYield();
+
+                    portENABLE_INTERRUPTS();
+
+                    /* When a task yields in a critical section it just sets
+                     * xYieldPending to true. So now that we have exited the
+                     * critical section check if xYieldPending is true, and
+                     * if so yield. */
+                    if( xYieldCurrentTask != pdFALSE )
+                    {
+                        portYIELD();
+                    }
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        traceRETURN_vTaskExitCritical();
+    }
+
+#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( configNUMBER_OF_CORES > 1 )
 
     void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus )
     {
@@ -7274,7 +7717,7 @@
 
             if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
             {
-                portRELEASE_ISR_LOCK( xCoreID );
+                kernelRELEASE_ISR_LOCK( xCoreID );
                 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
             }
             else
@@ -7295,7 +7738,7 @@
 
         traceRETURN_vTaskExitCriticalFromISR();
     }
 
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
 
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
@@ -7305,7 +7748,11 @@ static void prvResetNextTaskUnblockTime( void )
         BaseType_t xReturn;
         BaseType_t xCoreID = portGET_CORE_ID();
 
-        if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) )
+        if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
+            #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
+            #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+            )
         {
             xReturn = pdTRUE;
         }
@@ -7731,6 +8178,11 @@ TickType_t uxTaskResetEventItemValue( void )
 
     traceENTER_pvTaskIncrementMutexHeldCount();
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* Lock the kernel data group as we are about to access its members */
+        kernelENTER_CRITICAL();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     pxTCB = pxCurrentTCB;
 
     /* If xSemaphoreCreateMutex() is called before any tasks have been created
@@ -7740,6 +8192,11 @@ TickType_t uxTaskResetEventItemValue( void )
         ( pxTCB->uxMutexesHeld )++;
     }
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        /* We are done accessing the kernel data group. Unlock it. */
+        kernelEXIT_CRITICAL();
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceRETURN_pvTaskIncrementMutexHeldCount( pxTCB );
 
     return pxTCB;
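The xTaskUnlockCanYield() hunk above is a three-way predicate, and the new conditional compilation matters because uxPreemptionDisable only exists in the TCB when configUSE_TASK_PREEMPTION_DISABLE is 1. A condensed, self-contained model (all names hypothetical; plain bool parameters stand in for the kernel's state):

    #include <stdbool.h>
    #include <stdio.h>

    #ifndef configUSE_TASK_PREEMPTION_DISABLE
        #define configUSE_TASK_PREEMPTION_DISABLE    1
    #endif

    /* Condensed model of xTaskUnlockCanYield(): a yield is allowed only if
     * one is pending, the scheduler is not suspended, and - when the option
     * is compiled in - the running task has not disabled preemption. */
    static bool xCanYield( bool xYieldPending,
                           bool xSchedulerSuspended,
                           unsigned int uxPreemptionDisable )
    {
        bool xReturn = xYieldPending && !xSchedulerSuspended;

    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
        xReturn = xReturn && ( uxPreemptionDisable == 0U );
    #else
        ( void ) uxPreemptionDisable;
    #endif

        return xReturn;
    }

    int main( void )
    {
        printf( "%d\n", ( int ) xCanYield( true, false, 0U ) ); /* 1 - may yield.           */
        printf( "%d\n", ( int ) xCanYield( true, false, 1U ) ); /* 0 - preemption disabled. */
        return 0;
    }
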
@@ -7773,7 +8230,7 @@ TickType_t uxTaskResetEventItemValue( void )
          * has occurred and set the flag to indicate that we are waiting for
          * a notification. If we do not do so, a notification sent from an ISR
          * will get lost. */
-        taskENTER_CRITICAL();
+        kernelENTER_CRITICAL();
         {
             /* Only block if the notification count is not already non-zero. */
             if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0U )
@@ -7789,7 +8246,7 @@
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        kernelEXIT_CRITICAL();
 
         /* We are now out of the critical section but the scheduler is still
          * suspended, so we are safe to do non-deterministic operations such
@@ -7817,7 +8274,7 @@
             }
         }
 
-        taskENTER_CRITICAL();
+        kernelENTER_CRITICAL();
         {
             traceTASK_NOTIFY_TAKE( uxIndexToWaitOn );
             ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
@@ -7840,7 +8297,7 @@
 
             pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
         }
-        taskEXIT_CRITICAL();
+        kernelEXIT_CRITICAL();
 
         traceRETURN_ulTaskGenericNotifyTake( ulReturn );
 
@@ -7875,7 +8332,7 @@
         /* We MUST enter a critical section to atomically check and update the
          * task notification value. If we do not do so, a notification from
          * an ISR will get lost. */
-        taskENTER_CRITICAL();
+        kernelENTER_CRITICAL();
         {
             /* Only block if a notification is not already pending. */
             if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
@@ -7896,7 +8353,7 @@
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        kernelEXIT_CRITICAL();
 
         /* We are now out of the critical section but the scheduler is still
          * suspended, so we are safe to do non-deterministic operations such
@@ -7924,7 +8381,7 @@
             }
         }
 
-        taskENTER_CRITICAL();
+        kernelENTER_CRITICAL();
         {
             traceTASK_NOTIFY_WAIT( uxIndexToWaitOn );
 
@@ -7954,7 +8411,7 @@
 
             pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
         }
-        taskEXIT_CRITICAL();
+        kernelEXIT_CRITICAL();
 
         traceRETURN_xTaskGenericNotifyWait( xReturn );
 
@@ -7982,7 +8439,7 @@
         configASSERT( xTaskToNotify );
         pxTCB = xTaskToNotify;
 
-        taskENTER_CRITICAL();
+        kernelENTER_CRITICAL();
         {
             if( pulPreviousNotificationValue != NULL )
             {
@@ -8074,7 +8531,7 @@
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        kernelEXIT_CRITICAL();
 
         traceRETURN_xTaskGenericNotify( xReturn );
 
@@ -8126,7 +8583,7 @@
         /* MISRA Ref 4.7.1 [Return value shall be checked] */
         /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
         /* coverity[misra_c_2012_directive_4_7_violation] */
-        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR();
         {
             if( pulPreviousNotificationValue != NULL )
             {
@@ -8256,7 +8713,7 @@
                 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
             }
         }
-        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+        kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 
         traceRETURN_xTaskGenericNotifyFromISR( xReturn );
 
@@ -8304,7 +8761,7 @@
         /* MISRA Ref 4.7.1 [Return value shall be checked] */
         /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
         /* coverity[misra_c_2012_directive_4_7_violation] */
-        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = ( UBaseType_t ) kernelENTER_CRITICAL_FROM_ISR();
         {
             ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
             pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@@ -8390,7 +8847,7 @@
                 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
             }
         }
-        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+        kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 
         traceRETURN_vTaskGenericNotifyGiveFromISR();
     }
@@ -8415,7 +8872,7 @@
         pxTCB = prvGetTCBFromHandle( xTask );
         configASSERT( pxTCB != NULL );
 
-        taskENTER_CRITICAL();
+        kernelENTER_CRITICAL();
         {
             if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
             {
@@ -8427,7 +8884,7 @@
                 xReturn = pdFAIL;
             }
         }
-        taskEXIT_CRITICAL();
+        kernelEXIT_CRITICAL();
 
         traceRETURN_xTaskGenericNotifyStateClear( xReturn );
 
@@ -8455,14 +8912,14 @@
         pxTCB = prvGetTCBFromHandle( xTask );
         configASSERT( pxTCB != NULL );
 
-        taskENTER_CRITICAL();
+        kernelENTER_CRITICAL();
         {
             /* Return the notification as it was before the bits were cleared,
              * then clear the bit mask. */
             ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
             pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
         }
-        taskEXIT_CRITICAL();
+        kernelEXIT_CRITICAL();
 
         traceRETURN_ulTaskGenericNotifyValueClear( ulReturn );
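ulTaskGenericNotifyValueClear(), at the end of this hunk, follows a read-then-mask contract: it returns the notification value as it was on entry and only then clears the requested bits. A minimal standalone illustration (plain uint32_t stand-ins; the real function performs this inside kernelENTER_CRITICAL()/kernelEXIT_CRITICAL() so the read and the clear are atomic with respect to ISRs):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ulNotifiedValue = 0xFFu;

    static uint32_t ulNotifyValueClear( uint32_t ulBitsToClear )
    {
        /* Return the value as it was BEFORE the bits were cleared. */
        uint32_t ulReturn = ulNotifiedValue;

        ulNotifiedValue &= ~ulBitsToClear;

        return ulReturn;
    }

    int main( void )
    {
        printf( "returned 0x%02X\n", ( unsigned int ) ulNotifyValueClear( 0x0Fu ) ); /* 0xFF */
        printf( "now      0x%02X\n", ( unsigned int ) ulNotifiedValue );             /* 0xF0 */
        return 0;
    }
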