From 80457ca165e53907e332a79f5af6106a7c5c95cf Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 12 May 2021 12:03:08 -0700 Subject: [PATCH 01/16] added multiple idle tasks --- tasks.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 95 insertions(+), 15 deletions(-) diff --git a/tasks.c b/tasks.c index e2245bcf5..0a88bfbf4 100644 --- a/tasks.c +++ b/tasks.c @@ -467,6 +467,9 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#if ( configNUM_CORES > 1 ) +static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#endif /* * Utility to free all memory allocated by the scheduler to hold a TCB, @@ -1545,7 +1548,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; /* Is this an idle task? */ + #if(configNUM_CORES > 1) + pxNewTCB->xIsIdle = ( pxTaskCode == prvIdleTask ) || (pxTaskCode == prvMinimalIdleTask); + #else pxNewTCB->xIsIdle = ( pxTaskCode == prvIdleTask ); + #endif if( pxCreatedTask != NULL ) { @@ -2663,22 +2670,40 @@ void vTaskStartScheduler( void ) #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) { - #error User must specify an array of buffers for idle task TCBs and stacks - StaticTask_t * pxIdleTaskTCBBuffer = NULL; - StackType_t * pxIdleTaskStackBuffer = NULL; - uint32_t ulIdleTaskStackSize; - - /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ - vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); - xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask, - cIdleName, - ulIdleTaskStackSize, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - pxIdleTaskStackBuffer, - pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + if(xCoreID == 0) + { + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask, + cIdleName, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #if( configNUM_CORES > 1) + else + { + struct taskMemory{ + StaticTask_t TCB; + StackType_t stack[configMINIMAL_STACK_SIZE]; + }; + static struct taskMemory idleMemory[configNUM_CORES]; + xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. 
*/ + idleMemory[xCoreID].stack, + &idleMemory[xCoreID].TCB ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif if( xIdleTaskHandle[ xCoreID ] != NULL ) { xReturn = pdPASS; @@ -4146,6 +4171,61 @@ void vTaskMissedYield( void ) #endif /* configUSE_TRACE_FACILITY */ +/* + * ----------------------------------------------------------- + * The MinimalIdle task. + * ---------------------------------------------------------- + * + * The portTASK_FUNCTION() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvMinimalIdleTask( void *pvParameters ); + * + * The minimal idle task is used for all the additional Cores in a SMP system. + * There must be only 1 idle task and the rest are minimal idle tasks. + * + * @todo additional conditional compiles to remove this function. + */ +#if (configNUM_CORES > 1) +static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) +{ + for(;;) + { + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + * see if any other task has become available. If we are using + * preemption we don't need to do this as any task becoming available + * will automatically get the processor anyway. */ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + * timesliced. If a task that is sharing the idle priority is ready + * to run then the idle task should yield before the end of the + * timeslice. + * + * A critical region is not required here as we are just reading from + * the list, and an occasional incorrect value will not matter. If + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores + * then a task other than the idle task is ready to execute. */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + } +} +#endif /* * ----------------------------------------------------------- * The Idle task. From ad317efd627bdaa2400046d7f1e850a449607010 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 12 May 2021 12:13:44 -0700 Subject: [PATCH 02/16] Added multiple IDLE tasks to non-static allocation --- tasks.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tasks.c b/tasks.c index 0a88bfbf4..ba16ce2e1 100644 --- a/tasks.c +++ b/tasks.c @@ -2715,6 +2715,8 @@ void vTaskStartScheduler( void ) } #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ { + if(xCoreID == 0) + { /* The Idle task is being created using dynamically allocated RAM. */ xReturn = xTaskCreate( prvIdleTask, cIdleName, @@ -2722,6 +2724,18 @@ void vTaskStartScheduler( void ) ( void * ) NULL, portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. 
*/ + } + #if( configNUM_CORES > 1 ) + else + { + xReturn = xTaskCreate( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif } #endif /* configSUPPORT_STATIC_ALLOCATION */ } From b9a17479a0a34d0309731f7d4e26e973e9daa68d Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 12 May 2021 17:01:00 -0700 Subject: [PATCH 03/16] Adjustments to tasks from PR review --- tasks.c | 72 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 39 insertions(+), 33 deletions(-) diff --git a/tasks.c b/tasks.c index ba16ce2e1..13e2535d0 100644 --- a/tasks.c +++ b/tasks.c @@ -410,6 +410,11 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* File private functions. --------------------------------*/ +/* + * Creates the idle tasks during scheduler start + */ +static BaseType_t prvCreateIdleTasks( void ); + /* * Returns the yield pending count for the calling core. */ @@ -460,11 +465,6 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * The idle task is automatically created and added to the ready lists upon * creation of the first user task. * - * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific - * language extensions. The equivalent prototype for this function is: - * - * void prvIdleTask( void *pvParameters ); - * */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #if ( configNUM_CORES > 1 ) @@ -1548,11 +1548,20 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; /* Is this an idle task? */ + if(pxTaskCode == prvIdleTask) + { + pxNewTCB->xIsIdle = pdTRUE; + } #if(configNUM_CORES > 1) - pxNewTCB->xIsIdle = ( pxTaskCode == prvIdleTask ) || (pxTaskCode == prvMinimalIdleTask); - #else - pxNewTCB->xIsIdle = ( pxTaskCode == prvIdleTask ); + else if(pxTaskCode == prvMinimalIdleTask) + { + pxNewTCB->xIsIdle = pdTRUE; + } #endif + else + { + pxNewTCB->xIsIdle = pdFALSE; + } if( pxCreatedTask != NULL ) { @@ -2605,19 +2614,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ /*-----------------------------------------------------------*/ -void vTaskStartScheduler( void ) +static BaseType_t prvCreateIdleTasks( void ) { - BaseType_t xReturn; + BaseType_t xReturn = pdPASS; BaseType_t xCoreID; char cIdleName[ configMAX_TASK_NAME_LEN ]; - #if ( configUSE_TIMERS == 1 ) - { - xReturn = xTimerCreateTimerTask(); - } - #endif /* configUSE_TIMERS */ - - /* Add each idle task at the lowest priority. */ + /* Add each idle task at the lowest priority. 
*/ for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) { BaseType_t x; @@ -2690,18 +2693,16 @@ void vTaskStartScheduler( void ) #if( configNUM_CORES > 1) else { - struct taskMemory{ - StaticTask_t TCB; - StackType_t stack[configMINIMAL_STACK_SIZE]; - }; - static struct taskMemory idleMemory[configNUM_CORES]; + static StaticTask_t xIdleTCBBuffers[configNUM_CORES-1]; + static StackType_t xIdleTaskStackBuffers[configMINIMAL_STACK_SIZE][configNUM_CORES-1]; + xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, cIdleName, configMINIMAL_STACK_SIZE, ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - idleMemory[xCoreID].stack, - &idleMemory[xCoreID].TCB ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + xIdleTaskStackBuffers[xCoreID-1], + &xIdleTCBBuffers[xCoreID-1] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } #endif if( xIdleTaskHandle[ xCoreID ] != NULL ) @@ -2739,6 +2740,20 @@ void vTaskStartScheduler( void ) } #endif /* configSUPPORT_STATIC_ALLOCATION */ } + return xReturn; +} + +void vTaskStartScheduler( void ) +{ + BaseType_t xReturn; + + #if ( configUSE_TIMERS == 1 ) + { + xReturn = xTimerCreateTimerTask(); + } + #endif /* configUSE_TIMERS */ + + xReturn = prvCreateIdleTasks(); if( xReturn == pdPASS ) { @@ -4190,11 +4205,6 @@ void vTaskMissedYield( void ) * The MinimalIdle task. * ---------------------------------------------------------- * - * The portTASK_FUNCTION() macro is used to allow port/compiler specific - * language extensions. The equivalent prototype for this function is: - * - * void prvMinimalIdleTask( void *pvParameters ); - * * The minimal idle task is used for all the additional Cores in a SMP system. * There must be only 1 idle task and the rest are minimal idle tasks. * @@ -4245,10 +4255,6 @@ static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) * The Idle task. * ---------------------------------------------------------- * - * The portTASK_FUNCTION() macro is used to allow port/compiler specific - * language extensions. 
The equivalent prototype for this function is: - * - * void prvIdleTask( void *pvParameters ); * */ static portTASK_FUNCTION( prvIdleTask, pvParameters ) From d59bfca4f9230df2ad07198da5783ff5ca514868 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Thu, 13 May 2021 17:20:41 -0700 Subject: [PATCH 04/16] converted exclusion to affinity --- .github/lexicon.txt | 3 +- include/FreeRTOS.h | 9 +++--- include/task.h | 14 +++++++-- tasks.c | 73 ++++++++++++++++++++++----------------------- 4 files changed, 55 insertions(+), 44 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index cd16258f3..04b3d5d76 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -2400,7 +2400,8 @@ uxbitstoset uxbitstowait uxbitstowaitfor uxcontrolbits -uxcoreexclude +uxcoreaffinitymask +uxcoreaffinityinheritancemask uxcriticalnesting uxcurrenteventbits uxcurrentnumberoftasks diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index b51940b92..8a47e0dbf 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -240,8 +240,8 @@ #define configUSE_TASK_PREEMPTION_DISABLE 0 #endif -#ifndef configUSE_CORE_EXCLUSION - #define configUSE_CORE_EXCLUSION 0 +#ifndef configUSE_CORE_AFFINITY + #define configUSE_CORE_AFFINITY 0 #endif #ifndef configUSE_ALTERNATIVE_API @@ -948,7 +948,7 @@ #error configUSE_MUTEXES must be set to 1 to use recursive mutexes #endif -#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_CORE_EXCLUSION != 0 ) ) +#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_CORE_AFFINITY != 0 ) ) #error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use core exclusion #endif @@ -1209,8 +1209,9 @@ typedef struct xSTATIC_TCB #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) BaseType_t xDummy24; #endif - #if ( configUSE_CORE_EXCLUSION == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 && configNUM_CORES > 1 ) UBaseType_t uxDummy25; + UBaseType_t uxDummy26; #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; diff --git a/include/task.h b/include/task.h index 87085b2c8..16339cc2a 100644 --- a/include/task.h +++ b/include/task.h @@ -170,6 +170,14 @@ typedef enum */ #define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) +/** + * Defines affinity to all available cores. + * + */ +#define tskNO_AFFINITY ( ( UBaseType_t ) -1U ) + + + /** * task. h * @@ -1235,8 +1243,10 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; */ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; -void vTaskCoreExclusionSet( const TaskHandle_t xTask, UBaseType_t uxCoreExclude ); -UBaseType_t vTaskCoreExclusionGet( const TaskHandle_t xTask ); +#if ( configUSE_CORE_AFFINITY == 1) +void vTaskCoreAffinitySet( const TaskHandle_t xTask, UBaseType_t uxCoreAffinityMask ); +UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ); +#endif void vTaskPreemptionDisable( const TaskHandle_t xTask ); void vTaskPreemptionEnable( const TaskHandle_t xTask ); diff --git a/tasks.c b/tasks.c index 13e2535d0..ae038e872 100644 --- a/tasks.c +++ b/tasks.c @@ -254,8 +254,9 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to BaseType_t xPreemptionDisable; /*< Used to prevent the task from being preempted */ #endif - #if ( configUSE_CORE_EXCLUSION == 1 ) - UBaseType_t uxCoreExclude; /*< Used to exclude the task from certain cores */ + #if ( configUSE_CORE_AFFINITY == 1 && configNUM_CORES > 1 ) + UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. 
UBaseType_t must have >= the same number of bits as SMP confNUM_CORES */ + UBaseType_t uxCoreAffinityInheritanceMask; /*< Used to allow a task to inherit the affinity of its parent */ #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) @@ -321,7 +322,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #if ( configUSE_POSIX_ERRNO == 1 ) int iTaskErrno; #endif -} tskTCB; +} tskTCB; /* The old tskTCB name is maintained above then typedefed to the new TCB_t name * below to enable the use of older kernel aware debuggers. */ @@ -760,8 +761,8 @@ static void prvYieldForTask( TCB_t * pxTCB, { if( xTaskPriority <= xLowestPriority ) { - #if ( configUSE_CORE_EXCLUSION == 1 ) - if( ( pxTCB->uxCoreExclude & ( 1 << x ) ) == 0 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << x ) ) == 1 ) #endif { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -824,7 +825,7 @@ static void prvYieldForTask( TCB_t * pxTCB, BaseType_t xTaskScheduled = pdFALSE; BaseType_t xDecrementTopPriority = pdTRUE; - #if ( configUSE_CORE_EXCLUSION == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) TCB_t * pxPreviousTCB = NULL; #endif #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) @@ -892,13 +893,13 @@ static void prvYieldForTask( TCB_t * pxTCB, if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { - #if ( configUSE_CORE_EXCLUSION == 1 ) - if( ( pxTCB->uxCoreExclude & ( 1 << xCoreID ) ) == 0 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) == 1 ) #endif { /* If the task is not being executed by any core swap it in */ pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; - #if ( configUSE_CORE_EXCLUSION == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) pxPreviousTCB = pxCurrentTCBs[ xCoreID ]; #endif pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; @@ -909,8 +910,8 @@ static void prvYieldForTask( TCB_t * pxTCB, else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) { configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); - #if ( configUSE_CORE_EXCLUSION == 1 ) - if( ( pxTCB->uxCoreExclude & ( 1 << xCoreID ) ) == 0 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) == 1 ) #endif { /* The task is already running on this core, mark it as scheduled */ @@ -974,12 +975,12 @@ static void prvYieldForTask( TCB_t * pxTCB, } #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) */ - #if ( configUSE_CORE_EXCLUSION == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) { - /* A ready task was just bumped off this core. Look at the cores it is not excluded + /* A ready task was just bumped off this core. Look at the cores it can run from * from to see if it is able to run on any of them */ - UBaseType_t uxCoreMap = ~( pxPreviousTCB->uxCoreExclude ); + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; BaseType_t xLowestPriority = pxPreviousTCB->uxPriority - pxPreviousTCB->xIsIdle; BaseType_t xLowestPriorityCore = -1; @@ -990,12 +991,12 @@ static void prvYieldForTask( TCB_t * pxTCB, * on with the cores that the new task is excluded from. It is possible that the * new task was only placed onto this core because it is excluded from another. * Check to see if the previous task could run on one of those cores. 
*/ - uxCoreMap &= pxCurrentTCBs[ xCoreID ]->uxCoreExclude; + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); } else { /* The ready task that was removed from this core is excluded from it. - * See if we can schedule it on any of the cores where it is not excluded from. */ + * @todo See if we can schedule it on any of the cores where it is not excluded from. */ } uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); @@ -1027,7 +1028,7 @@ static void prvYieldForTask( TCB_t * pxTCB, prvYieldCore( xLowestPriorityCore ); } } - #endif /* if ( configUSE_CORE_EXCLUSION == 1 ) */ + #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ return pdTRUE; } @@ -1047,8 +1048,6 @@ static void prvYieldForTask( TCB_t * pxTCB, #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ /*-----------------------------------------------------------*/ - - #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, @@ -1078,6 +1077,7 @@ static void prvYieldForTask( TCB_t * pxTCB, if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) { + prvTaskCreator( pxTaskCode, ulStack) /* The memory used for the task's TCB and stack are passed into this * function - use them. */ pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ @@ -1481,9 +1481,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif - #if ( configUSE_CORE_EXCLUSION == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) { - pxNewTCB->uxCoreExclude = 0; + pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; } #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -1608,9 +1608,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( pxCurrentTCBs[ xCoreID ] == NULL ) { pxNewTCB->xTaskRunState = xCoreID; - #if ( configUSE_CORE_EXCLUSION == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) { - pxNewTCB->uxCoreExclude = ~( 1 << xCoreID ); + pxNewTCB->uxCoreAffinityMask = ( 1 << xCoreID ); } #endif pxCurrentTCBs[ xCoreID ] = pxNewTCB; @@ -2220,10 +2220,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* INCLUDE_vTaskPrioritySet */ /*-----------------------------------------------------------*/ -#if ( configUSE_CORE_EXCLUSION == 1 ) +#if ( configUSE_CORE_AFFINITY == 1 ) - void vTaskCoreExclusionSet( const TaskHandle_t xTask, - UBaseType_t uxCoreExclude ) + void vTaskCoreAffinitySet( const TaskHandle_t xTask, + UBaseType_t uxCoreAffinityMask ) { TCB_t * pxTCB; BaseType_t xCoreID; @@ -2232,7 +2232,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { pxTCB = prvGetTCBFromHandle( xTask ); - pxTCB->uxCoreExclude = uxCoreExclude; + pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; if( xSchedulerRunning != pdFALSE ) { @@ -2240,7 +2240,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; - if( ( uxCoreExclude & ( 1 << xCoreID ) ) != 0 ) + if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) != 1 ) { prvYieldCore( xCoreID ); } @@ -2250,27 +2250,27 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) taskEXIT_CRITICAL(); } -#endif /* configUSE_CORE_EXCLUSION */ +#endif /* configUSE_CORE_AFFINITY */ /*-----------------------------------------------------------*/ -#if ( configUSE_CORE_EXCLUSION == 1 ) +#if ( configUSE_CORE_AFFINITY == 1 ) - UBaseType_t vTaskCoreExclusionGet( const TaskHandle_t xTask ) + UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) { TCB_t * pxTCB; - UBaseType_t uxCoreExclude; + 
UBaseType_t uxCoreAffinityMask; taskENTER_CRITICAL(); { pxTCB = prvGetTCBFromHandle( xTask ); - uxCoreExclude = pxTCB->uxCoreExclude; + uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } taskEXIT_CRITICAL(); - return uxCoreExclude; + return uxCoreAffinityMask; } -#endif /* configUSE_CORE_EXCLUSION */ +#endif /* configUSE_CORE_AFFINITY */ /*-----------------------------------------------------------*/ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -2451,8 +2451,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) BaseType_t xReturn = pdFALSE; const TCB_t * const pxTCB = xTask; - /* Accesses xPendingReadyList so must be called from a critical - * section. */ + /* Accesses xPendingReadyList so must be called from a critical section. */ /* It does not make sense to check if the calling task is suspended. */ configASSERT( xTask ); From 56d94df0a8731098acec85973a0556dede1b591d Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Tue, 18 May 2021 15:07:26 -0700 Subject: [PATCH 05/16] Fixed bit mask compare bug --- tasks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tasks.c b/tasks.c index ae038e872..4b7c99d15 100644 --- a/tasks.c +++ b/tasks.c @@ -894,7 +894,7 @@ static void prvYieldForTask( TCB_t * pxTCB, if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) #endif { /* If the task is not being executed by any core swap it in */ @@ -911,7 +911,7 @@ static void prvYieldForTask( TCB_t * pxTCB, { configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) #endif { /* The task is already running on this core, mark it as scheduled */ From 6369b03bd15f00cc3afbe0f96d4881ec8b1c5c50 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Tue, 18 May 2021 15:17:50 -0700 Subject: [PATCH 06/16] fixed additional bit mask test errors --- tasks.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tasks.c b/tasks.c index 4b7c99d15..65b57992e 100644 --- a/tasks.c +++ b/tasks.c @@ -762,7 +762,7 @@ static void prvYieldForTask( TCB_t * pxTCB, if( xTaskPriority <= xLowestPriority ) { #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << x ) ) == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << x ) ) != 0 ) #endif { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -1608,11 +1608,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( pxCurrentTCBs[ xCoreID ] == NULL ) { pxNewTCB->xTaskRunState = xCoreID; + /* This section of code pins the idle tasks to cores. 
#if ( configUSE_CORE_AFFINITY == 1 ) { pxNewTCB->uxCoreAffinityMask = ( 1 << xCoreID ); } #endif + */ pxCurrentTCBs[ xCoreID ] = pxNewTCB; break; } @@ -2240,7 +2242,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; - if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) != 1 ) + if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) { prvYieldCore( xCoreID ); } From 87279a3dc7458437da044bb25312362c69f6b444 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Tue, 18 May 2021 19:49:02 -0700 Subject: [PATCH 07/16] Fixed static allocation internal array --- tasks.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/tasks.c b/tasks.c index 65b57992e..bd07d59a3 100644 --- a/tasks.c +++ b/tasks.c @@ -761,9 +761,11 @@ static void prvYieldForTask( TCB_t * pxTCB, { if( xTaskPriority <= xLowestPriority ) { + #if( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) if( ( pxTCB->uxCoreAffinityMask & ( 1 << x ) ) != 0 ) #endif + #endif { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) @@ -893,9 +895,11 @@ static void prvYieldForTask( TCB_t * pxTCB, if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { + #if ( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) #endif + #endif { /* If the task is not being executed by any core swap it in */ pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; @@ -910,9 +914,11 @@ static void prvYieldForTask( TCB_t * pxTCB, else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) { configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); + #if( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) #endif + #endif { /* The task is already running on this core, mark it as scheduled */ pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; @@ -975,6 +981,7 @@ static void prvYieldForTask( TCB_t * pxTCB, } #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) */ + #if ( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) { @@ -1029,6 +1036,7 @@ static void prvYieldForTask( TCB_t * pxTCB, } } #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* if ( configNUM_CORES > 1 ) */ return pdTRUE; } @@ -1077,7 +1085,6 @@ static void prvYieldForTask( TCB_t * pxTCB, if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) { - prvTaskCreator( pxTaskCode, ulStack) /* The memory used for the task's TCB and stack are passed into this * function - use them. */ pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. 
*/ @@ -1481,11 +1488,13 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif + #if ( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) { pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; } #endif + #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) { pxNewTCB->xPreemptionDisable = 0; @@ -2222,6 +2231,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* INCLUDE_vTaskPrioritySet */ /*-----------------------------------------------------------*/ +#if ( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) void vTaskCoreAffinitySet( const TaskHandle_t xTask, @@ -2253,8 +2263,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } #endif /* configUSE_CORE_AFFINITY */ +#endif /*-----------------------------------------------------------*/ +#if ( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) @@ -2273,6 +2285,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } #endif /* configUSE_CORE_AFFINITY */ +#endif + /*-----------------------------------------------------------*/ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -2695,7 +2709,7 @@ static BaseType_t prvCreateIdleTasks( void ) else { static StaticTask_t xIdleTCBBuffers[configNUM_CORES-1]; - static StackType_t xIdleTaskStackBuffers[configMINIMAL_STACK_SIZE][configNUM_CORES-1]; + static StackType_t xIdleTaskStackBuffers[configNUM_CORES-1][configMINIMAL_STACK_SIZE]; xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, cIdleName, From 106e15ea6ba44189e231c06c5b6c3ca46b3b8134 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 19 May 2021 08:38:58 -0700 Subject: [PATCH 08/16] Uncrustified tasks.c --- tasks.c | 394 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 199 insertions(+), 195 deletions(-) diff --git a/tasks.c b/tasks.c index bd07d59a3..911170f91 100644 --- a/tasks.c +++ b/tasks.c @@ -150,7 +150,7 @@ /* A port optimised version is provided. Call the port defined macros. */ #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) - /*-----------------------------------------------------------*/ + /*-----------------------------------------------------------*/ /* A port optimised version is provided, call it only if the TCB being reset * is being referenced from a ready list. If it is referenced from a delayed @@ -255,7 +255,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #endif #if ( configUSE_CORE_AFFINITY == 1 && configNUM_CORES > 1 ) - UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have >= the same number of bits as SMP confNUM_CORES */ + UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have >= the same number of bits as SMP confNUM_CORES */ UBaseType_t uxCoreAffinityInheritanceMask; /*< Used to allow a task to inherit the affinity of its parent */ #endif @@ -290,7 +290,6 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #endif #if ( configUSE_NEWLIB_REENTRANT == 1 ) - /* Allocate a Newlib reent structure that is specific to this task. * Note Newlib support has been included by popular demand, but is not * used by the FreeRTOS maintainers themselves. 
FreeRTOS is not @@ -322,7 +321,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #if ( configUSE_POSIX_ERRNO == 1 ) int iTaskErrno; #endif -} tskTCB; +} tskTCB; /* The old tskTCB name is maintained above then typedefed to the new TCB_t name * below to enable the use of older kernel aware debuggers. */ @@ -469,7 +468,7 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #if ( configNUM_CORES > 1 ) -static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; + static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #endif /* @@ -634,13 +633,13 @@ static void prvCheckForRunStateChange( void ) while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) { /* We are only here if we just entered a critical section - * or if we just suspended the scheduler, and another task - * has requested that we yield. - * - * This is slightly complicated since we need to save and restore - * the suspension and critical nesting counts, as well as release - * and reacquire the correct locks. And then do it all over again - * if our state changed again during the reacquisition. */ + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then do it all over again + * if our state changed again during the reacquisition. */ uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; uxPrevSchedulerSuspended = uxSchedulerSuspended; @@ -761,10 +760,10 @@ static void prvYieldForTask( TCB_t * pxTCB, { if( xTaskPriority <= xLowestPriority ) { - #if( configNUM_CORES > 1 ) - #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << x ) ) != 0 ) - #endif + #if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << x ) ) != 0 ) + #endif #endif { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -896,9 +895,9 @@ static void prvYieldForTask( TCB_t * pxTCB, if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { #if ( configNUM_CORES > 1 ) - #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) - #endif + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif #endif { /* If the task is not being executed by any core swap it in */ @@ -914,10 +913,10 @@ static void prvYieldForTask( TCB_t * pxTCB, else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) { configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); - #if( configNUM_CORES > 1 ) - #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) - #endif + #if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif #endif { /* The task is already running on this core, mark it as scheduled */ @@ -982,60 +981,60 @@ static void prvYieldForTask( TCB_t * pxTCB, #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) */ #if ( configNUM_CORES > 1 ) - #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) - { - 
/* A ready task was just bumped off this core. Look at the cores it can run from - * from to see if it is able to run on any of them */ - UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; - BaseType_t xLowestPriority = pxPreviousTCB->uxPriority - pxPreviousTCB->xIsIdle; - BaseType_t xLowestPriorityCore = -1; - - if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) { - /* The ready task that was removed from this core is not excluded from it. - * Only look at the intersection of the cores the removed task is allowed to run - * on with the cores that the new task is excluded from. It is possible that the - * new task was only placed onto this core because it is excluded from another. - * Check to see if the previous task could run on one of those cores. */ - uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); - } - else - { - /* The ready task that was removed from this core is excluded from it. - * @todo See if we can schedule it on any of the cores where it is not excluded from. */ - } + /* A ready task was just bumped off this core. Look at the cores it can run from + * from to see if it is able to run on any of them */ + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; + BaseType_t xLowestPriority = pxPreviousTCB->uxPriority - pxPreviousTCB->xIsIdle; + BaseType_t xLowestPriorityCore = -1; - uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); - - while( uxCoreMap != 0 ) - { - int uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); - - xassert( taskVALID_CORE_ID( uxCore ) ); - - uxCoreMap &= ~( 1 << uxCore ); - - BaseType_t xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority - pxCurrentTCBs[ uxCore ]->xIsIdle; - - if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ]->xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) ) + if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) - #endif + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. + * Check to see if the previous task could run on one of those cores. */ + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); + } + else + { + /* The ready task that was removed from this core is excluded from it. + * @todo See if we can schedule it on any of the cores where it is not excluded from. 
*/ + } + + uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); + + while( uxCoreMap != 0 ) + { + int uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); + + xassert( taskVALID_CORE_ID( uxCore ) ); + + uxCoreMap &= ~( 1 << uxCore ); + + BaseType_t xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority - pxCurrentTCBs[ uxCore ]->xIsIdle; + + if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ]->xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) ) { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = uxCore; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = uxCore; + } } } - } - if( taskVALID_CORE_ID( xLowestPriorityCore ) ) - { - prvYieldCore( xLowestPriorityCore ); + if( taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + } } - } - #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ #endif /* if ( configNUM_CORES > 1 ) */ return pdTRUE; @@ -1489,11 +1488,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #endif #if ( configNUM_CORES > 1 ) - #if ( configUSE_CORE_AFFINITY == 1 ) - { - pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; - } - #endif + #if ( configUSE_CORE_AFFINITY == 1 ) + { + pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; + } + #endif #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) { @@ -1557,15 +1556,16 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; /* Is this an idle task? */ - if(pxTaskCode == prvIdleTask) - { - pxNewTCB->xIsIdle = pdTRUE; - } - #if(configNUM_CORES > 1) - else if(pxTaskCode == prvMinimalIdleTask) + if( pxTaskCode == prvIdleTask ) { pxNewTCB->xIsIdle = pdTRUE; } + + #if ( configNUM_CORES > 1 ) + else if( pxTaskCode == prvMinimalIdleTask ) + { + pxNewTCB->xIsIdle = pdTRUE; + } #endif else { @@ -1617,13 +1617,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( pxCurrentTCBs[ xCoreID ] == NULL ) { pxNewTCB->xTaskRunState = xCoreID; + /* This section of code pins the idle tasks to cores. - #if ( configUSE_CORE_AFFINITY == 1 ) - { - pxNewTCB->uxCoreAffinityMask = ( 1 << xCoreID ); - } - #endif - */ + #if ( configUSE_CORE_AFFINITY == 1 ) + * { + * pxNewTCB->uxCoreAffinityMask = ( 1 << xCoreID ); + * } + #endif + */ pxCurrentTCBs[ xCoreID ] = pxNewTCB; break; } @@ -1713,7 +1714,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * no longer running. */ if( xTaskRunningOnCore != taskTASK_NOT_RUNNING ) { - /* A running task is being deleted. This cannot complete within the * task itself, as a context switch to another task is required. * Place the task in the termination list. 
The idle task will @@ -1755,7 +1755,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) xCoreID = portGET_CORE_ID(); - if( xTaskRunningOnCore == xCoreID ) { configASSERT( uxSchedulerSuspended == 0 ); @@ -1959,7 +1958,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } } } - #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ + #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ { eReturn = eSuspended; } @@ -2232,60 +2231,60 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /*-----------------------------------------------------------*/ #if ( configNUM_CORES > 1 ) -#if ( configUSE_CORE_AFFINITY == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) - void vTaskCoreAffinitySet( const TaskHandle_t xTask, - UBaseType_t uxCoreAffinityMask ) - { - TCB_t * pxTCB; - BaseType_t xCoreID; - - taskENTER_CRITICAL(); + void vTaskCoreAffinitySet( const TaskHandle_t xTask, + UBaseType_t uxCoreAffinityMask ) { - pxTCB = prvGetTCBFromHandle( xTask ); + TCB_t * pxTCB; + BaseType_t xCoreID; - pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; - - if( xSchedulerRunning != pdFALSE ) + taskENTER_CRITICAL(); { - if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) - { - xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + pxTCB = prvGetTCBFromHandle( xTask ); - if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) { - prvYieldCore( xCoreID ); + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + + if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + { + prvYieldCore( xCoreID ); + } } } } + taskEXIT_CRITICAL(); } - taskEXIT_CRITICAL(); - } -#endif /* configUSE_CORE_AFFINITY */ -#endif + #endif /* configUSE_CORE_AFFINITY */ +#endif /* if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configNUM_CORES > 1 ) -#if ( configUSE_CORE_AFFINITY == 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) - UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) - { - TCB_t * pxTCB; - UBaseType_t uxCoreAffinityMask; - - taskENTER_CRITICAL(); + UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) { - pxTCB = prvGetTCBFromHandle( xTask ); - uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; + TCB_t * pxTCB; + UBaseType_t uxCoreAffinityMask; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; + } + taskEXIT_CRITICAL(); + + return uxCoreAffinityMask; } - taskEXIT_CRITICAL(); - return uxCoreAffinityMask; - } - -#endif /* configUSE_CORE_AFFINITY */ -#endif + #endif /* configUSE_CORE_AFFINITY */ +#endif /* if ( configNUM_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -2635,7 +2634,7 @@ static BaseType_t prvCreateIdleTasks( void ) BaseType_t xCoreID; char cIdleName[ configMAX_TASK_NAME_LEN ]; - /* Add each idle task at the lowest priority. */ + /* Add each idle task at the lowest priority. */ for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) { BaseType_t x; @@ -2688,14 +2687,14 @@ static BaseType_t prvCreateIdleTasks( void ) #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) { - if(xCoreID == 0) + if( xCoreID == 0 ) { StaticTask_t * pxIdleTaskTCBBuffer = NULL; StackType_t * pxIdleTaskStackBuffer = NULL; uint32_t ulIdleTaskStackSize; /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ + * address of the RAM then create the idle task. 
*/ vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask, cIdleName, @@ -2705,21 +2704,23 @@ static BaseType_t prvCreateIdleTasks( void ) pxIdleTaskStackBuffer, pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } - #if( configNUM_CORES > 1) - else - { - static StaticTask_t xIdleTCBBuffers[configNUM_CORES-1]; - static StackType_t xIdleTaskStackBuffers[configNUM_CORES-1][configMINIMAL_STACK_SIZE]; - xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - xIdleTaskStackBuffers[xCoreID-1], - &xIdleTCBBuffers[xCoreID-1] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - #endif + #if ( configNUM_CORES > 1 ) + else + { + static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; + static StackType_t xIdleTaskStackBuffers[ configNUM_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; + + xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + xIdleTaskStackBuffers[ xCoreID - 1 ], + &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif /* if ( configNUM_CORES > 1 ) */ + if( xIdleTaskHandle[ xCoreID ] != NULL ) { xReturn = pdPASS; @@ -2729,32 +2730,34 @@ static BaseType_t prvCreateIdleTasks( void ) xReturn = pdFAIL; } } - #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ { - if(xCoreID == 0) + if( xCoreID == 0 ) { - /* The Idle task is being created using dynamically allocated RAM. */ - xReturn = xTaskCreate( prvIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - #if( configNUM_CORES > 1 ) - else - { - xReturn = xTaskCreate( prvMinimalIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. 
*/ } + + #if ( configNUM_CORES > 1 ) + else + { + xReturn = xTaskCreate( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } #endif } #endif /* configSUPPORT_STATIC_ALLOCATION */ } + return xReturn; } @@ -4222,49 +4225,50 @@ void vTaskMissedYield( void ) * * The minimal idle task is used for all the additional Cores in a SMP system. * There must be only 1 idle task and the rest are minimal idle tasks. - * + * * @todo additional conditional compiles to remove this function. */ -#if (configNUM_CORES > 1) -static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) -{ - for(;;) +#if ( configNUM_CORES > 1 ) + static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) { - #if ( configUSE_PREEMPTION == 0 ) - { - /* If we are not using preemption we keep forcing a task switch to - * see if any other task has become available. If we are using - * preemption we don't need to do this as any task becoming available - * will automatically get the processor anyway. */ - taskYIELD(); - } - #endif /* configUSE_PREEMPTION */ - - #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) - { - /* When using preemption tasks of equal priority will be - * timesliced. If a task that is sharing the idle priority is ready - * to run then the idle task should yield before the end of the - * timeslice. - * - * A critical region is not required here as we are just reading from - * the list, and an occasional incorrect value will not matter. If - * the ready list at the idle priority contains one more task than the - * number of idle tasks, which is equal to the configured numbers of cores - * then a task other than the idle task is ready to execute. */ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + for( ; ; ) + { + #if ( configUSE_PREEMPTION == 0 ) { + /* If we are not using preemption we keep forcing a task switch to + * see if any other task has become available. If we are using + * preemption we don't need to do this as any task becoming available + * will automatically get the processor anyway. */ taskYIELD(); } - else + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) { - mtCOVERAGE_TEST_MARKER(); + /* When using preemption tasks of equal priority will be + * timesliced. If a task that is sharing the idle priority is ready + * to run then the idle task should yield before the end of the + * timeslice. + * + * A critical region is not required here as we are just reading from + * the list, and an occasional incorrect value will not matter. If + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores + * then a task other than the idle task is ready to execute. 
*/ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + } } -} -#endif +#endif /* if ( configNUM_CORES > 1 ) */ + /* * ----------------------------------------------------------- * The Idle task. From 61fa2246946e82cf2c21272751be3fb15f9c06a0 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 19 May 2021 08:54:22 -0700 Subject: [PATCH 09/16] missed taskYield in minimalIDLE task --- tasks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 911170f91..cf8f51539 100644 --- a/tasks.c +++ b/tasks.c @@ -256,7 +256,6 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #if ( configUSE_CORE_AFFINITY == 1 && configNUM_CORES > 1 ) UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have >= the same number of bits as SMP confNUM_CORES */ - UBaseType_t uxCoreAffinityInheritanceMask; /*< Used to allow a task to inherit the affinity of its parent */ #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) @@ -4231,6 +4230,7 @@ void vTaskMissedYield( void ) #if ( configNUM_CORES > 1 ) static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) { + taskYIELD(); for( ; ; ) { #if ( configUSE_PREEMPTION == 0 ) From f5625177f5199fd7b595f1fcce36bad18d0d04e0 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 19 May 2021 09:03:59 -0700 Subject: [PATCH 10/16] removed extra parameter in TCB structure --- include/FreeRTOS.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 8a47e0dbf..9eefd0d8a 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1211,7 +1211,6 @@ typedef struct xSTATIC_TCB #endif #if ( configUSE_CORE_AFFINITY == 1 && configNUM_CORES > 1 ) UBaseType_t uxDummy25; - UBaseType_t uxDummy26; #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; From f916ccf5062608d4b19ec8283992c0ff48e9c1f3 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Thu, 13 May 2021 15:42:57 -0700 Subject: [PATCH 11/16] Static allocation and lightweight idle tasks (#323) * added multiple idle tasks * Added multiple IDLE tasks to non-static allocation * Adjustments to tasks from PR review --- tasks.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tasks.c b/tasks.c index cf8f51539..eee43ef51 100644 --- a/tasks.c +++ b/tasks.c @@ -2756,6 +2756,20 @@ static BaseType_t prvCreateIdleTasks( void ) } #endif /* configSUPPORT_STATIC_ALLOCATION */ } + return xReturn; +} + +void vTaskStartScheduler( void ) +{ + BaseType_t xReturn; + + #if ( configUSE_TIMERS == 1 ) + { + xReturn = xTimerCreateTimerTask(); + } + #endif /* configUSE_TIMERS */ + + xReturn = prvCreateIdleTasks(); return xReturn; } From a65f379860b8812daeec563968a8d65871f0f9b4 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 12 May 2021 12:03:08 -0700 Subject: [PATCH 12/16] added multiple idle tasks --- tasks.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tasks.c b/tasks.c index eee43ef51..643aeca8d 100644 --- a/tasks.c +++ b/tasks.c @@ -1555,11 +1555,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; /* Is this an 
idle task? */ - if( pxTaskCode == prvIdleTask ) + if(pxTaskCode == prvIdleTask) { pxNewTCB->xIsIdle = pdTRUE; } - #if ( configNUM_CORES > 1 ) else if( pxTaskCode == prvMinimalIdleTask ) { @@ -2703,7 +2702,6 @@ static BaseType_t prvCreateIdleTasks( void ) pxIdleTaskStackBuffer, pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } - #if ( configNUM_CORES > 1 ) else { @@ -4236,11 +4234,17 @@ void vTaskMissedYield( void ) * The MinimalIdle task. * ---------------------------------------------------------- * + * The portTASK_FUNCTION() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvMinimalIdleTask( void *pvParameters ); + * * The minimal idle task is used for all the additional Cores in a SMP system. * There must be only 1 idle task and the rest are minimal idle tasks. - * + * * @todo additional conditional compiles to remove this function. */ + #if ( configNUM_CORES > 1 ) static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) { From 74f816ab4052fae9fbeebce463096df56bfc5e69 Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 12 May 2021 17:01:00 -0700 Subject: [PATCH 13/16] Adjustments to tasks from PR review --- tasks.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/tasks.c b/tasks.c index 643aeca8d..8dd539be4 100644 --- a/tasks.c +++ b/tasks.c @@ -2757,21 +2757,6 @@ static BaseType_t prvCreateIdleTasks( void ) return xReturn; } -void vTaskStartScheduler( void ) -{ - BaseType_t xReturn; - - #if ( configUSE_TIMERS == 1 ) - { - xReturn = xTimerCreateTimerTask(); - } - #endif /* configUSE_TIMERS */ - - xReturn = prvCreateIdleTasks(); - - return xReturn; -} - void vTaskStartScheduler( void ) { BaseType_t xReturn; @@ -4234,11 +4219,6 @@ void vTaskMissedYield( void ) * The MinimalIdle task. * ---------------------------------------------------------- * - * The portTASK_FUNCTION() macro is used to allow port/compiler specific - * language extensions. The equivalent prototype for this function is: - * - * void prvMinimalIdleTask( void *pvParameters ); - * * The minimal idle task is used for all the additional Cores in a SMP system. * There must be only 1 idle task and the rest are minimal idle tasks. * From bc42e04850586a8622923a48351b23647d644f4c Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 19 May 2021 08:38:58 -0700 Subject: [PATCH 14/16] Uncrustified tasks.c --- tasks.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 8dd539be4..a40e51d94 100644 --- a/tasks.c +++ b/tasks.c @@ -2754,6 +2754,7 @@ static BaseType_t prvCreateIdleTasks( void ) } #endif /* configSUPPORT_STATIC_ALLOCATION */ } + return xReturn; } @@ -4221,7 +4222,7 @@ void vTaskMissedYield( void ) * * The minimal idle task is used for all the additional Cores in a SMP system. * There must be only 1 idle task and the rest are minimal idle tasks. - * + * * @todo additional conditional compiles to remove this function. 
*/ From e71e7021108e345701f6fa6198d90a1f3c754ffe Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Thu, 13 May 2021 15:42:57 -0700 Subject: [PATCH 15/16] Static allocation and lightweight idle tasks (#323) * added multiple idle tasks * Added multiple IDLE tasks to non-static allocation * Adjustments to tasks from PR review --- tasks.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tasks.c b/tasks.c index a40e51d94..313c463e9 100644 --- a/tasks.c +++ b/tasks.c @@ -2754,7 +2754,6 @@ static BaseType_t prvCreateIdleTasks( void ) } #endif /* configSUPPORT_STATIC_ALLOCATION */ } - return xReturn; } From d58750f5f997be0e691dacf8a5419930f0a8fcbb Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Wed, 19 May 2021 10:02:49 -0700 Subject: [PATCH 16/16] Updated from upstream --- tasks.c | 272 +++++++++++++++++++++++++++----------------------------- 1 file changed, 132 insertions(+), 140 deletions(-) diff --git a/tasks.c b/tasks.c index 313c463e9..e3921347f 100644 --- a/tasks.c +++ b/tasks.c @@ -150,7 +150,7 @@ /* A port optimised version is provided. Call the port defined macros. */ #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) - /*-----------------------------------------------------------*/ + /*-----------------------------------------------------------*/ /* A port optimised version is provided, call it only if the TCB being reset * is being referenced from a ready list. If it is referenced from a delayed @@ -467,7 +467,7 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #if ( configNUM_CORES > 1 ) - static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; #endif /* @@ -632,13 +632,13 @@ static void prvCheckForRunStateChange( void ) while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) { /* We are only here if we just entered a critical section - * or if we just suspended the scheduler, and another task - * has requested that we yield. - * - * This is slightly complicated since we need to save and restore - * the suspension and critical nesting counts, as well as release - * and reacquire the correct locks. And then do it all over again - * if our state changed again during the reacquisition. */ + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then do it all over again + * if our state changed again during the reacquisition. */ uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; uxPrevSchedulerSuspended = uxSchedulerSuspended; @@ -981,58 +981,58 @@ static void prvYieldForTask( TCB_t * pxTCB, #if ( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) - if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) - { + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + { /* A ready task was just bumped off this core. 
Look at the cores it can run from - * from to see if it is able to run on any of them */ + * from to see if it is able to run on any of them */ UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; - BaseType_t xLowestPriority = pxPreviousTCB->uxPriority - pxPreviousTCB->xIsIdle; - BaseType_t xLowestPriorityCore = -1; + BaseType_t xLowestPriority = pxPreviousTCB->uxPriority - pxPreviousTCB->xIsIdle; + BaseType_t xLowestPriorityCore = -1; - if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) - { - /* The ready task that was removed from this core is not excluded from it. - * Only look at the intersection of the cores the removed task is allowed to run - * on with the cores that the new task is excluded from. It is possible that the - * new task was only placed onto this core because it is excluded from another. - * Check to see if the previous task could run on one of those cores. */ + if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) + { + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. + * Check to see if the previous task could run on one of those cores. */ uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); - } - else - { - /* The ready task that was removed from this core is excluded from it. + } + else + { + /* The ready task that was removed from this core is excluded from it. * @todo See if we can schedule it on any of the cores where it is not excluded from. */ - } + } - uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); + uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); - while( uxCoreMap != 0 ) + while( uxCoreMap != 0 ) + { + int uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); + + xassert( taskVALID_CORE_ID( uxCore ) ); + + uxCoreMap &= ~( 1 << uxCore ); + + BaseType_t xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority - pxCurrentTCBs[ uxCore ]->xIsIdle; + + if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ]->xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) ) { - int uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); - - xassert( taskVALID_CORE_ID( uxCore ) ); - - uxCoreMap &= ~( 1 << uxCore ); - - BaseType_t xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority - pxCurrentTCBs[ uxCore ]->xIsIdle; - - if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ]->xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) - #endif - { - xLowestPriority = xTaskPriority; - xLowestPriorityCore = uxCore; - } + xLowestPriority = xTaskPriority; + xLowestPriorityCore = uxCore; } } - - if( taskVALID_CORE_ID( xLowestPriorityCore ) ) - { - prvYieldCore( xLowestPriorityCore ); - } } + + if( taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + } + } #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ #endif /* if ( configNUM_CORES > 1 ) */ @@ -1488,9 +1488,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, #if ( configNUM_CORES > 1 ) #if ( configUSE_CORE_AFFINITY == 1 ) - { + { pxNewTCB->uxCoreAffinityMask 
= tskNO_AFFINITY; - } + } #endif #endif #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -1561,9 +1561,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #if ( configNUM_CORES > 1 ) else if( pxTaskCode == prvMinimalIdleTask ) - { - pxNewTCB->xIsIdle = pdTRUE; - } + { + pxNewTCB->xIsIdle = pdTRUE; + } #endif else { @@ -1615,14 +1615,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( pxCurrentTCBs[ xCoreID ] == NULL ) { pxNewTCB->xTaskRunState = xCoreID; - - /* This section of code pins the idle tasks to cores. - #if ( configUSE_CORE_AFFINITY == 1 ) - * { - * pxNewTCB->uxCoreAffinityMask = ( 1 << xCoreID ); - * } - #endif - */ pxCurrentTCBs[ xCoreID ] = pxNewTCB; break; } @@ -2233,31 +2225,31 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskCoreAffinitySet( const TaskHandle_t xTask, UBaseType_t uxCoreAffinityMask ) - { - TCB_t * pxTCB; - BaseType_t xCoreID; + { + TCB_t * pxTCB; + BaseType_t xCoreID; - taskENTER_CRITICAL(); - { - pxTCB = prvGetTCBFromHandle( xTask ); + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; - if( xSchedulerRunning != pdFALSE ) + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) { - if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) - { - xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) - { - prvYieldCore( xCoreID ); - } + { + prvYieldCore( xCoreID ); } } } - taskEXIT_CRITICAL(); } + taskEXIT_CRITICAL(); + } #endif /* configUSE_CORE_AFFINITY */ #endif /* if ( configNUM_CORES > 1 ) */ @@ -2267,19 +2259,19 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #if ( configUSE_CORE_AFFINITY == 1 ) UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) - { - TCB_t * pxTCB; + { + TCB_t * pxTCB; UBaseType_t uxCoreAffinityMask; - taskENTER_CRITICAL(); - { - pxTCB = prvGetTCBFromHandle( xTask ); + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; - } - taskEXIT_CRITICAL(); + } + taskEXIT_CRITICAL(); return uxCoreAffinityMask; - } + } #endif /* configUSE_CORE_AFFINITY */ #endif /* if ( configNUM_CORES > 1 ) */ @@ -2632,7 +2624,7 @@ static BaseType_t prvCreateIdleTasks( void ) BaseType_t xCoreID; char cIdleName[ configMAX_TASK_NAME_LEN ]; - /* Add each idle task at the lowest priority. */ + /* Add each idle task at the lowest priority. */ for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) { BaseType_t x; @@ -2692,7 +2684,7 @@ static BaseType_t prvCreateIdleTasks( void ) uint32_t ulIdleTaskStackSize; /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ + * address of the RAM then create the idle task. */ vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask, cIdleName, @@ -2703,19 +2695,19 @@ static BaseType_t prvCreateIdleTasks( void ) pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. 
*/ } #if ( configNUM_CORES > 1 ) - else - { + else + { static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; static StackType_t xIdleTaskStackBuffers[ configNUM_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; - xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, + xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ xIdleTaskStackBuffers[ xCoreID - 1 ], &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } + } #endif /* if ( configNUM_CORES > 1 ) */ if( xIdleTaskHandle[ xCoreID ] != NULL ) @@ -2731,25 +2723,25 @@ static BaseType_t prvCreateIdleTasks( void ) { if( xCoreID == 0 ) { - /* The Idle task is being created using dynamically allocated RAM. */ - xReturn = xTaskCreate( prvIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ } #if ( configNUM_CORES > 1 ) - else - { - xReturn = xTaskCreate( prvMinimalIdleTask, - cIdleName, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } + else + { + xReturn = xTaskCreate( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } #endif } #endif /* configSUPPORT_STATIC_ALLOCATION */ @@ -4221,50 +4213,50 @@ void vTaskMissedYield( void ) * * The minimal idle task is used for all the additional Cores in a SMP system. * There must be only 1 idle task and the rest are minimal idle tasks. - * + * * @todo additional conditional compiles to remove this function. */ #if ( configNUM_CORES > 1 ) - static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) - { +static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) +{ taskYIELD(); for( ; ; ) - { - #if ( configUSE_PREEMPTION == 0 ) + { + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + * see if any other task has become available. If we are using + * preemption we don't need to do this as any task becoming available + * will automatically get the processor anyway. 
*/ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + * timesliced. If a task that is sharing the idle priority is ready + * to run then the idle task should yield before the end of the + * timeslice. + * + * A critical region is not required here as we are just reading from + * the list, and an occasional incorrect value will not matter. If + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores + * then a task other than the idle task is ready to execute. */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) { - /* If we are not using preemption we keep forcing a task switch to - * see if any other task has become available. If we are using - * preemption we don't need to do this as any task becoming available - * will automatically get the processor anyway. */ taskYIELD(); } - #endif /* configUSE_PREEMPTION */ - - #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + else { - /* When using preemption tasks of equal priority will be - * timesliced. If a task that is sharing the idle priority is ready - * to run then the idle task should yield before the end of the - * timeslice. - * - * A critical region is not required here as we are just reading from - * the list, and an occasional incorrect value will not matter. If - * the ready list at the idle priority contains one more task than the - * number of idle tasks, which is equal to the configured numbers of cores - * then a task other than the idle task is ready to execute. */ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) - { - taskYIELD(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + mtCOVERAGE_TEST_MARKER(); } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ - } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ } +} #endif /* if ( configNUM_CORES > 1 ) */ /*
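
For reference, the static-allocation path in prvCreateIdleTasks() above only asks the application for memory for the core 0 idle task; the minimal idle tasks on the remaining cores are placed in TCB and stack buffers owned by the kernel itself. The sketch below is an illustrative application-side implementation of the standard vApplicationGetIdleTaskMemory() hook that this path calls. It is not taken from the patch series, and the buffer names are placeholders chosen for the example.

#include "FreeRTOS.h"
#include "task.h"

/* Illustrative hook for configSUPPORT_STATIC_ALLOCATION == 1. Only the core 0
 * idle task consumes this memory; with configNUM_CORES > 1 the minimal idle
 * tasks for the other cores use the kernel's own static buffers, as added by
 * the patches above. */
void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
                                    StackType_t ** ppxIdleTaskStackBuffer,
                                    uint32_t * pulIdleTaskStackSize )
{
    /* Declared static so the memory remains valid after this function returns. */
    static StaticTask_t xIdleTaskTCB;
    static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];

    *ppxIdleTaskTCBBuffer = &xIdleTaskTCB;
    *ppxIdleTaskStackBuffer = uxIdleTaskStack;
    *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
}

The stack array may be sized larger than configMINIMAL_STACK_SIZE if the application's idle hook needs the extra headroom; only the value written to *pulIdleTaskStackSize must match the array actually provided.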