diff --git a/tasks.c b/tasks.c
index e2245bcf5..0a88bfbf4 100644
--- a/tasks.c
+++ b/tasks.c
@@ -467,6 +467,9 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
  *
  */
 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
+#if ( configNUM_CORES > 1 )
+static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
+#endif

 /*
  * Utility to free all memory allocated by the scheduler to hold a TCB,
@@ -1545,7 +1548,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING;

     /* Is this an idle task? */
+    #if ( configNUM_CORES > 1 )
+        pxNewTCB->xIsIdle = ( pxTaskCode == prvIdleTask ) || ( pxTaskCode == prvMinimalIdleTask );
+    #else
     pxNewTCB->xIsIdle = ( pxTaskCode == prvIdleTask );
+    #endif

     if( pxCreatedTask != NULL )
     {
@@ -2663,22 +2670,40 @@ void vTaskStartScheduler( void )
         #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
             {
-                #error User must specify an array of buffers for idle task TCBs and stacks
-                StaticTask_t * pxIdleTaskTCBBuffer = NULL;
-                StackType_t * pxIdleTaskStackBuffer = NULL;
-                uint32_t ulIdleTaskStackSize;
-
-                /* The Idle task is created using user provided RAM - obtain the
-                 * address of the RAM then create the idle task. */
-                vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
-                xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask,
-                                                                cIdleName,
-                                                                ulIdleTaskStackSize,
-                                                                ( void * ) NULL,       /*lint !e961. The cast is not redundant for all compilers. */
-                                                                portPRIVILEGE_BIT,     /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
-                                                                pxIdleTaskStackBuffer,
-                                                                pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
+                if( xCoreID == 0 )
+                {
+                    StaticTask_t * pxIdleTaskTCBBuffer = NULL;
+                    StackType_t * pxIdleTaskStackBuffer = NULL;
+                    uint32_t ulIdleTaskStackSize;
+                    /* The Idle task is created using user provided RAM - obtain the
+                     * address of the RAM then create the idle task. */
+                    vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
+                    xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask,
+                                                                    cIdleName,
+                                                                    ulIdleTaskStackSize,
+                                                                    ( void * ) NULL,       /*lint !e961. The cast is not redundant for all compilers. */
+                                                                    portPRIVILEGE_BIT,     /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
+                                                                    pxIdleTaskStackBuffer,
+                                                                    pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
+                }
+                #if ( configNUM_CORES > 1 )
+                    else
+                    {
+                        struct taskMemory {
+                            StaticTask_t TCB;
+                            StackType_t stack[ configMINIMAL_STACK_SIZE ];
+                        };
+                        static struct taskMemory idleMemory[ configNUM_CORES ];
+                        xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask,
+                                                                        cIdleName,
+                                                                        configMINIMAL_STACK_SIZE,
+                                                                        ( void * ) NULL,       /*lint !e961. The cast is not redundant for all compilers. */
+                                                                        portPRIVILEGE_BIT,     /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
+                                                                        idleMemory[ xCoreID ].stack,
+                                                                        &idleMemory[ xCoreID ].TCB ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
+                    }
+                #endif

                 if( xIdleTaskHandle[ xCoreID ] != NULL )
                 {
                     xReturn = pdPASS;
                 }
@@ -4146,6 +4171,61 @@ void vTaskMissedYield( void )

 #endif /* configUSE_TRACE_FACILITY */

+/*
+ * -----------------------------------------------------------
+ * The MinimalIdle task.
+ * ----------------------------------------------------------
+ *
+ * The portTASK_FUNCTION() macro is used to allow port/compiler specific
+ * language extensions. The equivalent prototype for this function is:
+ *
+ * void prvMinimalIdleTask( void *pvParameters );
+ *
+ * The minimal idle task runs on the additional cores of an SMP system.
+ * Only one full idle task is created; every other core runs a minimal idle task.
+ *
+ * @todo Add conditional compilation so this function can be removed when it is not required.
+ */
+#if ( configNUM_CORES > 1 )
+static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters )
+{
+    for( ; ; )
+    {
+        #if ( configUSE_PREEMPTION == 0 )
+            {
+                /* If we are not using preemption we keep forcing a task switch to
+                 * see if any other task has become available. If we are using
+                 * preemption we don't need to do this as any task becoming available
+                 * will automatically get the processor anyway. */
+                taskYIELD();
+            }
+        #endif /* configUSE_PREEMPTION */
+
+        #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
+            {
+                /* When using preemption, tasks of equal priority will be
+                 * time sliced. If a task that is sharing the idle priority is ready
+                 * to run then the idle task should yield before the end of the
+                 * timeslice.
+                 *
+                 * A critical region is not required here as we are just reading from
+                 * the list, and an occasional incorrect value will not matter. If
+                 * the ready list at the idle priority contains more tasks than there
+                 * are idle tasks (one idle task per configured core), then a task
+                 * other than an idle task is ready to execute. */
+                if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES )
+                {
+                    taskYIELD();
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
+    }
+}
+#endif
 /*
  * -----------------------------------------------------------
  * The Idle task.