diff --git a/event_groups.c b/event_groups.c index de44dc2e1..644ec2945 100644 --- a/event_groups.c +++ b/event_groups.c @@ -256,7 +256,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -408,7 +408,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 9121888ad..341ebb534 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -236,6 +236,14 @@ #define configUSE_COUNTING_SEMAPHORES 0 #endif +#ifndef configUSE_TASK_PREEMPTION_DISABLE + #define configUSE_TASK_PREEMPTION_DISABLE 0 +#endif + +#ifndef configUSE_CORE_EXCLUSION + #define configUSE_CORE_EXCLUSION 0 +#endif + #ifndef configUSE_ALTERNATIVE_API #define configUSE_ALTERNATIVE_API 0 #endif @@ -283,6 +291,15 @@ #define portSOFTWARE_BARRIER() #endif +#ifndef configNUM_CORES + #define configNUM_CORES 1 +#endif + +#ifndef configRUN_MULTIPLE_PRIORITIES + #define configRUN_MULTIPLE_PRIORITIES 0 +#endif + + /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 @@ -782,10 +799,6 @@ #define portPRIVILEGE_BIT ( ( UBaseType_t ) 0x00 ) #endif -#ifndef portYIELD_WITHIN_API - #define portYIELD_WITHIN_API portYIELD -#endif - #ifndef portSUPPRESS_TICKS_AND_SLEEP #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) #endif @@ -930,6 +943,18 @@ #error configUSE_MUTEXES must be set to 1 to use recursive mutexes #endif +#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_CORE_EXCLUSION != 0 ) ) + #error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use core exclusion +#endif + +#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use task preemption disable +#endif + +#if( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configUSE_PREEMPTION must be set to 1 to use task preemption disable +#endif + #ifndef configINITIAL_TICK_COUNT #define configINITIAL_TICK_COUNT 0 #endif @@ -1174,7 +1199,14 @@ typedef struct xSTATIC_TCB StaticListItem_t xDummy3[ 2 ]; UBaseType_t uxDummy5; void * pxDummy6; + BaseType_t xDummy23[ 2 ]; uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xDummy24; + #endif + #if ( configUSE_CORE_EXCLUSION == 1 ) + UBaseType_t uxDummy25; + #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; #endif diff --git a/include/task.h b/include/task.h index 66c99c56f..1959757f3 100644 --- a/include/task.h +++ b/include/task.h @@ -214,6 +214,9 @@ typedef enum * task. h * * Macro to disable all maskable interrupts. + * This also returns what the interrupt state was + * upon being called. This state may subsequently + * be passed to taskRESTORE_INTERRUPTS(). * * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS * \ingroup SchedulerControl @@ -230,6 +233,28 @@ typedef enum */ #define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() +/** + * task. h + * + * Macro to restore microcontroller interrupts to + * a previous state. + * + * \defgroup taskRESTORE_INTERRUPTS taskRESTORE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskRESTORE_INTERRUPTS(ulState) portRESTORE_INTERRUPTS(ulState) + +/** + * task. 
h + * + * Macro that determines if it is being called from within an ISR + * or a task. Returns non-zero if it is in an ISR. + * + * \defgroup taskCHECK_IF_IN_ISR taskCHECK_IF_IN_ISR + * \ingroup SchedulerControl + */ +#define taskCHECK_IF_IN_ISR() portCHECK_IF_IN_ISR() + /* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is * 0 to generate more optimal code when configASSERT() is defined as the constant * is used in assert() statements. */ @@ -237,6 +262,8 @@ typedef enum #define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) +/* Check if core value is valid */ +#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) ) /*----------------------------------------------------------- * TASK CREATION API @@ -1208,6 +1235,12 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; */ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; +void vTaskCoreExclusionSet( const TaskHandle_t xTask, UBaseType_t uxCoreExclude ); +UBaseType_t vTaskCoreExclusionGet( const TaskHandle_t xTask ); + +void vTaskPreemptionDisable( const TaskHandle_t xTask ); +void vTaskPreemptionEnable( const TaskHandle_t xTask ); + /*----------------------------------------------------------- * SCHEDULER CONTROL *----------------------------------------------------------*/ @@ -1666,10 +1699,10 @@ BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, * xTaskGetIdleTaskHandle() is only available if * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h. * - * Simply returns the handle of the idle task. It is not valid to call - * xTaskGetIdleTaskHandle() before the scheduler has been started. + * Simply returns a pointer to the array of idle task handles. + * It is not valid to call xTaskGetIdleTaskHandle() before the scheduler has been started. */ -TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION; +TaskHandle_t *xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION; /** * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for @@ -2946,7 +2979,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, * Sets the pointer to the current TCB to the TCB of the highest priority task * that is ready to run. */ -portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; +portDONT_DISCARD void vTaskSwitchContext( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; /* * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY @@ -2959,6 +2992,11 @@ TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION; */ TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; +/* + * Return the handle of the task running on specified core. + */ +TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) PRIVILEGED_FUNCTION; + /* * Shortcut used by the queue implementation to prevent unnecessary call to * taskYIELD(); @@ -3044,6 +3082,11 @@ TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION; */ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; +/* + * For internal use only. Same as portYIELD_WITHIN_API() in single core FreeRTOS. + * For SMP this is not defined by the port. 
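+ * The kernel code in tasks.c, queue.c and event_groups.c now calls this
+ * in place of portYIELD_WITHIN_API().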
+ */ +void vTaskYieldWithinAPI( void ); /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/portable/XCC/XCORE200/port.c b/portable/XCC/XCORE200/port.c index 965c24e47..b7ec504fb 100644 --- a/portable/XCC/XCORE200/port.c +++ b/portable/XCC/XCORE200/port.c @@ -9,14 +9,29 @@ static hwtimer_t xKernelTimer; -uint32_t ulPortYieldRequired = pdFALSE; +uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ] = { pdFALSE }; /*-----------------------------------------------------------*/ +void vIntercoreInterruptISR( void ) +{ + int xCoreID; + +// debug_printf( "In KCALL: %u\n", ulData ); + xCoreID = rtos_core_id_get(); + ulPortYieldRequired[ xCoreID ] = pdTRUE; +} +/*-----------------------------------------------------------*/ + DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData ) { uint32_t ulLastTrigger; uint32_t ulNow; + int xCoreID; + + xCoreID = 0; + + configASSERT( xCoreID == rtos_core_id_get() ); /* Need the next interrupt to be scheduled relative to * the current trigger time, rather than the current @@ -40,14 +55,36 @@ DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData ) if( xTaskIncrementTick() != pdFALSE ) { - ulPortYieldRequired = pdTRUE; + ulPortYieldRequired[ xCoreID ] = pdTRUE; } } /*-----------------------------------------------------------*/ -static void prvCoreInit( void ) +void vPortYieldOtherCore( int xOtherCoreID ) { - rtos_core_register(); + int xCoreID; + + /* + * This function must be called from within a critical section. + */ + + xCoreID = rtos_core_id_get(); + +// debug_printf("%d->%d\n", xCoreID, xOtherCoreID); + +// debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID ); + + rtos_irq( xOtherCoreID, xCoreID ); +} +/*-----------------------------------------------------------*/ + +static int prvCoreInit( void ) +{ + int xCoreID; + + xCoreID = rtos_core_register(); + debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID ); + asm volatile ( "ldap r11, kexcept\n\t" "set kep, r11\n\t" @@ -56,35 +93,49 @@ static void prvCoreInit( void ) : "r11" ); - rtos_irq_enable( 1 ); + rtos_irq_enable( configNUM_CORES ); - uint32_t ulNow; - ulNow = hwtimer_get_time( xKernelTimer ); -// debug_printf( "The time is now (%u)\n", ulNow ); + /* + * All threads wait here until all have enabled IRQs + */ + while( rtos_irq_ready() == pdFALSE ); - ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + if( xCoreID == 0 ) + { + uint32_t ulNow; + ulNow = hwtimer_get_time( xKernelTimer ); +// debug_printf( "The time is now (%u)\n", ulNow ); - triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) ); - hwtimer_set_trigger_time( xKernelTimer, ulNow ); - triggerable_enable_trigger( xKernelTimer ); + ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + + triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) ); + hwtimer_set_trigger_time( xKernelTimer, ulNow ); + triggerable_enable_trigger( xKernelTimer ); + } + + return xCoreID; } /*-----------------------------------------------------------*/ DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void ) { - prvCoreInit(); + int xCoreID; - debug_printf( "FreeRTOS initialized\n" ); + xCoreID = prvCoreInit(); + + debug_printf( "FreeRTOS Core %d initialized\n", xCoreID ); /* * Restore the context of the first thread * to run and jump into it. 
*/ asm volatile ( + "mov r6, %0\n\t" /* R6 must be the FreeRTOS core ID*/ + "ldaw r5, dp[pxCurrentTCBs]\n\t" /* R5 must be the TCB list which is indexed by R6 */ "bu _freertos_restore_ctx\n\t" : /* no outputs */ - : /* no inputs */ - : /* nothing is clobbered */ + : "r"(xCoreID) + : "r5", "r6" ); } /*-----------------------------------------------------------*/ @@ -159,14 +210,22 @@ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t px } /*-----------------------------------------------------------*/ +void vPortStartSMPScheduler( void ); + /* * See header file for description. */ BaseType_t xPortStartScheduler( void ) { + if( ( configNUM_CORES > portMAX_CORE_COUNT ) || ( configNUM_CORES <= 0 ) ) + { + return pdFAIL; + } + rtos_locks_initialize(); xKernelTimer = hwtimer_alloc(); - RTOS_KERNEL_ENTRY(vPortStartSchedulerOnCore)(); + + vPortStartSMPScheduler(); return pdPASS; } diff --git a/portable/XCC/XCORE200/port.xc b/portable/XCC/XCORE200/port.xc new file mode 100644 index 000000000..41f3ae32b --- /dev/null +++ b/portable/XCC/XCORE200/port.xc @@ -0,0 +1,26 @@ +/* + * port.xc + * + * Created on: Jul 31, 2019 + * Author: mbruno + */ + +//#include "rtos_support.h" + +extern "C" { + +#include "FreeRTOSConfig.h" /* to get configNUM_CORES */ +#ifndef configNUM_CORES +#define configNUM_CORES 1 +#endif + +void __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(void); + +} /* extern "C" */ + +void vPortStartSMPScheduler( void ) +{ + par (int i = 0; i < configNUM_CORES; i++) { + __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(); + } +} diff --git a/portable/XCC/XCORE200/portasm.S b/portable/XCC/XCORE200/portasm.S index dcdf47f9b..5fcf10de1 100644 --- a/portable/XCC/XCORE200/portasm.S +++ b/portable/XCC/XCORE200/portasm.S @@ -21,10 +21,13 @@ kexcept: bau r11 //_TrapHandler is at 0x00040080. TODO: Is it always? Why can't I access the symbol _TrapHandler? _yield: - set sp, r4 /* Restore the task's SP to save the rest of its context. */ - bu _yield_continue /* Skip the ulPortYieldRequired check and jump right to */ - /* the context save and switch. Also skips saving SPC */ - /* since the kcall handler has already saved it. */ + {set sp, r4 /* Restore the task's SP to save the rest of its context. */ + get r11, id} /* Get the logical core ID into r11. */ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0 */ + bu _yield_continue /* Skip the ulPortYieldRequired check and jump right to */ + /* the context save and switch. Also skips saving SPC */ + /* since the kcall handler has already saved it. */ .align 64 kcall: @@ -68,7 +71,7 @@ rtos_interrupt_callback_common: {stw r4, sp[12] /*stw r11, sp[19] already saved by the wrapper function. */ - ldaw r4, sp[0]} /* Get value of current stackpointer into r4 */ + ldaw r4, sp[0]} /* Get value of current stackpointer into r4. */ {kentsp 0 /* switch to the kernel stack. */ /* The value 0 is safe to use since we don't need the SP */ @@ -78,12 +81,16 @@ rtos_interrupt_callback_common: {mov r0, r11 /* into the first argument for the callback function... */ bla r1} /* and call the callback function. */ - set sp, r4 /* Restore the task's SP now. */ - - ldw r0, dp[ulPortYieldRequired] /* Is a yield required? */ - {bf r0, _freertos_restore_ctx_partial /* If not, restore the context now. */ - ldc r0, 0} - stw r0, dp[ulPortYieldRequired] /* Otherwise, clear the yield required flag. */ + {set sp, r4 /* Restore the task's SP now. */ + + get r11, id} /* Get the logical core ID into r11. 
*/ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0. */ + ldaw r2, dp[ulPortYieldRequired] /* Get the yield required array into r2. */ + ldw r1, r2[r0] /* Is a yield required for this core? */ + {bf r1, _freertos_restore_ctx_partial /* If not, restore the context now. */ + ldc r1, 0} + stw r1, r2[r0] /* Otherwise, clear the yield required flag. */ /* Save the rest of the current task's context. */ stw spc, sp[1] @@ -100,14 +107,17 @@ _yield_continue: stw r9, sp[17] stw r10, sp[18] - ldw r0, dp[pxCurrentTCB] /* Save the current task's SP to the first */ - stw r4, r0[0x0] /* word (top of stack) in the current TCB */ + ldaw r5, dp[pxCurrentTCBs] /* Get the current TCB array into r5. */ + ldw r1, r5[r0] /* Get this core's current TCB pointer into r1. */ + stw r4, r1[0x0] /* Save the current task's SP to the first */ + /* word (top of stack) in the current TCB. */ - kentsp 0 /* switch back to the kernel stack. */ + {kentsp 0 /* switch back to the kernel stack. */ + mov r6, r0} /* copy the RTOS core ID into r6 so we don't lose it. */ ldap r11, vTaskSwitchContext - bla r11 /* Finally call vTaskSwitchContext() now that the task's */ - /* entire context is saved. */ + bla r11 /* Finally call vTaskSwitchContext(core_id) now that the task's */ + /* entire context is saved. Note the core id in r0 is the argument. */ //krestsp 0 /* unnecessary since KSP is already set and the SP */ /* is being restored next from the current TCB. */ @@ -115,7 +125,7 @@ _yield_continue: .globl _freertos_restore_ctx _freertos_restore_ctx: - ldw r0, dp[pxCurrentTCB] + ldw r0, r5[r6] /* get this core's current TCB pointer into r0 */ ldw r0, r0[0x0] /* Get the top of the stack from the current TCB... */ set sp, r0; /* into the stack pointer register. */ diff --git a/portable/XCC/XCORE200/portmacro.h b/portable/XCC/XCORE200/portmacro.h index b22aff1df..ae28990d7 100644 --- a/portable/XCC/XCORE200/portmacro.h +++ b/portable/XCC/XCORE200/portmacro.h @@ -46,9 +46,9 @@ typedef uint32_t UBaseType_t; #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portCRITICAL_NESTING_IN_TCB 1 -#ifdef configNUM_CORES -#warning configNUM_CORES should not be defined when using the single core XCORE port -#undef configNUM_CORES +#define portMAX_CORE_COUNT 8 +#ifndef configNUM_CORES +#define configNUM_CORES 1 #endif /* This may be set to zero in the config file if the rtos_time @@ -67,6 +67,12 @@ functions are not needed or if it is incremented elsewhere. */ #define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH #ifndef __ASSEMBLER__ + +/* Check validity of number of cores specified in config */ +#if ( configNUM_CORES < 1 || portMAX_CORE_COUNT < configNUM_CORES ) +#error "Invalid number of cores specified in config!" +#endif + #define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER() #define portTASK_STACK_DEPTH(pxTaskCode) RTOS_THREAD_STACK_SIZE(pxTaskCode) /*-----------------------------------------------------------*/ @@ -79,17 +85,24 @@ do \ { \ if( xSwitchRequired != pdFALSE ) \ { \ - extern uint32_t ulPortYieldRequired; \ - ulPortYieldRequired = pdTRUE; \ + extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \ + ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \ } \ } while( 0 ) #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ +/* SMP utilities. 
*/
+#define portGET_CORE_ID() rtos_core_id_get()
+
+void vPortYieldOtherCore( int xOtherCoreID );
+#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
+/*-----------------------------------------------------------*/
+
 /* Architecture specific optimisations. */
 #ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
-#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
 #endif
 
 #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
@@ -108,10 +121,16 @@ do \
 
 /* Critical section management. */
 #define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get()
+
+/*
+ * This differs from the standard portDISABLE_INTERRUPTS()
+ * in that it also returns what the interrupt state was
+ * before disabling interrupts.
+ */
 #define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all()
+
 #define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all()
-#define portSET_INTERRUPT_MASK_FROM_ISR() 0
-#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x)
+
 /*
  * Will enable interrupts if ulState is non-zero.
  */
@@ -122,12 +141,43 @@ do \
  * ISR or otherwise in kernel mode.
  */
 #define portCHECK_IF_IN_ISR() rtos_isr_running()
+
 #define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 )
 
+#define portGET_ISR_LOCK() rtos_lock_acquire(0)
+#define portRELEASE_ISR_LOCK() rtos_lock_release(0)
+#define portGET_TASK_LOCK() rtos_lock_acquire(1)
+#define portRELEASE_TASK_LOCK() rtos_lock_release(1)
+
 void vTaskEnterCritical(void);
 void vTaskExitCritical(void);
-#define portENTER_CRITICAL() vTaskEnterCritical()
-#define portEXIT_CRITICAL() vTaskExitCritical()
+#define portENTER_CRITICAL() vTaskEnterCritical()
+#define portEXIT_CRITICAL() vTaskExitCritical()
+
+/*
+ * vTaskEnterCritical() has been modified to be safe to use
+ * from within ISRs. The previous mask does not need to be
+ * returned since on the xCORE interrupts are always disabled
+ * in ISRs. Effectively this call just grabs the kernel lock
+ * when called from an ISR.
+ */
+static inline uint32_t portSET_INTERRUPT_MASK_FROM_ISR( void )
+{
+    vTaskEnterCritical();
+    return 0;
+}
+#define portSET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR()
+
+/*
+ * vTaskExitCritical() has been modified to be safe to use
+ * from within ISRs. When the nesting level has reached zero
+ * it releases the lock, but when called from within an ISR
+ * it will *not* re-enable interrupts since it is assumed they
+ * were previously disabled. Thus the previous state in x is
+ * unused.
+ */
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vTaskExitCritical()
+
 /*-----------------------------------------------------------*/
 
 /* Runtime stats support */
diff --git a/portable/XCC/XCORE200/rtos_support_rtos_config.h b/portable/XCC/XCORE200/rtos_support_rtos_config.h
index 9ae7012ba..d4862ef38 100644
--- a/portable/XCC/XCORE200/rtos_support_rtos_config.h
+++ b/portable/XCC/XCORE200/rtos_support_rtos_config.h
@@ -35,7 +35,10 @@
  * The RTOS provided handler that should run when a
  * core receives an intercore interrupt request.
  */
-#define RTOS_INTERCORE_INTERRUPT_ISR()
+#define RTOS_INTERCORE_INTERRUPT_ISR() do { \
+    void vIntercoreInterruptISR( void ); \
+    vIntercoreInterruptISR(); \
+} while ( 0 )
 
 /**
  * The number of hardware locks that the RTOS
  *
  * Note that the IRQ routines require a lock and
  * will share the first one with the RTOS.
  */
-#define RTOS_LOCK_COUNT 0
+#define RTOS_LOCK_COUNT 2
 
 /**
  * Remaps all calls to debug_printf() to rtos_printf().
@@ -64,6 +67,15 @@ #endif #define DEBUG_PRINT_ENABLE 1 + #ifndef configTASKS_DEBUG + #define configTASKS_DEBUG 0 + #endif + #if configTASKS_DEBUG == 1 + #define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1 + #else + #define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1 + #endif + #else /* configENABLE_DEBUG_PRINTF */ /* ensure that debug_printf is disabled */ diff --git a/portable/XCC/XCOREAI/port.c b/portable/XCC/XCOREAI/port.c index 68c3a9fc1..8f812e135 100644 --- a/portable/XCC/XCOREAI/port.c +++ b/portable/XCC/XCOREAI/port.c @@ -10,14 +10,29 @@ static hwtimer_t xKernelTimer; -uint32_t ulPortYieldRequired = pdFALSE; +uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ] = { pdFALSE }; /*-----------------------------------------------------------*/ +void vIntercoreInterruptISR( void ) +{ + int xCoreID; + +// debug_printf( "In KCALL: %u\n", ulData ); + xCoreID = rtos_core_id_get(); + ulPortYieldRequired[ xCoreID ] = pdTRUE; +} +/*-----------------------------------------------------------*/ + DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData ) { uint32_t ulLastTrigger; uint32_t ulNow; + int xCoreID; + + xCoreID = 0; + + configASSERT( xCoreID == rtos_core_id_get() ); /* Need the next interrupt to be scheduled relative to * the current trigger time, rather than the current @@ -41,14 +56,36 @@ DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData ) if( xTaskIncrementTick() != pdFALSE ) { - ulPortYieldRequired = pdTRUE; + ulPortYieldRequired[ xCoreID ] = pdTRUE; } } /*-----------------------------------------------------------*/ -static void prvCoreInit( void ) +void vPortYieldOtherCore( int xOtherCoreID ) { - rtos_core_register(); + int xCoreID; + + /* + * This function must be called from within a critical section. + */ + + xCoreID = rtos_core_id_get(); + +// debug_printf("%d->%d\n", xCoreID, xOtherCoreID); + +// debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID ); + + rtos_irq( xOtherCoreID, xCoreID ); +} +/*-----------------------------------------------------------*/ + +static int prvCoreInit( void ) +{ + int xCoreID; + + xCoreID = rtos_core_register(); + debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID ); + asm volatile ( "ldap r11, kexcept\n\t" "set kep, r11\n\t" @@ -57,35 +94,57 @@ static void prvCoreInit( void ) : "r11" ); - rtos_irq_enable( 1 ); + rtos_irq_enable( configNUM_CORES ); - uint32_t ulNow; - ulNow = hwtimer_get_time( xKernelTimer ); -// debug_printf( "The time is now (%u)\n", ulNow ); + /* + * All threads wait here until all have enabled IRQs + */ + while( rtos_irq_ready() == pdFALSE ); - ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + if( xCoreID == 0 ) + { + uint32_t ulNow; + ulNow = hwtimer_get_time( xKernelTimer ); +// debug_printf( "The time is now (%u)\n", ulNow ); - triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) ); - hwtimer_set_trigger_time( xKernelTimer, ulNow ); - triggerable_enable_trigger( xKernelTimer ); + ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + + triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) ); + hwtimer_set_trigger_time( xKernelTimer, ulNow ); + triggerable_enable_trigger( xKernelTimer ); + } + + return xCoreID; } /*-----------------------------------------------------------*/ DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void ) { - prvCoreInit(); + int xCoreID; - debug_printf( "FreeRTOS initialized\n" ); + xCoreID = prvCoreInit(); + + #if( configUSE_CORE_INIT_HOOK == 1 
) + { + extern void vApplicationCoreInitHook( BaseType_t xCoreID ); + + vApplicationCoreInitHook( xCoreID ); + } + #endif + + debug_printf( "FreeRTOS Core %d initialized\n", xCoreID ); /* * Restore the context of the first thread * to run and jump into it. */ asm volatile ( + "mov r6, %0\n\t" /* R6 must be the FreeRTOS core ID*/ + "ldaw r5, dp[pxCurrentTCBs]\n\t" /* R5 must be the TCB list which is indexed by R6 */ "bu _freertos_restore_ctx\n\t" : /* no outputs */ - : /* no inputs */ - : /* nothing is clobbered */ + : "r"(xCoreID) + : "r5", "r6" ); } /*-----------------------------------------------------------*/ @@ -164,14 +223,22 @@ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t px } /*-----------------------------------------------------------*/ +void vPortStartSMPScheduler( void ); + /* * See header file for description. */ BaseType_t xPortStartScheduler( void ) { + if( ( configNUM_CORES > portMAX_CORE_COUNT ) || ( configNUM_CORES <= 0 ) ) + { + return pdFAIL; + } + rtos_locks_initialize(); xKernelTimer = hwtimer_alloc(); - RTOS_KERNEL_ENTRY(vPortStartSchedulerOnCore)(); + + vPortStartSMPScheduler(); return pdPASS; } diff --git a/portable/XCC/XCOREAI/port.xc b/portable/XCC/XCOREAI/port.xc new file mode 100644 index 000000000..41f3ae32b --- /dev/null +++ b/portable/XCC/XCOREAI/port.xc @@ -0,0 +1,26 @@ +/* + * port.xc + * + * Created on: Jul 31, 2019 + * Author: mbruno + */ + +//#include "rtos_support.h" + +extern "C" { + +#include "FreeRTOSConfig.h" /* to get configNUM_CORES */ +#ifndef configNUM_CORES +#define configNUM_CORES 1 +#endif + +void __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(void); + +} /* extern "C" */ + +void vPortStartSMPScheduler( void ) +{ + par (int i = 0; i < configNUM_CORES; i++) { + __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(); + } +} diff --git a/portable/XCC/XCOREAI/portasm.S b/portable/XCC/XCOREAI/portasm.S index b2fe54aa9..b785ba643 100644 --- a/portable/XCC/XCOREAI/portasm.S +++ b/portable/XCC/XCOREAI/portasm.S @@ -21,10 +21,13 @@ kexcept: bau r11 //_TrapHandler is at 0x00080080. TODO: Is it always? Why can't I access the symbol _TrapHandler? _yield: - set sp, r4 /* Restore the task's SP to save the rest of its context. */ - bu _yield_continue /* Skip the ulPortYieldRequired check and jump right to */ - /* the context save and switch. Also skips saving SPC */ - /* since the kcall handler has already saved it. */ + {set sp, r4 /* Restore the task's SP to save the rest of its context. */ + get r11, id} /* Get the logical core ID into r11. */ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0 */ + bu _yield_continue /* Skip the ulPortYieldRequired check and jump right to */ + /* the context save and switch. Also skips saving SPC */ + /* since the kcall handler has already saved it. */ .align 64 kcall: @@ -68,7 +71,7 @@ rtos_interrupt_callback_common: {stw r4, sp[12] /*stw r11, sp[19] already saved by the wrapper function. */ - ldaw r4, sp[0]} /* Get value of current stackpointer into r4 */ + ldaw r4, sp[0]} /* Get value of current stackpointer into r4. */ {kentsp 0 /* switch to the kernel stack. */ /* The value 0 is safe to use since we don't need the SP */ @@ -78,12 +81,16 @@ rtos_interrupt_callback_common: {mov r0, r11 /* into the first argument for the callback function... */ bla r1} /* and call the callback function. */ - set sp, r4 /* Restore the task's SP now. */ - - ldw r0, dp[ulPortYieldRequired] /* Is a yield required? 
*/ - {bf r0, _freertos_restore_ctx_partial /* If not, restore the context now. */ - ldc r0, 0} - stw r0, dp[ulPortYieldRequired] /* Otherwise, clear the yield required flag. */ + {set sp, r4 /* Restore the task's SP now. */ + + get r11, id} /* Get the logical core ID into r11. */ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0. */ + ldaw r2, dp[ulPortYieldRequired] /* Get the yield required array into r2. */ + ldw r1, r2[r0] /* Is a yield required for this core? */ + {bf r1, _freertos_restore_ctx_partial /* If not, restore the context now. */ + ldc r1, 0} + stw r1, r2[r0] /* Otherwise, clear the yield required flag. */ /* Save the rest of the current task's context. */ @@ -113,14 +120,17 @@ _yield_continue: ldaw r11, sp[37]} vstc r11[0] #endif - ldw r0, dp[pxCurrentTCB] /* Save the current task's SP to the first */ - stw r4, r0[0x0] /* word (top of stack) in the current TCB */ + ldaw r5, dp[pxCurrentTCBs] /* Get the current TCB array into r5. */ + ldw r1, r5[r0] /* Get this core's current TCB pointer into r1. */ + stw r4, r1[0x0] /* Save the current task's SP to the first */ + /* word (top of stack) in the current TCB. */ - kentsp 0 /* switch back to the kernel stack. */ + {kentsp 0 /* switch back to the kernel stack. */ + mov r6, r0} /* copy the RTOS core ID into r6 so we don't lose it. */ ldap r11, vTaskSwitchContext - bla r11 /* Finally call vTaskSwitchContext() now that the task's */ - /* entire context is saved. */ + bla r11 /* Finally call vTaskSwitchContext(core_id) now that the task's */ + /* entire context is saved. Note the core id in r0 is the argument. */ //krestsp 0 /* unnecessary since KSP is already set and the SP */ /* is being restored next from the current TCB. */ @@ -128,7 +138,7 @@ _yield_continue: .globl _freertos_restore_ctx _freertos_restore_ctx: - ldw r0, dp[pxCurrentTCB] + ldw r0, r5[r6] /* get this core's current TCB pointer into r0 */ ldw r0, r0[0x0] /* Get the top of the stack from the current TCB... */ set sp, r0 /* into the stack pointer register. */ diff --git a/portable/XCC/XCOREAI/portmacro.h b/portable/XCC/XCOREAI/portmacro.h index b22aff1df..ae28990d7 100644 --- a/portable/XCC/XCOREAI/portmacro.h +++ b/portable/XCC/XCOREAI/portmacro.h @@ -46,9 +46,9 @@ typedef uint32_t UBaseType_t; #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portCRITICAL_NESTING_IN_TCB 1 -#ifdef configNUM_CORES -#warning configNUM_CORES should not be defined when using the single core XCORE port -#undef configNUM_CORES +#define portMAX_CORE_COUNT 8 +#ifndef configNUM_CORES +#define configNUM_CORES 1 #endif /* This may be set to zero in the config file if the rtos_time @@ -67,6 +67,12 @@ functions are not needed or if it is incremented elsewhere. */ #define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH #ifndef __ASSEMBLER__ + +/* Check validity of number of cores specified in config */ +#if ( configNUM_CORES < 1 || portMAX_CORE_COUNT < configNUM_CORES ) +#error "Invalid number of cores specified in config!" 
+#endif
+
 #define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER()
 #define portTASK_STACK_DEPTH(pxTaskCode) RTOS_THREAD_STACK_SIZE(pxTaskCode)
 /*-----------------------------------------------------------*/
@@ -79,17 +85,24 @@ do \
 { \
     if( xSwitchRequired != pdFALSE ) \
     { \
-        extern uint32_t ulPortYieldRequired; \
-        ulPortYieldRequired = pdTRUE; \
+        extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \
+        ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \
     } \
 } while( 0 )
 
 #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
 /*-----------------------------------------------------------*/
 
+/* SMP utilities. */
+#define portGET_CORE_ID() rtos_core_id_get()
+
+void vPortYieldOtherCore( int xOtherCoreID );
+#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
+/*-----------------------------------------------------------*/
+
 /* Architecture specific optimisations. */
 #ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
-#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
 #endif
 
 #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
@@ -108,10 +121,16 @@ do \
 
 /* Critical section management. */
 #define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get()
+
+/*
+ * This differs from the standard portDISABLE_INTERRUPTS()
+ * in that it also returns what the interrupt state was
+ * before disabling interrupts.
+ */
 #define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all()
+
 #define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all()
-#define portSET_INTERRUPT_MASK_FROM_ISR() 0
-#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x)
+
 /*
  * Will enable interrupts if ulState is non-zero.
  */
@@ -122,12 +141,43 @@ do \
  * ISR or otherwise in kernel mode.
  */
 #define portCHECK_IF_IN_ISR() rtos_isr_running()
+
 #define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 )
 
+#define portGET_ISR_LOCK() rtos_lock_acquire(0)
+#define portRELEASE_ISR_LOCK() rtos_lock_release(0)
+#define portGET_TASK_LOCK() rtos_lock_acquire(1)
+#define portRELEASE_TASK_LOCK() rtos_lock_release(1)
+
 void vTaskEnterCritical(void);
 void vTaskExitCritical(void);
-#define portENTER_CRITICAL() vTaskEnterCritical()
-#define portEXIT_CRITICAL() vTaskExitCritical()
+#define portENTER_CRITICAL() vTaskEnterCritical()
+#define portEXIT_CRITICAL() vTaskExitCritical()
+
+/*
+ * vTaskEnterCritical() has been modified to be safe to use
+ * from within ISRs. The previous mask does not need to be
+ * returned since on the xCORE interrupts are always disabled
+ * in ISRs. Effectively this call just grabs the kernel lock
+ * when called from an ISR.
+ */
+static inline uint32_t portSET_INTERRUPT_MASK_FROM_ISR( void )
+{
+    vTaskEnterCritical();
+    return 0;
+}
+#define portSET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR()
+
+/*
+ * vTaskExitCritical() has been modified to be safe to use
+ * from within ISRs. When the nesting level has reached zero
+ * it releases the lock, but when called from within an ISR
+ * it will *not* re-enable interrupts since it is assumed they
+ * were previously disabled. Thus the previous state in x is
+ * unused.
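+ * (For the same reason, portSET_INTERRUPT_MASK_FROM_ISR() above always
+ * returns 0.)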
+ */ +#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vTaskExitCritical() + /*-----------------------------------------------------------*/ /* Runtime stats support */ diff --git a/portable/XCC/XCOREAI/rtos_support_rtos_config.h b/portable/XCC/XCOREAI/rtos_support_rtos_config.h index cba840b3f..8b8d1054d 100644 --- a/portable/XCC/XCOREAI/rtos_support_rtos_config.h +++ b/portable/XCC/XCOREAI/rtos_support_rtos_config.h @@ -35,7 +35,10 @@ * The RTOS provided handler that should run when a * core receives an intercore interrupt request. */ -#define RTOS_INTERCORE_INTERRUPT_ISR() +#define RTOS_INTERCORE_INTERRUPT_ISR() do { \ + void vIntercoreInterruptISR( void ); \ + vIntercoreInterruptISR(); \ +} while ( 0 ) /** * The number of hardware locks that the RTOS @@ -45,7 +48,7 @@ * Note that the IRQ routines require a lock and * will share the first one with the RTOS. */ -#define RTOS_LOCK_COUNT 0 +#define RTOS_LOCK_COUNT 2 /** * Remaps all calls to debug_printf() to rtos_printf(). @@ -64,6 +67,15 @@ #endif #define DEBUG_PRINT_ENABLE 1 + #ifndef configTASKS_DEBUG + #define configTASKS_DEBUG 0 + #endif + #if configTASKS_DEBUG == 1 + #define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1 + #else + #define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1 + #endif + #else /* configENABLE_DEBUG_PRINTF */ /* ensure that debug_printf is disabled */ diff --git a/queue.c b/queue.c index 324564108..deb663b5c 100644 --- a/queue.c +++ b/queue.c @@ -87,7 +87,7 @@ typedef struct SemaphoreData * performed just because a higher priority task has been woken. */ #define queueYIELD_IF_USING_PREEMPTION() #else - #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #define queueYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() #endif /* @@ -957,7 +957,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * is also a higher priority task in the pending ready list. */ if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } } else @@ -1422,7 +1422,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1614,7 +1614,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1792,7 +1792,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { diff --git a/tasks.c b/tasks.c index d3280e0fe..45681532c 100644 --- a/tasks.c +++ b/tasks.c @@ -33,6 +33,8 @@ * task.h is included from an application file. */ #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#define DEBUG_UNIT FREERTOS_TASKS + /* FreeRTOS includes. */ #include "FreeRTOS.h" #include "task.h" @@ -62,7 +64,7 @@ * performed just because a higher priority task has been woken. */ #define taskYIELD_IF_USING_PREEMPTION() #else - #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() #endif /* Values that can be assigned to the ucNotifyState member of the TCB. */ @@ -129,26 +131,7 @@ } \ } /* taskRECORD_READY_PRIORITY */ -/*-----------------------------------------------------------*/ - - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ - UBaseType_t uxTopPriority = uxTopReadyPriority; \ - \ - /* Find the highest priority queue that contains ready tasks. 
*/ \ - while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ - { \ - configASSERT( uxTopPriority ); \ - --uxTopPriority; \ - } \ - \ - /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ - * the same priority get an equal share of the processor time. */ \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - uxTopReadyPriority = uxTopPriority; \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK */ - -/*-----------------------------------------------------------*/ + /*-----------------------------------------------------------*/ /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as * they are only required when a port optimised method of task selection is @@ -158,6 +141,8 @@ #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + #error configUSE_PORT_OPTIMISED_TASK_SELECTION not yet supported in SMP + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is * performed in a way that is tailored to the particular microcontroller * architecture being used. */ @@ -165,19 +150,7 @@ /* A port optimised version is provided. Call the port defined macros. */ #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) -/*-----------------------------------------------------------*/ - - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ - UBaseType_t uxTopPriority; \ - \ - /* Find the highest priority list that contains ready tasks. */ \ - portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ - configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ - -/*-----------------------------------------------------------*/ + /*-----------------------------------------------------------*/ /* A port optimised version is provided, call it only if the TCB being reset * is being referenced from a ready list. If it is referenced from a delayed @@ -245,6 +218,17 @@ #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL #endif +/* Indicates that the task is not actively running on any core. */ +#define taskTASK_NOT_RUNNING ( TaskRunning_t ) ( -1 ) + +/* Indicates that the task is actively running but scheduled to yield. */ +#define taskTASK_YIELDING ( TaskRunning_t ) ( -2 ) + +/* Returns pdTRUE if the task is actively running and not scheduled to yield. */ +#define taskTASK_IS_RUNNING( xTaskRunState ) ( ( 0 <= xTaskRunState ) && ( xTaskRunState < configNUM_CORES ) ) + +typedef BaseType_t TaskRunning_t; + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -262,8 +246,18 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ StackType_t * pxStack; /*< Points to the start of the stack. */ + volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if any. */ + BaseType_t xIsIdle; /*< Used to identify the idle tasks. */ char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xPreemptionDisable; /*< Used to prevent the task from being preempted */ + #endif + + #if ( configUSE_CORE_EXCLUSION == 1 ) + UBaseType_t uxCoreExclude; /*< Used to exclude the task from certain cores */ + #endif + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */ #endif @@ -335,7 +329,8 @@ typedef tskTCB TCB_t; /*lint -save -e956 A manual analysis and inspection has been used to determine * which static variables must be declared volatile. */ -PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; +PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; +#define pxCurrentTCB xTaskGetCurrentTaskHandle() /* Lists for ready and blocked tasks. -------------------- * xDelayedTaskList1 and xDelayedTaskList2 could be move to function scople but @@ -373,11 +368,13 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINI PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; -PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; +PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUM_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[ configNUM_CORES ] = { NULL }; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ + +#define xYieldPending prvGetCurrentYieldPending() /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -391,7 +388,11 @@ const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; * moves the task's event list item into the xPendingReadyList, ready for the * kernel to move the task from the pending ready list into the real ready list * when the scheduler is unsuspended. The pending ready list itself can only be - * accessed from a critical section. */ + * accessed from a critical section. + * + * Updates to uxSchedulerSuspended must be protected by both the task and ISR locks and + * must not be done by an ISR. Reads must be protected by either lock and may be done by + * either an ISR or a task. */ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -409,6 +410,34 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* File private functions. --------------------------------*/ +/* + * Returns the yield pending count for the calling core. + */ +static BaseType_t prvGetCurrentYieldPending( void ); + +/* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and yields if so. 
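+ * It runs with interrupts disabled, immediately after the calling core
+ * enters a critical section or suspends the scheduler.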
+ */
+static void prvCheckForRunStateChange( void );
+
+/*
+ * Yields the given core.
+ */
+static void prvYieldCore( BaseType_t xCoreID );
+
+/*
+ * Yields a core, or cores if multiple priorities are not allowed to run
+ * simultaneously, to allow the task pxTCB to run.
+ */
+static void prvYieldForTask( TCB_t * pxTCB,
+                             const BaseType_t xPreemptEqualPriority );
+
+/*
+ * Selects the highest priority available task.
+ */
+static BaseType_t prvSelectHighestPriorityTask( const BaseType_t xCoreID );
+
 /**
  * Utility task that simply returns pdTRUE if the task referenced by xTask is
  * currently in the Suspended state, or pdFALSE if the task referenced by xTask
@@ -568,6 +597,464 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 
 /*-----------------------------------------------------------*/
 
+static BaseType_t prvGetCurrentYieldPending( void )
+{
+    BaseType_t xReturn;
+    UBaseType_t ulState;
+
+    ulState = portDISABLE_INTERRUPTS();
+    xReturn = xYieldPendings[ portGET_CORE_ID() ];
+    portRESTORE_INTERRUPTS( ulState );
+
+    return xReturn;
+}
+
+/*-----------------------------------------------------------*/
+
+static void prvCheckForRunStateChange( void )
+{
+    UBaseType_t uxPrevCriticalNesting;
+    UBaseType_t uxPrevSchedulerSuspended;
+    TCB_t * pxThisTCB;
+
+    /* This should be skipped when entering a critical section within
+     * an ISR. If the task on the current core is no longer running, then
+     * vTaskSwitchContext() probably should be run before returning, but
+     * we don't have a way to force that to happen from here. */
+    if( portCHECK_IF_IN_ISR() == pdFALSE )
+    {
+        /* This function is always called with interrupts disabled
+         * so this is safe. */
+        pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ];
+
+        while( pxThisTCB->xTaskRunState == taskTASK_YIELDING )
+        {
+            /* We are only here if we just entered a critical section
+             * or if we just suspended the scheduler, and another task
+             * has requested that we yield.
+             *
+             * This is slightly complicated since we need to save and restore
+             * the suspension and critical nesting counts, as well as release
+             * and reacquire the correct locks. And then do it all over again
+             * if our state changed again during the reacquisition. */
+
+            uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting;
+            uxPrevSchedulerSuspended = uxSchedulerSuspended;
+
+            /* This must only be called the first time we enter into a critical
+             * section, otherwise it could context switch in the middle of a
+             * critical section. */
+            configASSERT( uxPrevCriticalNesting + uxPrevSchedulerSuspended == 1U );
+
+            uxSchedulerSuspended = 0U;
+
+            if( uxPrevCriticalNesting > 0U )
+            {
+                pxThisTCB->uxCriticalNesting = 0U;
+                portRELEASE_ISR_LOCK();
+                portRELEASE_TASK_LOCK();
+            }
+            else
+            {
+                /* uxPrevSchedulerSuspended must be 1 */
+                portRELEASE_TASK_LOCK();
+            }
+
+            portMEMORY_BARRIER();
+            configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING );
+
+            portENABLE_INTERRUPTS();
+
+            /* Enabling interrupts should cause this core to immediately
+             * service the pending interrupt and yield. If the run state is still
+             * yielding here then that is a problem.
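+             * The configASSERT() below catches the case where the requested
+             * yield never happened.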
*/ + configASSERT( pxThisTCB->xTaskRunState != taskTASK_YIELDING ); + + portDISABLE_INTERRUPTS(); + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + pxCurrentTCB->uxCriticalNesting = uxPrevCriticalNesting; + uxSchedulerSuspended = uxPrevSchedulerSuspended; + + if( uxPrevCriticalNesting == 0U ) + { + /* uxPrevSchedulerSuspended must be 1 */ + configASSERT( uxPrevSchedulerSuspended != ( UBaseType_t ) pdFALSE ); + portRELEASE_ISR_LOCK(); + } + } + } +} + +/*-----------------------------------------------------------*/ + +static void prvYieldCore( BaseType_t xCoreID ) +{ + /* This must be called from a critical section and + * xCoreID must be valid. */ + + if( portCHECK_IF_IN_ISR() && ( xCoreID == portGET_CORE_ID() ) ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + else if( pxCurrentTCBs[ xCoreID ]->xTaskRunState != taskTASK_YIELDING ) + { + if( xCoreID == portGET_CORE_ID() ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + portYIELD_CORE( xCoreID ); + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_YIELDING; + } + } +} + +/*-----------------------------------------------------------*/ + +static void prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority ) +{ + BaseType_t xLowestPriority; + BaseType_t xTaskPriority; + BaseType_t xLowestPriorityCore = -1; + BaseType_t xYieldCount = 0; + BaseType_t x; + TaskRunning_t xTaskRunState; + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION */ + + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + /* No task should yield for this one if it is a lower priority + * than priority level of currently ready tasks. */ + if( pxTCB->uxPriority < uxTopReadyPriority ) + { + return; + } + } + #endif + + xLowestPriority = ( BaseType_t ) pxTCB->uxPriority; + + if( xPreemptEqualPriority == pdFALSE ) + { + /* xLowestPriority will be decremented to -1 if the priority of pxTCB + * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ + --xLowestPriority; + } + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ) + { + /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here */ + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ x ]->uxPriority - pxCurrentTCBs[ x ]->xIsIdle; + xTaskRunState = pxCurrentTCBs[ x ]->xTaskRunState; + + if( ( taskTASK_IS_RUNNING( xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ x ] == pdFALSE ) ) + { + if( xTaskPriority <= xLowestPriority ) + { + #if ( configUSE_CORE_EXCLUSION == 1 ) + if( ( pxTCB->uxCoreExclude & ( 1 << x ) ) == 0 ) + #endif + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = x; + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) && 1 + { + /* Yield all currently running non-idle tasks with a priority lower than + * the task that needs to run. 
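+                     * Doing so enforces the configRUN_MULTIPLE_PRIORITIES == 0
+                     * rule that only one priority level runs across all cores.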
*/ + if( ( ( BaseType_t ) tskIDLE_PRIORITY - 1 < xTaskPriority ) && ( xTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) + { + prvYieldCore( x ); + xYieldCount++; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) && 1 */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( ( xYieldCount == 0 ) && taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + xYieldCount++; + } + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + /* Verify that the calling core always yields to higher priority tasks */ + if( !pxCurrentTCBs[ portGET_CORE_ID() ]->xIsIdle && ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) ) + { + configASSERT( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE || taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ]->xTaskRunState ) == pdFALSE ); + } + #endif +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + + static BaseType_t prvSelectHighestPriorityTask( const BaseType_t xCoreID ) + { + UBaseType_t uxCurrentPriority = uxTopReadyPriority; + BaseType_t xTaskScheduled = pdFALSE; + BaseType_t xDecrementTopPriority = pdTRUE; + + #if ( configUSE_CORE_EXCLUSION == 1 ) + TCB_t * pxPreviousTCB = NULL; + #endif + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + BaseType_t xPriorityDropped = pdFALSE; + #endif + + while( xTaskScheduled == pdFALSE ) + { + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + if( uxCurrentPriority < uxTopReadyPriority ) + { + /* We can't schedule any tasks, other than idle, that have a + * priority lower than the priority of a task currently running + * on another core. */ + uxCurrentPriority = tskIDLE_PRIORITY; + } + } + #endif + + if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) + { + List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); + ListItem_t * pxLastTaskItem = pxReadyList->pxIndex->pxPrevious; + ListItem_t * pxTaskItem = pxLastTaskItem; + + if( ( void * ) pxLastTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) + { + pxLastTaskItem = pxLastTaskItem->pxPrevious; + } + + /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority + * must not be decremented any further */ + xDecrementTopPriority = pdFALSE; + + do + { + TCB_t * pxTCB; + + pxTaskItem = pxTaskItem->pxNext; + + if( ( void * ) pxTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) + { + pxTaskItem = pxTaskItem->pxNext; + } + + pxTCB = pxTaskItem->pvOwner; + + /*debug_printf("Attempting to schedule %s on core %d\n", pxTCB->pcTaskName, portGET_CORE_ID() ); */ + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + /* When falling back to the idle priority because only one priority + * level is allowed to run at a time, we should ONLY schedule the true + * idle tasks, not user tasks at the idle priority. 
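+                         * The xIsIdle flag in the TCB distinguishes the true idle
+                         * tasks from application tasks created at tskIDLE_PRIORITY.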
*/ + if( uxCurrentPriority < uxTopReadyPriority ) + { + if( pxTCB->xIsIdle == pdFALSE ) + { + continue; + } + } + } + #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) */ + + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) + { + #if ( configUSE_CORE_EXCLUSION == 1 ) + if( ( pxTCB->uxCoreExclude & ( 1 << xCoreID ) ) == 0 ) + #endif + { + /* If the task is not being executed by any core swap it in */ + /*rtos_printf("Current priority %d: swap out %s(%d) for %s(%d) on core %d\n", uxCurrentPriority, pxCurrentTCBs[ portGET_CORE_ID() ]->pcTaskName, pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority, pxTCB->pcTaskName, pxTCB->uxPriority, portGET_CORE_ID()); */ + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; + #if ( configUSE_CORE_EXCLUSION == 1 ) + pxPreviousTCB = pxCurrentTCBs[ xCoreID ]; + #endif + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + pxCurrentTCBs[ xCoreID ] = pxTCB; + xTaskScheduled = pdTRUE; + } + } + else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) + { + configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); + #if ( configUSE_CORE_EXCLUSION == 1 ) + if( ( pxTCB->uxCoreExclude & ( 1 << xCoreID ) ) == 0 ) + #endif + { + /* The task is already running on this core, mark it as scheduled */ + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + xTaskScheduled = pdTRUE; + /*rtos_printf( "Keeping %s(%d) on core %d\n", pxTCB->pcTaskName, pxTCB->uxPriority, portGET_CORE_ID() ); */ + } + } + + if( xTaskScheduled != pdFALSE ) + { + /* Once a task has been selected to run on this core, + * move it to the end of the ready task list. */ + uxListRemove( pxTaskItem ); + vListInsertEnd( pxReadyList, pxTaskItem ); + break; + } + } while( pxTaskItem != pxLastTaskItem ); + } + else + { + if( xDecrementTopPriority != pdFALSE ) + { + uxTopReadyPriority--; + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + xPriorityDropped = pdTRUE; + } + #endif + } + } + + /* This function can get called by vTaskSuspend() before the scheduler is started. + * In that case, since the idle tasks have not yet been created it is possible that we + * won't find a new task to schedule. Return pdFALSE in this case. */ + if( ( xSchedulerRunning == pdFALSE ) && ( uxCurrentPriority == tskIDLE_PRIORITY ) && ( xTaskScheduled == pdFALSE ) ) + { + return pdFALSE; + } + + configASSERT( ( uxCurrentPriority > tskIDLE_PRIORITY ) || ( xTaskScheduled == pdTRUE ) ); + uxCurrentPriority--; + } + + configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ]->xTaskRunState ) ); + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + if( xPriorityDropped != pdFALSE ) + { + /* There may be several ready tasks that were being prevented from running because there was + * a higher priority task running. Now that the last of the higher priority tasks is no longer + * running, make sure all the other idle tasks yield. */ + UBaseType_t x; + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ) + { + if( pxCurrentTCBs[ x ]->xIsIdle != pdFALSE ) + { + prvYieldCore( x ); + } + } + } + #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) */ + + #if ( configUSE_CORE_EXCLUSION == 1 ) + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + { + /* A ready task was just bumped off this core. 
Look at the cores it is not excluded
+             * from to see if it is able to run on any of them. */
+            UBaseType_t uxCoreMap = ~( pxPreviousTCB->uxCoreExclude );
+            BaseType_t xLowestPriority = pxPreviousTCB->uxPriority - pxPreviousTCB->xIsIdle;
+            BaseType_t xLowestPriorityCore = -1;
+
+            if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 )
+            {
+                /* The ready task that was removed from this core is not excluded from it.
+                 * Only look at the intersection of the cores the removed task is allowed to run
+                 * on with the cores that the new task is excluded from.  It is possible that the
+                 * new task was only placed onto this core because it is excluded from another.
+                 * Check to see if the previous task could run on one of those cores. */
+                uxCoreMap &= pxCurrentTCBs[ xCoreID ]->uxCoreExclude;
+            }
+            else
+            {
+                /* The ready task that was removed from this core is excluded from it.
+                 * See if we can schedule it on any of the cores where it is not excluded from. */
+                rtos_printf( "Kicked %s off core %d\n", pxPreviousTCB->pcTaskName, xCoreID );
+            }
+
+            uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 );
+
+            while( uxCoreMap != 0 )
+            {
+                BaseType_t uxCore = ( BaseType_t ) ( 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ) );
+
+                configASSERT( taskVALID_CORE_ID( uxCore ) );
+
+                uxCoreMap &= ~( 1 << uxCore );
+
+                BaseType_t xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority - pxCurrentTCBs[ uxCore ]->xIsIdle;
+
+                if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ]->xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) )
+                {
+                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                        if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE )
+                    #endif
+                    {
+                        xLowestPriority = xTaskPriority;
+                        xLowestPriorityCore = uxCore;
+                    }
+                }
+            }
+
+            if( taskVALID_CORE_ID( xLowestPriorityCore ) )
+            {
+                rtos_printf( "going to interrupt core %d which is running %s to place the task %s that was just replaced with %s on core %d\n",
+                             xLowestPriorityCore,
+                             pxCurrentTCBs[ xLowestPriorityCore ]->pcTaskName,
+                             pxPreviousTCB->pcTaskName,
+                             pxCurrentTCBs[ xCoreID ]->pcTaskName,
+                             xCoreID );
+                prvYieldCore( xLowestPriorityCore );
+            }
+        }
+        #endif /* if ( configUSE_CORE_EXCLUSION == 1 ) */
+
+        return pdTRUE;
+    }
+
+#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+    static void prvSelectHighestPriorityTask( BaseType_t xCoreID )
+    {
+        UBaseType_t uxTopPriority;
+
+        /* Find the highest priority list that contains ready tasks. */
+        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );
+        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );
+        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );
+    }
+
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+/*-----------------------------------------------------------*/
+
+
 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
 
     TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
@@ -1000,6 +1487,17 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         }
         #endif
 
+        #if ( configUSE_CORE_EXCLUSION == 1 )
+        {
+            pxNewTCB->uxCoreExclude = 0;
+        }
+        #endif
+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        {
+            pxNewTCB->xPreemptionDisable = pdFALSE;
+        }
+        #endif
+
        /* Initialize the TCB stack to look as if the task was already running,
         * but had been interrupted by the scheduler.  The return address is set
        * to the start of the task function.
Once the stack has been initialised @@ -1052,6 +1550,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* portUSING_MPU_WRAPPERS */ + /* Initialize to not running */ + pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; + + /* Is this an idle task? */ + pxNewTCB->xIsIdle = ( pxTaskCode == prvIdleTask ); + if( pxCreatedTask != NULL ) { /* Pass the handle out in an anonymous way. The handle can be used to @@ -1073,12 +1577,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { uxCurrentNumberOfTasks++; - if( pxCurrentTCB == NULL ) + if( xSchedulerRunning == pdFALSE ) { - /* There are no other tasks, or all the other tasks are in - * the suspended state - make this the current task. */ - pxCurrentTCB = pxNewTCB; - if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { /* This is the first task to be created so do the preliminary @@ -1090,27 +1590,33 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { mtCOVERAGE_TEST_MARKER(); } + + if( pxNewTCB->xIsIdle != pdFALSE ) + { + BaseType_t xCoreID; + + /* Check if a core is free. */ + for( xCoreID = ( UBaseType_t ) 0; xCoreID < ( UBaseType_t ) configNUM_CORES; xCoreID++ ) + { + if( pxCurrentTCBs[ xCoreID ] == NULL ) + { + rtos_printf( "adding idle task onto core %d\n", xCoreID ); + pxNewTCB->xTaskRunState = xCoreID; + #if ( configUSE_CORE_EXCLUSION == 1 ) + { + pxNewTCB->uxCoreExclude = ~( 1 << xCoreID ); + rtos_printf( "Set exclusion mask to %08x\n", pxNewTCB->uxCoreExclude ); + } + #endif + pxCurrentTCBs[ xCoreID ] = pxNewTCB; + break; + } + } + } } else { - /* If the scheduler is not already running, make this task the - * current task if it is the highest priority task to be created - * so far. */ - if( xSchedulerRunning == pdFALSE ) - { - if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) - { - pxCurrentTCB = pxNewTCB; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + mtCOVERAGE_TEST_MARKER(); } uxTaskNumber++; @@ -1126,26 +1632,22 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvAddTaskToReadyList( pxNewTCB ); portSETUP_TCB( pxNewTCB ); - } - taskEXIT_CRITICAL(); - if( xSchedulerRunning != pdFALSE ) - { - /* If the created task is of a higher priority than the current task - * then it should run now. */ - if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + if( xSchedulerRunning != pdFALSE ) { - taskYIELD_IF_USING_PREEMPTION(); + /* If the created task is of a higher priority than another + * currently running task and preemption is on then it should + * run now. */ + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxNewTCB, pdFALSE ); + #endif } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL(); } /*-----------------------------------------------------------*/ @@ -1154,6 +1656,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskDelete( TaskHandle_t xTaskToDelete ) { TCB_t * pxTCB; + TaskRunning_t xTaskRunningOnCore; taskENTER_CRITICAL(); { @@ -1161,6 +1664,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * being deleted. */ pxTCB = prvGetTCBFromHandle( xTaskToDelete ); + xTaskRunningOnCore = pxTCB->xTaskRunState; + /* Remove task from the ready/delayed list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { @@ -1187,9 +1692,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * not return. 
*/ uxTaskNumber++; - if( pxTCB == pxCurrentTCB ) + /* If the task is running (or yielding), we must add it to the + * termination list so that an idle task can delete it when it is + * no longer running. */ + if( xTaskRunningOnCore != taskTASK_NOT_RUNNING ) { - /* A task is deleting itself. This cannot complete within the + /*rtos_printf("Task %s is running on core %d and is now marked for deletion.\n", pxTCB->pcTaskName, xTaskRunningOnCore ); */ + + /* A running task is being deleted. This cannot complete within the * task itself, as a context switch to another task is required. * Place the task in the termination list. The idle task will * check the termination list and free up any memory allocated by @@ -1210,10 +1720,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * after which it is not possible to yield away from this task - * hence xYieldPending is used to latch that a context switch is * required. */ - portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending ); + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ pxTCB->xTaskRunState ] ); } else { + /*rtos_printf("Task %s is not running and will now be deleted.\n", pxTCB->pcTaskName ); */ --uxCurrentNumberOfTasks; traceTASK_DELETE( pxTCB ); prvDeleteTCB( pxTCB ); @@ -1222,23 +1733,28 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * the task that has just been deleted. */ prvResetNextTaskUnblockTime(); } + + /* Force a reschedule if the task that has just been deleted was running. */ + if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( xTaskRunningOnCore ) ) ) + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + /*rtos_printf("Task deleted, yield core %d.\n", xTaskRunningOnCore ); */ + + if( xTaskRunningOnCore == xCoreID ) + { + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); + } + } } taskEXIT_CRITICAL(); - - /* Force a reschedule if it is the currently running task that has just - * been deleted. */ - if( xSchedulerRunning != pdFALSE ) - { - if( pxTCB == pxCurrentTCB ) - { - configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } } #endif /* INCLUDE_vTaskDelete */ @@ -1254,10 +1770,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxPreviousWakeTime ); configASSERT( ( xTimeIncrement > 0U ) ); - configASSERT( uxSchedulerSuspended == 0 ); vTaskSuspendAll(); { + configASSERT( uxSchedulerSuspended == 1 ); + /* Minor optimisation. The tick count cannot change in this * block. */ const TickType_t xConstTickCount = xTickCount; @@ -1318,7 +1835,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. */ if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1340,9 +1857,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* A delay time of zero just forces a reschedule. */ if( xTicksToDelay > ( TickType_t ) 0U ) { - configASSERT( uxSchedulerSuspended == 0 ); vTaskSuspendAll(); { + configASSERT( uxSchedulerSuspended == 1 ); traceTASK_DELAY(); /* A task that is removed from the event list while the @@ -1365,7 +1882,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. 
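+         *
+         * For example, the usual pattern around a blocking delay is:
+         *
+         *     vTaskSuspendAll();
+         *     prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
+         *     xAlreadyYielded = xTaskResumeAll();
+         *
+         * xTaskResumeAll() returns pdTRUE when it has already performed the
+         * context switch, so the yield below is only needed when it returns
+         * pdFALSE.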
*/ if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + vTaskYieldWithinAPI(); } else { @@ -1386,83 +1903,83 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxTCB ); - if( pxTCB == pxCurrentTCB ) + taskENTER_CRITICAL(); { - /* The task calling this function is querying its own state. */ - eReturn = eRunning; + pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); + pxDelayedList = pxDelayedTaskList; + pxOverflowedDelayedList = pxOverflowDelayedTaskList; } - else + taskEXIT_CRITICAL(); + + if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) { - taskENTER_CRITICAL(); - { - pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); - pxDelayedList = pxDelayedTaskList; - pxOverflowedDelayedList = pxOverflowDelayedTaskList; - } - taskEXIT_CRITICAL(); + /* The task being queried is referenced from one of the Blocked + * lists. */ + eReturn = eBlocked; + } - if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) + #if ( INCLUDE_vTaskSuspend == 1 ) + else if( pxStateList == &xSuspendedTaskList ) { - /* The task being queried is referenced from one of the Blocked - * lists. */ - eReturn = eBlocked; - } - - #if ( INCLUDE_vTaskSuspend == 1 ) - else if( pxStateList == &xSuspendedTaskList ) + /* The task being queried is referenced from the suspended + * list. Is it genuinely suspended or is it blocked + * indefinitely? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ) { - /* The task being queried is referenced from the suspended - * list. Is it genuinely suspended or is it blocked - * indefinitely? */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ) - { - #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + BaseType_t x; + + /* The task does not appear on the event list item of + * and of the RTOS objects, but could still be in the + * blocked state if it is waiting on its notification + * rather than waiting on an object. If not, is + * suspended. */ + eReturn = eSuspended; + + for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) { - BaseType_t x; - - /* The task does not appear on the event list item of - * and of the RTOS objects, but could still be in the - * blocked state if it is waiting on its notification - * rather than waiting on an object. If not, is - * suspended. */ - eReturn = eSuspended; - - for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) { - if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) - { - eReturn = eBlocked; - break; - } + eReturn = eBlocked; + break; } } - #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ - { - eReturn = eSuspended; - } - #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ - } - else - { - eReturn = eBlocked; - } + } + #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ + { + eReturn = eSuspended; + } + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ } - #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ - - #if ( INCLUDE_vTaskDelete == 1 ) - else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) ) + else { - /* The task being queried is referenced from the deleted - * tasks list, or it is not referenced from any lists at - * all. */ - eReturn = eDeleted; + eReturn = eBlocked; } - #endif + } + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ - else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. 
*/ + #if ( INCLUDE_vTaskDelete == 1 ) + else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) ) + { + /* The task being queried is referenced from the deleted + * tasks list, or it is not referenced from any lists at + * all. */ + eReturn = eDeleted; + } + #endif + + else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ + { + /* If the task is not in any other state, it must be in the + * Ready (including pending ready) state. */ + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) + { + /* Is it actively running on a core? */ + eReturn = eRunning; + } + else { - /* If the task is not in any other state, it must be in the - * Ready (including pending ready) state. */ eReturn = eReady; } } @@ -1543,6 +2060,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) TCB_t * pxTCB; UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; BaseType_t xYieldRequired = pdFALSE; + BaseType_t xYieldForTask = pdFALSE; + BaseType_t xCoreID; configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) ); @@ -1577,36 +2096,25 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( uxCurrentBasePriority != uxNewPriority ) { /* The priority change may have readied a task of higher - * priority than the calling task. */ + * priority than a running task. */ if( uxNewPriority > uxCurrentBasePriority ) { - if( pxTCB != pxCurrentTCB ) - { - /* The priority of a task other than the currently - * running task is being raised. Is the priority being - * raised above that of the running task? */ - if( uxNewPriority >= pxCurrentTCB->uxPriority ) - { - xYieldRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - /* The priority of the running task is being raised, - * but the running task must already be the highest - * priority task able to run so no yield is required. */ - } + /* The priority of a task is being raised so + * perform a yield for this task later. */ + xYieldForTask = pdTRUE; } - else if( pxTCB == pxCurrentTCB ) + else if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) { - /* Setting the priority of the running task down means + /* Setting the priority of a running task down means * there may now be another task of higher priority that * is ready to execute. */ - xYieldRequired = pdTRUE; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxTCB->xPreemptionDisable == pdFALSE ) + #endif + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + xYieldRequired = pdTRUE; + } } else { @@ -1678,17 +2186,26 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else { - mtCOVERAGE_TEST_MARKER(); + /* It's possible that xYieldForTask was already set to pdTRUE because + * its priority is being raised. However, since it is not in a ready list + * we don't actually need to yield for it. */ + xYieldForTask = pdFALSE; } - if( xYieldRequired != pdFALSE ) - { - taskYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( configUSE_PREEMPTION == 1 ) + if( xYieldRequired != pdFALSE ) + { + prvYieldCore( xCoreID ); + } + else if( xYieldForTask != pdFALSE ) + { + prvYieldForTask( pxTCB, pdTRUE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* if ( configUSE_PREEMPTION == 1 ) */ /* Remove compiler warning about unused variables when the port * optimised task selection is not being used. 
*/ @@ -1701,11 +2218,113 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) #endif /* INCLUDE_vTaskPrioritySet */ /*-----------------------------------------------------------*/ +#if ( configUSE_CORE_EXCLUSION == 1 ) + + void vTaskCoreExclusionSet( const TaskHandle_t xTask, + UBaseType_t uxCoreExclude ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->uxCoreExclude = uxCoreExclude; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + + if( ( uxCoreExclude & ( 1 << xCoreID ) ) != 0 ) + { + rtos_printf( "New core exclusion mask on %s prevents it from running any longer on core %d\n", pxTCB->pcTaskName, xCoreID ); + prvYieldCore( xCoreID ); + } + } + } + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_CORE_EXCLUSION */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CORE_EXCLUSION == 1 ) + + UBaseType_t vTaskCoreExclusionGet( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + UBaseType_t uxCoreExclude; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + uxCoreExclude = pxTCB->uxCoreExclude; + } + taskEXIT_CRITICAL(); + + return uxCoreExclude; + } + +#endif /* configUSE_CORE_EXCLUSION */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionDisable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdTRUE; + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionEnable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdFALSE; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + prvYieldCore( xCoreID ); + } + } + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +/*-----------------------------------------------------------*/ + #if ( INCLUDE_vTaskSuspend == 1 ) void vTaskSuspend( TaskHandle_t xTaskToSuspend ) { TCB_t * pxTCB; + TaskRunning_t xTaskRunningOnCore; taskENTER_CRITICAL(); { @@ -1715,6 +2334,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) traceTASK_SUSPEND( pxTCB ); + xTaskRunningOnCore = pxTCB->xTaskRunState; + /* Remove task from the ready/delayed list and place in the * suspended list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) @@ -1753,55 +2374,72 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } } #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ - } - taskEXIT_CRITICAL(); - if( xSchedulerRunning != pdFALSE ) - { - /* Reset the next expected unblock time in case it referred to the - * task that is now in the Suspended state. */ - taskENTER_CRITICAL(); - { - prvResetNextTaskUnblockTime(); - } - taskEXIT_CRITICAL(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - if( pxTCB == pxCurrentTCB ) - { if( xSchedulerRunning != pdFALSE ) { - /* The current task has just been suspended. 
*/ - configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. */ + prvResetNextTaskUnblockTime(); } else { - /* The scheduler is not running, but the task that was pointed - * to by pxCurrentTCB has just been suspended and pxCurrentTCB - * must be adjusted to point to a different task. */ - if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + mtCOVERAGE_TEST_MARKER(); + } + + if( taskTASK_IS_RUNNING( xTaskRunningOnCore ) ) + { + if( xSchedulerRunning != pdFALSE ) { - /* No other tasks are ready, so set pxCurrentTCB back to - * NULL so when the next task is created pxCurrentTCB will - * be set to point to it no matter what its relative priority - * is. */ - pxCurrentTCB = NULL; + /*rtos_printf("Yield Core %d for task %s\n", xTaskRunningOnCore, pxTCB->pcTaskName ); */ + if( xTaskRunningOnCore == portGET_CORE_ID() ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); + } + + taskEXIT_CRITICAL(); } else { - vTaskSwitchContext(); + taskEXIT_CRITICAL(); + + configASSERT( pxTCB == pxCurrentTCBs[ xTaskRunningOnCore ] ); + + /* The scheduler is not running, but the task that was pointed + * to by pxCurrentTCB has just been suspended and pxCurrentTCB + * must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set the core's TCB back to + * NULL so when the next task is created the core's TCB will + * be able to be set to point to it no matter what its relative + * priority is. */ + pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; + pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; + } + else + { + /* Attempt to switch in a new task. This could fail since the idle tasks + * haven't been created yet. If it does then set the core's TCB back to + * NULL. */ + if( prvSelectHighestPriorityTask( xTaskRunningOnCore ) == pdFALSE ) + { + pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; + pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; + } + } } } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + else + { + taskEXIT_CRITICAL(); + } + } /* taskEXIT_CRITICAL() - already exited in one of three cases above */ } #endif /* INCLUDE_vTaskSuspend */ @@ -1863,8 +2501,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( xTaskToResume ); /* The parameter cannot be NULL as it is impossible to resume the - * currently executing task. */ - if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + * currently executing task. It is also impossible to resume a task + * that is actively running on another core but it is too dangerous + * to check their run state here. Safer to get into a critical section + * and check if it is actually suspended or not below. */ + if( pxTCB != NULL ) { taskENTER_CRITICAL(); { @@ -1878,17 +2519,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvAddTaskToReadyList( pxTCB ); /* A higher priority task may have just been resumed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) - { - /* This yield may not cause the task just resumed to run, - * but will leave the lists in the correct state for the - * next yield. 
*/
-                    taskYIELD_IF_USING_PREEMPTION();
-                }
-                else
-                {
-                    mtCOVERAGE_TEST_MARKER();
-                }
+                #if ( configUSE_PREEMPTION == 1 )
+                {
+                    prvYieldForTask( pxTCB, pdTRUE );
+                }
+                #endif
             }
             else
             {
@@ -1946,19 +2581,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             {
                 /* Ready lists can be accessed so move the task from the
                  * suspended list to the ready list directly. */
-                if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
-                {
-                    xYieldRequired = pdTRUE;
-
-                    /* Mark that a yield is pending in case the user is not
-                     * using the return value to initiate a context switch
-                     * from the ISR using portYIELD_FROM_ISR. */
-                    xYieldPending = pdTRUE;
-                }
-                else
-                {
-                    mtCOVERAGE_TEST_MARKER();
-                }
 
                 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                 prvAddTaskToReadyList( pxTCB );
@@ -1970,6 +2592,15 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                  * unsuspended. */
                 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
             }
+
+            #if ( configUSE_PREEMPTION == 1 )
+                prvYieldForTask( pxTCB, pdTRUE );
+
+                if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
+                {
+                    xYieldRequired = pdTRUE;
+                }
+            #endif
         }
         else
        {
@@ -1987,58 +2618,105 @@ void vTaskStartScheduler( void )
 void vTaskStartScheduler( void )
 {
     BaseType_t xReturn;
-
-    /* Add the idle task at the lowest priority. */
-    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
-    {
-        StaticTask_t * pxIdleTaskTCBBuffer = NULL;
-        StackType_t * pxIdleTaskStackBuffer = NULL;
-        uint32_t ulIdleTaskStackSize;
-
-        /* The Idle task is created using user provided RAM - obtain the
-         * address of the RAM then create the idle task. */
-        vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
-        xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
-                                             configIDLE_TASK_NAME,
-                                             ulIdleTaskStackSize,
-                                             ( void * ) NULL, /*lint !e961.  The cast is not redundant for all compilers. */
-                                             portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
-                                             pxIdleTaskStackBuffer,
-                                             pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
-
-        if( xIdleTaskHandle != NULL )
-        {
-            xReturn = pdPASS;
-        }
-        else
-        {
-            xReturn = pdFAIL;
-        }
-    }
-    #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
-    {
-        /* The Idle task is being created using dynamically allocated RAM. */
-        xReturn = xTaskCreate( prvIdleTask,
-                               configIDLE_TASK_NAME,
-                               configMINIMAL_STACK_SIZE,
-                               ( void * ) NULL,
-                               portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
-                               &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
-    }
-    #endif /* configSUPPORT_STATIC_ALLOCATION */
+    BaseType_t xCoreID;
+    char cIdleName[ configMAX_TASK_NAME_LEN ];
 
     #if ( configUSE_TIMERS == 1 )
     {
-        if( xReturn == pdPASS )
+        xReturn = xTimerCreateTimerTask();
+    }
+    #else
+    {
+        /* No timer task is created, so start the idle task creation loop
+         * below from a passing status. */
+        xReturn = pdPASS;
+    }
+    #endif /* configUSE_TIMERS */
+
+    /* Add each idle task at the lowest priority. */
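+
+    /* For example, with configNUM_CORES set to 2 and configIDLE_TASK_NAME left
+     * at its default of "IDLE", the loop below creates two idle tasks named
+     * "IDLE0" and "IDLE1", one for each core. */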
+    for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ )
+    {
+        BaseType_t x;
+
+        if( xReturn == pdFAIL )
+        {
+            break;
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configMAX_TASK_NAME_LEN; x++ )
+        {
+            cIdleName[ x ] = configIDLE_TASK_NAME[ x ];
+
+            /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
+             * configMAX_TASK_NAME_LEN characters just in case the memory after the
+             * string is not accessible (extremely unlikely). */
+            if( cIdleName[ x ] == ( char ) 0x00 )
             {
-                xReturn = xTimerCreateTimerTask();
+                break;
             }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
             }
         }
+
+        /* Append the idle task number to the end of the name if there is space. */
+        if( x < configMAX_TASK_NAME_LEN )
+        {
+            cIdleName[ x++ ] = xCoreID + '0';
+
+            /* And append a null character if there is space. */
+            if( x < configMAX_TASK_NAME_LEN )
+            {
+                cIdleName[ x ] = '\0';
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+        {
+            #error User must specify an array of buffers for idle task TCBs and stacks
+            StaticTask_t * pxIdleTaskTCBBuffer = NULL;
+            StackType_t * pxIdleTaskStackBuffer = NULL;
+            uint32_t ulIdleTaskStackSize;
+
+            /* The Idle task is created using user provided RAM - obtain the
+             * address of the RAM then create the idle task. */
+            vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
+            xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask,
+                                                            cIdleName,
+                                                            ulIdleTaskStackSize,
+                                                            ( void * ) NULL, /*lint !e961.  The cast is not redundant for all compilers. */
+                                                            portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
+                                                            pxIdleTaskStackBuffer,
+                                                            pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
+
+            if( xIdleTaskHandle[ xCoreID ] != NULL )
+            {
+                xReturn = pdPASS;
+            }
+            else
+            {
+                xReturn = pdFAIL;
+            }
+        }
+        #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+        {
+            /* The Idle task is being created using dynamically allocated RAM. */
+            xReturn = xTaskCreate( prvIdleTask,
+                                   cIdleName,
+                                   configMINIMAL_STACK_SIZE,
+                                   ( void * ) NULL,
+                                   portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
+                                   &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
+        }
+        #endif /* configSUPPORT_STATIC_ALLOCATION */
+    }
 
     if( xReturn == pdPASS )
     {
@@ -2082,6 +2760,13 @@
 
             traceTASK_SWITCHED_IN();
 
+            rtos_printf( "Scheduler starting, top priority is %d:\n", uxTopReadyPriority );
+
+            for( int i = 0; i < configNUM_CORES; i++ )
+            {
+                rtos_printf( "\tCore %d: Task %s running on core: %d\n", i, pxCurrentTCBs[ i ]->pcTaskName, pxCurrentTCBs[ i ]->xTaskRunState );
+            }
+
             /* Setting up the timer tick is hardware specific and thus in the
              * portable interface. */
             if( xPortStartScheduler() != pdFALSE )
@@ -2125,22 +2810,43 @@ void vTaskEndScheduler( void )
 
 void vTaskSuspendAll( void )
 {
-    /* A critical section is not required as the variable is of type
-     * BaseType_t.  Please read Richard Barry's reply in the following link to a
-     * post in the FreeRTOS support forum before reporting this as a bug!
-
-     * https://goo.gl/wu4acr */
+    UBaseType_t ulState;
 
-    /* portSOFRWARE_BARRIER() is only implemented for emulated/simulated ports that
-     * do not otherwise exhibit real time behaviour. */
-    portSOFTWARE_BARRIER();
+    /* This must only be called from within a task. */
+    portASSERT_IF_IN_ISR();
 
-    /* The scheduler is suspended if uxSchedulerSuspended is non-zero.  An increment
-     * is used to allow calls to vTaskSuspendAll() to nest. */
-    ++uxSchedulerSuspended;
+    if( xSchedulerRunning != pdFALSE )
+    {
+        /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
+         * We must disable interrupts before we grab the locks in the event that this task is
+         * interrupted and switches context before incrementing uxSchedulerSuspended.
+         * It is safe to re-enable interrupts after releasing the ISR lock and incrementing
+         * uxSchedulerSuspended since that will prevent context switches. */
+        ulState = portDISABLE_INTERRUPTS();
 
-    /* Enforces ordering for ports and optimised compilers that may otherwise place
-     * the above increment elsewhere. */
-    portMEMORY_BARRIER();
+        /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
+         * do not otherwise exhibit real time behaviour. */
+        portSOFTWARE_BARRIER();
+
+        portGET_TASK_LOCK();
+        portGET_ISR_LOCK();
+
+        /* The scheduler is suspended if uxSchedulerSuspended is non-zero.  An increment
+         * is used to allow calls to vTaskSuspendAll() to nest. */
+        ++uxSchedulerSuspended;
+        portRELEASE_ISR_LOCK();
+
+        if( ( uxSchedulerSuspended == 1U ) && ( pxCurrentTCB->uxCriticalNesting == 0U ) )
+        {
+            prvCheckForRunStateChange();
+        }
+
+        portRESTORE_INTERRUPTS( ulState );
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
 }
 /*----------------------------------------------------------*/
 
@@ -2212,107 +2918,113 @@ BaseType_t xTaskResumeAll( void )
     TCB_t * pxTCB = NULL;
     BaseType_t xAlreadyYielded = pdFALSE;
 
-    /* If uxSchedulerSuspended is zero then this function does not match a
-     * previous call to vTaskSuspendAll(). */
-    configASSERT( uxSchedulerSuspended );
-
-    /* It is possible that an ISR caused a task to be removed from an event
-     * list while the scheduler was suspended.  If this was the case then the
-     * removed task will have been added to the xPendingReadyList.  Once the
-     * scheduler has been resumed it is safe to move all the pending ready
-     * tasks from this list into their appropriate ready list. */
-    taskENTER_CRITICAL();
+    if( xSchedulerRunning != pdFALSE )
     {
-        --uxSchedulerSuspended;
-
-        if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+        /* It is possible that an ISR caused a task to be removed from an event
+         * list while the scheduler was suspended.  If this was the case then the
+         * removed task will have been added to the xPendingReadyList.  Once the
+         * scheduler has been resumed it is safe to move all the pending ready
+         * tasks from this list into their appropriate ready list. */
+        taskENTER_CRITICAL();
         {
-            if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
+            BaseType_t xCoreID;
+
+            xCoreID = portGET_CORE_ID();
+
+            /* If uxSchedulerSuspended is zero then this function does not match a
+             * previous call to vTaskSuspendAll(). */
+            configASSERT( uxSchedulerSuspended );
+
+            --uxSchedulerSuspended;
+            portRELEASE_TASK_LOCK();
+
+            if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
             {
-                /* Move any readied tasks from the pending list into the
-                 * appropriate ready list.
*/ - while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxTCB ); - - /* If the moved task has a priority higher than the current - * task then a yield must be performed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + /* Move any readied tasks from the pending list into the + * appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) { - xYieldPending = pdTRUE; + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. + * If the current core yielded then vTaskSwitchContext() has already been called + * which sets xYieldPendings for the current core to pdTRUE. */ } - else + + if( pxTCB != NULL ) { - mtCOVERAGE_TEST_MARKER(); + /* A task was unblocked while the scheduler was suspended, + * which may have prevented the next unblock time from being + * re-calculated, in which case re-calculate it now. Mainly + * important for low power tickless implementations, where + * this can prevent an unnecessary exit from low power + * state. */ + prvResetNextTaskUnblockTime(); } - } - if( pxTCB != NULL ) - { - /* A task was unblocked while the scheduler was suspended, - * which may have prevented the next unblock time from being - * re-calculated, in which case re-calculate it now. Mainly - * important for low power tickless implementations, where - * this can prevent an unnecessary exit from low power - * state. */ - prvResetNextTaskUnblockTime(); - } - - /* If any ticks occurred while the scheduler was suspended then - * they should be processed now. This ensures the tick count does - * not slip, and that any delayed tasks are resumed at the correct - * time. */ - { - TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ - - if( xPendedCounts > ( TickType_t ) 0U ) + /* If any ticks occurred while the scheduler was suspended then + * they should be processed now. This ensures the tick count does + * not slip, and that any delayed tasks are resumed at the correct + * time. + * + * It should be safe to call xTaskIncrementTick here from any core + * since we are in a critical section and xTaskIncrementTick itself + * protects itself within a critical section. Suspending the scheduler + * from any core causes xTaskIncrementTick to increment uxPendedCounts.*/ { - do + TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ + + if( xPendedCounts > ( TickType_t ) 0U ) { - if( xTaskIncrementTick() != pdFALSE ) + do { - xYieldPending = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + if( xTaskIncrementTick() != pdFALSE ) + { + /* other cores are interrupted from + * within xTaskIncrementTick(). 
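+                                 *
+                                 * For example, if three tick interrupts arrived while the
+                                 * scheduler was suspended, xPendedTicks is 3 and this loop
+                                 * calls xTaskIncrementTick() three times; any other core
+                                 * that must reschedule is interrupted from inside those
+                                 * calls, while this core just latches its own pending
+                                 * yield here.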
*/
+                            xYieldPendings[ xCoreID ] = pdTRUE;
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
 
-                    --xPendedCounts;
-                } while( xPendedCounts > ( TickType_t ) 0U );
-
-                xPendedTicks = 0;
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-
-        if( xYieldPending != pdFALSE )
-        {
-            #if ( configUSE_PREEMPTION != 0 )
-            {
-                xAlreadyYielded = pdTRUE;
-            }
-            #endif
-            taskYIELD_IF_USING_PREEMPTION();
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-    else
-    {
-        mtCOVERAGE_TEST_MARKER();
-    }
-    taskEXIT_CRITICAL();
+                        --xPendedCounts;
+                    } while( xPendedCounts > ( TickType_t ) 0U );
+
+                    xPendedTicks = 0;
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+
+            if( xYieldPendings[ xCoreID ] != pdFALSE )
+            {
+                /* If xYieldPendings is true then taskEXIT_CRITICAL()
+                 * will yield, so make sure we return true to let the
+                 * caller know a yield has already happened. */
+                xAlreadyYielded = pdTRUE;
+            }
+        }
+        taskEXIT_CRITICAL();
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
 
     return xAlreadyYielded;
 }
@@ -2594,12 +3306,12 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
 
 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
 
-    TaskHandle_t xTaskGetIdleTaskHandle( void )
+    TaskHandle_t * xTaskGetIdleTaskHandle( void )
     {
         /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
         * started, then xIdleTaskHandle will be NULL. */
         configASSERT( ( xIdleTaskHandle != NULL ) );
-        return xIdleTaskHandle;
+        return &( xIdleTaskHandle[ 0 ] );
     }
 
 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
@@ -2693,19 +3405,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                  * switch if preemption is turned off. */
                 #if ( configUSE_PREEMPTION == 1 )
                 {
-                    /* Preemption is on, but a context switch should only be
-                     * performed if the unblocked task has a priority that is
-                     * equal to or higher than the currently executing task. */
-                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+                    taskENTER_CRITICAL();
                     {
-                        /* Pend the yield to be performed when the scheduler
-                         * is unsuspended. */
-                        xYieldPending = pdTRUE;
-                    }
-                    else
-                    {
-                        mtCOVERAGE_TEST_MARKER();
+                        prvYieldForTask( pxTCB, pdFALSE );
                     }
+                    taskEXIT_CRITICAL();
                 }
                 #endif /* configUSE_PREEMPTION */
             }
@@ -2728,168 +3432,213 @@ BaseType_t xTaskIncrementTick( void )
     TickType_t xItemValue;
     BaseType_t xSwitchRequired = pdFALSE;
 
-    /* Called by the portable layer each time a tick interrupt occurs.
-     * Increments the tick then checks to see if the new tick value will cause any
-     * tasks to be unblocked. */
-    traceTASK_INCREMENT_TICK( xTickCount );
+    #if ( configUSE_PREEMPTION == 1 )
+        UBaseType_t x;
+        BaseType_t xCoreYieldList[ configNUM_CORES ] = { pdFALSE };
+    #endif /* configUSE_PREEMPTION */
 
-    if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+    taskENTER_CRITICAL();
     {
+        /* Called by the portable layer each time a tick interrupt occurs.
+         * Increments the tick then checks to see if the new tick value will cause any
+         * tasks to be unblocked. */
+        traceTASK_INCREMENT_TICK( xTickCount );
+
+        /* Tick increment should occur on every kernel timer event.  Core 0 has the
+         * responsibility to increment the tick, or increment the pended ticks if the
+         * scheduler is suspended.
If pended ticks is greater than zero, the core that + * calls xTaskResumeAll has the responsibility to increment the tick. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Minor optimisation. The tick count cannot change in this + * block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ - { - taskSWITCH_DELAYED_LISTS(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Increment the RTOS tick, switching the delayed and overflowed + * delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; - /* See if this tick has made a timeout expire. Tasks are stored in - * the queue in the order of their wake time - meaning once one task - * has been found whose block time has not expired there is no need to - * look any further down the list. */ - if( xConstTickCount >= xNextTaskUnblockTime ) - { - for( ; ; ) + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ { - if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) - { - /* The delayed list is empty. Set xNextTaskUnblockTime - * to the maximum possible value so it is extremely - * unlikely that the - * if( xTickCount >= xNextTaskUnblockTime ) test will pass - * next time through. */ - xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - break; - } - else - { - /* The delayed list is not empty, get the value of the - * item at the head of the delayed list. This is the time - * at which the task at the head of the delayed list must - * be removed from the Blocked state. */ - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - if( xConstTickCount < xItemValue ) + /* See if this tick has made a timeout expire. Tasks are stored in + * the queue in the order of their wake time - meaning once one task + * has been found whose block time has not expired there is no need to + * look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ; ; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) { - /* It is not time to unblock this item yet, but the - * item value is the time at which the task at the head - * of the blocked list must be removed from the Blocked - * state - so record the item value in - * xNextTaskUnblockTime. */ - xNextTaskUnblockTime = xItemValue; - break; /*lint !e9011 Code structure here is deedmed easier to understand with multiple breaks. */ + /* The delayed list is empty. Set xNextTaskUnblockTime + * to the maximum possible value so it is extremely + * unlikely that the + * if( xTickCount >= xNextTaskUnblockTime ) test will pass + * next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; } else { - mtCOVERAGE_TEST_MARKER(); - } + /* The delayed list is not empty, get the value of the + * item at the head of the delayed list. This is the time + * at which the task at the head of the delayed list must + * be removed from the Blocked state. 
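+                         *
+                         * For example, if the head task's wake time is tick 1005 and
+                         * xConstTickCount is 1000, the code below records 1005 in
+                         * xNextTaskUnblockTime and stops searching; nothing further is
+                         * unblocked until the tick count reaches that value.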
*/
+                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+                        xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
+
+                        if( xConstTickCount < xItemValue )
+                        {
+                            /* It is not time to unblock this item yet, but the
+                             * item value is the time at which the task at the head
+                             * of the blocked list must be removed from the Blocked
+                             * state - so record the item value in
+                             * xNextTaskUnblockTime. */
+                            xNextTaskUnblockTime = xItemValue;
+                            break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+
+                        /* It is time to remove the item from the Blocked state. */
+                        ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+
+                        /* Is the task waiting on an event also?  If so remove
+                         * it from the event list. */
+                        if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+                        {
+                            ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+
+                        /* Place the unblocked task into the appropriate ready
+                         * list. */
+                        prvAddTaskToReadyList( pxTCB );
+
+                        /* A task being unblocked cannot cause an immediate
+                         * context switch if preemption is turned off. */
+                        #if ( configUSE_PREEMPTION == 1 )
                         {
-                            /* Preemption is on, but a context switch should
-                             * only be performed if the unblocked task has a
-                             * priority that is equal to or higher than the
-                             * currently executing task. */
-                            if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+                            prvYieldForTask( pxTCB, pdTRUE );
+                        }
+                        #endif /* configUSE_PREEMPTION */
+                    }
+                }
+            }
+
+            /* Tasks of equal priority to the currently running task will share
+             * processing time (time slice) if preemption is on, and the application
+             * writer has not explicitly turned time slicing off. */
+            #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
+            {
+                /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not
+                 * force a context switch that would just shuffle tasks around cores. */
+                /* TODO: There are certainly better ways of doing this that would reduce
+                 * the number of interrupts and also potentially help prevent tasks from
+                 * moving between cores as often.  This, however, works for now.
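+                 *
+                 * For example, with two cores both running priority 2 tasks and a
+                 * third priority 2 task in the ready list, the loop below sees a
+                 * ready list length greater than one for both cores and marks both
+                 * for a yield on this tick, even though moving a single task would
+                 * suffice - hence the TODOs above.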
*/ + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + + #if ( configUSE_TICK_HOOK == 1 ) + { + /* Guard against the tick hook being called when the pended tick + * count is being unwound (when the scheduler is being unlocked). */ + if( xPendedTicks == ( TickType_t ) 0 ) + { + vApplicationTickHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICK_HOOK */ + + #if ( configUSE_PREEMPTION == 1 ) + { + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + if( xYieldPendings[ x ] != pdFALSE ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #endif /* configUSE_PREEMPTION */ + + #if ( configUSE_PREEMPTION == 1 ) + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) + #endif + { + if( xCoreYieldList[ x ] != pdFALSE ) + { + if( x == xCoreID ) + { + xSwitchRequired = pdTRUE; + } + else + { + prvYieldCore( x ); + } } else { mtCOVERAGE_TEST_MARKER(); } } - #endif /* configUSE_PREEMPTION */ + } } - } + #endif /* configUSE_PREEMPTION */ } + else + { + ++xPendedTicks; - /* Tasks of equal priority to the currently running task will share - * processing time (time slice) if preemption is on, and the application - * writer has not explicitly turned time slicing off. */ - #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ - - #if ( configUSE_TICK_HOOK == 1 ) - { - /* Guard against the tick hook being called when the pended tick - * count is being unwound (when the scheduler is being unlocked). */ - if( xPendedTicks == ( TickType_t ) 0 ) + /* The tick hook gets called at regular intervals, even if the + * scheduler is locked. */ + #if ( configUSE_TICK_HOOK == 1 ) { vApplicationTickHook(); } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_TICK_HOOK */ - - #if ( configUSE_PREEMPTION == 1 ) - { - if( xYieldPending != pdFALSE ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_PREEMPTION */ - } - else - { - ++xPendedTicks; - - /* The tick hook gets called at regular intervals, even if the - * scheduler is locked. */ - #if ( configUSE_TICK_HOOK == 1 ) - { - vApplicationTickHook(); - } - #endif + #endif + } } + taskEXIT_CRITICAL(); return xSwitchRequired; } @@ -3007,79 +3756,97 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_APPLICATION_TASK_TAG */ /*-----------------------------------------------------------*/ -void vTaskSwitchContext( void ) +void vTaskSwitchContext( BaseType_t xCoreID ) { - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + /* Acquire both locks: + * - The ISR lock protects the ready list from simultaneous access by + * both other ISRs and tasks. 
+ * - We also take the task lock to pause here in case another core has + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. */ + + portGET_TASK_LOCK(); /* Must always acquire the task lock first */ + portGET_ISR_LOCK(); { - /* The scheduler is currently suspended - do not allow a context - * switch. */ - xYieldPending = pdTRUE; - } - else - { - xYieldPending = pdFALSE; - traceTASK_SWITCHED_OUT(); + /* vTaskSwitchContext() must never be called from within a critical section. + * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. */ + configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); - #if ( configGENERATE_RUN_TIME_STATS == 1 ) - { - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); - #endif + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + { + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + xYieldPendings[ xCoreID ] = pdFALSE; + traceTASK_SWITCHED_OUT(); - /* Add the amount of time the task has been running to the - * accumulated time so far. The time the task started running was - * stored in ulTaskSwitchedInTime. Note that there is no overflow - * protection here so count values are only valid until the timer - * overflows. The guard against negative values is to protect - * against suspect run time stat counter implementations - which - * are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime ) + #if ( configGENERATE_RUN_TIME_STATS == 1 ) { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ulTaskSwitchedInTime = ulTotalRunTime; } - else + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) { - mtCOVERAGE_TEST_MARKER(); + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; } + #endif - ulTaskSwitchedInTime = ulTotalRunTime; - } - #endif /* configGENERATE_RUN_TIME_STATS */ + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + ( void ) prvSelectHighestPriorityTask( xCoreID ); + traceTASK_SWITCHED_IN(); - /* Check for stack overflow, if configured. */ - taskCHECK_FOR_STACK_OVERFLOW(); + /* After the new task is switched in, update the global errno. 
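+             *
+             * For example, if a task on core 0 fails a call that sets errno at
+             * the same time as a task on core 1 reads errno, each observes its
+             * own per-task copy; the value is saved and restored around every
+             * context switch here.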
*/ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif - /* Before the currently running task is switched out, save its errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - pxCurrentTCB->iTaskErrno = FreeRTOS_errno; - } - #endif - - /* Select a new task to run using either the generic C or port - * optimised asm code. */ - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - traceTASK_SWITCHED_IN(); - - /* After the new task is switched in, update the global errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - FreeRTOS_errno = pxCurrentTCB->iTaskErrno; - } - #endif - - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - { - /* Switch Newlib's _impure_ptr variable to point to the _reent - * structure specific to this task. - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); - } - #endif /* configUSE_NEWLIB_REENTRANT */ + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + * structure specific to this task. + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + } } + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); } /*-----------------------------------------------------------*/ @@ -3210,21 +3977,15 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); } - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - /* Return true if the task removed from the event list has a higher - * priority than the calling task. This allows the calling task to know if - * it should force a context switch now. */ - xReturn = pdTRUE; + xReturn = pdFALSE; + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxUnblockedTCB, pdFALSE ); - /* Mark that a yield is pending in case the user is not using the - * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ - xYieldPending = pdTRUE; - } - else - { - xReturn = pdFALSE; - } + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + { + xReturn = pdTRUE; + } + #endif return xReturn; } @@ -3268,14 +4029,13 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - /* The unblocked task has a priority above that of the calling task, so - * a context switch is required. This function is called with the - * scheduler suspended so xYieldPending is set so the context switch - * occurs immediately that the scheduler is resumed (unsuspended). 
*/
-        xYieldPending = pdTRUE;
-    }
+    #if ( configUSE_PREEMPTION == 1 )
+        taskENTER_CRITICAL();
+        {
+            prvYieldForTask( pxUnblockedTCB, pdFALSE );
+        }
+        taskEXIT_CRITICAL();
+    #endif
 }
 /*-----------------------------------------------------------*/

@@ -3366,7 +4126,8 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,

 void vTaskMissedYield( void )
 {
-    xYieldPending = pdTRUE;
+    /* Must be called from within a critical section. */
+    xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
 }
 /*-----------------------------------------------------------*/

@@ -3433,6 +4194,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
      * any. */
     portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

+    /* All cores start up in the idle task. This initial yield gets the application
+     * tasks started. */
+    taskYIELD();
+
    for( ; ; )
    {
        /* See if any tasks have deleted themselves - if so then the idle task
@@ -3458,9 +4223,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
         *
         * A critical region is not required here as we are just reading from
         * the list, and an occasional incorrect value will not matter. If
-         * the ready list at the idle priority contains more than one task
+         * the ready list at the idle priority contains more tasks than there
+         * are idle tasks (one idle task per core, so configNUM_CORES in total)
         * then a task other than the idle task is ready to execute. */
-        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
+        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES )
        {
            taskYIELD();
        }
@@ -3691,14 +4457,31 @@ static void prvCheckTasksWaitingTermination( void )
        {
            taskENTER_CRITICAL();
            {
-                pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
-                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
-                --uxCurrentNumberOfTasks;
-                --uxDeletedTasksWaitingCleanUp;
+                /* In SMP, multiple idle tasks can be running simultaneously, so
+                 * check that another idle task did not already perform the clean
+                 * up while this core was waiting to enter the critical section. */
+                if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
+                {
+                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+
+                    if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
+                    {
+                        ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+                        --uxCurrentNumberOfTasks;
+                        --uxDeletedTasksWaitingCleanUp;
+                        prvDeleteTCB( pxTCB );
+                    }
+                    else
+                    {
+                        /* The TCB to be deleted has not yet been switched out
+                         * by the scheduler, so exit this loop early and try
+                         * again next time. */
+                        taskEXIT_CRITICAL();
+                        break;
+                    }
+                }
            }
            taskEXIT_CRITICAL();
-
-            prvDeleteTCB( pxTCB );
        }
    }
 #endif /* INCLUDE_vTaskDelete */
@@ -3748,7 +4531,7 @@ static void prvCheckTasksWaitingTermination( void )
      * state is just set to whatever is passed in. 
*/
     if( eState != eInvalid )
     {
-        if( pxTCB == pxCurrentTCB )
+        if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) )
         {
             pxTaskStatus->eCurrentState = eRunning;
         }
@@ -4011,11 +4794,23 @@ static void prvResetNextTaskUnblockTime( void )
     TaskHandle_t xTaskGetCurrentTaskHandle( void )
     {
         TaskHandle_t xReturn;
+        uint32_t ulState;

-        /* A critical section is not required as this is not called from
-         * an interrupt and the current TCB will always be the same for any
-         * individual execution thread. */
-        xReturn = pxCurrentTCB;
+        ulState = portDISABLE_INTERRUPTS();
+        xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
+        portRESTORE_INTERRUPTS( ulState );
+
+        return xReturn;
+    }
+
+    TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID )
+    {
+        TaskHandle_t xReturn = NULL;
+
+        if( taskVALID_CORE_ID( xCoreID ) != pdFALSE )
+        {
+            xReturn = pxCurrentTCBs[ xCoreID ];
+        }

         return xReturn;
     }
@@ -4035,14 +4830,18 @@ static void prvResetNextTaskUnblockTime( void )
         }
         else
         {
-            if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+            taskENTER_CRITICAL();
             {
-                xReturn = taskSCHEDULER_RUNNING;
-            }
-            else
-            {
-                xReturn = taskSCHEDULER_SUSPENDED;
+                if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+                {
+                    xReturn = taskSCHEDULER_RUNNING;
+                }
+                else
+                {
+                    xReturn = taskSCHEDULER_SUSPENDED;
+                }
             }
+            taskEXIT_CRITICAL();
         }

         return xReturn;
@@ -4327,6 +5126,24 @@ static void prvResetNextTaskUnblockTime( void )
 #endif /* configUSE_MUTEXES */
 /*-----------------------------------------------------------*/

+/*
+ * If not in a critical section then yield immediately. Otherwise set
+ * the xYieldPendings flag for the current core so that the yield is
+ * performed when the critical section is exited.
+ */
+void vTaskYieldWithinAPI( void )
+{
+    if( pxCurrentTCB->uxCriticalNesting == 0U )
+    {
+        portYIELD();
+    }
+    else
+    {
+        xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
+    }
+}
+/*-----------------------------------------------------------*/
+
 #if ( portCRITICAL_NESTING_IN_TCB == 1 )

     void vTaskEnterCritical( void )
@@ -4335,17 +5152,26 @@ static void prvResetNextTaskUnblockTime( void )

         if( xSchedulerRunning != pdFALSE )
         {
+            if( pxCurrentTCB->uxCriticalNesting == 0U )
+            {
+                if( portCHECK_IF_IN_ISR() == pdFALSE )
+                {
+                    portGET_TASK_LOCK();
+                }
+
+                portGET_ISR_LOCK();
+            }
+
             ( pxCurrentTCB->uxCriticalNesting )++;

-            /* This is not the interrupt safe version of the enter critical
-             * function so assert() if it is being called from an interrupt
-             * context. Only API functions that end in "FromISR" can be used in an
-             * interrupt. Only assert if the critical nesting count is 1 to
-             * protect against recursive calls if the assert function also uses a
-             * critical section. */
-            if( pxCurrentTCB->uxCriticalNesting == 1 )
+            /* This should now be interrupt safe. The only time there would be
+             * a problem is if this is called before a context switch and
+             * vTaskExitCritical() is called after pxCurrentTCB changes. Therefore
+             * this should not be used within vTaskSwitchContext(). */
+
+            if( ( uxSchedulerSuspended == 0U ) && ( pxCurrentTCB->uxCriticalNesting == 1U ) )
             {
-                portASSERT_IF_IN_ISR();
+                prvCheckForRunStateChange();
             }
         }
         else
@@ -4363,13 +5189,39 @@ static void prvResetNextTaskUnblockTime( void )
     {
         if( xSchedulerRunning != pdFALSE )
         {
+            /* If pxCurrentTCB->uxCriticalNesting is zero then this function
+             * does not match a previous call to vTaskEnterCritical(). 
*/
+            configASSERT( pxCurrentTCB->uxCriticalNesting > 0U );
+
             if( pxCurrentTCB->uxCriticalNesting > 0U )
             {
                 ( pxCurrentTCB->uxCriticalNesting )--;

                 if( pxCurrentTCB->uxCriticalNesting == 0U )
                 {
-                    portENABLE_INTERRUPTS();
+                    portRELEASE_ISR_LOCK();
+
+                    if( portCHECK_IF_IN_ISR() == pdFALSE )
+                    {
+                        portRELEASE_TASK_LOCK();
+                        portENABLE_INTERRUPTS();
+
+                        /* When a task yields inside a critical section it just
+                         * sets the xYieldPendings flag for its core. Now that
+                         * the critical section has been exited, check the flag
+                         * and, if it is set, yield. */
+                        if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
+                        {
+                            portYIELD();
+                        }
+                    }
+                    else
+                    {
+                        /* In an ISR we don't hold the task lock and don't
+                         * need to yield. The yield will happen, if necessary,
+                         * when the application ISR calls portEND_SWITCHING_ISR(). */
+                        mtCOVERAGE_TEST_MARKER();
+                    }
                 }
                 else
                 {
@@ -4706,7 +5558,7 @@ TickType_t uxTaskResetEventItemValue( void )
                  * section (some will yield immediately, others wait until the
                  * critical section exits) - but it is not something that
                  * application code should ever do. */
-                portYIELD_WITHIN_API();
+                vTaskYieldWithinAPI();
             }
             else
             {
@@ -4785,7 +5637,7 @@ TickType_t uxTaskResetEventItemValue( void )
                  * section (some will yield immediately, others wait until the
                  * critical section exits) - but it is not something that
                  * application code should ever do. */
-                portYIELD_WITHIN_API();
+                vTaskYieldWithinAPI();
             }
             else
             {
@@ -4936,16 +5788,11 @@ TickType_t uxTaskResetEventItemValue( void )
                 }
             #endif

-            if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
-            {
-                /* The notified task has a priority above the currently
-                 * executing task so a yield is required. */
-                taskYIELD_IF_USING_PREEMPTION();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
+            #if ( configUSE_PREEMPTION == 1 )
+                {
+                    prvYieldForTask( pxTCB, pdFALSE );
+                }
+            #endif
         }
         else
         {
@@ -5071,24 +5918,17 @@ TickType_t uxTaskResetEventItemValue( void )
                     vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                 }

-                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
-                {
-                    /* The notified task has a priority above the currently
-                     * executing task so a yield is required. */
-                    if( pxHigherPriorityTaskWoken != NULL )
-                    {
-                        *pxHigherPriorityTaskWoken = pdTRUE;
-                    }
+                #if ( configUSE_PREEMPTION == 1 )
+                    prvYieldForTask( pxTCB, pdFALSE );

-                    /* Mark that a yield is pending in case the user is not
-                     * using the "xHigherPriorityTaskWoken" parameter to an ISR
-                     * safe FreeRTOS function. */
-                    xYieldPending = pdTRUE;
-                }
-                else
-                {
-                    mtCOVERAGE_TEST_MARKER();
-                }
+                    if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
+                    {
+                        if( pxHigherPriorityTaskWoken != NULL )
+                        {
+                            *pxHigherPriorityTaskWoken = pdTRUE;
+                        }
+                    }
+                #endif
             }
         }
         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -5162,24 +6002,17 @@ TickType_t uxTaskResetEventItemValue( void )
                     vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                 }

-                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
-                {
-                    /* The notified task has a priority above the currently
-                     * executing task so a yield is required. */
-                    if( pxHigherPriorityTaskWoken != NULL )
-                    {
-                        *pxHigherPriorityTaskWoken = pdTRUE;
-                    }
+                #if ( configUSE_PREEMPTION == 1 )
+                    prvYieldForTask( pxTCB, pdFALSE );

-                    /* Mark that a yield is pending in case the user is not
-                     * using the "xHigherPriorityTaskWoken" parameter in an ISR
-                     * safe FreeRTOS function. 
*/
-                    xYieldPending = pdTRUE;
-                }
-                else
-                {
-                    mtCOVERAGE_TEST_MARKER();
-                }
+                    if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
+                    {
+                        if( pxHigherPriorityTaskWoken != NULL )
+                        {
+                            *pxHigherPriorityTaskWoken = pdTRUE;
+                        }
+                    }
+                #endif
             }
         }
         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -5254,10 +6087,17 @@ TickType_t uxTaskResetEventItemValue( void )

     uint32_t ulTaskGetIdleRunTimeCounter( void )
     {
-        return xIdleTaskHandle->ulRunTimeCounter;
+        uint32_t ulReturn = 0;
+        BaseType_t xCoreID;
+
+        for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ )
+        {
+            ulReturn += xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter;
+        }
+
+        return ulReturn;
     }

-#endif
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
 /*-----------------------------------------------------------*/

 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
diff --git a/timers.c b/timers.c
index a84c7b8b8..8f5316887 100644
--- a/timers.c
+++ b/timers.c
@@ -1,7 +1,6 @@
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
 
  * Permission is hereby granted, free of charge, to any person obtaining a copy of
  * this software and associated documentation files (the "Software"), to deal in
@@ -669,7 +668,7 @@
                  * block time to expire. If a command arrived between the
                  * critical section being exited and this yield then the yield
                  * will not cause the task to block. */
-                portYIELD_WITHIN_API();
+                vTaskYieldWithinAPI();
             }
             else
             {
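
The new checks in prvCheckTasksWaitingTermination() matter whenever tasks delete themselves, because the idle task on every core may race to reclaim the same TCB. A minimal application-level sketch that exercises this path; the task body and creation parameters are illustrative only, not part of the patch:

    #include "FreeRTOS.h"
    #include "task.h"

    static void prvShortLivedTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* ... perform some one-shot work ... */

        /* The TCB is parked on xTasksWaitingTermination until an idle task,
         * on whichever core gets there first, reclaims it - and only once the
         * deleted task is no longer running on any core. */
        vTaskDelete( NULL );
    }

    void vSpawnShortLivedWorkers( void )
    {
        BaseType_t x;

        for( x = 0; x < 4; x++ )
        {
            ( void ) xTaskCreate( prvShortLivedTask,
                                  "worker",
                                  configMINIMAL_STACK_SIZE,
                                  NULL,
                                  tskIDLE_PRIORITY + 1,
                                  NULL );
        }
    }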
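
xTaskGetCurrentTaskHandleCPU() makes it possible to inspect what each core is running. A sketch, assuming the scheduler has started; vLogPrintf() is a hypothetical application-supplied logging routine, not part of this change:

    #include "FreeRTOS.h"
    #include "task.h"

    void vDumpRunningTasks( void )
    {
        UBaseType_t uxCore;
        TaskHandle_t xRunning;

        for( uxCore = 0; uxCore < ( UBaseType_t ) configNUM_CORES; uxCore++ )
        {
            /* Returns NULL if uxCore fails the taskVALID_CORE_ID() check. */
            xRunning = xTaskGetCurrentTaskHandleCPU( uxCore );

            if( xRunning != NULL )
            {
                vLogPrintf( "core %u: %s\r\n", ( unsigned ) uxCore, pcTaskGetName( xRunning ) );
            }
        }
    }

Note the handles are a snapshot: without a critical section the task running on another core can change while the loop executes.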
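
The save/restore pattern xTaskGetCurrentTaskHandle() now uses generalises to application code. A minimal sketch, assuming the port implements portDISABLE_INTERRUPTS() so that it returns the previous interrupt state (as this SMP port requires); uxSharedCounter is a hypothetical variable shared with an ISR on the same core:

    #include "FreeRTOS.h"
    #include "task.h"

    static volatile UBaseType_t uxSharedCounter = 0; /* Hypothetical shared state. */

    void vIncrementSharedCounter( void )
    {
        uint32_t ulState;

        /* Mask interrupts, remembering the state that was in force. */
        ulState = portDISABLE_INTERRUPTS();

        /* This only masks interrupts on the calling core. Data shared across
         * cores still needs taskENTER_CRITICAL()/taskEXIT_CRITICAL(), which
         * additionally take the task and ISR locks. */
        uxSharedCounter++;

        /* Restore whatever interrupt state was in force before the call. */
        portRESTORE_INTERRUPTS( ulState );
    }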
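
vTaskYieldWithinAPI() and the reworked vTaskExitCritical() together give critical sections deferred-yield semantics: a yield requested while uxCriticalNesting is non-zero is latched in xYieldPendings[] and performed only when the outermost critical section exits. Seen from application code; vMakeHigherPriorityTaskReady() is a hypothetical stand-in for any operation that readies a higher priority task:

    #include "FreeRTOS.h"
    #include "task.h"

    void vCriticalSectionExample( void )
    {
        /* First entry takes the task lock then the ISR lock. */
        taskENTER_CRITICAL();
        {
            /* A nested entry only increments uxCriticalNesting. */
            taskENTER_CRITICAL();
            {
                vMakeHigherPriorityTaskReady(); /* Hypothetical; any yield is latched, not taken. */
            }
            taskEXIT_CRITICAL(); /* Nesting count drops to 1; locks still held. */
        }
        taskEXIT_CRITICAL(); /* Locks released; a latched yield happens here. */
    }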
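
The reworked FromISR notification paths keep the xHigherPriorityTaskWoken contract: prvYieldForTask() latches any required yield in xYieldPendings[] for the current core, and the ISR still requests the switch on its way out. A sketch of the unchanged application-side pattern; xWorkerTask is a hypothetical handle stored at creation time and the interrupt source is illustrative:

    #include "FreeRTOS.h"
    #include "task.h"

    static TaskHandle_t xWorkerTask = NULL; /* Hypothetical; set by xTaskCreate() elsewhere. */

    void vExampleIRQHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Unblock the worker; sets xHigherPriorityTaskWoken if the worker
         * should preempt the task running on this core. */
        vTaskNotifyGiveFromISR( xWorkerTask, &xHigherPriorityTaskWoken );

        /* As the comment in vTaskExitCritical() notes, the yield out of an
         * ISR happens via portEND_SWITCHING_ISR(). */
        portEND_SWITCHING_ISR( xHigherPriorityTaskWoken );
    }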
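
Because ulTaskGetIdleRunTimeCounter() now sums the idle counters of all cores, a load estimate has to compare it against elapsed run time multiplied by configNUM_CORES. A sketch, assuming configGENERATE_RUN_TIME_STATS and INCLUDE_xTaskGetIdleTaskHandle are both 1, that the port's run time counter measures elapsed wall time, and that neither counter overflows between samples:

    #include "FreeRTOS.h"
    #include "task.h"

    /* Returns an approximate total CPU load, 0-100, across all cores. */
    uint32_t ulEstimateLoadPercent( void )
    {
        static uint32_t ulLastElapsed = 0, ulLastIdle = 0;
        uint32_t ulElapsed, ulIdle, ulElapsedDelta, ulIdleDelta;

        /* Elapsed time advances once per sample period, but configNUM_CORES
         * cores were executing during that period. */
        ulElapsed = portGET_RUN_TIME_COUNTER_VALUE() * ( uint32_t ) configNUM_CORES;
        ulIdle = ulTaskGetIdleRunTimeCounter();

        ulElapsedDelta = ulElapsed - ulLastElapsed;
        ulIdleDelta = ulIdle - ulLastIdle;
        ulLastElapsed = ulElapsed;
        ulLastIdle = ulIdle;

        if( ulElapsedDelta == 0UL )
        {
            return 0UL;
        }

        /* Clamp against sampling skew so the subtraction cannot wrap. */
        if( ulIdleDelta > ulElapsedDelta )
        {
            ulIdleDelta = ulElapsedDelta;
        }

        return 100UL - ( ( ulIdleDelta * 100UL ) / ulElapsedDelta );
    }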