mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git
synced 2025-09-12 09:07:46 -04:00
Update kernel to support SMP
The XMOS XCore ports are also updated to support SMP.
This commit is contained in:
parent bec63029ef
commit 596296fc73
16 changed files with 2043 additions and 807 deletions
@@ -256,7 +256,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
 {
     if( xAlreadyYielded == pdFALSE )
     {
-        portYIELD_WITHIN_API();
+        vTaskYieldWithinAPI();
     }
     else
     {

@@ -408,7 +408,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 {
     if( xAlreadyYielded == pdFALSE )
     {
-        portYIELD_WITHIN_API();
+        vTaskYieldWithinAPI();
     }
     else
     {
@@ -236,6 +236,14 @@
 #define configUSE_COUNTING_SEMAPHORES 0
 #endif

+#ifndef configUSE_TASK_PREEMPTION_DISABLE
+#define configUSE_TASK_PREEMPTION_DISABLE 0
+#endif
+
+#ifndef configUSE_CORE_EXCLUSION
+#define configUSE_CORE_EXCLUSION 0
+#endif
+
 #ifndef configUSE_ALTERNATIVE_API
 #define configUSE_ALTERNATIVE_API 0
 #endif
@@ -283,6 +291,15 @@
 #define portSOFTWARE_BARRIER()
 #endif

+#ifndef configNUM_CORES
+#define configNUM_CORES 1
+#endif
+
+#ifndef configRUN_MULTIPLE_PRIORITIES
+#define configRUN_MULTIPLE_PRIORITIES 0
+#endif
+
 /* The timers module relies on xTaskGetSchedulerState(). */
 #if configUSE_TIMERS == 1
@@ -782,10 +799,6 @@
 #define portPRIVILEGE_BIT ( ( UBaseType_t ) 0x00 )
 #endif

-#ifndef portYIELD_WITHIN_API
-#define portYIELD_WITHIN_API portYIELD
-#endif
-
 #ifndef portSUPPRESS_TICKS_AND_SLEEP
 #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime )
 #endif
@@ -930,6 +943,18 @@
 #error configUSE_MUTEXES must be set to 1 to use recursive mutexes
 #endif

+#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_CORE_EXCLUSION != 0 ) )
+#error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use core exclusion
+#endif
+
+#if( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) )
+#error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use task preemption disable
+#endif
+
+#if( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) )
+#error configUSE_PREEMPTION must be set to 1 to use task preemption disable
+#endif
+
 #ifndef configINITIAL_TICK_COUNT
 #define configINITIAL_TICK_COUNT 0
 #endif
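Note (not part of the diff): given the compile-time checks above, an SMP build's FreeRTOSConfig.h would combine the new options roughly as follows. The option names come from this commit; the values are only an illustrative, assumed configuration for a dual-core target.

    /* Illustrative SMP settings for FreeRTOSConfig.h (assumed values). */
    #define configNUM_CORES                     2   /* run the kernel on two cores */
    #define configUSE_PREEMPTION                1   /* required for per-task preemption disable */
    #define configRUN_MULTIPLE_PRIORITIES       1   /* required for core exclusion and preemption disable */
    #define configUSE_CORE_EXCLUSION            1   /* enable per-task core exclusion */
    #define configUSE_TASK_PREEMPTION_DISABLE   1   /* enable per-task preemption disable */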
@@ -1174,7 +1199,14 @@ typedef struct xSTATIC_TCB
 StaticListItem_t xDummy3[ 2 ];
 UBaseType_t uxDummy5;
 void * pxDummy6;
+BaseType_t xDummy23[ 2 ];
 uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ];
+#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+BaseType_t xDummy24;
+#endif
+#if ( configUSE_CORE_EXCLUSION == 1 )
+UBaseType_t uxDummy25;
+#endif
 #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
 void * pxDummy8;
 #endif
@@ -214,6 +214,9 @@ typedef enum
 * task. h
 *
 * Macro to disable all maskable interrupts.
+* This also returns what the interrupt state was
+* upon being called. This state may subsequently
+* be passed to taskRESTORE_INTERRUPTS().
 *
 * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS
 * \ingroup SchedulerControl
@@ -230,6 +233,28 @@ typedef enum
 */
 #define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS()

+/**
+* task. h
+*
+* Macro to restore microcontroller interrupts to
+* a previous state.
+*
+* \defgroup taskRESTORE_INTERRUPTS taskRESTORE_INTERRUPTS
+* \ingroup SchedulerControl
+*/
+#define taskRESTORE_INTERRUPTS(ulState) portRESTORE_INTERRUPTS(ulState)
+
+/**
+* task. h
+*
+* Macro that determines if it is being called from within an ISR
+* or a task. Returns non-zero if it is in an ISR.
+*
+* \defgroup taskCHECK_IF_IN_ISR taskCHECK_IF_IN_ISR
+* \ingroup SchedulerControl
+*/
+#define taskCHECK_IF_IN_ISR() portCHECK_IF_IN_ISR()
+
 /* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is
 * 0 to generate more optimal code when configASSERT() is defined as the constant
 * is used in assert() statements. */
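Note (not part of the diff): a minimal usage sketch for the taskDISABLE_INTERRUPTS()/taskRESTORE_INTERRUPTS() pair documented above. The variable name and its uint32_t type are assumptions; the doc comments only say the disable macro returns the previous state and that this state can later be passed to taskRESTORE_INTERRUPTS().

    uint32_t ulState;

    ulState = taskDISABLE_INTERRUPTS();   /* mask interrupts and keep the previous state */
    /* ...short section that must not be interrupted... */
    taskRESTORE_INTERRUPTS( ulState );    /* put the interrupt mask back the way it was */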
@@ -237,6 +262,8 @@ typedef enum
 #define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 )
 #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 )

+/* Check if core value is valid */
+#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) )

 /*-----------------------------------------------------------
 * TASK CREATION API
@@ -1208,6 +1235,12 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
 */
 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;

+void vTaskCoreExclusionSet( const TaskHandle_t xTask, UBaseType_t uxCoreExclude );
+UBaseType_t vTaskCoreExclusionGet( const TaskHandle_t xTask );
+
+void vTaskPreemptionDisable( const TaskHandle_t xTask );
+void vTaskPreemptionEnable( const TaskHandle_t xTask );
+
 /*-----------------------------------------------------------
 * SCHEDULER CONTROL
 *----------------------------------------------------------*/
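Note (not part of the diff): a sketch of how an application might call the new per-task APIs declared above. The handle name is illustrative, and treating uxCoreExclude as a bit mask of cores the task may not run on is an assumption based only on the parameter name.

    /* Keep xMyTask off every core except core 0 (assumed bit-mask semantics). */
    vTaskCoreExclusionSet( xMyTask, ~( ( UBaseType_t ) 1 << 0 ) );

    /* Temporarily protect xMyTask from being preempted, then allow it again. */
    vTaskPreemptionDisable( xMyTask );
    /* ...work that should not be preempted... */
    vTaskPreemptionEnable( xMyTask );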
@@ -1666,10 +1699,10 @@ BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
 * xTaskGetIdleTaskHandle() is only available if
 * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h.
 *
-* Simply returns the handle of the idle task. It is not valid to call
-* xTaskGetIdleTaskHandle() before the scheduler has been started.
+* Simply returns a pointer to the array of idle task handles.
+* It is not valid to call xTaskGetIdleTaskHandle() before the scheduler has been started.
 */
-TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION;
+TaskHandle_t *xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION;

 /**
 * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for
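Note (not part of the diff): because xTaskGetIdleTaskHandle() now returns a pointer to an array of idle task handles, callers index it per core. A hedged sketch; the loop and the assumption that entry N belongs to core N are illustrative.

    TaskHandle_t *pxIdleHandles = xTaskGetIdleTaskHandle();
    BaseType_t xCore;

    for( xCore = 0; xCore < configNUM_CORES; xCore++ )
    {
        configASSERT( pxIdleHandles[ xCore ] != NULL );   /* assumed: one idle task per core */
    }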
@@ -2946,7 +2979,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
 * Sets the pointer to the current TCB to the TCB of the highest priority task
 * that is ready to run.
 */
-portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION;
+portDONT_DISCARD void vTaskSwitchContext( BaseType_t xCoreID ) PRIVILEGED_FUNCTION;

 /*
 * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY
@@ -2959,6 +2992,11 @@ TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION;
 */
 TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION;

+/*
+ * Return the handle of the task running on specified core.
+ */
+TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) PRIVILEGED_FUNCTION;
+
 /*
 * Shortcut used by the queue implementation to prevent unnecessary call to
 * taskYIELD();
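Note (not part of the diff): a small sketch of how a debug routine could use the declaration above to report what each core is running. The loop and the use of debug_printf()/pcTaskGetName() are illustrative.

    UBaseType_t uxCore;

    for( uxCore = 0; uxCore < ( UBaseType_t ) configNUM_CORES; uxCore++ )
    {
        TaskHandle_t xRunning = xTaskGetCurrentTaskHandleCPU( uxCore );
        debug_printf( "core %u runs %s\n", ( unsigned ) uxCore, pcTaskGetName( xRunning ) );
    }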
@@ -3044,6 +3082,11 @@ TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION;
 */
 void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;

+/*
+ * For internal use only. Same as portYIELD_WITHIN_API() in single core FreeRTOS.
+ * For SMP this is not defined by the port.
+ */
+void vTaskYieldWithinAPI( void );
+
 /* *INDENT-OFF* */
 #ifdef __cplusplus
@@ -9,14 +9,29 @@

 static hwtimer_t xKernelTimer;

-uint32_t ulPortYieldRequired = pdFALSE;
+uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ] = { pdFALSE };

 /*-----------------------------------------------------------*/

+void vIntercoreInterruptISR( void )
+{
+    int xCoreID;
+
+//    debug_printf( "In KCALL: %u\n", ulData );
+    xCoreID = rtos_core_id_get();
+    ulPortYieldRequired[ xCoreID ] = pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
 DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData )
 {
     uint32_t ulLastTrigger;
     uint32_t ulNow;
+    int xCoreID;
+
+    xCoreID = 0;
+
+    configASSERT( xCoreID == rtos_core_id_get() );

     /* Need the next interrupt to be scheduled relative to
      * the current trigger time, rather than the current

@@ -40,14 +55,36 @@ DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData )

     if( xTaskIncrementTick() != pdFALSE )
     {
-        ulPortYieldRequired = pdTRUE;
+        ulPortYieldRequired[ xCoreID ] = pdTRUE;
     }
 }
 /*-----------------------------------------------------------*/

-static void prvCoreInit( void )
+void vPortYieldOtherCore( int xOtherCoreID )
 {
-    rtos_core_register();
+    int xCoreID;
+
+    /*
+     * This function must be called from within a critical section.
+     */
+
+    xCoreID = rtos_core_id_get();
+
+//    debug_printf("%d->%d\n", xCoreID, xOtherCoreID);
+//    debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID );
+
+    rtos_irq( xOtherCoreID, xCoreID );
+}
+/*-----------------------------------------------------------*/
+
+static int prvCoreInit( void )
+{
+    int xCoreID;
+
+    xCoreID = rtos_core_register();
+    debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID );

     asm volatile (
         "ldap r11, kexcept\n\t"
         "set kep, r11\n\t"

@@ -56,35 +93,49 @@ static void prvCoreInit( void )
     : "r11"
     );

-    rtos_irq_enable( 1 );
+    rtos_irq_enable( configNUM_CORES );

-    uint32_t ulNow;
-    ulNow = hwtimer_get_time( xKernelTimer );
-    // debug_printf( "The time is now (%u)\n", ulNow );
+    /*
+     * All threads wait here until all have enabled IRQs
+     */
+    while( rtos_irq_ready() == pdFALSE );

-    ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
+    if( xCoreID == 0 )
+    {
+        uint32_t ulNow;
+        ulNow = hwtimer_get_time( xKernelTimer );
+        // debug_printf( "The time is now (%u)\n", ulNow );

-    triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) );
-    hwtimer_set_trigger_time( xKernelTimer, ulNow );
-    triggerable_enable_trigger( xKernelTimer );
+        ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
+
+        triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) );
+        hwtimer_set_trigger_time( xKernelTimer, ulNow );
+        triggerable_enable_trigger( xKernelTimer );
+    }
+
+    return xCoreID;
 }
 /*-----------------------------------------------------------*/

 DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void )
 {
-    prvCoreInit();
+    int xCoreID;

-    debug_printf( "FreeRTOS initialized\n" );
+    xCoreID = prvCoreInit();
+
+    debug_printf( "FreeRTOS Core %d initialized\n", xCoreID );

     /*
      * Restore the context of the first thread
      * to run and jump into it.
      */
     asm volatile (
+        "mov r6, %0\n\t"                  /* R6 must be the FreeRTOS core ID*/
+        "ldaw r5, dp[pxCurrentTCBs]\n\t"  /* R5 must be the TCB list which is indexed by R6 */
         "bu _freertos_restore_ctx\n\t"
         : /* no outputs */
-        : /* no inputs */
-        : /* nothing is clobbered */
+        : "r"(xCoreID)
+        : "r5", "r6"
     );
 }
 /*-----------------------------------------------------------*/

@@ -159,14 +210,22 @@ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t px
 }
 /*-----------------------------------------------------------*/

+void vPortStartSMPScheduler( void );
+
 /*
 * See header file for description.
 */
 BaseType_t xPortStartScheduler( void )
 {
+    if( ( configNUM_CORES > portMAX_CORE_COUNT ) || ( configNUM_CORES <= 0 ) )
+    {
+        return pdFAIL;
+    }
+
     rtos_locks_initialize();
     xKernelTimer = hwtimer_alloc();
-    RTOS_KERNEL_ENTRY(vPortStartSchedulerOnCore)();
+
+    vPortStartSMPScheduler();

     return pdPASS;
 }
portable/XCC/XCORE200/port.xc (new file, 26 lines)

@@ -0,0 +1,26 @@
+/*
+ * port.xc
+ *
+ *  Created on: Jul 31, 2019
+ *      Author: mbruno
+ */
+
+//#include "rtos_support.h"
+
+extern "C" {
+
+#include "FreeRTOSConfig.h" /* to get configNUM_CORES */
+#ifndef configNUM_CORES
+#define configNUM_CORES 1
+#endif
+
+void __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(void);
+
+} /* extern "C" */
+
+void vPortStartSMPScheduler( void )
+{
+    par (int i = 0; i < configNUM_CORES; i++) {
+        __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore();
+    }
+}
@@ -21,10 +21,13 @@ kexcept:
     bau r11 //_TrapHandler is at 0x00040080. TODO: Is it always? Why can't I access the symbol _TrapHandler?

 _yield:
-    set sp, r4             /* Restore the task's SP to save the rest of its context. */
-    bu _yield_continue     /* Skip the ulPortYieldRequired check and jump right to */
-                           /* the context save and switch. Also skips saving SPC */
-                           /* since the kcall handler has already saved it. */
+    {set sp, r4            /* Restore the task's SP to save the rest of its context. */
+    get r11, id}           /* Get the logical core ID into r11. */
+    ldaw r0, dp[rtos_core_map]
+    ldw r0, r0[r11]        /* Translate to the RTOS core ID into r0 */
+    bu _yield_continue     /* Skip the ulPortYieldRequired check and jump right to */
+                           /* the context save and switch. Also skips saving SPC */
+                           /* since the kcall handler has already saved it. */

 .align 64
 kcall:

@@ -68,7 +71,7 @@ rtos_interrupt_callback_common:
     {stw r4, sp[12]
     /*stw r11, sp[19] already saved by the wrapper function. */

-    ldaw r4, sp[0]}        /* Get value of current stackpointer into r4 */
+    ldaw r4, sp[0]}        /* Get value of current stackpointer into r4. */

     {kentsp 0              /* switch to the kernel stack. */
                            /* The value 0 is safe to use since we don't need the SP */

@@ -78,12 +81,16 @@ rtos_interrupt_callback_common:
     {mov r0, r11           /* into the first argument for the callback function... */
     bla r1}                /* and call the callback function. */

-    set sp, r4             /* Restore the task's SP now. */
+    {set sp, r4            /* Restore the task's SP now. */
+    get r11, id}           /* Get the logical core ID into r11. */
+    ldaw r0, dp[rtos_core_map]
+    ldw r0, r0[r11]        /* Translate to the RTOS core ID into r0. */

-    ldw r0, dp[ulPortYieldRequired] /* Is a yield required? */
-    {bf r0, _freertos_restore_ctx_partial /* If not, restore the context now. */
-    ldc r0, 0}
-    stw r0, dp[ulPortYieldRequired] /* Otherwise, clear the yield required flag. */
+    ldaw r2, dp[ulPortYieldRequired] /* Get the yield required array into r2. */
+    ldw r1, r2[r0]         /* Is a yield required for this core? */
+    {bf r1, _freertos_restore_ctx_partial /* If not, restore the context now. */
+    ldc r1, 0}
+    stw r1, r2[r0]         /* Otherwise, clear the yield required flag. */

     /* Save the rest of the current task's context. */
     stw spc, sp[1]

@@ -100,14 +107,17 @@ _yield_continue:
     stw r9, sp[17]
     stw r10, sp[18]

-    ldw r0, dp[pxCurrentTCB] /* Save the current task's SP to the first */
-    stw r4, r0[0x0]        /* word (top of stack) in the current TCB */
+    ldaw r5, dp[pxCurrentTCBs] /* Get the current TCB array into r5. */
+    ldw r1, r5[r0]         /* Get this core's current TCB pointer into r1. */
+    stw r4, r1[0x0]        /* Save the current task's SP to the first */
+                           /* word (top of stack) in the current TCB. */

-    kentsp 0               /* switch back to the kernel stack. */
+    {kentsp 0              /* switch back to the kernel stack. */
+    mov r6, r0}            /* copy the RTOS core ID into r6 so we don't lose it. */
     ldap r11, vTaskSwitchContext
-    bla r11                /* Finally call vTaskSwitchContext() now that the task's */
-                           /* entire context is saved. */
+    bla r11                /* Finally call vTaskSwitchContext(core_id) now that the task's */
+                           /* entire context is saved. Note the core id in r0 is the argument. */

     //krestsp 0            /* unnecessary since KSP is already set and the SP */
                            /* is being restored next from the current TCB. */

@@ -115,7 +125,7 @@ _yield_continue:
 .globl _freertos_restore_ctx
 _freertos_restore_ctx:

-    ldw r0, dp[pxCurrentTCB]
+    ldw r0, r5[r6]         /* get this core's current TCB pointer into r0 */
     ldw r0, r0[0x0]        /* Get the top of the stack from the current TCB... */
     set sp, r0;            /* into the stack pointer register. */
@@ -46,9 +46,9 @@ typedef uint32_t UBaseType_t;
 #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
 #define portBYTE_ALIGNMENT 8
 #define portCRITICAL_NESTING_IN_TCB 1
-#ifdef configNUM_CORES
-#warning configNUM_CORES should not be defined when using the single core XCORE port
-#undef configNUM_CORES
+#define portMAX_CORE_COUNT 8
+#ifndef configNUM_CORES
+#define configNUM_CORES 1
 #endif

 /* This may be set to zero in the config file if the rtos_time
@@ -67,6 +67,12 @@ functions are not needed or if it is incremented elsewhere. */
 #define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH

 #ifndef __ASSEMBLER__
+
+/* Check validity of number of cores specified in config */
+#if ( configNUM_CORES < 1 || portMAX_CORE_COUNT < configNUM_CORES )
+#error "Invalid number of cores specified in config!"
+#endif
+
 #define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER()
 #define portTASK_STACK_DEPTH(pxTaskCode) RTOS_THREAD_STACK_SIZE(pxTaskCode)
 /*-----------------------------------------------------------*/

@@ -79,17 +85,24 @@ do \
 { \
     if( xSwitchRequired != pdFALSE ) \
     { \
-        extern uint32_t ulPortYieldRequired; \
-        ulPortYieldRequired = pdTRUE; \
+        extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \
+        ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \
     } \
 } while( 0 )

 #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
 /*-----------------------------------------------------------*/

+/* SMP utilities. */
+#define portGET_CORE_ID() rtos_core_id_get()
+
+void vPortYieldOtherCore( int xOtherCoreID );
+#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
+/*-----------------------------------------------------------*/
+
 /* Architecture specific optimisations. */
 #ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
-#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
 #endif

 #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
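Note (not part of the diff): a sketch of the intended pattern for the new SMP utilities above. The target core number is illustrative, and the surrounding critical section follows the comment in vPortYieldOtherCore() that it must be called from within one.

    portENTER_CRITICAL();
    {
        if( portGET_CORE_ID() != 1 )
        {
            portYIELD_CORE( 1 );   /* ask core 1 (arbitrary example) to reschedule */
        }
    }
    portEXIT_CRITICAL();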
@@ -108,10 +121,16 @@ do \
 /* Critical section management. */

 #define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get()
+
+/*
+ * This differs from the standard portDISABLE_INTERRUPTS()
+ * in that it also returns what the interrupt state was
+ * before it disabling interrupts.
+ */
 #define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all()

 #define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all()
-#define portSET_INTERRUPT_MASK_FROM_ISR() 0
-#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x)

 /*
 * Will enable interrupts if ulState is non-zero.
 */

@@ -122,12 +141,43 @@ do \
 * ISR or otherwise in kernel mode.
 */
 #define portCHECK_IF_IN_ISR() rtos_isr_running()

 #define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 )

+#define portGET_ISR_LOCK() rtos_lock_acquire(0)
+#define portRELEASE_ISR_LOCK() rtos_lock_release(0)
+#define portGET_TASK_LOCK() rtos_lock_acquire(1)
+#define portRELEASE_TASK_LOCK() rtos_lock_release(1)
+
 void vTaskEnterCritical(void);
 void vTaskExitCritical(void);
 #define portENTER_CRITICAL() vTaskEnterCritical()
 #define portEXIT_CRITICAL() vTaskExitCritical()

+/*
+ * vTaskEnterCritical() has been modified to be safe to use
+ * from within ISRs. The previous mask does not need to be
+ * returned since in the xCORE interrupts are always disabled
+ * in ISRs. Effectively this call just grabs the kernel lock
+ * when called from an ISR.
+ */
+static inline uint32_t portSET_INTERRUPT_MASK_FROM_ISR( void )
+{
+    vTaskEnterCritical();
+    return 0;
+}
+#define portSET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR()
+
+/*
+ * vTaskExitCritical() has been modified to be safe to use
+ * from within ISRs. When the nesting level has reached zero
+ * it releases the lock, but when called from within an ISR
+ * it will *not* re-enable interrupts since it is assumed they
+ * were previously disabled. Thus the previous state in x is
+ * unused.
+ */
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vTaskExitCritical()
+
 /*-----------------------------------------------------------*/

 /* Runtime stats support */
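Note (not part of the diff): the usual FreeRTOS FROM_ISR pattern still applies with the macros defined above; on this port the returned value is always 0 and the restore argument is unused, since the call pair simply takes and releases the kernel lock. The ISR body is illustrative.

    void vExampleISR( void )                              /* illustrative ISR */
    {
        uint32_t ulSaved;

        ulSaved = portSET_INTERRUPT_MASK_FROM_ISR();      /* grabs the kernel lock */
        /* ...touch data shared with tasks... */
        portCLEAR_INTERRUPT_MASK_FROM_ISR( ulSaved );     /* releases the lock */
    }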
@@ -35,7 +35,10 @@
 * The RTOS provided handler that should run when a
 * core receives an intercore interrupt request.
 */
-#define RTOS_INTERCORE_INTERRUPT_ISR()
+#define RTOS_INTERCORE_INTERRUPT_ISR() do { \
+    void vIntercoreInterruptISR( void ); \
+    vIntercoreInterruptISR(); \
+} while ( 0 )

 /**
 * The number of hardware locks that the RTOS

@@ -45,7 +48,7 @@
 * Note that the IRQ routines require a lock and
 * will share the first one with the RTOS.
 */
-#define RTOS_LOCK_COUNT 0
+#define RTOS_LOCK_COUNT 2

 /**
 * Remaps all calls to debug_printf() to rtos_printf().

@@ -64,6 +67,15 @@
 #endif
 #define DEBUG_PRINT_ENABLE 1

+#ifndef configTASKS_DEBUG
+#define configTASKS_DEBUG 0
+#endif
+#if configTASKS_DEBUG == 1
+#define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1
+#else
+#define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1
+#endif
+
 #else /* configENABLE_DEBUG_PRINTF */

 /* ensure that debug_printf is disabled */
@@ -10,14 +10,29 @@

 static hwtimer_t xKernelTimer;

-uint32_t ulPortYieldRequired = pdFALSE;
+uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ] = { pdFALSE };

 /*-----------------------------------------------------------*/

+void vIntercoreInterruptISR( void )
+{
+    int xCoreID;
+
+//    debug_printf( "In KCALL: %u\n", ulData );
+    xCoreID = rtos_core_id_get();
+    ulPortYieldRequired[ xCoreID ] = pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
 DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData )
 {
     uint32_t ulLastTrigger;
     uint32_t ulNow;
+    int xCoreID;
+
+    xCoreID = 0;
+
+    configASSERT( xCoreID == rtos_core_id_get() );

     /* Need the next interrupt to be scheduled relative to
      * the current trigger time, rather than the current

@@ -41,14 +56,36 @@ DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData )

     if( xTaskIncrementTick() != pdFALSE )
     {
-        ulPortYieldRequired = pdTRUE;
+        ulPortYieldRequired[ xCoreID ] = pdTRUE;
     }
 }
 /*-----------------------------------------------------------*/

-static void prvCoreInit( void )
+void vPortYieldOtherCore( int xOtherCoreID )
 {
-    rtos_core_register();
+    int xCoreID;
+
+    /*
+     * This function must be called from within a critical section.
+     */
+
+    xCoreID = rtos_core_id_get();
+
+//    debug_printf("%d->%d\n", xCoreID, xOtherCoreID);
+//    debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID );
+
+    rtos_irq( xOtherCoreID, xCoreID );
+}
+/*-----------------------------------------------------------*/
+
+static int prvCoreInit( void )
+{
+    int xCoreID;
+
+    xCoreID = rtos_core_register();
+    debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID );

     asm volatile (
         "ldap r11, kexcept\n\t"
         "set kep, r11\n\t"

@@ -57,35 +94,57 @@ static void prvCoreInit( void )
     : "r11"
     );

-    rtos_irq_enable( 1 );
+    rtos_irq_enable( configNUM_CORES );

-    uint32_t ulNow;
-    ulNow = hwtimer_get_time( xKernelTimer );
-    // debug_printf( "The time is now (%u)\n", ulNow );
+    /*
+     * All threads wait here until all have enabled IRQs
+     */
+    while( rtos_irq_ready() == pdFALSE );

-    ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
+    if( xCoreID == 0 )
+    {
+        uint32_t ulNow;
+        ulNow = hwtimer_get_time( xKernelTimer );
+        // debug_printf( "The time is now (%u)\n", ulNow );

-    triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) );
-    hwtimer_set_trigger_time( xKernelTimer, ulNow );
-    triggerable_enable_trigger( xKernelTimer );
+        ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
+
+        triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) );
+        hwtimer_set_trigger_time( xKernelTimer, ulNow );
+        triggerable_enable_trigger( xKernelTimer );
+    }
+
+    return xCoreID;
 }
 /*-----------------------------------------------------------*/

 DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void )
 {
-    prvCoreInit();
+    int xCoreID;

-    debug_printf( "FreeRTOS initialized\n" );
+    xCoreID = prvCoreInit();
+
+    #if( configUSE_CORE_INIT_HOOK == 1 )
+    {
+        extern void vApplicationCoreInitHook( BaseType_t xCoreID );
+
+        vApplicationCoreInitHook( xCoreID );
+    }
+    #endif
+
+    debug_printf( "FreeRTOS Core %d initialized\n", xCoreID );

     /*
      * Restore the context of the first thread
      * to run and jump into it.
      */
     asm volatile (
+        "mov r6, %0\n\t"                  /* R6 must be the FreeRTOS core ID*/
+        "ldaw r5, dp[pxCurrentTCBs]\n\t"  /* R5 must be the TCB list which is indexed by R6 */
         "bu _freertos_restore_ctx\n\t"
         : /* no outputs */
-        : /* no inputs */
-        : /* nothing is clobbered */
+        : "r"(xCoreID)
+        : "r5", "r6"
     );
 }
 /*-----------------------------------------------------------*/

@@ -164,14 +223,22 @@ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t px
 }
 /*-----------------------------------------------------------*/

+void vPortStartSMPScheduler( void );
+
 /*
 * See header file for description.
 */
 BaseType_t xPortStartScheduler( void )
 {
+    if( ( configNUM_CORES > portMAX_CORE_COUNT ) || ( configNUM_CORES <= 0 ) )
+    {
+        return pdFAIL;
+    }
+
     rtos_locks_initialize();
     xKernelTimer = hwtimer_alloc();
-    RTOS_KERNEL_ENTRY(vPortStartSchedulerOnCore)();
+
+    vPortStartSMPScheduler();

     return pdPASS;
 }
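Note (not part of the diff): the XCOREAI port additionally calls an optional per-core init hook when configUSE_CORE_INIT_HOOK is 1. An application that wants it would provide something like the following sketch; the body is illustrative.

    /* Application code, with configUSE_CORE_INIT_HOOK set to 1 in FreeRTOSConfig.h. */
    void vApplicationCoreInitHook( BaseType_t xCoreID )
    {
        /* Runs once on each core during scheduler start; do per-core setup here. */
        debug_printf( "Core init hook called for core %d\n", ( int ) xCoreID );
    }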
portable/XCC/XCOREAI/port.xc (new file, 26 lines)

@@ -0,0 +1,26 @@
+/*
+ * port.xc
+ *
+ *  Created on: Jul 31, 2019
+ *      Author: mbruno
+ */
+
+//#include "rtos_support.h"
+
+extern "C" {
+
+#include "FreeRTOSConfig.h" /* to get configNUM_CORES */
+#ifndef configNUM_CORES
+#define configNUM_CORES 1
+#endif
+
+void __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(void);
+
+} /* extern "C" */
+
+void vPortStartSMPScheduler( void )
+{
+    par (int i = 0; i < configNUM_CORES; i++) {
+        __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore();
+    }
+}
@@ -21,10 +21,13 @@ kexcept:
     bau r11 //_TrapHandler is at 0x00080080. TODO: Is it always? Why can't I access the symbol _TrapHandler?

 _yield:
-    set sp, r4             /* Restore the task's SP to save the rest of its context. */
-    bu _yield_continue     /* Skip the ulPortYieldRequired check and jump right to */
-                           /* the context save and switch. Also skips saving SPC */
-                           /* since the kcall handler has already saved it. */
+    {set sp, r4            /* Restore the task's SP to save the rest of its context. */
+    get r11, id}           /* Get the logical core ID into r11. */
+    ldaw r0, dp[rtos_core_map]
+    ldw r0, r0[r11]        /* Translate to the RTOS core ID into r0 */
+    bu _yield_continue     /* Skip the ulPortYieldRequired check and jump right to */
+                           /* the context save and switch. Also skips saving SPC */
+                           /* since the kcall handler has already saved it. */

 .align 64
 kcall:

@@ -68,7 +71,7 @@ rtos_interrupt_callback_common:
     {stw r4, sp[12]
     /*stw r11, sp[19] already saved by the wrapper function. */

-    ldaw r4, sp[0]}        /* Get value of current stackpointer into r4 */
+    ldaw r4, sp[0]}        /* Get value of current stackpointer into r4. */

     {kentsp 0              /* switch to the kernel stack. */
                            /* The value 0 is safe to use since we don't need the SP */

@@ -78,12 +81,16 @@ rtos_interrupt_callback_common:
     {mov r0, r11           /* into the first argument for the callback function... */
     bla r1}                /* and call the callback function. */

-    set sp, r4             /* Restore the task's SP now. */
+    {set sp, r4            /* Restore the task's SP now. */
+    get r11, id}           /* Get the logical core ID into r11. */
+    ldaw r0, dp[rtos_core_map]
+    ldw r0, r0[r11]        /* Translate to the RTOS core ID into r0. */

-    ldw r0, dp[ulPortYieldRequired] /* Is a yield required? */
-    {bf r0, _freertos_restore_ctx_partial /* If not, restore the context now. */
-    ldc r0, 0}
-    stw r0, dp[ulPortYieldRequired] /* Otherwise, clear the yield required flag. */
+    ldaw r2, dp[ulPortYieldRequired] /* Get the yield required array into r2. */
+    ldw r1, r2[r0]         /* Is a yield required for this core? */
+    {bf r1, _freertos_restore_ctx_partial /* If not, restore the context now. */
+    ldc r1, 0}
+    stw r1, r2[r0]         /* Otherwise, clear the yield required flag. */

     /* Save the rest of the current task's context. */

@@ -113,14 +120,17 @@ _yield_continue:
     ldaw r11, sp[37]}
     vstc r11[0]
 #endif
-    ldw r0, dp[pxCurrentTCB] /* Save the current task's SP to the first */
-    stw r4, r0[0x0]        /* word (top of stack) in the current TCB */
+    ldaw r5, dp[pxCurrentTCBs] /* Get the current TCB array into r5. */
+    ldw r1, r5[r0]         /* Get this core's current TCB pointer into r1. */
+    stw r4, r1[0x0]        /* Save the current task's SP to the first */
+                           /* word (top of stack) in the current TCB. */

-    kentsp 0               /* switch back to the kernel stack. */
+    {kentsp 0              /* switch back to the kernel stack. */
+    mov r6, r0}            /* copy the RTOS core ID into r6 so we don't lose it. */
     ldap r11, vTaskSwitchContext
-    bla r11                /* Finally call vTaskSwitchContext() now that the task's */
-                           /* entire context is saved. */
+    bla r11                /* Finally call vTaskSwitchContext(core_id) now that the task's */
+                           /* entire context is saved. Note the core id in r0 is the argument. */

     //krestsp 0            /* unnecessary since KSP is already set and the SP */
                            /* is being restored next from the current TCB. */

@@ -128,7 +138,7 @@ _yield_continue:
 .globl _freertos_restore_ctx
 _freertos_restore_ctx:

-    ldw r0, dp[pxCurrentTCB]
+    ldw r0, r5[r6]         /* get this core's current TCB pointer into r0 */
     ldw r0, r0[0x0]        /* Get the top of the stack from the current TCB... */
     set sp, r0             /* into the stack pointer register. */
@@ -46,9 +46,9 @@ typedef uint32_t UBaseType_t;
 #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
 #define portBYTE_ALIGNMENT 8
 #define portCRITICAL_NESTING_IN_TCB 1
-#ifdef configNUM_CORES
-#warning configNUM_CORES should not be defined when using the single core XCORE port
-#undef configNUM_CORES
+#define portMAX_CORE_COUNT 8
+#ifndef configNUM_CORES
+#define configNUM_CORES 1
 #endif

 /* This may be set to zero in the config file if the rtos_time
@@ -67,6 +67,12 @@ functions are not needed or if it is incremented elsewhere. */
 #define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH

 #ifndef __ASSEMBLER__
+
+/* Check validity of number of cores specified in config */
+#if ( configNUM_CORES < 1 || portMAX_CORE_COUNT < configNUM_CORES )
+#error "Invalid number of cores specified in config!"
+#endif
+
 #define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER()
 #define portTASK_STACK_DEPTH(pxTaskCode) RTOS_THREAD_STACK_SIZE(pxTaskCode)
 /*-----------------------------------------------------------*/

@@ -79,17 +85,24 @@ do \
 { \
     if( xSwitchRequired != pdFALSE ) \
     { \
-        extern uint32_t ulPortYieldRequired; \
-        ulPortYieldRequired = pdTRUE; \
+        extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \
+        ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \
     } \
 } while( 0 )

 #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
 /*-----------------------------------------------------------*/

+/* SMP utilities. */
+#define portGET_CORE_ID() rtos_core_id_get()
+
+void vPortYieldOtherCore( int xOtherCoreID );
+#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
+/*-----------------------------------------------------------*/
+
 /* Architecture specific optimisations. */
 #ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
-#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
 #endif

 #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
@@ -108,10 +121,16 @@ do \
 /* Critical section management. */

 #define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get()
+
+/*
+ * This differs from the standard portDISABLE_INTERRUPTS()
+ * in that it also returns what the interrupt state was
+ * before it disabling interrupts.
+ */
 #define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all()

 #define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all()
-#define portSET_INTERRUPT_MASK_FROM_ISR() 0
-#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x)

 /*
 * Will enable interrupts if ulState is non-zero.
 */

@@ -122,12 +141,43 @@ do \
 * ISR or otherwise in kernel mode.
 */
 #define portCHECK_IF_IN_ISR() rtos_isr_running()

 #define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 )

+#define portGET_ISR_LOCK() rtos_lock_acquire(0)
+#define portRELEASE_ISR_LOCK() rtos_lock_release(0)
+#define portGET_TASK_LOCK() rtos_lock_acquire(1)
+#define portRELEASE_TASK_LOCK() rtos_lock_release(1)
+
 void vTaskEnterCritical(void);
 void vTaskExitCritical(void);
 #define portENTER_CRITICAL() vTaskEnterCritical()
 #define portEXIT_CRITICAL() vTaskExitCritical()

+/*
+ * vTaskEnterCritical() has been modified to be safe to use
+ * from within ISRs. The previous mask does not need to be
+ * returned since in the xCORE interrupts are always disabled
+ * in ISRs. Effectively this call just grabs the kernel lock
+ * when called from an ISR.
+ */
+static inline uint32_t portSET_INTERRUPT_MASK_FROM_ISR( void )
+{
+    vTaskEnterCritical();
+    return 0;
+}
+#define portSET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR()
+
+/*
+ * vTaskExitCritical() has been modified to be safe to use
+ * from within ISRs. When the nesting level has reached zero
+ * it releases the lock, but when called from within an ISR
+ * it will *not* re-enable interrupts since it is assumed they
+ * were previously disabled. Thus the previous state in x is
+ * unused.
+ */
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vTaskExitCritical()
+
 /*-----------------------------------------------------------*/

 /* Runtime stats support */
@@ -35,7 +35,10 @@
 * The RTOS provided handler that should run when a
 * core receives an intercore interrupt request.
 */
-#define RTOS_INTERCORE_INTERRUPT_ISR()
+#define RTOS_INTERCORE_INTERRUPT_ISR() do { \
+    void vIntercoreInterruptISR( void ); \
+    vIntercoreInterruptISR(); \
+} while ( 0 )

 /**
 * The number of hardware locks that the RTOS

@@ -45,7 +48,7 @@
 * Note that the IRQ routines require a lock and
 * will share the first one with the RTOS.
 */
-#define RTOS_LOCK_COUNT 0
+#define RTOS_LOCK_COUNT 2

 /**
 * Remaps all calls to debug_printf() to rtos_printf().

@@ -64,6 +67,15 @@
 #endif
 #define DEBUG_PRINT_ENABLE 1

+#ifndef configTASKS_DEBUG
+#define configTASKS_DEBUG 0
+#endif
+#if configTASKS_DEBUG == 1
+#define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1
+#else
+#define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1
+#endif
+
 #else /* configENABLE_DEBUG_PRINTF */

 /* ensure that debug_printf is disabled */
queue.c (10 changed lines)

@@ -87,7 +87,7 @@ typedef struct SemaphoreData
 * performed just because a higher priority task has been woken. */
 #define queueYIELD_IF_USING_PREEMPTION()
 #else
-#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
+#define queueYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI()
 #endif

 /*

@@ -957,7 +957,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 * is also a higher priority task in the pending ready list. */
 if( xTaskResumeAll() == pdFALSE )
 {
-    portYIELD_WITHIN_API();
+    vTaskYieldWithinAPI();
 }
 }
 else

@@ -1422,7 +1422,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,

 if( xTaskResumeAll() == pdFALSE )
 {
-    portYIELD_WITHIN_API();
+    vTaskYieldWithinAPI();
 }
 else
 {

@@ -1614,7 +1614,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,

 if( xTaskResumeAll() == pdFALSE )
 {
-    portYIELD_WITHIN_API();
+    vTaskYieldWithinAPI();
 }
 else
 {

@@ -1792,7 +1792,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,

 if( xTaskResumeAll() == pdFALSE )
 {
-    portYIELD_WITHIN_API();
+    vTaskYieldWithinAPI();
 }
 else
 {
timers.c (3 changed lines)

@@ -1,7 +1,6 @@
 /*
 * FreeRTOS Kernel V10.4.3
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in

@@ -669,7 +668,7 @@
 * block time to expire. If a command arrived between the
 * critical section being exited and this yield then the yield
 * will not cause the task to block. */
-portYIELD_WITHIN_API();
+vTaskYieldWithinAPI();
 }
 else
 {