CI-CD Updates (#768)

* Use the new versions of the CI-CD Actions
* Use the cSpell spell check, and use ubuntu-20.04 for the formatting check
* Format and spell check all files in the portable directory
* Remove the https:// prefix from #error and #warning messages, as uncrustify attempts to change the // into /*
* Use checkout@v3 instead of checkout@v2 in all jobs
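For context on the https:// bullet above, here is a minimal sketch of the behaviour it describes; the directive and its message are hypothetical and not taken from the kernel sources.

```c
/* Hypothetical directive, for illustration only.
 *
 * Before this change a message could read:
 *
 *     #error Unsupported tick width. See https://www.FreeRTOS.org for details.
 *
 * uncrustify treats the "//" inside "https://" as a C++ comment opener and
 * tries to rewrite it as a C-style comment, corrupting the directive.
 * Dropping the scheme leaves nothing for uncrustify to rewrite: */
#error Unsupported tick width. See www.FreeRTOS.org for details.
```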
---------
Soren Ptak 2023-09-05 17:24:04 -04:00 committed by GitHub
parent d6bccb1f4c
commit 5fb9b50da8
485 changed files with 108790 additions and 107581 deletions


@ -26,39 +26,41 @@
#include "FreeRTOS.h"
#include "task.h"
extern void vPortStartTask(void);
extern void vPortStartTask( void );
/* Used to keep track of the number of nested calls to taskENTER_CRITICAL(). This
will be set to 0 prior to the first task being started. */
* will be set to 0 prior to the first task being started. */
portLONG ulCriticalNesting = 0x9999UL;
/* Used to record one tack want to swtich task after enter critical area, we need know it
* and implement task switch after exit critical area */
portLONG pendsvflag = 0;
StackType_t *pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
{
StackType_t *stk = NULL;
StackType_t * stk = NULL;
stk = pxTopOfStack;
*(--stk) = (uint32_t)pxCode; /* Entry Point */
*(--stk) = (uint32_t)0xE0000140L; /* PSR */
*(--stk) = (uint32_t)0xFFFFFFFEL; /* R15 (LR) (init value will cause fault if ever used) */
*(--stk) = (uint32_t)0x13131313L; /* R13 */
*(--stk) = (uint32_t)0x12121212L; /* R12 */
*(--stk) = (uint32_t)0x11111111L; /* R11 */
*(--stk) = (uint32_t)0x10101010L; /* R10 */
*(--stk) = (uint32_t)0x09090909L; /* R9 */
*(--stk) = (uint32_t)0x08080808L; /* R8 */
*(--stk) = (uint32_t)0x07070707L; /* R7 */
*(--stk) = (uint32_t)0x06060606L; /* R6 */
*(--stk) = (uint32_t)0x05050505L; /* R5 */
*(--stk) = (uint32_t)0x04040404L; /* R4 */
*(--stk) = (uint32_t)0x03030303L; /* R3 */
*(--stk) = (uint32_t)0x02020202L; /* R2 */
*(--stk) = (uint32_t)0x01010101L; /* R1 */
*(--stk) = (uint32_t)pvParameters; /* R0 : argument */
*( --stk ) = ( uint32_t ) pxCode; /* Entry Point */
*( --stk ) = ( uint32_t ) 0xE0000140L; /* PSR */
*( --stk ) = ( uint32_t ) 0xFFFFFFFEL; /* R15 (LR) (init value will cause fault if ever used) */
*( --stk ) = ( uint32_t ) 0x13131313L; /* R13 */
*( --stk ) = ( uint32_t ) 0x12121212L; /* R12 */
*( --stk ) = ( uint32_t ) 0x11111111L; /* R11 */
*( --stk ) = ( uint32_t ) 0x10101010L; /* R10 */
*( --stk ) = ( uint32_t ) 0x09090909L; /* R9 */
*( --stk ) = ( uint32_t ) 0x08080808L; /* R8 */
*( --stk ) = ( uint32_t ) 0x07070707L; /* R7 */
*( --stk ) = ( uint32_t ) 0x06060606L; /* R6 */
*( --stk ) = ( uint32_t ) 0x05050505L; /* R5 */
*( --stk ) = ( uint32_t ) 0x04040404L; /* R4 */
*( --stk ) = ( uint32_t ) 0x03030303L; /* R3 */
*( --stk ) = ( uint32_t ) 0x02020202L; /* R2 */
*( --stk ) = ( uint32_t ) 0x01010101L; /* R1 */
*( --stk ) = ( uint32_t ) pvParameters; /* R0 : argument */
return stk;
}
@ -81,21 +83,25 @@ void vPortEndScheduler( void )
void vPortEnterCritical( void )
{
portDISABLE_INTERRUPTS();
ulCriticalNesting ++;
ulCriticalNesting++;
}
void vPortExitCritical( void )
{
if (ulCriticalNesting == 0) {
while(1);
if( ulCriticalNesting == 0 )
{
while( 1 )
{
}
}
ulCriticalNesting --;
if (ulCriticalNesting == 0)
ulCriticalNesting--;
if( ulCriticalNesting == 0 )
{
portENABLE_INTERRUPTS();
if (pendsvflag)
if( pendsvflag )
{
pendsvflag = 0;
portYIELD();
@ -104,30 +110,30 @@ void vPortExitCritical( void )
}
#if configUSE_PREEMPTION == 0
void xPortSysTickHandler( void )
{
portLONG ulDummy;
void xPortSysTickHandler( void )
{
portLONG ulDummy;
ulDummy = portSET_INTERRUPT_MASK_FROM_ISR();
xTaskIncrementTick();
portCLEAR_INTERRUPT_MASK_FROM_ISR( ulDummy );
}
ulDummy = portSET_INTERRUPT_MASK_FROM_ISR();
xTaskIncrementTick();
portCLEAR_INTERRUPT_MASK_FROM_ISR( ulDummy );
}
#else
void xPortSysTickHandler( void )
{
portLONG ulDummy;
ulDummy = portSET_INTERRUPT_MASK_FROM_ISR();
void xPortSysTickHandler( void )
{
if (xTaskIncrementTick() != pdFALSE)
portLONG ulDummy;
ulDummy = portSET_INTERRUPT_MASK_FROM_ISR();
{
portYIELD_FROM_ISR(pdTRUE);
if( xTaskIncrementTick() != pdFALSE )
{
portYIELD_FROM_ISR( pdTRUE );
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( ulDummy );
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( ulDummy );
}
#endif
#endif /* if configUSE_PREEMPTION == 0 */
void vPortYieldHandler( void )
{
@ -140,12 +146,17 @@ void vPortYieldHandler( void )
portCLEAR_INTERRUPT_MASK_FROM_ISR( ulSavedInterruptMask );
}
__attribute__((weak)) void vApplicationStackOverflowHook( xTaskHandle *pxTask, signed portCHAR *pcTaskName )
__attribute__( ( weak ) ) void vApplicationStackOverflowHook( xTaskHandle * pxTask,
signed portCHAR * pcTaskName )
{
for(;;);
for( ; ; )
{
}
}
__attribute__((weak)) void vApplicationMallocFailedHook( void )
__attribute__( ( weak ) ) void vApplicationMallocFailedHook( void )
{
for(;;);
for( ; ; )
{
}
}


@ -29,7 +29,7 @@
#include <stdint.h>
#include <csi_core.h>
extern void vPortYield(void);
extern void vPortYield( void );
/* *INDENT-OFF* */
#ifdef __cplusplus
@ -50,24 +50,24 @@ extern void vPortYield(void);
*/
/* Type definitions. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef void (*portvectorfunc)(void);
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef void (* portvectorfunc)( void );
#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
@ -75,10 +75,10 @@ typedef void (*portvectorfunc)(void);
/* Hardware specifics. */
#define portBYTE_ALIGNMENT 8
#define portSTACK_GROWTH -1
#define portMS_PERIOD_TICK 10
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portSTACK_GROWTH -1
#define portMS_PERIOD_TICK 10
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
static inline void vPortEnableInterrupt( void )
@ -91,69 +91,72 @@ static inline void vPortDisableInterrupt( void )
__disable_irq();
}
static inline portLONG GetCurrentPSR (void)
static inline portLONG GetCurrentPSR( void )
{
return __get_PSR();
}
static inline portLONG SaveLocalPSR (void)
static inline portLONG SaveLocalPSR( void )
{
portLONG flags = __get_PSR();
__disable_irq();
return flags;
}
static inline void RestoreLocalPSR (portLONG newMask)
static inline void RestoreLocalPSR( portLONG newMask )
{
__asm__ __volatile__(
"mtcr %0, psr \n"
:
:"r" (newMask)
:"memory"
);
__asm__ __volatile__ (
"mtcr %0, psr \n"
:
: "r" ( newMask )
: "memory"
);
}
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
extern __attribute__((naked)) void cpu_yeild(void);
extern __attribute__( ( naked ) ) void cpu_yeild( void );
#define portDISABLE_INTERRUPTS() vPortDisableInterrupt()
#define portENABLE_INTERRUPTS() vPortEnableInterrupt()
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#define portSET_INTERRUPT_MASK_FROM_ISR() SaveLocalPSR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(a) RestoreLocalPSR(a)
#define portDISABLE_INTERRUPTS() vPortDisableInterrupt()
#define portENABLE_INTERRUPTS() vPortEnableInterrupt()
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#define portSET_INTERRUPT_MASK_FROM_ISR() SaveLocalPSR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( a ) RestoreLocalPSR( a )
#define portNOP() asm("nop")
#define portNOP() asm ( "nop" )
extern portLONG ulCriticalNesting;
extern portLONG pendsvflag;
#define portYIELD() if (ulCriticalNesting == 0) \
{ \
vPortYield(); \
} \
else \
{ \
pendsvflag = 1; \
} \
portNOP();portNOP()
#define portYIELD() \
if( ulCriticalNesting == 0 ) \
{ \
vPortYield(); \
} \
else \
{ \
pendsvflag = 1; \
} \
portNOP(); portNOP()
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) __attribute__((noreturn))
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) __attribute__( ( noreturn ) )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
#define portEND_SWITCHING_ISR( xSwitchRequired ) do { \
if( xSwitchRequired != pdFALSE ) \
{ \
portYIELD(); \
} \
}while(0)
#define portEND_SWITCHING_ISR( xSwitchRequired ) \
do { \
if( xSwitchRequired != pdFALSE ) \
{ \
portYIELD(); \
} \
} while( 0 )
#define portYIELD_FROM_ISR( a ) vTaskSwitchContext()
#define portYIELD_FROM_ISR( a ) vTaskSwitchContext()


@ -78,15 +78,15 @@
#define false 0 /* false */
#endif /* false */
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.


@ -212,7 +212,7 @@ void vPortEndTask( void )
volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
#if ( portUSING_MPU_WRAPPERS == 1 )
xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
#endif
ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
@ -222,20 +222,20 @@ void vPortEndTask( void )
char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */
StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */
#endif
#if ( portCRITICAL_NESTING_IN_TCB == 1 )
UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
#endif
#if ( configUSE_MUTEXES == 1 )
UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
UBaseType_t uxMutexesHeld;
#endif
@ -248,7 +248,7 @@ void vPortEndTask( void )
#endif
#if ( configGENERATE_RUN_TIME_STATS == 1 )
uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
#endif
#if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )


@ -27,8 +27,8 @@
*/
#ifndef PORTMACRO_H
#define PORTMACRO_H
#include "embARC.h"
#define PORTMACRO_H
#include "embARC.h"
/* *INDENT-OFF* */
#ifdef __cplusplus
@ -37,9 +37,9 @@
/* *INDENT-ON* */
/* record stack high address for stack check */
#ifndef configRECORD_STACK_HIGH_ADDRESS
#define configRECORD_STACK_HIGH_ADDRESS 1
#endif
#ifndef configRECORD_STACK_HIGH_ADDRESS
#define configRECORD_STACK_HIGH_ADDRESS 1
#endif
/*-----------------------------------------------------------
* Port specific definitions.
@ -52,78 +52,78 @@
*/
/* Type definitions. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE unsigned int
#define portBASE_TYPE portLONG
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE unsigned int
#define portBASE_TYPE portLONG
#ifndef Asm
#define Asm __asm__ volatile
#endif
#ifndef Asm
#define Asm __asm__ volatile
#endif
/*
* normal constants
*/
#ifndef NULL
#define NULL 0 /* invalid pointer */
#endif /* NULL */
#ifndef NULL
#define NULL 0 /* invalid pointer */
#endif /* NULL */
#ifndef true
#define true 1 /* true */
#endif /* true */
#ifndef true
#define true 1 /* true */
#endif /* true */
#ifndef false
#define false 0 /* false */
#endif /* false */
#ifndef false
#define false 0 /* false */
#endif /* false */
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
#define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 )
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portNOP() Asm( "nop_s" );
#define IPM_ENABLE_ALL 1
#define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 )
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portNOP() Asm( "nop_s" );
#define IPM_ENABLE_ALL 1
#define portYIELD_FROM_ISR() vPortYieldFromIsr()
#define portYIELD() vPortYield()
#define portYIELD_FROM_ISR() vPortYieldFromIsr()
#define portYIELD() vPortYield()
/* Critical section management. */
#define portDISABLE_INTERRUPTS() \
{ \
arc_lock(); \
} \
#define portDISABLE_INTERRUPTS() \
{ \
arc_lock(); \
} \
#define portENABLE_INTERRUPTS() \
{ \
arc_unlock(); \
} \
#define portENABLE_INTERRUPTS() \
{ \
arc_unlock(); \
} \
extern volatile unsigned int ulCriticalNesting;
extern volatile unsigned int ulCriticalNesting;
#define portENTER_CRITICAL() \
#define portENTER_CRITICAL() \
{ \
portDISABLE_INTERRUPTS() \
ulCriticalNesting++; \
}
#define portEXIT_CRITICAL() \
#define portEXIT_CRITICAL() \
{ \
if( ulCriticalNesting > portNO_CRITICAL_NESTING ) \
{ \
@ -136,14 +136,14 @@
}
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() do {} while( 0 ) /* we use the timer */
#define portALT_GET_RUN_TIME_COUNTER_VALUE( dest ) ( dest = xTickCount )
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() do {} while( 0 ) /* we use the timer */
#define portALT_GET_RUN_TIME_COUNTER_VALUE( dest ) ( dest = xTickCount )
void vPortYield( void );
void vPortYieldFromIsr( void );
void vPortYield( void );
void vPortYieldFromIsr( void );
/* *INDENT-OFF* */
#ifdef __cplusplus


@ -32,7 +32,7 @@ See the [link](https://git.trustedfirmware.org/TF-M/trusted-firmware-m.git/) to
### Build TF-M
Please refer to this [link](https://tf-m-user-guide.trustedfirmware.org/docs/technical_references/instructions/tfm_build_instruction.html) to build the secure side.
Please refer to this [link](https://developer.nordicsemi.com/nRF_Connect_SDK/doc/latest/tfm/building/tfm_build_instruction.html) to build the secure side.
_**Note:** ```TFM_NS_MANAGE_NSID``` must be configured as "OFF" when building TF-M_.
## Build the Non-Secure Side


@ -34,58 +34,72 @@
#include "semphr.h"
#include "mpu_wrappers.h"
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
/*
* In the static allocation, the RAM is required to hold the semaphore's
* state.
*/
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
/*
* In the static allocation, the RAM is required to hold the semaphore's
* state.
*/
StaticSemaphore_t xSecureMutexBuffer;
#endif
void * os_wrapper_mutex_create( void )
{
SemaphoreHandle_t xMutexHandle = NULL;
SemaphoreHandle_t xMutexHandle = NULL;
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
xMutexHandle = xSemaphoreCreateMutex();
#elif( configSUPPORT_STATIC_ALLOCATION == 1 )
xMutexHandle = xSemaphoreCreateMutexStatic( &xSecureMutexBuffer );
#endif
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
xMutexHandle = xSemaphoreCreateMutex();
#elif ( configSUPPORT_STATIC_ALLOCATION == 1 )
xMutexHandle = xSemaphoreCreateMutexStatic( &xSecureMutexBuffer );
#endif
return ( void * ) xMutexHandle;
}
/*-----------------------------------------------------------*/
uint32_t os_wrapper_mutex_acquire( void * handle, uint32_t timeout )
uint32_t os_wrapper_mutex_acquire( void * handle,
uint32_t timeout )
{
BaseType_t xRet;
BaseType_t xRet;
if( ! handle )
if( !handle )
{
return OS_WRAPPER_ERROR;
}
xRet = xSemaphoreTake( ( SemaphoreHandle_t ) handle,
( timeout == OS_WRAPPER_WAIT_FOREVER ) ?
portMAX_DELAY : ( TickType_t ) timeout );
if( xRet != pdPASS )
{
return OS_WRAPPER_ERROR;
}
else
{
return OS_WRAPPER_SUCCESS;
}
}
/*-----------------------------------------------------------*/
uint32_t os_wrapper_mutex_release( void * handle )
{
BaseType_t xRet;
BaseType_t xRet;
if( !handle )
{
return OS_WRAPPER_ERROR;
}
xRet = xSemaphoreGive( ( SemaphoreHandle_t ) handle );
if( xRet != pdPASS )
{
return OS_WRAPPER_ERROR;
}
else
{
return OS_WRAPPER_SUCCESS;
}
}
/*-----------------------------------------------------------*/

File diff suppressed because it is too large.


@ -24,7 +24,7 @@
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
*/
#ifndef PORTMACRO_H
#define PORTMACRO_H
@ -48,23 +48,23 @@
#include <avr/wdt.h>
/* Type definitions. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT int
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT int
#define portPOINTER_SIZE_TYPE uint16_t
typedef uint8_t StackType_t;
typedef int8_t BaseType_t;
typedef uint8_t UBaseType_t;
typedef uint8_t StackType_t;
typedef int8_t BaseType_t;
typedef uint8_t UBaseType_t;
#if configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
@ -73,23 +73,25 @@ typedef uint8_t UBaseType_t;
/* Critical section management. */
#define portENTER_CRITICAL() __asm__ __volatile__ ( \
"in __tmp_reg__, __SREG__" "\n\t" \
"cli" "\n\t" \
"push __tmp_reg__" "\n\t" \
::: "memory" \
)
#define portENTER_CRITICAL() \
__asm__ __volatile__ ( \
"in __tmp_reg__, __SREG__" "\n\t" \
"cli" "\n\t" \
"push __tmp_reg__" "\n\t" \
::: "memory" \
)
#define portEXIT_CRITICAL() __asm__ __volatile__ ( \
"pop __tmp_reg__" "\n\t" \
"out __SREG__, __tmp_reg__" "\n\t" \
::: "memory" \
)
#define portEXIT_CRITICAL() \
__asm__ __volatile__ ( \
"pop __tmp_reg__" "\n\t" \
"out __SREG__, __tmp_reg__" "\n\t" \
::: "memory" \
)
#define portDISABLE_INTERRUPTS() __asm__ __volatile__ ( "cli" ::: "memory")
#define portENABLE_INTERRUPTS() __asm__ __volatile__ ( "sei" ::: "memory")
#define portDISABLE_INTERRUPTS() __asm__ __volatile__ ( "cli" ::: "memory" )
#define portENABLE_INTERRUPTS() __asm__ __volatile__ ( "sei" ::: "memory" )
/*-----------------------------------------------------------*/
/* Architecture specifics. */
@ -98,27 +100,27 @@ typedef uint8_t UBaseType_t;
* Prefer to use the enhanced Watchdog Timer, but also Timer0 is ok.
*/
#if defined(WDIE) && defined(WDIF) /* If Enhanced WDT with interrupt capability is available */
#if defined( WDIE ) && defined( WDIF ) /* If Enhanced WDT with interrupt capability is available */
#define portUSE_WDTO WDTO_15MS /* use the Watchdog Timer for xTaskIncrementTick */
#define portUSE_WDTO WDTO_15MS /* use the Watchdog Timer for xTaskIncrementTick */
/* Watchdog period options: WDTO_15MS
WDTO_30MS
WDTO_60MS
WDTO_120MS
WDTO_250MS
WDTO_500MS
WDTO_1S
WDTO_2S
*/
* WDTO_30MS
* WDTO_60MS
* WDTO_120MS
* WDTO_250MS
* WDTO_500MS
* WDTO_1S
* WDTO_2S
*/
#else
#define portUSE_TIMER0 /* use the 8-bit Timer0 for xTaskIncrementTick */
#define portUSE_TIMER0 /* use the 8-bit Timer0 for xTaskIncrementTick */
#endif
#define portSTACK_GROWTH ( -1 )
#define portSTACK_GROWTH ( -1 )
/* Timing for the scheduler.
* Watchdog Timer is 128kHz nominal,
@ -126,35 +128,35 @@ typedef uint8_t UBaseType_t;
* from data sheet.
*/
#if defined( portUSE_WDTO )
#define portTICK_PERIOD_MS ( (TickType_t) _BV( portUSE_WDTO + 4 ) )
#define portTICK_PERIOD_MS ( ( TickType_t ) _BV( portUSE_WDTO + 4 ) )
#else
#define portTICK_PERIOD_MS ( (TickType_t) 1000 / configTICK_RATE_HZ )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#endif
#define portBYTE_ALIGNMENT 1
#define portNOP() __asm__ __volatile__ ( "nop" );
#define portBYTE_ALIGNMENT 1
#define portNOP() __asm__ __volatile__ ( "nop" );
/*-----------------------------------------------------------*/
/* Kernel utilities. */
extern void vPortYield( void ) __attribute__ ( ( naked ) );
#define portYIELD() vPortYield()
extern void vPortYield( void ) __attribute__( ( naked ) );
#define portYIELD() vPortYield()
extern void vPortYieldFromISR( void ) __attribute__ ( ( naked ) );
#define portYIELD_FROM_ISR() vPortYieldFromISR()
extern void vPortYieldFromISR( void ) __attribute__( ( naked ) );
#define portYIELD_FROM_ISR() vPortYieldFromISR()
/*-----------------------------------------------------------*/
#if defined(__AVR_3_BYTE_PC__)
#if defined( __AVR_3_BYTE_PC__ )
/* Task function macros as described on the FreeRTOS.org WEB site. */
/* Add .lowtext tag from the avr linker script avr6.x for ATmega2560 and ATmega2561
* to make sure functions are loaded in low memory.
*/
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) __attribute__ ((section (".lowtext")))
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) __attribute__( ( section( ".lowtext" ) ) )
#else
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#endif
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/* *INDENT-OFF* */
#ifdef __cplusplus


@ -119,10 +119,10 @@ static void prvPortYieldFromISR( void );
/*-----------------------------------------------------------*/
static void prvFatalError( const char * pcCall,
int iErrno ) __attribute__ ((__noreturn__));
int iErrno ) __attribute__( ( __noreturn__ ) );
void prvFatalError( const char * pcCall,
int iErrno )
int iErrno )
{
fprintf( stderr, "%s: %s\n", pcCall, strerror( iErrno ) );
abort();
@ -148,11 +148,11 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*/
thread = ( Thread_t * ) ( pxTopOfStack + 1 ) - 1;
pxTopOfStack = ( StackType_t * ) thread - 1;
ulStackSize = ( size_t )( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack );
ulStackSize = ( size_t ) ( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack );
#ifdef __APPLE__
pxEndOfStack = mach_vm_round_page ( pxEndOfStack );
ulStackSize = mach_vm_trunc_page ( ulStackSize );
pxEndOfStack = mach_vm_round_page( pxEndOfStack );
ulStackSize = mach_vm_trunc_page( ulStackSize );
#endif
thread->pxCode = pxCode;
@ -161,6 +161,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
pthread_attr_init( &xThreadAttributes );
iRet = pthread_attr_setstack( &xThreadAttributes, pxEndOfStack, ulStackSize );
if( iRet != 0 )
{
fprintf( stderr, "[WARN] pthread_attr_setstack failed with return value: %d. Default stack will be used.\n", iRet );
@ -336,7 +337,7 @@ UBaseType_t xPortSetInterruptMask( void )
{
/* Interrupts are always disabled inside ISRs (signals
* handlers). */
return ( UBaseType_t )0;
return ( UBaseType_t ) 0;
}
/*-----------------------------------------------------------*/
@ -352,7 +353,7 @@ static uint64_t prvGetTimeNs( void )
clock_gettime( CLOCK_MONOTONIC, &t );
return ( uint64_t )t.tv_sec * ( uint64_t )1000000000UL + ( uint64_t )t.tv_nsec;
return ( uint64_t ) t.tv_sec * ( uint64_t ) 1000000000UL + ( uint64_t ) t.tv_nsec;
}
static uint64_t prvStartTimeNs;


@ -50,71 +50,72 @@
*/
/* Type definitions. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE unsigned long
#define portBASE_TYPE long
#define portPOINTER_SIZE_TYPE intptr_t
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE unsigned long
#define portBASE_TYPE long
#define portPOINTER_SIZE_TYPE intptr_t
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef unsigned long TickType_t;
#define portMAX_DELAY ( TickType_t ) ULONG_MAX
typedef unsigned long TickType_t;
#define portMAX_DELAY ( TickType_t ) ULONG_MAX
#define portTICK_TYPE_IS_ATOMIC 1
#define portTICK_TYPE_IS_ATOMIC 1
/*-----------------------------------------------------------*/
/* Architecture specifics. */
#define portSTACK_GROWTH ( -1 )
#define portHAS_STACK_OVERFLOW_CHECKING ( 1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portTICK_RATE_MICROSECONDS ( ( TickType_t ) 1000000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portSTACK_GROWTH ( -1 )
#define portHAS_STACK_OVERFLOW_CHECKING ( 1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portTICK_RATE_MICROSECONDS ( ( TickType_t ) 1000000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
/*-----------------------------------------------------------*/
/* Scheduler utilities. */
extern void vPortYield( void );
#define portYIELD() vPortYield()
#define portYIELD() vPortYield()
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) vPortYield()
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) vPortYield( )
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/* Critical section management. */
extern void vPortDisableInterrupts( void );
extern void vPortEnableInterrupts( void );
#define portSET_INTERRUPT_MASK() ( vPortDisableInterrupts() )
#define portCLEAR_INTERRUPT_MASK() ( vPortEnableInterrupts() )
#define portSET_INTERRUPT_MASK() ( vPortDisableInterrupts() )
#define portCLEAR_INTERRUPT_MASK() ( vPortEnableInterrupts() )
extern UBaseType_t xPortSetInterruptMask( void );
extern void vPortClearInterruptMask( UBaseType_t xMask );
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
#define portSET_INTERRUPT_MASK_FROM_ISR() xPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortClearInterruptMask(x)
#define portDISABLE_INTERRUPTS() portSET_INTERRUPT_MASK()
#define portENABLE_INTERRUPTS() portCLEAR_INTERRUPT_MASK()
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#define portSET_INTERRUPT_MASK_FROM_ISR() xPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x )
#define portDISABLE_INTERRUPTS() portSET_INTERRUPT_MASK()
#define portENABLE_INTERRUPTS() portCLEAR_INTERRUPT_MASK()
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
extern void vPortThreadDying( void *pxTaskToDelete, volatile BaseType_t *pxPendYield );
extern void vPortCancelThread( void *pxTaskToDelete );
#define portPRE_TASK_DELETE_HOOK( pvTaskToDelete, pxPendYield ) vPortThreadDying( ( pvTaskToDelete ), ( pxPendYield ) )
#define portCLEAN_UP_TCB( pxTCB ) vPortCancelThread( pxTCB )
extern void vPortThreadDying( void * pxTaskToDelete,
volatile BaseType_t * pxPendYield );
extern void vPortCancelThread( void * pxTaskToDelete );
#define portPRE_TASK_DELETE_HOOK( pvTaskToDelete, pxPendYield ) vPortThreadDying( ( pvTaskToDelete ), ( pxPendYield ) )
#define portCLEAN_UP_TCB( pxTCB ) vPortCancelThread( pxTCB )
/*-----------------------------------------------------------*/
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) __attribute__( ( noreturn ) )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) __attribute__( ( noreturn ) )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
/*
@ -125,11 +126,11 @@ extern void vPortCancelThread( void *pxTaskToDelete );
* Thus, only a compilier barrier is needed to prevent the compiler
* reordering.
*/
#define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" )
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
extern uint32_t ulPortGetRunTime( void );
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* no-op */
#define portGET_RUN_TIME_COUNTER_VALUE() ulPortGetRunTime()
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* no-op */
#define portGET_RUN_TIME_COUNTER_VALUE() ulPortGetRunTime()
/* *INDENT-OFF* */
#ifdef __cplusplus


@ -77,10 +77,10 @@ bool event_wait_timed( struct event * ev,
clock_gettime( CLOCK_REALTIME, &ts );
ts.tv_sec += ms / 1000;
ts.tv_nsec += ((ms % 1000) * 1000000);
ts.tv_nsec += ( ( ms % 1000 ) * 1000000 );
pthread_mutex_lock( &ev->mutex );
while( (ev->event_triggered == false) && (ret == 0) )
while( ( ev->event_triggered == false ) && ( ret == 0 ) )
{
ret = pthread_cond_timedwait( &ev->cond, &ev->mutex, &ts );


@ -11,23 +11,23 @@ more efficient to use the non SMP version in the main FreeRTOS-Kernel branch in
## Using this port
You can copy [FreeRTOS-Kernel-import.cmake](FreeRTOS-Kernel-import.cmake) into your project, and
You can copy [FreeRTOS_Kernel_import.cmake](FreeRTOS_Kernel_import.cmake) into your project, and
add the following in your `CMakeLists.txt`:
```cmake
include(FreeRTOS_Kernel_import.cmake)
```
This will locate the FreeRTOS kernel if it is a direct sub-module of your project, or if you provide the
This will locate the FreeRTOS kernel if it is a direct sub-module of your project, or if you provide the
`FREERTOS_KERNEL_PATH` variable in your environment or via `-DFREERTOS_KERNEL_PATH=/path/to/FreeRTOS-Kernel` on the CMake command line.
**NOTE:** If you are using version 1.3.1 or older of the Raspberry Pi Pico SDK then this line must appear before the
`pico_sdk_init()` and will cause FreeRTOS to be included/required in all RP2040 targets in your project. After this SDK
version, you can include the FreeRTOS-Kernel support later in your CMake build (possibly in a subdirectory) and the
**NOTE:** If you are using version 1.3.1 or older of the Raspberry Pi Pico SDK then this line must appear before the
`pico_sdk_init()` and will cause FreeRTOS to be included/required in all RP2040 targets in your project. After this SDK
version, you can include the FreeRTOS-Kernel support later in your CMake build (possibly in a subdirectory) and the
FreeRTOS-Kernel support will only apply to those targets which explicitly include FreeRTOS support.
As an alternative to the `import` statement above, you can just add this directory directly via thw following (with
the same placement restrictions related to the Raspberry Pi Pico SDK version above):
As an alternative to the `import` statement above, you can just add this directory directly via thw following (with
the same placement restrictions related to the Raspberry Pi Pico SDK version above):
```cmake
add_subdirectory(path/to/this/directory FreeRTOS-Kernel)


@ -28,25 +28,25 @@
#include "FreeRTOS.h"
void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer,
StackType_t **ppxIdleTaskStackBuffer,
uint32_t *pulIdleTaskStackSize )
void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
StackType_t ** ppxIdleTaskStackBuffer,
uint32_t * pulIdleTaskStackSize )
{
/* If the buffers to be provided to the Idle task are declared inside this
function then they must be declared static - otherwise they will be allocated on
the stack and so not exists after this function exits. */
* function then they must be declared static - otherwise they will be allocated on
* the stack and so not exists after this function exits. */
static StaticTask_t xIdleTaskTCB;
static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];
/* Pass out a pointer to the StaticTask_t structure in which the Idle task's
state will be stored. */
* state will be stored. */
*ppxIdleTaskTCBBuffer = &xIdleTaskTCB;
/* Pass out the array that will be used as the Idle task's stack. */
*ppxIdleTaskStackBuffer = uxIdleTaskStack;
/* Pass out the size of the array pointed to by *ppxIdleTaskStackBuffer.
Note that, as the array is necessarily of type StackType_t,
configMINIMAL_STACK_SIZE is specified in words, not bytes. */
* Note that, as the array is necessarily of type StackType_t,
* configMINIMAL_STACK_SIZE is specified in words, not bytes. */
*pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
}


@ -33,41 +33,45 @@
#include "FreeRTOSConfig.h"
#include "rp2040_config.h"
#ifndef PICO_USE_MALLOC_MUTEX
// malloc needs to be made thread safe
#define PICO_USE_MALLOC_MUTEX 1
/* malloc needs to be made thread safe */
#define PICO_USE_MALLOC_MUTEX 1
#endif /* PICO_USE_MALLOC_MUTEX */
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
// increase the amount of time it may reasonably take to wake us up
/* increase the amount of time it may reasonably take to wake us up */
#ifndef PICO_TIME_SLEEP_OVERHEAD_ADJUST_US
#define PICO_TIME_SLEEP_OVERHEAD_ADJUST_US 150
#define PICO_TIME_SLEEP_OVERHEAD_ADJUST_US 150
#endif
#define lock_owner_id_t uint32_t
extern uint32_t ulPortLockGetCurrentOwnerId(void);
#define lock_get_caller_owner_id() ulPortLockGetCurrentOwnerId()
#define LOCK_INVALID_OWNER_ID ((uint32_t)-1)
#define lock_owner_id_t uint32_t
extern uint32_t ulPortLockGetCurrentOwnerId( void );
#define lock_get_caller_owner_id() ulPortLockGetCurrentOwnerId()
#define LOCK_INVALID_OWNER_ID ( ( uint32_t ) -1 )
struct lock_core;
#ifndef lock_internal_spin_unlock_with_wait
extern void vPortLockInternalSpinUnlockWithWait( struct lock_core *pxLock, uint32_t ulSave);
#define lock_internal_spin_unlock_with_wait(lock, save) vPortLockInternalSpinUnlockWithWait(lock, save)
extern void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock,
uint32_t ulSave );
#define lock_internal_spin_unlock_with_wait( lock, save ) vPortLockInternalSpinUnlockWithWait( lock, save )
#endif
#ifndef lock_internal_spin_unlock_with_notify
extern void vPortLockInternalSpinUnlockWithNotify( struct lock_core *pxLock, uint32_t save);
#define lock_internal_spin_unlock_with_notify(lock, save) vPortLockInternalSpinUnlockWithNotify(lock, save);
extern void vPortLockInternalSpinUnlockWithNotify( struct lock_core * pxLock,
uint32_t save );
#define lock_internal_spin_unlock_with_notify( lock, save ) vPortLockInternalSpinUnlockWithNotify( lock, save );
#endif
#ifndef lock_internal_spin_unlock_with_best_effort_wait_or_timeout
extern bool xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( struct lock_core *pxLock, uint32_t ulSave, absolute_time_t uxUntil);
#define lock_internal_spin_unlock_with_best_effort_wait_or_timeout(lock, save, until) \
xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout(lock, save, until)
extern bool xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( struct lock_core * pxLock,
uint32_t ulSave,
absolute_time_t uxUntil );
#define lock_internal_spin_unlock_with_best_effort_wait_or_timeout( lock, save, until ) \
xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( lock, save, until )
#endif
#endif /* configSUPPORT_PICO_SYNC_INTEROP */
#if ( configSUPPORT_PICO_TIME_INTEROP == 1 )
extern void xPortSyncInternalYieldUntilBefore(absolute_time_t t);
#define sync_internal_yield_until_before(t) xPortSyncInternalYieldUntilBefore(t)
extern void xPortSyncInternalYieldUntilBefore( absolute_time_t t );
#define sync_internal_yield_until_before( t ) xPortSyncInternalYieldUntilBefore( t )
#endif /* configSUPPORT_PICO_TIME_INTEROP */
#endif /* __ASSEMBLER__ */
#endif
#endif /* ifndef FREERTOS_SDK_CONFIG_H */


@ -28,7 +28,7 @@
*/
#ifndef PORTMACRO_H
#define PORTMACRO_H
#define PORTMACRO_H
/* *INDENT-OFF* */
#ifdef __cplusplus
@ -36,8 +36,8 @@
#endif
/* *INDENT-ON* */
#include "pico.h"
#include "hardware/sync.h"
#include "pico.h"
#include "hardware/sync.h"
/*-----------------------------------------------------------
* Port specific definitions.
@ -50,210 +50,225 @@
*/
/* Type definitions. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef int32_t BaseType_t;
typedef uint32_t UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef int32_t BaseType_t;
typedef uint32_t UBaseType_t;
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
#define portTICK_TYPE_IS_ATOMIC 1
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
/* Architecture specifics. */
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portDONT_DISCARD __attribute__( ( used ) )
/* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config,
* as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */
#define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS
#if portUSE_DIVIDER_SAVE_RESTORE
#define portSTACK_LIMIT_PADDING 4
#endif
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portDONT_DISCARD __attribute__( ( used ) )
/* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config,
* as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */
#define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS
#if portUSE_DIVIDER_SAVE_RESTORE
#define portSTACK_LIMIT_PADDING 4
#endif
/*-----------------------------------------------------------*/
/* Scheduler utilities. */
extern void vPortYield( void );
#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portYIELD() vPortYield()
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
extern void vPortYield( void );
#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portYIELD() vPortYield()
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/* Exception handlers */
#if (configUSE_DYNAMIC_EXCEPTION_HANDLERS == 0)
/* We only need to override the SDK's weak functions if we want to replace them at compile time */
#define vPortSVCHandler isr_svcall
#define xPortPendSVHandler isr_pendsv
#define xPortSysTickHandler isr_systick
#endif
#if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 0 )
/* We only need to override the SDK's weak functions if we want to replace them at compile time */
#define vPortSVCHandler isr_svcall
#define xPortPendSVHandler isr_pendsv
#define xPortSysTickHandler isr_systick
#endif
/*-----------------------------------------------------------*/
/* Multi-core */
#define portMAX_CORE_COUNT 2
#define portMAX_CORE_COUNT 2
/* Check validity of number of cores specified in config */
#if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES )
#error "Invalid number of cores specified in config!"
#endif
/* Check validity of number of cores specified in config */
#if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES )
#error "Invalid number of cores specified in config!"
#endif
#if ( configTICK_CORE < 0 || configTICK_CORE > configNUMBER_OF_CORES )
#error "Invalid tick core specified in config!"
#endif
/* FreeRTOS core id is always zero based, so always 0 if we're running on only one core */
#if configNUMBER_OF_CORES == portMAX_CORE_COUNT
#define portGET_CORE_ID() get_core_num()
#else
#define portGET_CORE_ID() 0
#endif
#if ( configTICK_CORE < 0 || configTICK_CORE > configNUMBER_OF_CORES )
#error "Invalid tick core specified in config!"
#endif
/* FreeRTOS core id is always zero based, so always 0 if we're running on only one core */
#if configNUMBER_OF_CORES == portMAX_CORE_COUNT
#define portGET_CORE_ID() get_core_num()
#else
#define portGET_CORE_ID() 0
#endif
#define portCHECK_IF_IN_ISR() ({ \
uint32_t ulIPSR; \
__asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \
((uint8_t)ulIPSR)>0;})
#define portCHECK_IF_IN_ISR() \
( { \
uint32_t ulIPSR; \
__asm volatile ( "mrs %0, IPSR" : "=r" ( ulIPSR )::); \
( ( uint8_t ) ulIPSR ) > 0; } )
void vYieldCore(int xCoreID);
#define portYIELD_CORE(a) vYieldCore(a)
#define portRESTORE_INTERRUPTS(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : )
void vYieldCore( int xCoreID );
#define portYIELD_CORE( a ) vYieldCore( a )
#define portRESTORE_INTERRUPTS( ulState ) __asm volatile ( "msr PRIMASK,%0" ::"r" ( ulState ) : )
/*-----------------------------------------------------------*/
/* Critical nesting count management. */
extern UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ];
#define portGET_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ] )
#define portSET_CRITICAL_NESTING_COUNT( x ) ( uxCriticalNestings[ portGET_CORE_ID() ] = ( x ) )
#define portINCREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]++ )
#define portDECREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]-- )
extern UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ];
#define portGET_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ] )
#define portSET_CRITICAL_NESTING_COUNT( x ) ( uxCriticalNestings[ portGET_CORE_ID() ] = ( x ) )
#define portINCREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]++ )
#define portDECREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]-- )
/*-----------------------------------------------------------*/
/* Critical section management. */
#define portSET_INTERRUPT_MASK() ({ \
uint32_t ulState; \
__asm volatile ("mrs %0, PRIMASK" : "=r" (ulState)::); \
__asm volatile ( " cpsid i " ::: "memory" ); \
ulState;})
#define portSET_INTERRUPT_MASK() \
( { \
uint32_t ulState; \
__asm volatile ( "mrs %0, PRIMASK" : "=r" ( ulState )::); \
__asm volatile ( " cpsid i " ::: "memory" ); \
ulState; } )
#define portCLEAR_INTERRUPT_MASK(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : )
#define portCLEAR_INTERRUPT_MASK( ulState ) __asm volatile ( "msr PRIMASK,%0" ::"r" ( ulState ) : )
extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) );
extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) );
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) );
extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) );
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
extern void vPortEnableInterrupts();
#define portENABLE_INTERRUPTS() vPortEnableInterrupts()
extern void vPortEnableInterrupts();
#define portENABLE_INTERRUPTS() vPortEnableInterrupts()
#if ( configNUMBER_OF_CORES == 1 )
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#else
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
extern UBaseType_t vTaskEnterCriticalFromISR( void );
extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
#define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR()
#define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x )
#endif
#if ( configNUMBER_OF_CORES == 1 )
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#else
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
extern UBaseType_t vTaskEnterCriticalFromISR( void );
extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
#define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR()
#define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x )
#endif /* if ( configNUMBER_OF_CORES == 1 ) */
#define portRTOS_SPINLOCK_COUNT 2
#define portRTOS_SPINLOCK_COUNT 2
/* Note this is a single method with uxAcquire parameter since we have
* static vars, the method is always called with a compile time constant for
* uxAcquire, and the compiler should dothe right thing! */
static inline void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) {
static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ];
static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ];
configASSERT( ulLockNum < portRTOS_SPINLOCK_COUNT );
uint32_t ulCoreNum = get_core_num();
uint32_t ulLockBit = 1u << ulLockNum;
configASSERT(ulLockBit < 256u );
if( uxAcquire )
/* Note this is a single method with uxAcquire parameter since we have
* static vars, the method is always called with a compile time constant for
* uxAcquire, and the compiler should dothe right thing! */
static inline void vPortRecursiveLock( uint32_t ulLockNum,
spin_lock_t * pxSpinLock,
BaseType_t uxAcquire )
{
static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ];
static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ];
configASSERT( ulLockNum < portRTOS_SPINLOCK_COUNT );
uint32_t ulCoreNum = get_core_num();
uint32_t ulLockBit = 1u << ulLockNum;
configASSERT( ulLockBit < 256u );
if( uxAcquire )
{
if( __builtin_expect( !*pxSpinLock, 0 ) )
{
if( __builtin_expect( !*pxSpinLock, 0 ) )
if( ucOwnedByCore[ ulCoreNum ] & ulLockBit )
{
if( ucOwnedByCore[ulCoreNum] & ulLockBit )
{
configASSERT(ucRecursionCountByLock[ulLockNum] != 255u );
ucRecursionCountByLock[ulLockNum]++;
return;
}
while ( __builtin_expect( !*pxSpinLock, 0 ) );
configASSERT( ucRecursionCountByLock[ ulLockNum ] != 255u );
ucRecursionCountByLock[ ulLockNum ]++;
return;
}
__mem_fence_acquire();
configASSERT(ucRecursionCountByLock[ulLockNum] == 0 );
ucRecursionCountByLock[ulLockNum] = 1;
ucOwnedByCore[ulCoreNum] |= ulLockBit;
} else {
configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 );
configASSERT(ucRecursionCountByLock[ulLockNum] != 0 );
if( !--ucRecursionCountByLock[ulLockNum] )
while( __builtin_expect( !*pxSpinLock, 0 ) )
{
ucOwnedByCore[ulCoreNum] &= ~ulLockBit;
__mem_fence_release();
*pxSpinLock = 1;
}
}
}
#if ( configNUMBER_OF_CORES == 1 )
#define portGET_ISR_LOCK()
#define portRELEASE_ISR_LOCK()
#define portGET_TASK_LOCK()
#define portRELEASE_TASK_LOCK()
#else
#define portGET_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), pdTRUE)
#define portRELEASE_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), pdFALSE)
#define portGET_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), pdTRUE)
#define portRELEASE_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), pdFALSE)
#endif
__mem_fence_acquire();
configASSERT( ucRecursionCountByLock[ ulLockNum ] == 0 );
ucRecursionCountByLock[ ulLockNum ] = 1;
ucOwnedByCore[ ulCoreNum ] |= ulLockBit;
}
else
{
configASSERT( ( ucOwnedByCore[ ulCoreNum ] & ulLockBit ) != 0 );
configASSERT( ucRecursionCountByLock[ ulLockNum ] != 0 );
if( !--ucRecursionCountByLock[ ulLockNum ] )
{
ucOwnedByCore[ ulCoreNum ] &= ~ulLockBit;
__mem_fence_release();
*pxSpinLock = 1;
}
}
}
#if ( configNUMBER_OF_CORES == 1 )
#define portGET_ISR_LOCK()
#define portRELEASE_ISR_LOCK()
#define portGET_TASK_LOCK()
#define portRELEASE_TASK_LOCK()
#else
#define portGET_ISR_LOCK() vPortRecursiveLock( 0, spin_lock_instance( configSMP_SPINLOCK_0 ), pdTRUE )
#define portRELEASE_ISR_LOCK() vPortRecursiveLock( 0, spin_lock_instance( configSMP_SPINLOCK_0 ), pdFALSE )
#define portGET_TASK_LOCK() vPortRecursiveLock( 1, spin_lock_instance( configSMP_SPINLOCK_1 ), pdTRUE )
#define portRELEASE_TASK_LOCK() vPortRecursiveLock( 1, spin_lock_instance( configSMP_SPINLOCK_1 ), pdFALSE )
#endif
/*-----------------------------------------------------------*/
/* Tickless idle/low power functionality. */
#ifndef portSUPPRESS_TICKS_AND_SLEEP
extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
#define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
#endif
#ifndef portSUPPRESS_TICKS_AND_SLEEP
extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
#define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
#endif
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portNOP()
#define portNOP()
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/* *INDENT-OFF* */
#ifdef __cplusplus


@ -40,9 +40,9 @@
*/
#ifndef configUSE_DYNAMIC_EXCEPTION_HANDLERS
#if defined( PICO_NO_RAM_VECTOR_TABLE ) && ( PICO_NO_RAM_VECTOR_TABLE == 1 )
#define configUSE_DYNAMIC_EXCEPTION_HANDLERS 0
#define configUSE_DYNAMIC_EXCEPTION_HANDLERS 0
#else
#define configUSE_DYNAMIC_EXCEPTION_HANDLERS 1
#define configUSE_DYNAMIC_EXCEPTION_HANDLERS 1
#endif
#endif
@ -51,7 +51,7 @@
*/
#ifndef configSUPPORT_PICO_SYNC_INTEROP
#if LIB_PICO_SYNC
#define configSUPPORT_PICO_SYNC_INTEROP 1
#define configSUPPORT_PICO_SYNC_INTEROP 1
#endif
#endif
@ -61,15 +61,16 @@
*/
#ifndef configSUPPORT_PICO_TIME_INTEROP
#if LIB_PICO_TIME
#define configSUPPORT_PICO_TIME_INTEROP 1
#define configSUPPORT_PICO_TIME_INTEROP 1
#endif
#endif
#if ( configNUMBER_OF_CORES > 1 )
/* configTICK_CORE indicates which core should handle the SysTick
* interrupts */
/* configTICK_CORE indicates which core should handle the SysTick
* interrupts */
#ifndef configTICK_CORE
#define configTICK_CORE 0
#define configTICK_CORE 0
#endif
#endif
@ -77,11 +78,11 @@
* the spin lock numbers to be used are defined statically and defaulted here
* to the values nominally set aside for RTOS by the SDK */
#ifndef configSMP_SPINLOCK_0
#define configSMP_SPINLOCK_0 PICO_SPINLOCK_ID_OS1
#define configSMP_SPINLOCK_0 PICO_SPINLOCK_ID_OS1
#endif
#ifndef configSMP_SPINLOCK_1
#define configSMP_SPINLOCK_1 PICO_SPINLOCK_ID_OS2
#define configSMP_SPINLOCK_1 PICO_SPINLOCK_ID_OS2
#endif
/* *INDENT-OFF* */
@ -90,4 +91,4 @@
#endif
/* *INDENT-ON* */
#endif
#endif /* ifndef RP2040_CONFIG_H */


@ -28,8 +28,8 @@
*/
/*----------------------------------------------------------------------
* Implementation of functions defined in portable.h for the RP2040 port.
*----------------------------------------------------------------------*/
* Implementation of functions defined in portable.h for the RP2040 port.
*----------------------------------------------------------------------*/
#include "FreeRTOS.h"
#include "task.h"
@ -42,12 +42,12 @@
* the non SMP FreeRTOS_Kernel is not linked with pico_multicore itself). We
* use this flag to determine if we need multi-core functionality.
*/
#if ( LIB_PICO_MULTICORE == 1)
#if ( LIB_PICO_MULTICORE == 1 )
#include "pico/multicore.h"
#endif /* LIB_PICO_MULTICORE */
/* TODO : consider removing this macro. */
#define portRUNNING_ON_BOTH_CORES ( configNUMBER_OF_CORES == portMAX_CORE_COUNT )
#define portRUNNING_ON_BOTH_CORES ( configNUMBER_OF_CORES == portMAX_CORE_COUNT )
/* Constants required to manipulate the NVIC. */
#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
@ -118,7 +118,7 @@ static void prvTaskExitError( void );
#if ( configNUMBER_OF_CORES == 1 )
static UBaseType_t uxCriticalNesting;
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
/*-----------------------------------------------------------*/
@ -129,7 +129,7 @@ static void prvTaskExitError( void );
#include "event_groups.h"
#if configSUPPORT_STATIC_ALLOCATION
static StaticEventGroup_t xStaticEventGroup;
#define pEventGroup (&xStaticEventGroup)
#define pEventGroup ( &xStaticEventGroup )
#endif /* configSUPPORT_STATIC_ALLOCATION */
static EventGroupHandle_t xEventGroup;
#if ( portRUNNING_ON_BOTH_CORES == 0 )
@ -166,15 +166,15 @@ static void prvTaskExitError( void );
/*-----------------------------------------------------------*/
#define INVALID_PRIMARY_CORE_NUM 0xffu
#define INVALID_PRIMARY_CORE_NUM 0xffu
/* The primary core number (the one which has the SysTick handler) */
static uint8_t ucPrimaryCoreNum = INVALID_PRIMARY_CORE_NUM;
/* Note: portIS_FREE_RTOS_CORE() also returns false until the scheduler is started */
#if ( portRUNNING_ON_BOTH_CORES == 1 )
#define portIS_FREE_RTOS_CORE() (ucPrimaryCoreNum != INVALID_PRIMARY_CORE_NUM)
#define portIS_FREE_RTOS_CORE() ( ucPrimaryCoreNum != INVALID_PRIMARY_CORE_NUM )
#else
#define portIS_FREE_RTOS_CORE() (ucPrimaryCoreNum == get_core_num())
#define portIS_FREE_RTOS_CORE() ( ucPrimaryCoreNum == get_core_num() )
#endif
/*
@ -218,70 +218,70 @@ void vPortSVCHandler( void )
void vPortStartFirstTask( void )
{
#if ( configNUMBER_OF_CORES == 1 )
__asm volatile (
" .syntax unified \n"
" ldr r2, pxCurrentTCBConst1 \n"/* Obtain location of pxCurrentTCB. */
" ldr r3, [r2] \n"
" ldr r0, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" movs r0, #2 \n"/* Switch to the psp stack. */
" msr CONTROL, r0 \n"
" isb \n"
" pop {r0-r5} \n"/* Pop the registers that are saved automatically. */
" mov lr, r5 \n"/* lr is now in r5. */
" pop {r3} \n"/* Return address is now in r3. */
" pop {r2} \n"/* Pop and discard XPSR. */
" cpsie i \n"/* The first task has its context and interrupts can be enabled. */
" bx r3 \n"/* Finally, jump to the user defined task code. */
" .align 4 \n"
"pxCurrentTCBConst1: .word pxCurrentTCB\n"
);
#else
__asm volatile (
" .syntax unified \n"
#if configRESET_STACK_POINTER
" ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"
" ldr r0, [r0] \n"
" msr msp, r0 \n" /* Set the msp back to the start of the stack. */
#endif /* configRESET_STACK_POINTER */
#if portRUNNING_ON_BOTH_CORES
" adr r1, ulAsmLocals \n"/* Get the location of the current TCB for the current core. */
" ldmia r1!, {r2, r3} \n"
" ldr r2, [r2] \n"/* r2 = Core number */
" lsls r2, #2 \n"
" ldr r3, [r3, r2] \n"/* r3 = pxCurrentTCBs[get_core_num()] */
#else
" ldr r3, =pxCurrentTCBs \n"
" ldr r3, [r3] \n" /* r3 = pxCurrentTCBs[0] */
#endif /* portRUNNING_ON_BOTH_CORES */
" ldr r0, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" movs r0, #2 \n"/* Switch to the psp stack. */
" msr CONTROL, r0 \n"
" isb \n"
" pop {r0-r5} \n"/* Pop the registers that are saved automatically. */
" mov lr, r5 \n"/* lr is now in r5. */
" pop {r3} \n"/* Return address is now in r3. */
" pop {r2} \n"/* Pop and discard XPSR. */
" cpsie i \n"/* The first task has its context and interrupts can be enabled. */
" bx r3 \n"/* Finally, jump to the user defined task code. */
#if portRUNNING_ON_BOTH_CORES
" \n"
" .align 4 \n"
"ulAsmLocals: \n"
" .word 0xD0000000 \n"/* SIO */
" .word pxCurrentTCBs \n"
#endif /* portRUNNING_ON_BOTH_CORES */
);
#endif
#if ( configNUMBER_OF_CORES == 1 )
__asm volatile (
" .syntax unified \n"
" ldr r2, pxCurrentTCBConst1 \n" /* Obtain location of pxCurrentTCB. */
" ldr r3, [r2] \n"
" ldr r0, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" movs r0, #2 \n" /* Switch to the psp stack. */
" msr CONTROL, r0 \n"
" isb \n"
" pop {r0-r5} \n" /* Pop the registers that are saved automatically. */
" mov lr, r5 \n" /* lr is now in r5. */
" pop {r3} \n" /* Return address is now in r3. */
" pop {r2} \n" /* Pop and discard XPSR. */
" cpsie i \n" /* The first task has its context and interrupts can be enabled. */
" bx r3 \n" /* Finally, jump to the user defined task code. */
" .align 4 \n"
"pxCurrentTCBConst1: .word pxCurrentTCB\n"
);
#else /* if ( configNUMBER_OF_CORES == 1 ) */
__asm volatile (
" .syntax unified \n"
#if configRESET_STACK_POINTER
" ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"
" ldr r0, [r0] \n"
" msr msp, r0 \n" /* Set the msp back to the start of the stack. */
#endif /* configRESET_STACK_POINTER */
#if portRUNNING_ON_BOTH_CORES
" adr r1, ulAsmLocals \n" /* Get the location of the current TCB for the current core. */
" ldmia r1!, {r2, r3} \n"
" ldr r2, [r2] \n" /* r2 = Core number */
" lsls r2, #2 \n"
" ldr r3, [r3, r2] \n" /* r3 = pxCurrentTCBs[get_core_num()] */
#else
" ldr r3, =pxCurrentTCBs \n"
" ldr r3, [r3] \n" /* r3 = pxCurrentTCBs[0] */
#endif /* portRUNNING_ON_BOTH_CORES */
" ldr r0, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" movs r0, #2 \n" /* Switch to the psp stack. */
" msr CONTROL, r0 \n"
" isb \n"
" pop {r0-r5} \n" /* Pop the registers that are saved automatically. */
" mov lr, r5 \n" /* lr is now in r5. */
" pop {r3} \n" /* Return address is now in r3. */
" pop {r2} \n" /* Pop and discard XPSR. */
" cpsie i \n" /* The first task has its context and interrupts can be enabled. */
" bx r3 \n" /* Finally, jump to the user defined task code. */
#if portRUNNING_ON_BOTH_CORES
" \n"
" .align 4 \n"
"ulAsmLocals: \n"
" .word 0xD0000000 \n" /* SIO */
" .word pxCurrentTCBs \n"
#endif /* portRUNNING_ON_BOTH_CORES */
);
#endif /* if ( configNUMBER_OF_CORES == 1 ) */
}
/*-----------------------------------------------------------*/
#if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1)
#if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
static void prvFIFOInterruptHandler()
{
/* We must remove the contents (which we don't care about)
@ -303,15 +303,16 @@ void vPortStartFirstTask( void )
portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
#endif /* portRUNNING_ON_BOTH_CORES */
}
#endif
#endif /* if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) */
#if ( configNUMBER_OF_CORES > 1 )
/*
* See header file for description.
*/
/*
* See header file for description.
*/
static BaseType_t xPortStartSchedulerOnCore()
{
if( ucPrimaryCoreNum == get_core_num())
if( ucPrimaryCoreNum == get_core_num() )
{
/* Start the timer that generates the tick ISR. Interrupts are disabled
* here already. */
@ -363,9 +364,9 @@ void vPortStartFirstTask( void )
}
#endif
/*
* See header file for description.
*/
/*
* See header file for description.
*/
BaseType_t xPortStartScheduler( void )
{
configASSERT( ucPrimaryCoreNum == INVALID_PRIMARY_CORE_NUM );
@ -376,7 +377,7 @@ void vPortStartFirstTask( void )
#if portRUNNING_ON_BOTH_CORES
ucPrimaryCoreNum = configTICK_CORE;
configASSERT( get_core_num() == 0) ; // we must be started on core 0
configASSERT( get_core_num() == 0 ); /* we must be started on core 0 */
multicore_launch_core1( prvDisableInterruptsAndPortStartSchedulerOnCore );
#else
ucPrimaryCoreNum = get_core_num();
@ -387,17 +388,18 @@ void vPortStartFirstTask( void )
return 0;
}
#else
/*
* See header file for description.
*/
#else /* if ( configNUMBER_OF_CORES > 1 ) */
/*
* See header file for description.
*/
BaseType_t xPortStartScheduler( void )
{
/* Make PendSV, CallSV and SysTick the same priority as the kernel. */
portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
#if (configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1)
#if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler );
exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler );
exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler );
@ -411,8 +413,8 @@ void vPortStartFirstTask( void )
uxCriticalNesting = 0;
ucPrimaryCoreNum = get_core_num();
#if (LIB_PICO_MULTICORE == 1)
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1)
#if ( LIB_PICO_MULTICORE == 1 )
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
multicore_fifo_clear_irq();
multicore_fifo_drain();
uint32_t irq_num = 15 + get_core_num();
@ -437,7 +439,7 @@ void vPortStartFirstTask( void )
/* Should not get here! */
return 0;
}
#endif
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/
@ -451,6 +453,7 @@ void vPortEndScheduler( void )
void vPortYield( void )
{
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
/* We are not in an ISR, and pxYieldSpinLock is always dealt with and
* cleared when interrupts are re-enabled, so should be NULL */
configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL );
@ -483,6 +486,7 @@ void vPortYield( void )
{
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
if( uxCriticalNesting == 0 )
{
portENABLE_INTERRUPTS();
@ -494,11 +498,12 @@ void vPortEnableInterrupts( void )
{
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
int xCoreID = ( int ) portGET_CORE_ID();
if( pxYieldSpinLock[xCoreID] )
if( pxYieldSpinLock[ xCoreID ] )
{
spin_lock_t* const pxTmpLock = pxYieldSpinLock[xCoreID];
pxYieldSpinLock[xCoreID] = NULL;
spin_unlock( pxTmpLock, ulYieldSpinLockSaveValue[xCoreID] );
spin_lock_t * const pxTmpLock = pxYieldSpinLock[ xCoreID ];
pxYieldSpinLock[ xCoreID ] = NULL;
spin_unlock( pxTmpLock, ulYieldSpinLockSaveValue[ xCoreID ] );
}
#endif
__asm volatile ( " cpsie i " ::: "memory" );
@ -537,8 +542,9 @@ void vYieldCore( int xCoreID )
configASSERT( xCoreID != ( int ) portGET_CORE_ID() );
#if portRUNNING_ON_BOTH_CORES
/* Non blocking, will cause interrupt on other core if the queue isn't already full,
in which case an IRQ must be pending */
* in which case an IRQ must be pending */
sio_hw->fifo_wr = 0;
#endif
}
@ -548,179 +554,185 @@ void vYieldCore( int xCoreID )
void xPortPendSVHandler( void )
{
/* This is a naked function. */
#if ( configNUMBER_OF_CORES == 1 )
__asm volatile
(
" .syntax unified \n"
" mrs r0, psp \n"
" \n"
" ldr r3, pxCurrentTCBConst2 \n"/* Get the location of the current TCB. */
" ldr r2, [r3] \n"
" \n"
" subs r0, r0, #32 \n"/* Make space for the remaining low registers. */
" str r0, [r2] \n"/* Save the new top of stack. */
" stmia r0!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */
" mov r4, r8 \n"/* Store the high registers. */
" mov r5, r9 \n"
" mov r6, r10 \n"
" mov r7, r11 \n"
" stmia r0!, {r4-r7} \n"
#if portUSE_DIVIDER_SAVE_RESTORE
" movs r2, #0xd \n"/* Store the divider state. */
" lsls r2, #28 \n"
/* We expect that the divider is ready at this point (which is
* necessary to safely save/restore), because:
* a) if we have not been interrupted since we entered this method,
* then >8 cycles have clearly passed, so the divider is done
* b) if we were interrupted in the interim, then any "safe" - i.e.
* does the right thing in an IRQ - use of the divider should
* have waited for any in-process divide to complete, saved and
* then fully restored the result, thus the result is ready in
* that case too. */
" ldr r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */
" ldr r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */
" ldr r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */
" ldr r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */
/* We actually save the divider state in the 4 words below
* our recorded stack pointer, so as not to disrupt the stack
* frame expected by debuggers - this is addressed by
* portEXTRA_STACK_SIZE */
" subs r0, r0, #48 \n"
" stmia r0!, {r4-r7} \n"
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
" push {r3, r14} \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" pop {r2, r3} \n"/* lr goes in r3. r2 now holds tcb pointer. */
" \n"
" ldr r1, [r2] \n"
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, r0, #16 \n"/* Move to the high registers. */
" ldmia r0!, {r4-r7} \n"/* Pop the high registers. */
" mov r8, r4 \n"
" mov r9, r5 \n"
" mov r10, r6 \n"
" mov r11, r7 \n"
" \n"
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" \n"
#if portUSE_DIVIDER_SAVE_RESTORE
" movs r2, #0xd \n"/* Pop the divider state. */
" lsls r2, #28 \n"
" subs r0, r0, #48 \n"/* Go back for the divider state */
" ldmia r0!, {r4-r7} \n"/* Pop the divider state. */
/* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the
* results stopping the calculation anyway, however the sign of results
* is adjusted by the h/w at read time based on whether the last started
* division was signed and the inputs' signs differed */
" str r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */
" str r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */
" str r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */
" str r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */
#else
" subs r0, r0, #32 \n"/* Go back for the low registers that are not automatically restored. */
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
" ldmia r0!, {r4-r7} \n"/* Pop low registers. */
" \n"
" bx r3 \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
#else
__asm volatile
(
" .syntax unified \n"
" mrs r1, psp \n"
" \n"
" adr r0, ulAsmLocals2 \n"/* Get the location of the current TCB for the current core. */
" ldmia r0!, {r2, r3} \n"
#if portRUNNING_ON_BOTH_CORES
" ldr r0, [r2] \n"/* r0 = Core number */
" lsls r0, r0, #2 \n"
" adds r3, r0 \n"/* r3 = &pxCurrentTCBs[get_core_num()] */
#else
" \n"/* r3 = &pxCurrentTCBs[0] */
#endif /* portRUNNING_ON_BOTH_CORES */
" ldr r0, [r3] \n"/* r0 = pxCurrentTCB */
" \n"
" subs r1, r1, #32 \n"/* Make space for the remaining low registers. */
" str r1, [r0] \n"/* Save the new top of stack. */
" stmia r1!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */
" mov r4, r8 \n"/* Store the high registers. */
" mov r5, r9 \n"
" mov r6, r10 \n"
" mov r7, r11 \n"
" stmia r1!, {r4-r7} \n"
#if portUSE_DIVIDER_SAVE_RESTORE
/* We expect that the divider is ready at this point (which is
* necessary to safely save/restore), because:
* a) if we have not been interrupted since we entered this method,
* then >8 cycles have clearly passed, so the divider is done
* b) if we were interrupted in the interim, then any "safe" - i.e.
* does the right thing in an IRQ - use of the divider should
* have waited for any in-process divide to complete, saved and
* then fully restored the result, thus the result is ready in
* that case too. */
" ldr r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */
" ldr r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */
" ldr r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */
" ldr r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */
/* We actually save the divider state in the 4 words below
* our recorded stack pointer, so as not to disrupt the stack
* frame expected by debuggers - this is addressed by
* portEXTRA_STACK_SIZE */
" subs r1, r1, #48 \n"
" stmia r1!, {r4-r7} \n"
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
#if portRUNNING_ON_BOTH_CORES
" ldr r0, [r2] \n"/* r0 = Core number */
#else
" movs r0, #0 \n"
#endif /* portRUNNING_ON_BOTH_CORES */
" push {r3, r14} \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" pop {r2, r3} \n"/* lr goes in r3. r2 now holds tcb pointer. */
" \n"
" ldr r1, [r2] \n"
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, r0, #16 \n"/* Move to the high registers. */
" ldmia r0!, {r4-r7} \n"/* Pop the high registers. */
" mov r8, r4 \n"
" mov r9, r5 \n"
" mov r10, r6 \n"
" mov r11, r7 \n"
" \n"
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" \n"
#if portUSE_DIVIDER_SAVE_RESTORE
" movs r2, #0xd \n"/* Pop the divider state. */
" lsls r2, #28 \n"
" subs r0, r0, #48 \n"/* Go back for the divider state */
" ldmia r0!, {r4-r7} \n"/* Pop the divider state. */
/* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the
* results stopping the calculation anyway, however the sign of results
* is adjusted by the h/w at read time based on whether the last started
* division was signed and the inputs' signs differed */
" str r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */
" str r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */
" str r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */
" str r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */
#else
" subs r0, r0, #32 \n"/* Go back for the low registers that are not automatically restored. */
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
" ldmia r0!, {r4-r7} \n"/* Pop low registers. */
" \n"
" bx r3 \n"
" \n"
" .align 4 \n"
"ulAsmLocals2: \n"
" .word 0xD0000000 \n"/* SIO */
" .word pxCurrentTCBs \n"
);
#endif
#if ( configNUMBER_OF_CORES == 1 )
__asm volatile
(
" .syntax unified \n"
" mrs r0, psp \n"
" \n"
" ldr r3, pxCurrentTCBConst2 \n" /* Get the location of the current TCB. */
" ldr r2, [r3] \n"
" \n"
" subs r0, r0, #32 \n" /* Make space for the remaining low registers. */
" str r0, [r2] \n" /* Save the new top of stack. */
" stmia r0!, {r4-r7} \n" /* Store the low registers that are not saved automatically. */
" mov r4, r8 \n" /* Store the high registers. */
" mov r5, r9 \n"
" mov r6, r10 \n"
" mov r7, r11 \n"
" stmia r0!, {r4-r7} \n"
#if portUSE_DIVIDER_SAVE_RESTORE
" movs r2, #0xd \n" /* Store the divider state. */
" lsls r2, #28 \n"
/* We expect that the divider is ready at this point (which is
* necessary to safely save/restore), because:
* a) if we have not been interrupted since we entered this method,
* then >8 cycles have clearly passed, so the divider is done
* b) if we were interrupted in the interim, then any "safe" - i.e.
* does the right thing in an IRQ - use of the divider should
* have waited for any in-process divide to complete, saved and
* then fully restored the result, thus the result is ready in
* that case too. */
" ldr r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
" ldr r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
" ldr r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
" ldr r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
/* We actually save the divider state in the 4 words below
* our recorded stack pointer, so as not to disrupt the stack
* frame expected by debuggers - this is addressed by
* portEXTRA_STACK_SIZE */
" subs r0, r0, #48 \n"
" stmia r0!, {r4-r7} \n"
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
" push {r3, r14} \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" pop {r2, r3} \n" /* lr goes in r3. r2 now holds tcb pointer. */
" \n"
" ldr r1, [r2] \n"
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, r0, #16 \n" /* Move to the high registers. */
" ldmia r0!, {r4-r7} \n" /* Pop the high registers. */
" mov r8, r4 \n"
" mov r9, r5 \n"
" mov r10, r6 \n"
" mov r11, r7 \n"
" \n"
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" \n"
#if portUSE_DIVIDER_SAVE_RESTORE
" movs r2, #0xd \n" /* Pop the divider state. */
" lsls r2, #28 \n"
" subs r0, r0, #48 \n" /* Go back for the divider state */
" ldmia r0!, {r4-r7} \n" /* Pop the divider state. */
/* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the
* results stopping the calculation anyway, however the sign of results
* is adjusted by the h/w at read time based on whether the last started
* division was signed and the inputs' signs differed */
" str r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
" str r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
" str r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
" str r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
#else /* if portUSE_DIVIDER_SAVE_RESTORE */
" subs r0, r0, #32 \n" /* Go back for the low registers that are not automatically restored. */
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
" ldmia r0!, {r4-r7} \n" /* Pop low registers. */
" \n"
" bx r3 \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
#else /* if ( configNUMBER_OF_CORES == 1 ) */
__asm volatile
(
" .syntax unified \n"
" mrs r1, psp \n"
" \n"
" adr r0, ulAsmLocals2 \n" /* Get the location of the current TCB for the current core. */
" ldmia r0!, {r2, r3} \n"
#if portRUNNING_ON_BOTH_CORES
" ldr r0, [r2] \n" /* r0 = Core number */
" lsls r0, r0, #2 \n"
" adds r3, r0 \n" /* r3 = &pxCurrentTCBs[get_core_num()] */
#else
" \n" /* r3 = &pxCurrentTCBs[0] */
#endif /* portRUNNING_ON_BOTH_CORES */
" ldr r0, [r3] \n" /* r0 = pxCurrentTCB */
" \n"
" subs r1, r1, #32 \n" /* Make space for the remaining low registers. */
" str r1, [r0] \n" /* Save the new top of stack. */
" stmia r1!, {r4-r7} \n" /* Store the low registers that are not saved automatically. */
" mov r4, r8 \n" /* Store the high registers. */
" mov r5, r9 \n"
" mov r6, r10 \n"
" mov r7, r11 \n"
" stmia r1!, {r4-r7} \n"
#if portUSE_DIVIDER_SAVE_RESTORE
/* We expect that the divider is ready at this point (which is
* necessary to safely save/restore), because:
* a) if we have not been interrupted since we entered this method,
* then >8 cycles have clearly passed, so the divider is done
* b) if we were interrupted in the interim, then any "safe" - i.e.
* does the right thing in an IRQ - use of the divider should
* have waited for any in-process divide to complete, saved and
* then fully restored the result, thus the result is ready in
* that case too. */
" ldr r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
" ldr r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
" ldr r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
" ldr r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
/* We actually save the divider state in the 4 words below
* our recorded stack pointer, so as not to disrupt the stack
* frame expected by debuggers - this is addressed by
* portEXTRA_STACK_SIZE */
" subs r1, r1, #48 \n"
" stmia r1!, {r4-r7} \n"
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
#if portRUNNING_ON_BOTH_CORES
" ldr r0, [r2] \n" /* r0 = Core number */
#else
" movs r0, #0 \n"
#endif /* portRUNNING_ON_BOTH_CORES */
" push {r3, r14} \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" pop {r2, r3} \n" /* lr goes in r3. r2 now holds tcb pointer. */
" \n"
" ldr r1, [r2] \n"
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
" adds r0, r0, #16 \n" /* Move to the high registers. */
" ldmia r0!, {r4-r7} \n" /* Pop the high registers. */
" mov r8, r4 \n"
" mov r9, r5 \n"
" mov r10, r6 \n"
" mov r11, r7 \n"
" \n"
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" \n"
#if portUSE_DIVIDER_SAVE_RESTORE
" movs r2, #0xd \n" /* Pop the divider state. */
" lsls r2, #28 \n"
" subs r0, r0, #48 \n" /* Go back for the divider state */
" ldmia r0!, {r4-r7} \n" /* Pop the divider state. */
/* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the
* results stopping the calculation anyway, however the sign of results
* is adjusted by the h/w at read time based on whether the last started
* division was signed and the inputs' signs differed */
" str r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
" str r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
" str r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
" str r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
#else /* if portUSE_DIVIDER_SAVE_RESTORE */
" subs r0, r0, #32 \n" /* Go back for the low registers that are not automatically restored. */
#endif /* portUSE_DIVIDER_SAVE_RESTORE */
" ldmia r0!, {r4-r7} \n" /* Pop low registers. */
" \n"
" bx r3 \n"
" \n"
" .align 4 \n"
"ulAsmLocals2: \n"
" .word 0xD0000000 \n" /* SIO */
" .word pxCurrentTCBs \n"
);
#endif /* if ( configNUMBER_OF_CORES == 1 ) */
}
/*-----------------------------------------------------------*/
@ -749,11 +761,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
{
/* Calculate the constants required to configure the tick interrupt. */
#if ( configUSE_TICKLESS_IDLE == 1 )
{
ulTimerCountsForOneTick = ( clock_get_hz(clk_sys) / configTICK_RATE_HZ );
xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR;
}
{
ulTimerCountsForOneTick = ( clock_get_hz( clk_sys ) / configTICK_RATE_HZ );
xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR;
}
#endif /* configUSE_TICKLESS_IDLE */
/* Stop and reset the SysTick. */
@ -938,32 +950,38 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) || ( configSUPPORT_PICO_TIME_INTEROP == 1 )
static TickType_t prvGetTicksToWaitBefore( absolute_time_t t )
{
int64_t xDelay = absolute_time_diff_us(get_absolute_time(), t);
int64_t xDelay = absolute_time_diff_us( get_absolute_time(), t );
const uint32_t ulTickPeriod = 1000000 / configTICK_RATE_HZ;
xDelay -= ulTickPeriod;
if( xDelay >= ulTickPeriod )
{
return xDelay / ulTickPeriod;
}
return 0;
}
#endif
#endif /* if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) || ( configSUPPORT_PICO_TIME_INTEROP == 1 ) */
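As a worked example of prvGetTicksToWaitBefore() (numbers illustrative): with configTICK_RATE_HZ at 1000, ulTickPeriod is 1000 us. A target 3500 us in the future gives xDelay = 3500 - 1000 = 2500, so two ticks are returned; any target closer than two tick periods yields 0, meaning the caller wakes early rather than sleeping past the deadline.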
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
uint32_t ulPortLockGetCurrentOwnerId()
{
if( portIS_FREE_RTOS_CORE())
if( portIS_FREE_RTOS_CORE() )
{
uint32_t exception = __get_current_exception();
if( !exception )
{
return ( uintptr_t ) xTaskGetCurrentTaskHandle();
}
/* Note: since ROM is at 0x00000000, these can't be confused with
* valid task handles (pointers) in RAM */
/* We make all exception handler/core combinations distinct owners */
return get_core_num() + exception * 2;
}
/* Note: since ROM is at 0x00000000, this can't be confused with
* valid task handles (pointers) in RAM */
return get_core_num();
@ -972,10 +990,11 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
static inline EventBits_t prvGetEventGroupBit( spin_lock_t * spinLock )
{
uint32_t ulBit;
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
ulBit = 1u << (spin_lock_get_num(spinLock) & 0x7u);
ulBit = 1u << ( spin_lock_get_num( spinLock ) & 0x7u );
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
ulBit = 1u << spin_lock_get_num(spinLock);
ulBit = 1u << spin_lock_get_num( spinLock );
/* reduce to range 0-24 */
ulBit |= ulBit << 8u;
ulBit >>= 8u;
@ -986,45 +1005,52 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
static inline EventBits_t prvGetAllEventGroupBits()
{
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
return (EventBits_t) 0xffu;
return ( EventBits_t ) 0xffu;
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
return ( EventBits_t ) 0xffffffu;
#endif /* configTICK_TYPE_WIDTH_IN_BITS */
}
void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock, uint32_t ulSave )
void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock,
uint32_t ulSave )
{
configASSERT( !portCHECK_IF_IN_ISR() );
// note no need to check LIB_PICO_MULTICORE, as this is always returns true if that is not defined
/* note no need to check LIB_PICO_MULTICORE, as this always returns true if that is not defined */
if( !portIS_FREE_RTOS_CORE() )
{
spin_unlock(pxLock->spin_lock, ulSave );
spin_unlock( pxLock->spin_lock, ulSave );
__wfe();
}
else
{
configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL );
// we want to hold the lock until the event bits have been set; since interrupts are currently disabled
// by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when
// the scheduler is unlocked during this call
configASSERT(pxLock->spin_lock);
/* we want to hold the lock until the event bits have been set; since interrupts are currently disabled */
/* by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when */
/* the scheduler is unlocked during this call */
configASSERT( pxLock->spin_lock );
int xCoreID = ( int ) portGET_CORE_ID();
pxYieldSpinLock[xCoreID] = pxLock->spin_lock;
ulYieldSpinLockSaveValue[xCoreID] = ulSave;
xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock),
pdTRUE, pdFALSE, portMAX_DELAY);
pxYieldSpinLock[ xCoreID ] = pxLock->spin_lock;
ulYieldSpinLockSaveValue[ xCoreID ] = ulSave;
xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit( pxLock->spin_lock ),
pdTRUE, pdFALSE, portMAX_DELAY );
}
}
void vPortLockInternalSpinUnlockWithNotify( struct lock_core *pxLock, uint32_t ulSave ) {
EventBits_t uxBits = prvGetEventGroupBit(pxLock->spin_lock );
if (portIS_FREE_RTOS_CORE()) {
void vPortLockInternalSpinUnlockWithNotify( struct lock_core * pxLock,
uint32_t ulSave )
{
EventBits_t uxBits = prvGetEventGroupBit( pxLock->spin_lock );
if( portIS_FREE_RTOS_CORE() )
{
#if LIB_PICO_MULTICORE
/* signal an event in case a regular core is waiting */
__sev();
#endif
spin_unlock(pxLock->spin_lock, ulSave );
spin_unlock( pxLock->spin_lock, ulSave );
if( !portCHECK_IF_IN_ISR() )
{
xEventGroupSetBits( xEventGroup, uxBits );
@ -1040,34 +1066,39 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
{
__sev();
#if ( portRUNNING_ON_BOTH_CORES == 0 )
/* We could send the bits across the FIFO, which would have required us to block here if the FIFO was full,
* or we could have just set all bits on the other side, however it seems reasonable instead to take
* the hit of another spin lock to protect an accurate bit set. */
if( pxCrossCoreSpinLock != pxLock->spin_lock )
{
spin_lock_unsafe_blocking(pxCrossCoreSpinLock);
spin_lock_unsafe_blocking( pxCrossCoreSpinLock );
uxCrossCoreEventBits |= uxBits;
spin_unlock_unsafe(pxCrossCoreSpinLock);
spin_unlock_unsafe( pxCrossCoreSpinLock );
}
else
{
uxCrossCoreEventBits |= uxBits;
}
/* This causes a FIFO IRQ on the other (FreeRTOS) core, which will then set the event bits */
sio_hw->fifo_wr = 0;
#endif /* portRUNNING_ON_BOTH_CORES == 0 */
spin_unlock(pxLock->spin_lock, ulSave);
spin_unlock( pxLock->spin_lock, ulSave );
}
}
bool xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( struct lock_core * pxLock, uint32_t ulSave, absolute_time_t uxUntil )
bool xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( struct lock_core * pxLock,
uint32_t ulSave,
absolute_time_t uxUntil )
{
configASSERT( !portCHECK_IF_IN_ISR() );
// note no need to check LIB_PICO_MULTICORE, as this is always returns true if that is not defined
/* note no need to check LIB_PICO_MULTICORE, as this always returns true if that is not defined */
if( !portIS_FREE_RTOS_CORE() )
{
spin_unlock(pxLock->spin_lock, ulSave);
return best_effort_wfe_or_timeout(uxUntil);
spin_unlock( pxLock->spin_lock, ulSave );
return best_effort_wfe_or_timeout( uxUntil );
}
else
{
@ -1075,24 +1106,26 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL );
TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil );
if( uxTicksToWait )
{
/* We want to hold the lock until the event bits have been set; since interrupts are currently disabled
* by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when
* the scheduler is unlocked during this call */
configASSERT(pxLock->spin_lock);
configASSERT( pxLock->spin_lock );
int xCoreID = ( int ) portGET_CORE_ID();
pxYieldSpinLock[xCoreID] = pxLock->spin_lock;
ulYieldSpinLockSaveValue[xCoreID] = ulSave;
pxYieldSpinLock[ xCoreID ] = pxLock->spin_lock;
ulYieldSpinLockSaveValue[ xCoreID ] = ulSave;
xEventGroupWaitBits( xEventGroup,
prvGetEventGroupBit(pxLock->spin_lock), pdTRUE,
prvGetEventGroupBit( pxLock->spin_lock ), pdTRUE,
pdFALSE, uxTicksToWait );
}
else
{
spin_unlock( pxLock->spin_lock, ulSave );
}
if ( time_reached( uxUntil ) )
if( time_reached( uxUntil ) )
{
return true;
}
@ -1106,9 +1139,9 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
}
}
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1)
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
/* runs before main */
static void __attribute__((constructor)) prvRuntimeInitializer( void )
static void __attribute__( ( constructor ) ) prvRuntimeInitializer( void )
{
/* This must be done even before the scheduler is started, as the spin lock
* is used by the overrides of the SDK wait/notify primitives */
@ -1119,8 +1152,9 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
/* The event group is not used prior to scheduler init, but is initialized
* here since it logically belongs with the spin lock */
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
xEventGroup = xEventGroupCreateStatic(&xStaticEventGroup);
xEventGroup = xEventGroupCreateStatic( &xStaticEventGroup );
#else
/* Note that it is slightly dubious calling this here before the scheduler is initialized,
* however the only thing it touches is the allocator which then calls vPortEnterCritical
* and vPortExitCritical, and allocating here saves us checking the one time initialized variable in
@ -1128,16 +1162,17 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
xEventGroup = xEventGroupCreate();
#endif /* configSUPPORT_STATIC_ALLOCATION */
}
#endif
#endif /* if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) */
#endif /* configSUPPORT_PICO_SYNC_INTEROP */
#if ( configSUPPORT_PICO_TIME_INTEROP == 1 )
void xPortSyncInternalYieldUntilBefore( absolute_time_t t )
{
TickType_t uxTicksToWait = prvGetTicksToWaitBefore(t);
TickType_t uxTicksToWait = prvGetTicksToWaitBefore( t );
if( uxTicksToWait )
{
vTaskDelay(uxTicksToWait);
vTaskDelay( uxTicksToWait );
}
}
#endif /* configSUPPORT_PICO_TIME_INTEROP */

View file

@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* FreeRTOS Kernel V10.4.3
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -39,29 +40,30 @@
#include "sdkconfig.h"
/* enable use of optimized task selection by the scheduler */
#if defined (CONFIG_FREERTOS_OPTIMIZED_SCHEDULER) && !defined(configUSE_PORT_OPTIMISED_TASK_SELECTION)
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
#if defined( CONFIG_FREERTOS_OPTIMIZED_SCHEDULER ) && !defined( configUSE_PORT_OPTIMISED_TASK_SELECTION )
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
#endif
#define XT_USE_THREAD_SAFE_CLIB 0
#define XT_USE_THREAD_SAFE_CLIB 0
#undef XT_USE_SWPRI
#if CONFIG_FREERTOS_CORETIMER_0
#define XT_TIMER_INDEX 0
#define XT_TIMER_INDEX 0
#elif CONFIG_FREERTOS_CORETIMER_1
#define XT_TIMER_INDEX 1
#define XT_TIMER_INDEX 1
#endif
#ifndef __ASSEMBLER__
/**
* This function is defined to provide a deprecation warning whenever
* XT_CLOCK_FREQ macro is used.
* Update the code to use esp_clk_cpu_freq function instead.
* @return current CPU clock frequency, in Hz
*/
int xt_clock_freq(void) __attribute__((deprecated));
int xt_clock_freq( void ) __attribute__( ( deprecated ) );
#define XT_CLOCK_FREQ (xt_clock_freq())
#define XT_CLOCK_FREQ ( xt_clock_freq() )
#endif // __ASSEMBLER__
@ -70,47 +72,50 @@ int xt_clock_freq(void) __attribute__((deprecated));
/* configASSERT behaviour */
#ifndef __ASSEMBLER__
#include <assert.h>
#include "esp_rom_sys.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/ets_sys.h" // will be removed in idf v5.0
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/ets_sys.h"
#endif
#include <assert.h>
#include "esp_rom_sys.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/ets_sys.h" /* will be removed in idf v5.0 */
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/ets_sys.h"
#endif
#endif // __ASSEMBLER__
// If CONFIG_FREERTOS_ASSERT_DISABLE is set then configASSERT is defined empty later in FreeRTOS.h and the macro
// configASSERT_DEFINED remains unset (meaning some warnings are avoided)
/* If CONFIG_FREERTOS_ASSERT_DISABLE is set then configASSERT is defined empty later in FreeRTOS.h and the macro */
/* configASSERT_DEFINED remains unset (meaning some warnings are avoided) */
#ifdef configASSERT
#undef configASSERT
#if defined(CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE)
#define configASSERT(a) if (unlikely(!(a))) { \
esp_rom_printf("%s:%d (%s)- assert failed!\n", __FILE__, __LINE__, \
__FUNCTION__); \
#undef configASSERT
#if defined( CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE )
#define configASSERT( a ) \
if( unlikely( !( a ) ) ) { \
esp_rom_printf( "%s:%d (%s)- assert failed!\n", __FILE__, __LINE__, \
__FUNCTION__ ); \
}
#elif defined(CONFIG_FREERTOS_ASSERT_FAIL_ABORT)
#define configASSERT(a) assert(a)
#endif
#endif
#elif defined( CONFIG_FREERTOS_ASSERT_FAIL_ABORT )
#define configASSERT( a ) assert( a )
#endif
#endif /* ifdef configASSERT */
#if CONFIG_FREERTOS_ASSERT_ON_UNTESTED_FUNCTION
#define UNTESTED_FUNCTION() { esp_rom_printf("Untested FreeRTOS function %s\r\n", __FUNCTION__); configASSERT(false); } while(0)
#define UNTESTED_FUNCTION() \
{ esp_rom_printf( "Untested FreeRTOS function %s\r\n", __FUNCTION__ ); configASSERT( false ); } \
while( 0 )
#else
#define UNTESTED_FUNCTION()
#define UNTESTED_FUNCTION()
#endif
#define configXT_BOARD 1 /* Board mode */
#define configXT_SIMULATOR 0
#define configXT_BOARD 1 /* Board mode */
#define configXT_SIMULATOR 0
/* The maximum interrupt priority from which FreeRTOS.org API functions can
be called. Only API functions that end in ...FromISR() can be used within
interrupts. */
* be called. Only API functions that end in ...FromISR() can be used within
* interrupts. */
#define configMAX_SYSCALL_INTERRUPT_PRIORITY XCHAL_EXCM_LEVEL
/* Stack alignment, architecture specific. Must be a power of two. */
#define configSTACK_ALIGNMENT 16
#define configSTACK_ALIGNMENT 16
/* The Xtensa port uses a separate interrupt stack. Adjust the stack size
@ -119,16 +124,16 @@ int xt_clock_freq(void) __attribute__((deprecated));
* the stack for the 2nd CPU will be calculated using configISR_STACK_SIZE.
*/
#ifndef configISR_STACK_SIZE
#define configISR_STACK_SIZE ((CONFIG_FREERTOS_ISR_STACKSIZE + configSTACK_ALIGNMENT - 1) & (~(configSTACK_ALIGNMENT - 1)))
#define configISR_STACK_SIZE ( ( CONFIG_FREERTOS_ISR_STACKSIZE + configSTACK_ALIGNMENT - 1 ) & ( ~( configSTACK_ALIGNMENT - 1 ) ) )
#endif
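As a quick illustration of the rounding above (value hypothetical): with the default 16-byte alignment, a CONFIG_FREERTOS_ISR_STACKSIZE of 2100 becomes ( 2100 + 15 ) & ~15 = 2112, i.e. it is rounded up to the next multiple of configSTACK_ALIGNMENT.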
#ifndef __ASSEMBLER__
#if CONFIG_APPTRACE_SV_ENABLE
extern uint32_t port_switch_flag[];
#define os_task_switch_is_pended(_cpu_) (port_switch_flag[_cpu_])
#else
#define os_task_switch_is_pended(_cpu_) (false)
#endif
#if CONFIG_APPTRACE_SV_ENABLE
extern uint32_t port_switch_flag[];
#define os_task_switch_is_pended( _cpu_ ) ( port_switch_flag[ _cpu_ ] )
#else
#define os_task_switch_is_pended( _cpu_ ) ( false )
#endif
#endif
#endif // FREERTOS_CONFIG_XTENSA_H

View file

@ -15,7 +15,7 @@
/**
* @brief Set up the SysTick interrupt
*/
void vPortSetupTimer(void);
void vPortSetupTimer( void );
/* *INDENT-OFF* */
#ifdef __cplusplus

View file

@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*

View file

@ -6,6 +6,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* FreeRTOS Kernel V10.4.3
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -75,23 +76,23 @@
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include <xtensa/xtruntime.h>
#include "soc/spinlock.h"
#include "esp_timer.h" /* required for FreeRTOS run time stats */
#include "esp_timer.h" /* required for FreeRTOS run time stats */
#include "esp_system.h"
#include "esp_idf_version.h"
#include "esp_heap_caps.h"
/* TODO: Resolve build warnings generated due to this header inclusion */
/* TODO: Resolve build warnings generated due to this header inclusion */
#include "hal/cpu_hal.h"
/* TODO: These includes are not directly used in this file. They are kept into to prevent a breaking change. Remove these. */
/* TODO: These includes are not directly used in this file. They are kept in to prevent a breaking change. Remove these. */
#include <limits.h>
#include <xtensa/xtensa_api.h>
#include "soc/cpu.h"
#include "soc/soc_memory_layout.h"
#if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
#include "soc/compare_set.h"
#endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
#if ( ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
#include "soc/compare_set.h"
#endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
/*#include "xtensa_context.h" */
@ -122,7 +123,7 @@
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
@ -137,14 +138,14 @@
#include "esp_attr.h"
/* "mux" data structure (spinlock) */
typedef spinlock_t portMUX_TYPE; /**< Spinlock type used by FreeRTOS critical sections */
#define portMUX_INITIALIZER_UNLOCKED SPINLOCK_INITIALIZER /**< Spinlock initializer */
#define portMUX_FREE_VAL SPINLOCK_FREE /**< Spinlock is free. [refactor-todo] check if this is still required */
#define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
#define portMUX_INITIALIZE(mux) spinlock_initialize(mux) /*< Initialize a spinlock to its unlocked state */
typedef spinlock_t portMUX_TYPE; /**< Spinlock type used by FreeRTOS critical sections */
#define portMUX_INITIALIZER_UNLOCKED SPINLOCK_INITIALIZER /**< Spinlock initializer */
#define portMUX_FREE_VAL SPINLOCK_FREE /**< Spinlock is free. [refactor-todo] check if this is still required */
#define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
#define portMUX_INITIALIZE( mux ) spinlock_initialize( mux ) /*< Initialize a spinlock to its unlocked state */
#define portCRITICAL_NESTING_IN_TCB 1
#define portCRITICAL_NESTING_IN_TCB 1
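The spinlock-backed portMUX_TYPE defined above is what the IDF flavour of a critical section takes as an argument. A minimal usage sketch, assuming the usual ESP-IDF convention of passing the mux to taskENTER_CRITICAL()/taskEXIT_CRITICAL() (variable and function names are hypothetical):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static portMUX_TYPE xExampleMux = portMUX_INITIALIZER_UNLOCKED;
static uint32_t ulSharedCounter = 0;

void vTouchSharedState( void )
{
    /* Takes the spinlock and masks interrupts on the calling core only. */
    taskENTER_CRITICAL( &xExampleMux );
    ulSharedCounter++;
    taskEXIT_CRITICAL( &xExampleMux );
}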
/*
* Modifications to portENTER_CRITICAL.
@ -223,8 +224,8 @@
} \
} while( 0 )
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
void vPortAssertIfInISR(void);
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
void vPortAssertIfInISR( void );
/* Critical section management. NW-TODO: replace XTOS_SET_INTLEVEL with more efficient version, if any? */
/* These cannot be nested. They should be used with a lot of care and cannot be called from interrupt level. */
@ -239,6 +240,7 @@
static inline UBaseType_t __attribute__( ( always_inline ) ) xPortSetInterruptMaskFromISR( void )
{
UBaseType_t prev_int_level = XTOS_SET_INTLEVEL( XCHAL_EXCM_LEVEL );
portbenchmarkINTERRUPT_DISABLE();
return prev_int_level;
}
@ -255,11 +257,11 @@
/*Because the ROM routines don't necessarily handle a stack in external RAM correctly, we force */
/*the stack memory to always be internal. */
#define portTcbMemoryCaps (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)
#define portStackMemoryCaps (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)
#define portTcbMemoryCaps ( MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT )
#define portStackMemoryCaps ( MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT )
#define pvPortMallocTcbMem(size) heap_caps_malloc(size, portTcbMemoryCaps)
#define pvPortMallocStackMem(size) heap_caps_malloc(size, portStackMemoryCaps)
#define pvPortMallocTcbMem( size ) heap_caps_malloc( size, portTcbMemoryCaps )
#define pvPortMallocStackMem( size ) heap_caps_malloc( size, portStackMemoryCaps )
/*xTaskCreateStatic uses these functions to check incoming memory. */
#define portVALID_TCB_MEM( ptr ) ( esp_ptr_internal( ptr ) && esp_ptr_byte_accessible( ptr ) )
@ -278,18 +280,18 @@
* *bitwise inverse* of the old mem if the mem wasn't written. This doesn't seem to happen on the
* ESP32 (portMUX assertions would fail).
*/
static inline void uxPortCompareSet( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set )
{
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
static inline void uxPortCompareSet( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set )
{
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
__asm__ __volatile__ (
"WSR %2,SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
: "=r" ( *set )
: "r" ( addr ), "r" ( compare ), "0" ( *set )
);
#else
#else
#if ( XCHAL_HAVE_S32C1I > 0 )
__asm__ __volatile__ (
"WSR %2,SCOMPARE1 \n"
@ -316,21 +318,23 @@
*set = old_value;
#endif /* if ( XCHAL_HAVE_S32C1I > 0 ) */
#endif /* #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0)) */
}
#endif /* #if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0)) */
}
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
void uxPortCompareSetExtram( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set );
#else
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
#else
static inline void uxPortCompareSetExtram( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set )
{
#if defined(CONFIG_SPIRAM)
compare_and_set_extram(addr, compare, set);
#endif
#if defined( CONFIG_SPIRAM )
compare_and_set_extram( addr, compare, set );
#endif
}
#endif
#endif /* if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) ) */
/*-----------------------------------------------------------*/
@ -348,15 +352,17 @@
#ifdef CONFIG_FREERTOS_RUN_TIME_STATS_USING_ESP_TIMER
/* Coarse resolution time (us) */
#define portALT_GET_RUN_TIME_COUNTER_VALUE( x ) do { x = ( uint32_t )esp_timer_get_time(); } while( 0 )
#define portALT_GET_RUN_TIME_COUNTER_VALUE( x ) do { x = ( uint32_t ) esp_timer_get_time(); } while( 0 )
#endif
/* Kernel utilities. */
void vPortYield( void );
void vPortEvaluateYieldFromISR( int argc, ... );
void vPortEvaluateYieldFromISR( int argc,
... );
void _frxt_setup_switch( void );
/* Macro to count the number of arguments in a __VA_ARGS__ list, used to support portYIELD_FROM_ISR with,
* or without arguments. The macro counts only 0 or 1 arguments.
*
@ -366,26 +372,26 @@
* This allows users to compile their code with standard C++20 enabled instead of the GNU extension.
* Below C++20, we haven't found any good alternative to using ##__VA_ARGS__.
*/
#if defined( __cplusplus ) && ( __cplusplus > 201703L )
#define portGET_ARGUMENT_COUNT(...) portGET_ARGUMENT_COUNT_INNER( 0 __VA_OPT__(,) __VA_ARGS__, 1 , 0 )
#if defined( __cplusplus ) && ( __cplusplus > 201703L )
#define portGET_ARGUMENT_COUNT( ... ) portGET_ARGUMENT_COUNT_INNER( 0 __VA_OPT__(, ) __VA_ARGS__, 1, 0 )
#else
#define portGET_ARGUMENT_COUNT(...) portGET_ARGUMENT_COUNT_INNER( 0, ##__VA_ARGS__, 1, 0 )
#define portGET_ARGUMENT_COUNT( ... ) portGET_ARGUMENT_COUNT_INNER( 0, ## __VA_ARGS__, 1, 0 )
#endif
#define portGET_ARGUMENT_COUNT_INNER( zero, one, count, ... ) count
#define portGET_ARGUMENT_COUNT_INNER( zero, one, count, ... ) count
_Static_assert( portGET_ARGUMENT_COUNT() == 0, "portGET_ARGUMENT_COUNT() result does not match for 0 arguments" );
_Static_assert( portGET_ARGUMENT_COUNT( 1 ) == 1, "portGET_ARGUMENT_COUNT() result does not match for 1 argument" );
#define portYIELD() vPortYield()
#define portYIELD() vPortYield()
/* The macro below can be used when passing a single argument, or without any argument;
* it was developed to support both usages of portYIELD inside of an ISR. Any other usage form
* might result in undesired behaviour
*/
#if defined( __cplusplus ) && ( __cplusplus > 201703L )
#define portYIELD_FROM_ISR(...) vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT( __VA_ARGS__ ) __VA_OPT__( , ) __VA_ARGS__ )
#if defined( __cplusplus ) && ( __cplusplus > 201703L )
#define portYIELD_FROM_ISR( ... ) vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT( __VA_ARGS__ ) __VA_OPT__(, ) __VA_ARGS__ )
#else
#define portYIELD_FROM_ISR(...) vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT( __VA_ARGS__ ), ##__VA_ARGS__ )
#define portYIELD_FROM_ISR( ... ) vPortEvaluateYieldFromISR( portGET_ARGUMENT_COUNT( __VA_ARGS__ ), ## __VA_ARGS__ )
#endif
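A short usage sketch of the two accepted forms follows; the ISR, queue and data names are hypothetical and only illustrate how the argument-counting macro lets portYIELD_FROM_ISR be called with or without the woken flag:

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

static QueueHandle_t xExampleQueue;

void vExampleISR( void )
{
    uint32_t ulEvent = 1;
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    xQueueSendFromISR( xExampleQueue, &ulEvent, &xHigherPriorityTaskWoken );

    /* Both forms expand through portGET_ARGUMENT_COUNT(): */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    /* portYIELD_FROM_ISR(); - the zero-argument form is also accepted. */
}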
static inline BaseType_t xPortGetCoreID();
@ -436,31 +442,31 @@
void _xt_coproc_release( volatile void * coproc_sa_base );
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
#if (ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0))
/* Architecture specific optimisations. */
#if ( ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
/* Architecture specific optimisations. */
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
/* Check the configuration. */
#if( configMAX_PRIORITIES > 32 )
#error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice.
#endif
/* Check the configuration. */
#if ( configMAX_PRIORITIES > 32 )
#error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice.
#endif
/* Store/clear the ready priorities in a bit map. */
#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
/* Store/clear the ready priorities in a bit map. */
#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( ( uxReadyPriorities ) ) )
#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( ( uxReadyPriorities ) ) )
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
#endif /* ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0) */
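As a worked example of the bit scan above: if uxReadyPriorities is 0x88 (priorities 3 and 7 ready), __builtin_clz( 0x88 ) evaluates to 24 on a 32-bit int, so uxTopPriority becomes 31 - 24 = 7, the highest ready priority.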
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/*
* Map to the memory management routines required for the port.
@ -474,7 +480,8 @@
#define xPortGetFreeHeapSize esp_get_free_heap_size
#define xPortGetMinimumEverFreeHeapSize esp_get_minimum_free_heap_size
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
/*
* Send an interrupt to another core in order to make the task running
* on it yield for a higher-priority task.
@ -482,7 +489,7 @@
void vPortYieldOtherCore( BaseType_t coreid ) PRIVILEGED_FUNCTION;
#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
/*
* Callback to set a watchpoint on the end of the stack. Called every context switch to change the stack
@ -522,27 +529,27 @@
/* Multi-core: get current core ID */
static inline BaseType_t IRAM_ATTR xPortGetCoreID()
{
return ( uint32_t )cpu_hal_get_core_id();
return ( uint32_t ) cpu_hal_get_core_id();
}
/* Get tick rate per second */
uint32_t xPortGetTickRateHz( void );
static inline bool IRAM_ATTR xPortCanYield(void)
static inline bool IRAM_ATTR xPortCanYield( void )
{
uint32_t ps_reg = 0;
//Get the current value of PS (processor status) register
RSR(PS, ps_reg);
/*Get the current value of PS (processor status) register */
RSR( PS, ps_reg );
/*
* intlevel = (ps_reg & 0xf);
* excm = (ps_reg >> 4) & 0x1;
* CINTLEVEL is max(excm * EXCMLEVEL, INTLEVEL), where EXCMLEVEL is 3.
* However, just return true, only intlevel is zero.
*/
* intlevel = (ps_reg & 0xf);
* excm = (ps_reg >> 4) & 0x1;
* CINTLEVEL is max(excm * EXCMLEVEL, INTLEVEL), where EXCMLEVEL is 3.
* However, just return true only if intlevel is zero.
*/
return ((ps_reg & PS_INTLEVEL_MASK) == 0);
return( ( ps_reg & PS_INTLEVEL_MASK ) == 0 );
}
/* porttrace */

View file

@ -8,7 +8,7 @@
/* File adapted to use on IDF FreeRTOS component, extracted
* originally from zephyr RTOS code base:
* https://github.com/zephyrproject-rtos/zephyr/blob/dafd348/arch/xtensa/include/xtensa-asm2-s.h
* https://github.com/zephyrproject-rtos/zephyr/blob/dafd3485bf67880e667b6e9a758b0b64fb688d63/arch/xtensa/include/xtensa-asm2-s.h
*/
#ifndef __XT_ASM_UTILS_H
@ -67,9 +67,9 @@
rotw 3
and a4, a4, a4
rotw 2
#else
#error Unrecognized XCHAL_NUM_AREGS
#endif
.endm
#else /* if XCHAL_NUM_AREGS == 64 */
#error Unrecognized XCHAL_NUM_AREGS
#endif /* if XCHAL_NUM_AREGS == 64 */
.endm
#endif
#endif /* ifndef __XT_ASM_UTILS_H */

View file

@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*

View file

@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*
@ -40,7 +41,7 @@
*******************************************************************************/
#ifndef XTENSA_CONFIG_H
#define XTENSA_CONFIG_H
#define XTENSA_CONFIG_H
/* *INDENT-OFF* */
#ifdef __cplusplus
@ -48,11 +49,11 @@
#endif
/* *INDENT-ON* */
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include "xtensa_context.h"
#include "xtensa_context.h"
/*-----------------------------------------------------------------------------
@ -105,27 +106,27 @@
* -----------------------------------------------------------------------------*/
/* Extra space required for interrupt/exception hooks. */
#ifdef XT_INTEXC_HOOKS
#ifdef __XTENSA_CALL0_ABI__
#define STK_INTEXC_EXTRA 0x200
#else
#define STK_INTEXC_EXTRA 0x180
#endif
#ifdef XT_INTEXC_HOOKS
#ifdef __XTENSA_CALL0_ABI__
#define STK_INTEXC_EXTRA 0x200
#else
#define STK_INTEXC_EXTRA 0
#define STK_INTEXC_EXTRA 0x180
#endif
#else
#define STK_INTEXC_EXTRA 0
#endif
#define XT_CLIB_CONTEXT_AREA_SIZE 0
#define XT_CLIB_CONTEXT_AREA_SIZE 0
/*------------------------------------------------------------------------------
* Extra size -- interrupt frame plus coprocessor save area plus hook space.
* NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the hooks.
* ------------------------------------------------------------------------------*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE )
#else
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE )
#endif
#ifdef __XTENSA_CALL0_ABI__
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE )
#else
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE )
#endif
/*------------------------------------------------------------------------------
* Space allocated for user code -- function calls and local variables.
@ -135,18 +136,18 @@
* NOTE: The windowed ABI requires more stack, since space has to be reserved
* for spilling register windows.
* ------------------------------------------------------------------------------*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_USER_SIZE 0x200
#else
#define XT_USER_SIZE 0x400
#endif
#ifdef __XTENSA_CALL0_ABI__
#define XT_USER_SIZE 0x200
#else
#define XT_USER_SIZE 0x400
#endif
/* Minimum recommended stack size. */
#define XT_STACK_MIN_SIZE ( ( XT_XTRA_SIZE + XT_USER_SIZE ) / sizeof( unsigned char ) )
#define XT_STACK_MIN_SIZE ( ( XT_XTRA_SIZE + XT_USER_SIZE ) / sizeof( unsigned char ) )
/* OS overhead with and without C library thread context. */
#define XT_STACK_EXTRA ( XT_XTRA_SIZE )
#define XT_STACK_EXTRA_CLIB ( XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE )
#define XT_STACK_EXTRA ( XT_XTRA_SIZE )
#define XT_STACK_EXTRA_CLIB ( XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE )
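A worked example of the stack-size arithmetic above, using assumed values for the configuration-dependent constants (the real numbers come from the core's configuration headers and differ by ABI and coprocessor setup):

#include <stdio.h>

int main( void )
{
    /* All numbers below are assumptions for illustration only. */
    unsigned xt_stk_frmsz     = 0x100;   /* XT_STK_FRMSZ */
    unsigned stk_intexc_extra = 0x180;   /* windowed ABI with XT_INTEXC_HOOKS */
    unsigned xt_cp_size       = 0x180;   /* XT_CP_SIZE */
    unsigned xt_user_size     = 0x400;   /* XT_USER_SIZE, windowed ABI */

    /* Same formulas as the macros above (windowed ABI adds 0x20). */
    unsigned xt_xtra_size      = xt_stk_frmsz + stk_intexc_extra + 0x20 + xt_cp_size;
    unsigned xt_stack_min_size = xt_xtra_size + xt_user_size;   /* sizeof( unsigned char ) == 1 */

    printf( "minimum recommended stack: %u bytes\n", xt_stack_min_size );
    return 0;
}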
/* *INDENT-OFF* */
#ifdef __cplusplus

View file

@@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*

View file

@@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*
@@ -60,6 +61,7 @@
#include <xtensa/corebits.h>
#include <xtensa/config/system.h>
#include "sdkconfig.h"
/*
* Include any RTOS specific definitions that are needed by this header.
*/
@@ -155,9 +157,9 @@
*/
/* void XT_RTOS_TIMER_INT(void) */
#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
#define XT_RTOS_TIMER_INT _frxt_timer_int
#define XT_RTOS_TIMER_INT _frxt_timer_int
#endif
#define XT_TICK_PER_SEC configTICK_RATE_HZ
#define XT_TICK_PER_SEC configTICK_RATE_HZ
/*
* Return in a15 the base address of the co-processor state save area for the

View file

@@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*

View file

@@ -6,6 +6,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* FreeRTOS Kernel V10.4.3
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -33,6 +34,7 @@
*
* 1 tab == 4 spaces!
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*
@@ -63,22 +65,22 @@
#include "xtensa_rtos.h"
#include "esp_idf_version.h"
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
#include "rom/ets_sys.h"
#include "esp_panic.h"
#include "esp_crosscore_int.h"
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
#include "rom/ets_sys.h"
#include "esp_panic.h"
#include "esp_crosscore_int.h"
#else
#if CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/ets_sys.h"
#endif
#include "esp_private/panic_reason.h"
#include "esp_debug_helpers.h"
#include "esp_private/crosscore_int.h"
#include "esp_log.h"
#if CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/ets_sys.h"
#endif
#include "esp_private/panic_reason.h"
#include "esp_debug_helpers.h"
#include "esp_private/crosscore_int.h"
#include "esp_log.h"
#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
#include "soc/cpu.h"
@@ -94,12 +96,12 @@
/* Defined in xtensa_context.S */
extern void _xt_coproc_init( void );
_Static_assert(tskNO_AFFINITY == CONFIG_FREERTOS_NO_AFFINITY, "incorrect tskNO_AFFINITY value");
_Static_assert( tskNO_AFFINITY == CONFIG_FREERTOS_NO_AFFINITY, "incorrect tskNO_AFFINITY value" );
/*-----------------------------------------------------------*/
extern volatile int port_xSchedulerRunning[portNUM_PROCESSORS];
unsigned port_interruptNesting[ portNUM_PROCESSORS ] = { 0 }; /* Interrupt nesting level. Increased/decreased in portasm.c, _frxt_int_enter/_frxt_int_exit */
extern volatile int port_xSchedulerRunning[ portNUM_PROCESSORS ];
unsigned port_interruptNesting[ portNUM_PROCESSORS ] = { 0 }; /* Interrupt nesting level. Increased/decreased in portasm.c, _frxt_int_enter/_frxt_int_exit */
/*-----------------------------------------------------------*/
@@ -153,15 +155,15 @@ void _xt_user_exit( void );
thread_local_sz = ALIGNUP( 0x10, thread_local_sz );
/* Initialize task's stack so that we have the following structure at the top:
----LOW ADDRESSES ----------------------------------------HIGH ADDRESSES----------
task stack | interrupt stack frame | thread local vars | co-processor save area |
----------------------------------------------------------------------------------
| |
SP pxTopOfStack
All parts are aligned to 16 byte boundary.
*/
*
* ----LOW ADDRESSES ----------------------------------------HIGH ADDRESSES----------
* task stack | interrupt stack frame | thread local vars | co-processor save area |
* ----------------------------------------------------------------------------------
| |
| SP pxTopOfStack
|
| All parts are aligned to 16 byte boundary.
*/
/* Create interrupt stack frame aligned to 16 byte boundary */
sp = ( StackType_t * ) ( ( ( UBaseType_t ) pxTopOfStack - XT_CP_SIZE - thread_local_sz - XT_STK_FRMSZ ) & ~0xf );
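A quick numeric check of the alignment arithmetic in the line above; the stack-top address and the area sizes are placeholders invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main( void )
{
    /* Placeholder numbers -- the real sizes come from the Xtensa configuration headers. */
    uintptr_t top        = 0x3FFB1234;   /* pxTopOfStack (deliberately not 16-byte aligned) */
    uintptr_t cp_size    = 0x180;        /* XT_CP_SIZE */
    uintptr_t tls_size   = 0x20;         /* thread_local_sz, already rounded up to 0x10 */
    uintptr_t frame_size = 0x100;        /* XT_STK_FRMSZ */

    /* Same arithmetic as the assignment above: carve out the three areas, then
     * clear the low four bits so the interrupt frame starts on a 16-byte boundary. */
    uintptr_t sp = ( top - cp_size - tls_size - frame_size ) & ~( uintptr_t ) 0xf;

    printf( "sp = 0x%lx, 16-byte aligned: %s\n",
            ( unsigned long ) sp, ( sp % 16U == 0U ) ? "yes" : "no" );
    return 0;
}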
@@ -309,7 +311,7 @@ void vPortYieldOtherCore( BaseType_t coreid )
uint32_t usStackDepth )
{
#if XCHAL_CP_NUM > 0
xMPUSettings->coproc_area = ( StackType_t * ) ( ( uint32_t ) ( pxBottomOfStack + usStackDepth - 1 ));
xMPUSettings->coproc_area = ( StackType_t * ) ( ( uint32_t ) ( pxBottomOfStack + usStackDepth - 1 ) );
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) xMPUSettings->coproc_area ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( uint32_t ) xMPUSettings->coproc_area - XT_CP_SIZE ) & ~0xf );
@@ -351,27 +353,29 @@ BaseType_t IRAM_ATTR xPortInterruptedFromISRContext()
return( port_interruptNesting[ xPortGetCoreID() ] != 0 );
}
void IRAM_ATTR vPortEvaluateYieldFromISR( int argc, ... )
void IRAM_ATTR vPortEvaluateYieldFromISR( int argc,
... )
{
BaseType_t xYield;
va_list ap;
va_start( ap, argc );
if( argc )
{
xYield = ( BaseType_t )va_arg( ap, int );
xYield = ( BaseType_t ) va_arg( ap, int );
va_end( ap );
}
else
{
//it is an empty parameter vPortYieldFromISR macro call:
/*it is an empty parameter vPortYieldFromISR macro call: */
va_end( ap );
traceISR_EXIT_TO_SCHEDULER();
_frxt_setup_switch();
return;
}
//Yield exists, so need evaluate it first then switch:
/*Yield exists, so need evaluate it first then switch: */
if( xYield == pdTRUE )
{
traceISR_EXIT_TO_SCHEDULER();
@@ -477,8 +481,8 @@ void vPortCPUInitializeMutex( portMUX_TYPE * mux )
}
#endif /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */
#define STACK_WATCH_AREA_SIZE ( 32 )
#define STACK_WATCH_POINT_NUMBER ( SOC_CPU_WATCHPOINTS_NUM - 1 )
#define STACK_WATCH_AREA_SIZE ( 32 )
#define STACK_WATCH_POINT_NUMBER ( SOC_CPU_WATCHPOINTS_NUM - 1 )
void vPortSetStackWatchpoint( void * pxStackStart )
{
@@ -491,48 +495,48 @@ void vPortSetStackWatchpoint( void * pxStackStart )
int addr = ( int ) pxStackStart;
addr = ( addr + 31 ) & ( ~31 );
esp_cpu_set_watchpoint( STACK_WATCH_POINT_NUMBER, (char*)addr, 32, ESP_WATCHPOINT_STORE );
esp_cpu_set_watchpoint( STACK_WATCH_POINT_NUMBER, ( char * ) addr, 32, ESP_WATCHPOINT_STORE );
}
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
#if defined( CONFIG_SPIRAM_SUPPORT )
#if defined( CONFIG_SPIRAM_SUPPORT )
/*
* Compare & set (S32C1) does not work in external RAM. Instead, this routine uses a mux (in internal memory) to fake it.
*/
static portMUX_TYPE extram_mux = portMUX_INITIALIZER_UNLOCKED;
static portMUX_TYPE extram_mux = portMUX_INITIALIZER_UNLOCKED;
void uxPortCompareSetExtram( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set )
{
uint32_t prev;
uint32_t oldlevel = portSET_INTERRUPT_MASK_FROM_ISR();
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUAcquireMutexIntsDisabled( &extram_mux, portMUX_NO_TIMEOUT, __FUNCTION__, __LINE__ );
#else
vPortCPUAcquireMutexIntsDisabled( &extram_mux, portMUX_NO_TIMEOUT );
#endif
prev = *addr;
if( prev == compare )
void uxPortCompareSetExtram( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set )
{
*addr = *set;
uint32_t prev;
uint32_t oldlevel = portSET_INTERRUPT_MASK_FROM_ISR();
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUAcquireMutexIntsDisabled( &extram_mux, portMUX_NO_TIMEOUT, __FUNCTION__, __LINE__ );
#else
vPortCPUAcquireMutexIntsDisabled( &extram_mux, portMUX_NO_TIMEOUT );
#endif
prev = *addr;
if( prev == compare )
{
*addr = *set;
}
*set = prev;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUReleaseMutexIntsDisabled( &extram_mux, __FUNCTION__, __LINE__ );
#else
vPortCPUReleaseMutexIntsDisabled( &extram_mux );
#endif
portCLEAR_INTERRUPT_MASK_FROM_ISR( oldlevel );
}
*set = prev;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUReleaseMutexIntsDisabled( &extram_mux, __FUNCTION__, __LINE__ );
#else
vPortCPUReleaseMutexIntsDisabled( &extram_mux );
#endif
portCLEAR_INTERRUPT_MASK_FROM_ISR(oldlevel);
}
#endif //defined(CONFIG_SPIRAM_SUPPORT)
#endif //defined(CONFIG_SPIRAM_SUPPORT)
#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
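The routine above emulates compare-and-set by serialising the read-compare-write sequence with a lock that lives in memory the CPU can always access. A portable sketch of the same idea, using a pthread mutex purely as a stand-in for the port's spinlock; the names are illustrative and not ESP-IDF API:

#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t xExtramLock = PTHREAD_MUTEX_INITIALIZER;   /* stand-in for the internal-memory mux */

static void prvCompareSet( volatile uint32_t * addr, uint32_t compare, uint32_t * set )
{
    pthread_mutex_lock( &xExtramLock );
    {
        uint32_t prev = *addr;

        if( prev == compare )
        {
            *addr = *set;   /* swap in the new value only if the word was unchanged */
        }

        *set = prev;        /* caller learns the value that was actually there */
    }
    pthread_mutex_unlock( &xExtramLock );
}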
@@ -542,27 +546,27 @@ uint32_t xPortGetTickRateHz( void )
return ( uint32_t ) configTICK_RATE_HZ;
}
// For now, running FreeRTOS on one core and a bare metal on the other (or other OSes)
// is not supported. For now CONFIG_FREERTOS_UNICORE and CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
// should mirror each other's values.
//
// And since this should be true, we can just check for CONFIG_FREERTOS_UNICORE.
/* For now, running FreeRTOS on one core and a bare metal on the other (or other OSes) */
/* is not supported. For now CONFIG_FREERTOS_UNICORE and CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE */
/* should mirror each other's values. */
/* */
/* And since this should be true, we can just check for CONFIG_FREERTOS_UNICORE. */
#if CONFIG_FREERTOS_UNICORE != CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
#error "FreeRTOS and system configuration mismatch regarding the use of multiple cores."
#endif
extern void esp_startup_start_app_common(void);
extern void esp_startup_start_app_common( void );
void esp_startup_start_app(void)
void esp_startup_start_app( void )
{
#if !CONFIG_ESP_INT_WDT
#if CONFIG_ESP32_ECO3_CACHE_LOCK_FIX
assert(!soc_has_cache_lock_bug() && "ESP32 Rev 3 + Dual Core + PSRAM requires INT WDT enabled in project config!");
#endif
#endif
#if !CONFIG_ESP_INT_WDT
#if CONFIG_ESP32_ECO3_CACHE_LOCK_FIX
assert( !soc_has_cache_lock_bug() && "ESP32 Rev 3 + Dual Core + PSRAM requires INT WDT enabled in project config!" );
#endif
#endif
esp_startup_start_app_common();
ESP_LOGI("cpu_start", "Starting scheduler on PRO CPU.");
ESP_LOGI( "cpu_start", "Starting scheduler on PRO CPU." );
vTaskStartScheduler();
}

View file

@@ -14,7 +14,7 @@
#include "esp_task_wdt.h"
#include "esp_task.h"
#include "esp_private/crosscore_int.h"
#include "esp_private/startup_internal.h" /* Required by g_spiram_ok. [refactor-todo] for g_spiram_ok */
#include "esp_private/startup_internal.h" /* Required by g_spiram_ok. [refactor-todo] for g_spiram_ok */
#include "esp_log.h"
#include "soc/soc_memory_types.h"
#include "soc/dport_access.h"
@@ -22,17 +22,17 @@
#include "esp_freertos_hooks.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/spiram.h"
#include "esp32/spiram.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/spiram.h"
#include "esp32s2/spiram.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/spiram.h"
#include "esp32s3/spiram.h"
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2
// SPIRAM is not supported on ESP32-C3
/* SPIRAM is not supported on ESP32-C3 */
#endif
#if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL
static const char* TAG = "cpu_start";
static const char * TAG = "cpu_start";
#endif
/* Architecture-agnostic parts of the FreeRTOS ESP-IDF port layer can go here.
@@ -41,119 +41,127 @@ static const char* TAG = "cpu_start";
* which will then call esp_startup_start_app_common()
*/
// Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting
volatile unsigned port_xSchedulerRunning[portNUM_PROCESSORS] = {0};
/* Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting */
volatile unsigned port_xSchedulerRunning[ portNUM_PROCESSORS ] = { 0 };
// For now, running FreeRTOS on one core and a bare metal on the other (or other OSes)
// is not supported. For now CONFIG_FREERTOS_UNICORE and CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
// should mirror each other's values.
//
// And since this should be true, we can just check for CONFIG_FREERTOS_UNICORE.
/* For now, running FreeRTOS on one core and a bare metal on the other (or other OSes) */
/* is not supported. For now CONFIG_FREERTOS_UNICORE and CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE */
/* should mirror each other's values. */
/* */
/* And since this should be true, we can just check for CONFIG_FREERTOS_UNICORE. */
#if CONFIG_FREERTOS_UNICORE != CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
#error "FreeRTOS and system configuration mismatch regarding the use of multiple cores."
#endif
static void main_task(void* args);
static void main_task( void * args );
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
void esp_gdbstub_init(void);
void esp_gdbstub_init( void );
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
extern void app_main(void);
extern void app_main( void );
void esp_startup_start_app_common(void)
void esp_startup_start_app_common( void )
{
#if CONFIG_ESP_INT_WDT
esp_int_wdt_init();
//Initialize the interrupt watch dog for CPU0.
esp_int_wdt_cpu_init();
#endif
#if CONFIG_ESP_INT_WDT
esp_int_wdt_init();
/*Initialize the interrupt watch dog for CPU0. */
esp_int_wdt_cpu_init();
#endif
esp_crosscore_int_init();
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
esp_gdbstub_init();
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
esp_gdbstub_init();
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
portBASE_TYPE res = xTaskCreatePinnedToCore(&main_task, "main",
ESP_TASK_MAIN_STACK, NULL,
ESP_TASK_MAIN_PRIO, NULL, ESP_TASK_MAIN_CORE);
assert(res == pdTRUE);
(void)res;
portBASE_TYPE res = xTaskCreatePinnedToCore( &main_task, "main",
ESP_TASK_MAIN_STACK, NULL,
ESP_TASK_MAIN_PRIO, NULL, ESP_TASK_MAIN_CORE );
assert( res == pdTRUE );
( void ) res;
}
#if !CONFIG_FREERTOS_UNICORE
static volatile bool s_other_cpu_startup_done = false;
static bool other_cpu_startup_idle_hook_cb(void)
{
s_other_cpu_startup_done = true;
return true;
}
#endif
static void main_task(void* args)
{
#if !CONFIG_FREERTOS_UNICORE
// Wait for FreeRTOS initialization to finish on other core, before replacing its startup stack
esp_register_freertos_idle_hook_for_cpu(other_cpu_startup_idle_hook_cb, !xPortGetCoreID());
while (!s_other_cpu_startup_done) {
;
static volatile bool s_other_cpu_startup_done = false;
static bool other_cpu_startup_idle_hook_cb( void )
{
s_other_cpu_startup_done = true;
return true;
}
esp_deregister_freertos_idle_hook_for_cpu(other_cpu_startup_idle_hook_cb, !xPortGetCoreID());
#endif
// [refactor-todo] check if there is a way to move the following block to esp_system startup
static void main_task( void * args )
{
#if !CONFIG_FREERTOS_UNICORE
/* Wait for FreeRTOS initialization to finish on other core, before replacing its startup stack */
esp_register_freertos_idle_hook_for_cpu( other_cpu_startup_idle_hook_cb, !xPortGetCoreID() );
while( !s_other_cpu_startup_done )
{
}
esp_deregister_freertos_idle_hook_for_cpu( other_cpu_startup_idle_hook_cb, !xPortGetCoreID() );
#endif
/* [refactor-todo] check if there is a way to move the following block to esp_system startup */
heap_caps_enable_nonos_stack_heaps();
// Now we have startup stack RAM available for heap, enable any DMA pool memory
#if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL
if (g_spiram_ok) {
esp_err_t r = esp_spiram_reserve_dma_pool(CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL);
if (r != ESP_OK) {
ESP_EARLY_LOGE(TAG, "Could not reserve internal/DMA pool (error 0x%x)", r);
abort();
/* Now we have startup stack RAM available for heap, enable any DMA pool memory */
#if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL
if( g_spiram_ok )
{
esp_err_t r = esp_spiram_reserve_dma_pool( CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL );
if( r != ESP_OK )
{
ESP_EARLY_LOGE( TAG, "Could not reserve internal/DMA pool (error 0x%x)", r );
abort();
}
}
}
#endif
#endif /* if CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL */
//Initialize task wdt if configured to do so
#ifdef CONFIG_ESP_TASK_WDT_PANIC
ESP_ERROR_CHECK(esp_task_wdt_init(CONFIG_ESP_TASK_WDT_TIMEOUT_S, true));
#elif CONFIG_ESP_TASK_WDT
ESP_ERROR_CHECK(esp_task_wdt_init(CONFIG_ESP_TASK_WDT_TIMEOUT_S, false));
#endif
/*Initialize task wdt if configured to do so */
#ifdef CONFIG_ESP_TASK_WDT_PANIC
ESP_ERROR_CHECK( esp_task_wdt_init( CONFIG_ESP_TASK_WDT_TIMEOUT_S, true ) );
#elif CONFIG_ESP_TASK_WDT
ESP_ERROR_CHECK( esp_task_wdt_init( CONFIG_ESP_TASK_WDT_TIMEOUT_S, false ) );
#endif
//Add IDLE 0 to task wdt
#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU0
TaskHandle_t idle_0 = xTaskGetIdleTaskHandleForCPU(0);
if(idle_0 != NULL){
ESP_ERROR_CHECK(esp_task_wdt_add(idle_0));
}
#endif
//Add IDLE 1 to task wdt
#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1
TaskHandle_t idle_1 = xTaskGetIdleTaskHandleForCPU(1);
if(idle_1 != NULL){
ESP_ERROR_CHECK(esp_task_wdt_add(idle_1));
}
#endif
/*Add IDLE 0 to task wdt */
#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU0
TaskHandle_t idle_0 = xTaskGetIdleTaskHandleForCPU( 0 );
if( idle_0 != NULL )
{
ESP_ERROR_CHECK( esp_task_wdt_add( idle_0 ) );
}
#endif
/*Add IDLE 1 to task wdt */
#ifdef CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1
TaskHandle_t idle_1 = xTaskGetIdleTaskHandleForCPU( 1 );
if( idle_1 != NULL )
{
ESP_ERROR_CHECK( esp_task_wdt_add( idle_1 ) );
}
#endif
app_main();
vTaskDelete(NULL);
vTaskDelete( NULL );
}
// -------------------- Heap Related -----------------------
/* -------------------- Heap Related ----------------------- */
bool xPortCheckValidTCBMem(const void *ptr)
bool xPortCheckValidTCBMem( const void * ptr )
{
return esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr);
return esp_ptr_internal( ptr ) && esp_ptr_byte_accessible( ptr );
}
bool xPortcheckValidStackMem(const void *ptr)
bool xPortcheckValidStackMem( const void * ptr )
{
#ifdef CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY
return esp_ptr_byte_accessible(ptr);
#else
return esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr);
#endif
#ifdef CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY
return esp_ptr_byte_accessible( ptr );
#else
return esp_ptr_internal( ptr ) && esp_ptr_byte_accessible( ptr );
#endif
}

View file

@@ -14,51 +14,51 @@
#include "esp_log.h"
#include "sdkconfig.h"
#ifdef CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER
#include "soc/periph_defs.h"
#include "soc/system_reg.h"
#include "hal/systimer_hal.h"
#include "hal/systimer_ll.h"
#include "soc/periph_defs.h"
#include "soc/system_reg.h"
#include "hal/systimer_hal.h"
#include "hal/systimer_ll.h"
#endif
#ifdef CONFIG_PM_TRACE
#include "esp_private/pm_trace.h"
#include "esp_private/pm_trace.h"
#endif //CONFIG_PM_TRACE
BaseType_t xPortSysTickHandler(void);
BaseType_t xPortSysTickHandler( void );
#ifdef CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
extern void _frxt_tick_timer_init(void);
extern void _xt_tick_divisor_init(void);
extern void _frxt_tick_timer_init( void );
extern void _xt_tick_divisor_init( void );
#ifdef CONFIG_FREERTOS_CORETIMER_0
#define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER0_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF)
#endif
#ifdef CONFIG_FREERTOS_CORETIMER_1
#define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER1_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF)
#endif
#ifdef CONFIG_FREERTOS_CORETIMER_0
#define SYSTICK_INTR_ID ( ETS_INTERNAL_TIMER0_INTR_SOURCE + ETS_INTERNAL_INTR_SOURCE_OFF )
#endif
#ifdef CONFIG_FREERTOS_CORETIMER_1
#define SYSTICK_INTR_ID ( ETS_INTERNAL_TIMER1_INTR_SOURCE + ETS_INTERNAL_INTR_SOURCE_OFF )
#endif
/**
* @brief Initialize CCOUNT timer to generate the tick interrupt
*
*/
void vPortSetupTimer(void)
{
/* Init the tick divisor value */
_xt_tick_divisor_init();
void vPortSetupTimer( void )
{
/* Init the tick divisor value */
_xt_tick_divisor_init();
_frxt_tick_timer_init();
}
_frxt_tick_timer_init();
}
#elif CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER
_Static_assert(SOC_CPU_CORES_NUM <= SOC_SYSTIMER_ALARM_NUM - 1, "the number of cores must match the number of core alarms in SYSTIMER");
_Static_assert( SOC_CPU_CORES_NUM <= SOC_SYSTIMER_ALARM_NUM - 1, "the number of cores must match the number of core alarms in SYSTIMER" );
void SysTickIsrHandler(void *arg);
void SysTickIsrHandler( void * arg );
static uint32_t s_handled_systicks[portNUM_PROCESSORS] = { 0 };
static uint32_t s_handled_systicks[ portNUM_PROCESSORS ] = { 0 };
#define SYSTICK_INTR_ID (ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE)
#define SYSTICK_INTR_ID ( ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE )
/**
* @brief Set up the systimer peripheral to generate the tick interrupt
@@ -67,50 +67,58 @@ static uint32_t s_handled_systicks[portNUM_PROCESSORS] = { 0 };
* It is done at the same time so SysTicks for both CPUs occur at the same time or very close.
* The interrupt trigger times for core 0 and core 1 are shifted relative to each other by half a tick period.
*/
void vPortSetupTimer(void)
{
unsigned cpuid = xPortGetCoreID();
#ifdef CONFIG_FREERTOS_CORETIMER_SYSTIMER_LVL3
const unsigned level = ESP_INTR_FLAG_LEVEL3;
#else
const unsigned level = ESP_INTR_FLAG_LEVEL1;
#endif
/* Systimer HAL layer object */
static systimer_hal_context_t systimer_hal;
/* set system timer interrupt vector */
ESP_ERROR_CHECK(esp_intr_alloc(ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE + cpuid, ESP_INTR_FLAG_IRAM | level, SysTickIsrHandler, &systimer_hal, NULL));
void vPortSetupTimer( void )
{
unsigned cpuid = xPortGetCoreID();
if (cpuid == 0) {
systimer_hal_init(&systimer_hal);
systimer_ll_set_counter_value(systimer_hal.dev, SYSTIMER_LL_COUNTER_OS_TICK, 0);
systimer_ll_apply_counter_value(systimer_hal.dev, SYSTIMER_LL_COUNTER_OS_TICK);
#ifdef CONFIG_FREERTOS_CORETIMER_SYSTIMER_LVL3
const unsigned level = ESP_INTR_FLAG_LEVEL3;
#else
const unsigned level = ESP_INTR_FLAG_LEVEL1;
#endif
/* Systimer HAL layer object */
static systimer_hal_context_t systimer_hal;
/* set system timer interrupt vector */
ESP_ERROR_CHECK( esp_intr_alloc( ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE + cpuid, ESP_INTR_FLAG_IRAM | level, SysTickIsrHandler, &systimer_hal, NULL ) );
for (cpuid = 0; cpuid < SOC_CPU_CORES_NUM; cpuid++) {
systimer_hal_counter_can_stall_by_cpu(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, cpuid, false);
}
if( cpuid == 0 )
{
systimer_hal_init( &systimer_hal );
systimer_ll_set_counter_value( systimer_hal.dev, SYSTIMER_LL_COUNTER_OS_TICK, 0 );
systimer_ll_apply_counter_value( systimer_hal.dev, SYSTIMER_LL_COUNTER_OS_TICK );
for (cpuid = 0; cpuid < portNUM_PROCESSORS; ++cpuid) {
uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid;
for( cpuid = 0; cpuid < SOC_CPU_CORES_NUM; cpuid++ )
{
systimer_hal_counter_can_stall_by_cpu( &systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, cpuid, false );
}
/* configure the timer */
systimer_hal_connect_alarm_counter(&systimer_hal, alarm_id, SYSTIMER_LL_COUNTER_OS_TICK);
systimer_hal_set_alarm_period(&systimer_hal, alarm_id, 1000000UL / CONFIG_FREERTOS_HZ);
systimer_hal_select_alarm_mode(&systimer_hal, alarm_id, SYSTIMER_ALARM_MODE_PERIOD);
systimer_hal_counter_can_stall_by_cpu(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, cpuid, true);
if (cpuid == 0) {
systimer_hal_enable_alarm_int(&systimer_hal, alarm_id);
systimer_hal_enable_counter(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK);
#ifndef CONFIG_FREERTOS_UNICORE
// SysTick of core 0 and core 1 are shifted by half of period
systimer_hal_counter_value_advance(&systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, 1000000UL / CONFIG_FREERTOS_HZ / 2);
#endif
for( cpuid = 0; cpuid < portNUM_PROCESSORS; ++cpuid )
{
uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid;
/* configure the timer */
systimer_hal_connect_alarm_counter( &systimer_hal, alarm_id, SYSTIMER_LL_COUNTER_OS_TICK );
systimer_hal_set_alarm_period( &systimer_hal, alarm_id, 1000000UL / CONFIG_FREERTOS_HZ );
systimer_hal_select_alarm_mode( &systimer_hal, alarm_id, SYSTIMER_ALARM_MODE_PERIOD );
systimer_hal_counter_can_stall_by_cpu( &systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, cpuid, true );
if( cpuid == 0 )
{
systimer_hal_enable_alarm_int( &systimer_hal, alarm_id );
systimer_hal_enable_counter( &systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK );
#ifndef CONFIG_FREERTOS_UNICORE
/* SysTick of core 0 and core 1 are shifted by half of period */
systimer_hal_counter_value_advance( &systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK, 1000000UL / CONFIG_FREERTOS_HZ / 2 );
#endif
}
}
}
} else {
uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid;
systimer_hal_enable_alarm_int(&systimer_hal, alarm_id);
else
{
uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid;
systimer_hal_enable_alarm_int( &systimer_hal, alarm_id );
}
}
}
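For reference, the alarm period and the core-to-core shift programmed above reduce to simple arithmetic; CONFIG_FREERTOS_HZ is assumed to be 1000 here purely for illustration:

#include <stdio.h>

int main( void )
{
    unsigned long tick_hz   = 1000UL;                 /* assumed CONFIG_FREERTOS_HZ */
    unsigned long period_us = 1000000UL / tick_hz;    /* alarm period: 1000 us per tick */
    unsigned long shift_us  = period_us / 2UL;        /* core 1 tick is offset by half a period: 500 us */

    printf( "tick period = %lu us, core-to-core shift = %lu us\n", period_us, shift_us );
    return 0;
}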
/**
* @brief Systimer interrupt handler.
@@ -118,37 +126,46 @@ void vPortSetupTimer(void)
* The Systimer interrupt for SysTick works in periodic mode, so there is no need to calculate the next alarm.
* If a timer interrupt is ever serviced more than one tick late, it is necessary to process multiple ticks.
*/
IRAM_ATTR void SysTickIsrHandler(void *arg)
{
uint32_t cpuid = xPortGetCoreID();
systimer_hal_context_t *systimer_hal = (systimer_hal_context_t *)arg;
#ifdef CONFIG_PM_TRACE
ESP_PM_TRACE_ENTER(TICK, cpuid);
#endif
IRAM_ATTR void SysTickIsrHandler( void * arg )
{
uint32_t cpuid = xPortGetCoreID();
systimer_hal_context_t * systimer_hal = ( systimer_hal_context_t * ) arg;
uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid;
do {
systimer_ll_clear_alarm_int(systimer_hal->dev, alarm_id);
#ifdef CONFIG_PM_TRACE
ESP_PM_TRACE_ENTER( TICK, cpuid );
#endif
uint32_t diff = systimer_hal_get_counter_value(systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK) / systimer_ll_get_alarm_period(systimer_hal->dev, alarm_id) - s_handled_systicks[cpuid];
if (diff > 0) {
if (s_handled_systicks[cpuid] == 0) {
s_handled_systicks[cpuid] = diff;
diff = 1;
} else {
s_handled_systicks[cpuid] += diff;
uint32_t alarm_id = SYSTIMER_LL_ALARM_OS_TICK_CORE0 + cpuid;
do
{
systimer_ll_clear_alarm_int( systimer_hal->dev, alarm_id );
uint32_t diff = systimer_hal_get_counter_value( systimer_hal, SYSTIMER_LL_COUNTER_OS_TICK ) / systimer_ll_get_alarm_period( systimer_hal->dev, alarm_id ) - s_handled_systicks[ cpuid ];
if( diff > 0 )
{
if( s_handled_systicks[ cpuid ] == 0 )
{
s_handled_systicks[ cpuid ] = diff;
diff = 1;
}
else
{
s_handled_systicks[ cpuid ] += diff;
}
do
{
xPortSysTickHandler();
} while( --diff );
}
} while( systimer_ll_is_alarm_int_fired( systimer_hal->dev, alarm_id ) );
do {
xPortSysTickHandler();
} while (--diff);
}
} while (systimer_ll_is_alarm_int_fired(systimer_hal->dev, alarm_id));
#ifdef CONFIG_PM_TRACE
ESP_PM_TRACE_EXIT(TICK, cpuid);
#endif
}
#ifdef CONFIG_PM_TRACE
ESP_PM_TRACE_EXIT( TICK, cpuid );
#endif
}
#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
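The handler above works out how many ticks are owed by dividing the current counter value by the alarm period and comparing against the count already handled, so a late interrupt still processes every missed tick. A stand-alone sketch of that catch-up logic with invented numbers (prvTick stands in for xPortSysTickHandler):

#include <stdint.h>
#include <stdio.h>

static uint32_t s_handled;   /* mirrors s_handled_systicks[ cpu ] */

static void prvTick( void ) { /* stand-in for xPortSysTickHandler() */ }

static void prvCatchUp( uint64_t counter, uint32_t period )
{
    uint32_t diff = ( uint32_t ) ( counter / period ) - s_handled;

    if( diff > 0 )
    {
        if( s_handled == 0 )
        {
            /* First tick after start-up: swallow the backlog, process a single tick. */
            s_handled = diff;
            diff = 1;
        }
        else
        {
            s_handled += diff;
        }

        do
        {
            prvTick();
        } while( --diff );
    }
}

int main( void )
{
    prvCatchUp( 3000, 1000 );   /* 3 periods elapsed, nothing handled yet -> 1 tick */
    prvCatchUp( 5000, 1000 );   /* serviced 2 periods late -> 2 ticks processed */
    printf( "handled = %u\n", ( unsigned ) s_handled );
    return 0;
}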
@@ -160,15 +177,20 @@ IRAM_ATTR void SysTickIsrHandler(void *arg)
* - SysTickIsrHandler for xtensa with CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER
* - SysTickIsrHandler for riscv
*/
BaseType_t xPortSysTickHandler(void)
BaseType_t xPortSysTickHandler( void )
{
portbenchmarkIntLatency();
traceISR_ENTER(SYSTICK_INTR_ID);
traceISR_ENTER( SYSTICK_INTR_ID );
BaseType_t ret = xTaskIncrementTick();
if(ret != pdFALSE) {
if( ret != pdFALSE )
{
portYIELD_FROM_ISR();
} else {
}
else
{
traceISR_EXIT();
}
return ret;
}

View file

@@ -31,10 +31,10 @@
#include "portable.h"
/* XOR one core ID with this value to get the other core ID */
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
#define CORE_ID_XOR_SWAP ( CORE_ID_PRO ^ CORE_ID_APP )
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
#define CORE_ID_XOR_SWAP ( CORE_ID_PRO ^ CORE_ID_APP )
#else
#define CORE_ID_REGVAL_XOR_SWAP (CORE_ID_REGVAL_PRO ^ CORE_ID_REGVAL_APP)
#define CORE_ID_REGVAL_XOR_SWAP ( CORE_ID_REGVAL_PRO ^ CORE_ID_REGVAL_APP )
#endif
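The XOR constant lets either core ID be mapped to the other with a single operation, since x ^ (a ^ b) yields b when x equals a and a when x equals b. A tiny sketch with placeholder register values (the real CORE_ID_REGVAL_* constants come from the SoC headers):

#include <stdint.h>
#include <stdio.h>

int main( void )
{
    const uint32_t pro  = 0xCDCD;           /* placeholder for CORE_ID_REGVAL_PRO */
    const uint32_t app  = 0xABAB;           /* placeholder for CORE_ID_REGVAL_APP */
    const uint32_t swap = pro ^ app;        /* CORE_ID_REGVAL_XOR_SWAP */

    uint32_t coreID      = pro;             /* ID of the current core */
    uint32_t otherCoreID = swap ^ coreID;   /* XOR with the swap constant yields the other core's ID */

    printf( "other core id = 0x%04X\n", ( unsigned ) otherCoreID );
    return 0;
}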

View file

@@ -53,16 +53,16 @@ static inline bool __attribute__( ( always_inline ) )
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
uint32_t owner = mux->owner;
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_PRO ) && ( owner != CORE_ID_APP ) )
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_PRO ) && ( owner != CORE_ID_APP ) )
#else
if (owner != portMUX_FREE_VAL && owner != CORE_ID_REGVAL_PRO && owner != CORE_ID_REGVAL_APP)
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_REGVAL_PRO ) && ( owner != CORE_ID_REGVAL_APP ) )
#endif
{
ets_printf( "ERROR: vPortCPUAcquireMutex: mux %p is uninitialized (0x%X)! Called from %s line %d.\n", mux, owner, fnName, line );
mux->owner = portMUX_FREE_VAL;
}
#endif
#endif /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */
/* Spin until we own the core */
@@ -71,10 +71,10 @@ static inline bool __attribute__( ( always_inline ) )
/* Note: coreID is the full 32 bit core ID (CORE_ID_PRO/CORE_ID_APP),
* not the 0/1 value returned by xPortGetCoreID()
*/
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
otherCoreID = CORE_ID_XOR_SWAP ^ coreID;
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
otherCoreID = CORE_ID_XOR_SWAP ^ coreID;
#else
otherCoreID = CORE_ID_REGVAL_XOR_SWAP ^ coreID;
otherCoreID = CORE_ID_REGVAL_XOR_SWAP ^ coreID;
#endif
do
@@ -154,10 +154,10 @@ static inline bool __attribute__( ( always_inline ) )
mux->lastLockedLine = line;
uint32_t owner = mux->owner;
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_PRO ) && ( owner != CORE_ID_APP ) )
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_PRO ) && ( owner != CORE_ID_APP ) )
#else
if (owner != portMUX_FREE_VAL && owner != CORE_ID_REGVAL_PRO && owner != CORE_ID_REGVAL_APP)
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_REGVAL_PRO ) && ( owner != CORE_ID_REGVAL_APP ) )
#endif
{
ets_printf( "ERROR: vPortCPUReleaseMutex: mux %p is invalid (0x%x)!\n", mux, mux->owner );

View file

@@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*
@@ -39,37 +40,37 @@
#ifdef XT_BOARD
#include "xtensa/xtbsp.h"
#include "xtensa/xtbsp.h"
#endif
#include "xtensa_rtos.h"
#include "sdkconfig.h"
#include "esp_idf_version.h"
#if (ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0))
#include "esp_clk.h"
#if ( ESP_IDF_VERSION < ESP_IDF_VERSION_VAL( 4, 2, 0 ) )
#include "esp_clk.h"
#else
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/clk.h"
#endif
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/clk.h"
#endif
#endif /* ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 2, 0) */
#ifdef XT_RTOS_TIMER_INT
unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */
unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */
void _xt_tick_divisor_init(void)
{
_xt_tick_divisor = esp_clk_cpu_freq() / XT_TICK_PER_SEC;
}
void _xt_tick_divisor_init( void )
{
_xt_tick_divisor = esp_clk_cpu_freq() / XT_TICK_PER_SEC;
}
/* Deprecated, to be removed */
int xt_clock_freq(void)
{
return esp_clk_cpu_freq();
}
int xt_clock_freq( void )
{
return esp_clk_cpu_freq();
}
#endif /* XT_RTOS_TIMER_INT */

View file

@@ -5,6 +5,7 @@
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
*

View file

@@ -37,34 +37,39 @@
/* Defined in portasm.h */
extern void _frxt_tick_timer_init(void);
extern void _frxt_tick_timer_init( void );
/* Defined in xtensa_context.S */
extern void _xt_coproc_init(void);
extern void _xt_coproc_init( void );
/*-----------------------------------------------------------*/
/* We require the address of the pxCurrentTCB variable, but don't want to know
any details of its type. */
* any details of its type. */
typedef void TCB_t;
extern volatile TCB_t * volatile pxCurrentTCB;
unsigned port_xSchedulerRunning = 0; // Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting
unsigned port_interruptNesting = 0; // Interrupt nesting level
unsigned port_xSchedulerRunning = 0; /* Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting */
unsigned port_interruptNesting = 0; /* Interrupt nesting level */
/*-----------------------------------------------------------*/
// User exception dispatcher when exiting
void _xt_user_exit(void);
/* User exception dispatcher when exiting */
void _xt_user_exit( void );
/*
* Stack initialization
*/
#if portUSING_MPU_WRAPPERS
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged )
#else
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
#endif
{
StackType_t * sp;
@@ -129,18 +134,18 @@ StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t px
void vPortEndScheduler( void )
{
/* It is unlikely that the Xtensa port will get stopped. If required simply
disable the tick interrupt here. */
* disable the tick interrupt here. */
}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void )
{
// Interrupts are disabled at this point and stack contains PS with enabled interrupts when task context is restored
/* Interrupts are disabled at this point and stack contains PS with enabled interrupts when task context is restored */
#if XCHAL_CP_NUM > 0
/* Initialize co-processor management for tasks. Leave CPENABLE alone. */
_xt_coproc_init();
/* Initialize co-processor management for tasks. Leave CPENABLE alone. */
_xt_coproc_init();
#endif
/* Init the tick divisor value */
@@ -150,14 +155,14 @@ BaseType_t xPortStartScheduler( void )
_frxt_tick_timer_init();
#if XT_USE_THREAD_SAFE_CLIB
// Init C library
vPortClibInit();
/* Init C library */
vPortClibInit();
#endif
port_xSchedulerRunning = 1;
// Cannot be directly called from C; never returns
__asm__ volatile ("call0 _frxt_dispatch\n");
/* Cannot be directly called from C; never returns */
__asm__ volatile ( "call0 _frxt_dispatch\n" );
/* Should not get here. */
return pdTRUE;
@@ -190,19 +195,19 @@ BaseType_t xPortSysTickHandler( void )
* Used to set coprocessor area in stack. Current hack is to reuse MPU pointer for coprocessor area.
*/
#if portUSING_MPU_WRAPPERS
void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
const struct xMEMORY_REGION * const xRegions,
StackType_t * pxBottomOfStack,
uint32_t ulStackDepth )
{
#if XCHAL_CP_NUM > 0
xMPUSettings->coproc_area = ( StackType_t * ) ( ( uint32_t ) ( pxBottomOfStack + ulStackDepth - 1 ));
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) xMPUSettings->coproc_area ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( uint32_t ) xMPUSettings->coproc_area - XT_CP_SIZE ) & ~0xf );
void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
const struct xMEMORY_REGION * const xRegions,
StackType_t * pxBottomOfStack,
uint32_t ulStackDepth )
{
#if XCHAL_CP_NUM > 0
xMPUSettings->coproc_area = ( StackType_t * ) ( ( uint32_t ) ( pxBottomOfStack + ulStackDepth - 1 ) );
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) xMPUSettings->coproc_area ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( uint32_t ) xMPUSettings->coproc_area - XT_CP_SIZE ) & ~0xf );
/* NOTE: we cannot initialize the coprocessor save area here because FreeRTOS is going to
* clear the stack area after we return. This is done in pxPortInitialiseStack().
*/
#endif
}
/* NOTE: we cannot initialize the coprocessor save area here because FreeRTOS is going to
* clear the stack area after we return. This is done in pxPortInitialiseStack().
*/
#endif
}
#endif /* if portUSING_MPU_WRAPPERS */

View file

@@ -42,7 +42,7 @@
#endif
#define portbenchmarkINTERRUPT_DISABLE()
#define portbenchmarkINTERRUPT_RESTORE(newstate)
#define portbenchmarkINTERRUPT_RESTORE( newstate )
#define portbenchmarkIntLatency()
#define portbenchmarkIntWait()
#define portbenchmarkReset()

View file

@@ -31,200 +31,204 @@
#if XT_USE_THREAD_SAFE_CLIB
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
#include <errno.h>
#include <sys/reent.h>
#include <errno.h>
#include <sys/reent.h>
#include "semphr.h"
#include "semphr.h"
typedef SemaphoreHandle_t _Rmtx;
typedef SemaphoreHandle_t _Rmtx;
//-----------------------------------------------------------------------------
// Override this and set to nonzero to enable locking.
//-----------------------------------------------------------------------------
int32_t _xclib_use_mt = 1;
/*----------------------------------------------------------------------------- */
/* Override this and set to nonzero to enable locking. */
/*----------------------------------------------------------------------------- */
int32_t _xclib_use_mt = 1;
//-----------------------------------------------------------------------------
// Init lock.
//-----------------------------------------------------------------------------
void
_Mtxinit(_Rmtx * mtx)
{
*mtx = xSemaphoreCreateRecursiveMutex();
}
/*----------------------------------------------------------------------------- */
/* Init lock. */
/*----------------------------------------------------------------------------- */
void _Mtxinit( _Rmtx * mtx )
{
*mtx = xSemaphoreCreateRecursiveMutex();
}
//-----------------------------------------------------------------------------
// Destroy lock.
//-----------------------------------------------------------------------------
void
_Mtxdst(_Rmtx * mtx)
{
if ((mtx != NULL) && (*mtx != NULL)) {
vSemaphoreDelete(*mtx);
}
}
/*----------------------------------------------------------------------------- */
/* Destroy lock. */
/*----------------------------------------------------------------------------- */
void _Mtxdst( _Rmtx * mtx )
{
if( ( mtx != NULL ) && ( *mtx != NULL ) )
{
vSemaphoreDelete( *mtx );
}
}
//-----------------------------------------------------------------------------
// Lock.
//-----------------------------------------------------------------------------
void
_Mtxlock(_Rmtx * mtx)
{
if ((mtx != NULL) && (*mtx != NULL)) {
xSemaphoreTakeRecursive(*mtx, portMAX_DELAY);
}
}
/*----------------------------------------------------------------------------- */
/* Lock. */
/*----------------------------------------------------------------------------- */
void _Mtxlock( _Rmtx * mtx )
{
if( ( mtx != NULL ) && ( *mtx != NULL ) )
{
xSemaphoreTakeRecursive( *mtx, portMAX_DELAY );
}
}
//-----------------------------------------------------------------------------
// Unlock.
//-----------------------------------------------------------------------------
void
_Mtxunlock(_Rmtx * mtx)
{
if ((mtx != NULL) && (*mtx != NULL)) {
xSemaphoreGiveRecursive(*mtx);
}
}
/*----------------------------------------------------------------------------- */
/* Unlock. */
/*----------------------------------------------------------------------------- */
void _Mtxunlock( _Rmtx * mtx )
{
if( ( mtx != NULL ) && ( *mtx != NULL ) )
{
xSemaphoreGiveRecursive( *mtx );
}
}
//-----------------------------------------------------------------------------
// Called by malloc() to allocate blocks of memory from the heap.
//-----------------------------------------------------------------------------
void *
_sbrk_r (struct _reent * reent, int32_t incr)
{
extern char _end;
extern char _heap_sentry;
static char * _heap_sentry_ptr = &_heap_sentry;
static char * heap_ptr;
char * base;
/*----------------------------------------------------------------------------- */
/* Called by malloc() to allocate blocks of memory from the heap. */
/*----------------------------------------------------------------------------- */
void * _sbrk_r( struct _reent * reent,
int32_t incr )
{
extern char _end;
extern char _heap_sentry;
static char * _heap_sentry_ptr = &_heap_sentry;
static char * heap_ptr;
char * base;
if (!heap_ptr)
heap_ptr = (char *) &_end;
if( !heap_ptr )
{
heap_ptr = ( char * ) &_end;
}
base = heap_ptr;
if (heap_ptr + incr >= _heap_sentry_ptr) {
reent->_errno = ENOMEM;
return (char *) -1;
}
base = heap_ptr;
heap_ptr += incr;
return base;
}
if( heap_ptr + incr >= _heap_sentry_ptr )
{
reent->_errno = ENOMEM;
return ( char * ) -1;
}
//-----------------------------------------------------------------------------
// Global initialization for C library.
//-----------------------------------------------------------------------------
void
vPortClibInit(void)
{
}
heap_ptr += incr;
return base;
}
//-----------------------------------------------------------------------------
// Per-thread cleanup stub provided for linking, does nothing.
//-----------------------------------------------------------------------------
void
_reclaim_reent(void * ptr)
{
}
/*----------------------------------------------------------------------------- */
/* Global initialization for C library. */
/*----------------------------------------------------------------------------- */
void vPortClibInit( void )
{
}
#endif /* XSHAL_CLIB == XTHAL_CLIB_XCLIB */
/*----------------------------------------------------------------------------- */
/* Per-thread cleanup stub provided for linking, does nothing. */
/*----------------------------------------------------------------------------- */
void _reclaim_reent( void * ptr )
{
}
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#endif /* XSHAL_CLIB == XTHAL_CLIB_XCLIB */
#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#include "semphr.h"
#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static SemaphoreHandle_t xClibMutex;
static uint32_t ulClibInitDone = 0;
#include "semphr.h"
//-----------------------------------------------------------------------------
// Get C library lock.
//-----------------------------------------------------------------------------
void
__malloc_lock(struct _reent * ptr)
{
if (!ulClibInitDone)
return;
static SemaphoreHandle_t xClibMutex;
static uint32_t ulClibInitDone = 0;
xSemaphoreTakeRecursive(xClibMutex, portMAX_DELAY);
}
/*----------------------------------------------------------------------------- */
/* Get C library lock. */
/*----------------------------------------------------------------------------- */
void __malloc_lock( struct _reent * ptr )
{
if( !ulClibInitDone )
{
return;
}
//-----------------------------------------------------------------------------
// Release C library lock.
//-----------------------------------------------------------------------------
void
__malloc_unlock(struct _reent * ptr)
{
if (!ulClibInitDone)
return;
xSemaphoreTakeRecursive( xClibMutex, portMAX_DELAY );
}
xSemaphoreGiveRecursive(xClibMutex);
}
/*----------------------------------------------------------------------------- */
/* Release C library lock. */
/*----------------------------------------------------------------------------- */
void __malloc_unlock( struct _reent * ptr )
{
if( !ulClibInitDone )
{
return;
}
//-----------------------------------------------------------------------------
// Lock for environment. Since we have only one global lock we can just call
// the malloc() lock function.
//-----------------------------------------------------------------------------
void
__env_lock(struct _reent * ptr)
{
__malloc_lock(ptr);
}
xSemaphoreGiveRecursive( xClibMutex );
}
/*----------------------------------------------------------------------------- */
/* Lock for environment. Since we have only one global lock we can just call */
/* the malloc() lock function. */
/*----------------------------------------------------------------------------- */
void __env_lock( struct _reent * ptr )
{
__malloc_lock( ptr );
}
//-----------------------------------------------------------------------------
// Unlock environment.
//-----------------------------------------------------------------------------
void
__env_unlock(struct _reent * ptr)
{
__malloc_unlock(ptr);
}
/*----------------------------------------------------------------------------- */
/* Unlock environment. */
/*----------------------------------------------------------------------------- */
void __env_unlock( struct _reent * ptr )
{
__malloc_unlock( ptr );
}
//-----------------------------------------------------------------------------
// Called by malloc() to allocate blocks of memory from the heap.
//-----------------------------------------------------------------------------
void *
_sbrk_r (struct _reent * reent, int32_t incr)
{
extern char _end;
extern char _heap_sentry;
static char * _heap_sentry_ptr = &_heap_sentry;
static char * heap_ptr;
char * base;
/*----------------------------------------------------------------------------- */
/* Called by malloc() to allocate blocks of memory from the heap. */
/*----------------------------------------------------------------------------- */
void * _sbrk_r( struct _reent * reent,
int32_t incr )
{
extern char _end;
extern char _heap_sentry;
static char * _heap_sentry_ptr = &_heap_sentry;
static char * heap_ptr;
char * base;
if (!heap_ptr)
heap_ptr = (char *) &_end;
if( !heap_ptr )
{
heap_ptr = ( char * ) &_end;
}
base = heap_ptr;
if (heap_ptr + incr >= _heap_sentry_ptr) {
reent->_errno = ENOMEM;
return (char *) -1;
}
base = heap_ptr;
heap_ptr += incr;
return base;
}
if( heap_ptr + incr >= _heap_sentry_ptr )
{
reent->_errno = ENOMEM;
return ( char * ) -1;
}
//-----------------------------------------------------------------------------
// Global initialization for C library.
//-----------------------------------------------------------------------------
void
vPortClibInit(void)
{
configASSERT(!ulClibInitDone);
heap_ptr += incr;
return base;
}
xClibMutex = xSemaphoreCreateRecursiveMutex();
ulClibInitDone = 1;
}
/*----------------------------------------------------------------------------- */
/* Global initialization for C library. */
/*----------------------------------------------------------------------------- */
void vPortClibInit( void )
{
configASSERT( !ulClibInitDone );
#endif /* XSHAL_CLIB == XTHAL_CLIB_NEWLIB */
xClibMutex = xSemaphoreCreateRecursiveMutex();
ulClibInitDone = 1;
}
#endif /* XSHAL_CLIB == XTHAL_CLIB_NEWLIB */
#endif /* XT_USE_THREAD_SAFE_CLIB */

View file

@@ -38,15 +38,15 @@
#ifndef __ASSEMBLER__
#include <stdint.h>
#include <stdint.h>
#include <xtensa/tie/xt_core.h>
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include <xtensa/xtruntime.h>
#include <xtensa/tie/xt_core.h>
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include <xtensa/xtruntime.h>
//#include "xtensa_context.h"
/*#include "xtensa_context.h" */
/*-----------------------------------------------------------
* Port specific definitions.
@@ -60,149 +60,159 @@
/* Type definitions. */
#define portCHAR int8_t
#define portFLOAT float
#define portDOUBLE double
#define portLONG int32_t
#define portSHORT int16_t
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE int
#define portCHAR int8_t
#define portFLOAT float
#define portDOUBLE double
#define portLONG int32_t
#define portSHORT int16_t
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE int
typedef portSTACK_TYPE StackType_t;
typedef portBASE_TYPE BaseType_t;
typedef unsigned portBASE_TYPE UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef portBASE_TYPE BaseType_t;
typedef unsigned portBASE_TYPE UBaseType_t;
#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#else
#error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width.
#endif
/*-----------------------------------------------------------*/
// portbenchmark
#include "portbenchmark.h"
/* portbenchmark */
#include "portbenchmark.h"
/* Critical section management. NW-TODO: replace XTOS_SET_INTLEVEL with more efficient version, if any? */
// These cannot be nested. They should be used with a lot of care and cannot be called from interrupt level.
#define portDISABLE_INTERRUPTS() do { XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); portbenchmarkINTERRUPT_DISABLE(); } while (0)
#define portENABLE_INTERRUPTS() do { portbenchmarkINTERRUPT_RESTORE(0); XTOS_SET_INTLEVEL(0); } while (0)
/* These cannot be nested. They should be used with a lot of care and cannot be called from interrupt level. */
#define portDISABLE_INTERRUPTS() do { XTOS_SET_INTLEVEL( XCHAL_EXCM_LEVEL ); portbenchmarkINTERRUPT_DISABLE(); } while( 0 )
#define portENABLE_INTERRUPTS() do { portbenchmarkINTERRUPT_RESTORE( 0 ); XTOS_SET_INTLEVEL( 0 ); } while( 0 )
// These can be nested
#define portCRITICAL_NESTING_IN_TCB 1 // For now, let FreeRTOS' (tasks.c) manage critical nesting
void vTaskEnterCritical(void);
void vTaskExitCritical(void);
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
/* These can be nested */
#define portCRITICAL_NESTING_IN_TCB 1 /* For now, let FreeRTOS' (tasks.c) manage critical nesting */
void vTaskEnterCritical( void );
void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
// Cleaner and preferred solution allows nested interrupts disabling and restoring via local registers or stack.
// They can be called from interrupts too.
static inline unsigned portENTER_CRITICAL_NESTED() { unsigned state = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); portbenchmarkINTERRUPT_DISABLE(); return state; }
#define portEXIT_CRITICAL_NESTED(state) do { portbenchmarkINTERRUPT_RESTORE(state); XTOS_RESTORE_JUST_INTLEVEL(state); } while (0)
/* Cleaner and preferred solution allows nested interrupts disabling and restoring via local registers or stack. */
/* They can be called from interrupts too. */
static inline unsigned portENTER_CRITICAL_NESTED()
{
unsigned state = XTOS_SET_INTLEVEL( XCHAL_EXCM_LEVEL );
// These FreeRTOS versions are similar to the nested versions above
#define portSET_INTERRUPT_MASK_FROM_ISR() portENTER_CRITICAL_NESTED()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(state) portEXIT_CRITICAL_NESTED(state)
portbenchmarkINTERRUPT_DISABLE();
return state;
}
#define portEXIT_CRITICAL_NESTED( state ) do { portbenchmarkINTERRUPT_RESTORE( state ); XTOS_RESTORE_JUST_INTLEVEL( state ); } while( 0 )
/* These FreeRTOS versions are similar to the nested versions above */
#define portSET_INTERRUPT_MASK_FROM_ISR() portENTER_CRITICAL_NESTED()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( state ) portEXIT_CRITICAL_NESTED( state )
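A usage sketch of the nested save/restore pair defined above: the previous interrupt state is kept in a local and restored exactly, which is why these calls nest and may be used from ISRs, unlike the non-nesting enable/disable macros earlier. The function and the shared variable are invented for illustration, and the fragment assumes it is compiled in a source file that already includes this port header:

static volatile uint32_t ulSharedEventCount;   /* hypothetical data shared with an ISR */

static void prvBumpSharedEventCount( void )
{
    unsigned uxSaved = portSET_INTERRUPT_MASK_FROM_ISR();   /* remember the previous level */

    ulSharedEventCount++;                                    /* touch the shared data safely */

    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSaved );            /* restore exactly that level, so calls can nest */
}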
/*-----------------------------------------------------------*/
/* Architecture specifics. */
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 4
#define portNOP() XT_NOP()
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 4
#define portNOP() XT_NOP()
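portTICK_PERIOD_MS above is simply the millisecond length of one tick, so delays expressed in milliseconds divide by it. A small arithmetic example, with configTICK_RATE_HZ assumed to be 100 purely for illustration:

#include <stdio.h>

int main( void )
{
    unsigned long tick_rate_hz   = 100UL;                        /* assumed configTICK_RATE_HZ */
    unsigned long tick_period_ms = 1000UL / tick_rate_hz;        /* portTICK_PERIOD_MS: 10 ms per tick */
    unsigned long delay_ms       = 250UL;
    unsigned long delay_ticks    = delay_ms / tick_period_ms;    /* 25 ticks for a 250 ms delay */

    printf( "%lu ms -> %lu ticks\n", delay_ms, delay_ticks );
    return 0;
}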
/*-----------------------------------------------------------*/
/* Fine resolution time */
#define portGET_RUN_TIME_COUNTER_VALUE() xthal_get_ccount()
#define portGET_RUN_TIME_COUNTER_VALUE() xthal_get_ccount()
/* Kernel utilities. */
void vPortYield( void );
void _frxt_setup_switch( void );
#define portYIELD() vPortYield()
#define portYIELD_FROM_ISR( xHigherPriorityTaskWoken ) \
if ( ( xHigherPriorityTaskWoken ) != 0 ) { \
_frxt_setup_switch(); \
void vPortYield( void );
void _frxt_setup_switch( void );
#define portYIELD() vPortYield()
#define portYIELD_FROM_ISR( xHigherPriorityTaskWoken ) \
if( ( xHigherPriorityTaskWoken ) != 0 ) { \
_frxt_setup_switch(); \
}
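Typical use of portYIELD_FROM_ISR follows the generic FreeRTOS "FromISR" pattern rather than anything specific to this port. A hedged sketch, assuming a FreeRTOS build environment and a binary semaphore created during start-up (names are invented for illustration):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xExampleSemaphore;   /* assumed created with xSemaphoreCreateBinary() at start-up */

void vExampleIsr( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Wake the handler task; the flag is set if that task outranks the one that was interrupted. */
    xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

    /* Only request a context switch on exit if a higher priority task was actually woken. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}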
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
// When coprocessors are defined, we need to maintain a pointer to the coprocessor save area.
// We currently use a hack: redefine field xMPU_SETTINGS in TCB block as a structure that can hold:
// MPU wrappers, coprocessor area pointer, trace code structure, and more if needed.
// The field is normally used for memory protection. FreeRTOS should create another general purpose field.
typedef struct {
#if XCHAL_CP_NUM > 0
volatile StackType_t* coproc_area; // Pointer to coprocessor save area; MUST BE FIRST
#endif
#if portUSING_MPU_WRAPPERS
// Define here mpu_settings, which is port dependent
int mpu_setting; // Just a dummy example here; MPU not ported to Xtensa yet
/* When coprocessors are defined, we need to maintain a pointer to the coprocessor save area. */
/* We currently use a hack: redefine field xMPU_SETTINGS in TCB block as a structure that can hold: */
/* MPU wrappers, coprocessor area pointer, trace code structure, and more if needed. */
/* The field is normally used for memory protection. FreeRTOS should create another general purpose field. */
typedef struct
{
#if XCHAL_CP_NUM > 0
volatile StackType_t * coproc_area; /* Pointer to coprocessor save area; MUST BE FIRST */
#endif
#if portUSING_MPU_WRAPPERS
/* Define here mpu_settings, which is port dependent */
int mpu_setting; /* Just a dummy example here; MPU not ported to Xtensa yet */
#endif
#if configUSE_TRACE_FACILITY_2
struct
{
/* Cf. porttraceStamp() */
int taskstamp; /* Stamp from inside task to see where we are */
int taskstampcount; /* A counter usually incremented when we restart the task's loop */
} porttrace;
#endif
} xMPU_SETTINGS;
/* Main hack to use MPU_wrappers even when no MPU is defined (warning: mpu_setting should not be accessed; otherwise move this above xMPU_SETTINGS) */
#if ( XCHAL_CP_NUM > 0 || configUSE_TRACE_FACILITY_2 ) && !portUSING_MPU_WRAPPERS /* If MPU wrappers not used, we still need to allocate coproc area */
#undef portUSING_MPU_WRAPPERS
#define portUSING_MPU_WRAPPERS 1 /* Enable it to allocate coproc area */
#define MPU_WRAPPERS_H /* Override mpu_wrapper.h to disable unwanted code */
#define PRIVILEGED_FUNCTION
#define PRIVILEGED_DATA
#endif
/* porttrace */
#if configUSE_TRACE_FACILITY_2
struct {
// Cf. porttraceStamp()
int taskstamp; /* Stamp from inside task to see where we are */
int taskstampcount; /* A counter usually incremented when we restart the task's loop */
} porttrace;
#include "porttrace.h"
#endif
} xMPU_SETTINGS;
// Main hack to use MPU_wrappers even when no MPU is defined (warning: mpu_setting should not be accessed; otherwise move this above xMPU_SETTINGS)
#if (XCHAL_CP_NUM > 0 || configUSE_TRACE_FACILITY_2) && !portUSING_MPU_WRAPPERS // If MPU wrappers not used, we still need to allocate coproc area
#undef portUSING_MPU_WRAPPERS
#define portUSING_MPU_WRAPPERS 1 // Enable it to allocate coproc area
#define MPU_WRAPPERS_H // Override mpu_wrapper.h to disable unwanted code
#define PRIVILEGED_FUNCTION
#define PRIVILEGED_DATA
#endif
// porttrace
#if configUSE_TRACE_FACILITY_2
#include "porttrace.h"
#endif
// configASSERT_2 if requested
#if configASSERT_2
#include <stdio.h>
void exit(int);
#define configASSERT( x ) if (!(x)) { porttracePrint(-1); printf("\nAssertion failed in %s:%d\n", __FILE__, __LINE__); exit(-1); }
#endif
/* configASSERT_2 if requested */
#if configASSERT_2
#include <stdio.h>
void exit( int );
#define configASSERT( x ) if( !( x ) ) { porttracePrint( -1 ); printf( "\nAssertion failed in %s:%d\n", __FILE__, __LINE__ ); exit( -1 ); }
#endif
/* C library support -- only XCLIB and NEWLIB are supported. */
/* To enable thread-safe C library support, XT_USE_THREAD_SAFE_CLIB must be
defined to be > 0 somewhere above or on the command line. */
* defined to be > 0 somewhere above or on the command line. */
#if (XT_USE_THREAD_SAFE_CLIB > 0u) && (XSHAL_CLIB == XTHAL_CLIB_XCLIB)
extern void vPortClibInit(void);
#endif // XCLIB support
#if ( XT_USE_THREAD_SAFE_CLIB > 0u ) && ( XSHAL_CLIB == XTHAL_CLIB_XCLIB )
extern void vPortClibInit( void );
#endif // XCLIB support
#if (XT_USE_THREAD_SAFE_CLIB > 0u) && (XSHAL_CLIB == XTHAL_CLIB_NEWLIB)
extern void vPortClibInit(void);
#if ( XT_USE_THREAD_SAFE_CLIB > 0u ) && ( XSHAL_CLIB == XTHAL_CLIB_NEWLIB )
extern void vPortClibInit( void );
// This C library cleanup is not currently done by FreeRTOS when deleting a task
#include <stdio.h>
#define portCLEAN_UP_TCB(pxTCB) vPortCleanUpTcbClib(&((pxTCB)->xNewLib_reent))
static inline void vPortCleanUpTcbClib(struct _reent *ptr)
{
FILE * fp = &(ptr->__sf[0]);
int i;
for (i = 0; i < 3; ++i, ++fp) {
fp->_close = NULL;
}
}
#endif // NEWLIB support
/* This C library cleanup is not currently done by FreeRTOS when deleting a task */
#include <stdio.h>
#define portCLEAN_UP_TCB( pxTCB ) vPortCleanUpTcbClib( &( ( pxTCB )->xNewLib_reent ) )
static inline void vPortCleanUpTcbClib( struct _reent * ptr )
{
FILE * fp = &( ptr->__sf[ 0 ] );
int i;
for( i = 0; i < 3; ++i, ++fp )
{
fp->_close = NULL;
}
}
#endif // NEWLIB support
#endif // __ASSEMBLER__
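For reference, a hedged sketch of what the cleanup hook above amounts to when the kernel deletes a task (pxDeletedTCB is an illustrative name, not a symbol from this port): portCLEAN_UP_TCB() is expanded for the TCB being freed, so the _close handlers of the three standard newlib streams in that task's reentrancy structure are cleared before the TCB memory is released.

/* Illustrative expansion only -- pxDeletedTCB stands for the TCB being freed:   */
/*     portCLEAN_UP_TCB( pxDeletedTCB )                                           */
/*  -> vPortCleanUpTcbClib( &( pxDeletedTCB->xNewLib_reent ) );                   */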

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -43,7 +43,7 @@
#error "You need to download the FreeRTOS_trace patch that overwrites this file"
#endif
#define porttracePrint(nelements)
#define porttraceStamp(stamp, count_incr)
#define porttracePrint( nelements )
#define porttraceStamp( stamp, count_incr )
#endif /* PORTTRACE_H */

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -40,87 +40,90 @@
/* Typedef for C-callable interrupt handler function */
typedef void (*xt_handler)(void *);
typedef void (* xt_handler)( void * );
/* Typedef for C-callable exception handler function */
typedef void (*xt_exc_handler)(XtExcFrame *);
typedef void (* xt_exc_handler)( XtExcFrame * );
/*
-------------------------------------------------------------------------------
Call this function to set a handler for the specified exception.
n - Exception number (type)
f - Handler function address, NULL to uninstall handler.
The handler will be passed a pointer to the exception frame, which is created
on the stack of the thread that caused the exception.
If the handler returns, the thread context will be restored and the faulting
instruction will be retried. Any values in the exception frame that are
modified by the handler will be restored as part of the context. For details
of the exception frame structure see xtensa_context.h.
-------------------------------------------------------------------------------
*/
extern xt_exc_handler xt_set_exception_handler(int n, xt_exc_handler f);
* -------------------------------------------------------------------------------
* Call this function to set a handler for the specified exception.
*
* n - Exception number (type)
* f - Handler function address, NULL to uninstall handler.
*
* The handler will be passed a pointer to the exception frame, which is created
* on the stack of the thread that caused the exception.
*
* If the handler returns, the thread context will be restored and the faulting
* instruction will be retried. Any values in the exception frame that are
* modified by the handler will be restored as part of the context. For details
* of the exception frame structure see xtensa_context.h.
* -------------------------------------------------------------------------------
*/
extern xt_exc_handler xt_set_exception_handler( int n,
xt_exc_handler f );
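A minimal usage sketch, assuming a hypothetical handler that fixes up the saved context and lets the faulting instruction retry; EXCCAUSE_LOAD_STORE_ERROR and the 3-byte skip are used only as examples:

/* Hypothetical handler: adjust the saved frame, then return to retry/continue. */
static void prvLoadStoreErrorHandler( XtExcFrame * frame )
{
    frame->pc += 3; /* illustrative only: step past the (assumed 3-byte) faulting instruction */
}

static void prvInstallExampleHandler( void )
{
    /* Keep the previous handler so it could be restored later. */
    xt_exc_handler xOld = xt_set_exception_handler( EXCCAUSE_LOAD_STORE_ERROR,
                                                    prvLoadStoreErrorHandler );
    ( void ) xOld;
}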
/*
-------------------------------------------------------------------------------
Call this function to set a handler for the specified interrupt.
n - Interrupt number.
f - Handler function address, NULL to uninstall handler.
arg - Argument to be passed to handler.
-------------------------------------------------------------------------------
*/
extern xt_handler xt_set_interrupt_handler(int n, xt_handler f, void * arg);
* -------------------------------------------------------------------------------
* Call this function to set a handler for the specified interrupt.
*
* n - Interrupt number.
* f - Handler function address, NULL to uninstall handler.
* arg - Argument to be passed to handler.
* -------------------------------------------------------------------------------
*/
extern xt_handler xt_set_interrupt_handler( int n,
xt_handler f,
void * arg );
/*
-------------------------------------------------------------------------------
Call this function to enable the specified interrupts.
mask - Bit mask of interrupts to be enabled.
Returns the previous state of the interrupt enables.
-------------------------------------------------------------------------------
*/
extern unsigned int xt_ints_on(unsigned int mask);
* -------------------------------------------------------------------------------
* Call this function to enable the specified interrupts.
*
* mask - Bit mask of interrupts to be enabled.
*
* Returns the previous state of the interrupt enables.
* -------------------------------------------------------------------------------
*/
extern unsigned int xt_ints_on( unsigned int mask );
/*
-------------------------------------------------------------------------------
Call this function to disable the specified interrupts.
mask - Bit mask of interrupts to be disabled.
Returns the previous state of the interrupt enables.
-------------------------------------------------------------------------------
*/
extern unsigned int xt_ints_off(unsigned int mask);
* -------------------------------------------------------------------------------
* Call this function to disable the specified interrupts.
*
* mask - Bit mask of interrupts to be disabled.
*
* Returns the previous state of the interrupt enables.
* -------------------------------------------------------------------------------
*/
extern unsigned int xt_ints_off( unsigned int mask );
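A minimal usage sketch for the calls above; MY_DEVICE_INT_NUM and the handler are illustrative assumptions, not part of this port:

/* Hypothetical C-level interrupt handler; 'arg' is the pointer registered below. */
static void prvMyDeviceHandler( void * arg )
{
    /* Acknowledge the device, then notify a task, etc. */
}

static void prvInstallMyDeviceHandler( void * pvDevice )
{
    /* Register the handler for the (assumed) interrupt number... */
    xt_set_interrupt_handler( MY_DEVICE_INT_NUM, prvMyDeviceHandler, pvDevice );

    /* ...then enable just that interrupt. */
    xt_ints_on( 1u << MY_DEVICE_INT_NUM );
}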
/*
-------------------------------------------------------------------------------
Call this function to set the specified (s/w) interrupt.
-------------------------------------------------------------------------------
*/
static inline void xt_set_intset(unsigned int arg)
* -------------------------------------------------------------------------------
* Call this function to set the specified (s/w) interrupt.
* -------------------------------------------------------------------------------
*/
static inline void xt_set_intset( unsigned int arg )
{
xthal_set_intset(arg);
xthal_set_intset( arg );
}
/*
-------------------------------------------------------------------------------
Call this function to clear the specified (s/w or edge-triggered)
interrupt.
-------------------------------------------------------------------------------
*/
static inline void xt_set_intclear(unsigned int arg)
* -------------------------------------------------------------------------------
* Call this function to clear the specified (s/w or edge-triggered)
* interrupt.
* -------------------------------------------------------------------------------
*/
static inline void xt_set_intclear( unsigned int arg )
{
xthal_set_intclear(arg);
xthal_set_intclear( arg );
}

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -47,139 +47,139 @@
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include "xtensa_context.h"
/*-----------------------------------------------------------------------------
* STACK REQUIREMENTS
*
* This section defines the minimum stack size, and the extra space required to
* be allocated for saving coprocessor state and/or C library state information
* (if thread safety is enabled for the C library). The sizes are in bytes.
*
* Stack sizes for individual tasks should be derived from these minima based on
* the maximum call depth of the task and the maximum level of interrupt nesting.
* A minimum stack size is defined by XT_STACK_MIN_SIZE. This minimum is based
* on the requirement for a task that calls nothing else but can be interrupted.
* This assumes that interrupt handlers do not call more than a few levels deep.
* If this is not true, i.e. one or more interrupt handlers make deep calls then
* the minimum must be increased.
*
* If the Xtensa processor configuration includes coprocessors, then space is
* allocated to save the coprocessor state on the stack.
*
* If thread safety is enabled for the C runtime library, (XT_USE_THREAD_SAFE_CLIB
* is defined) then space is allocated to save the C library context in the TCB.
*
* Allocating insufficient stack space is a common source of hard-to-find errors.
* During development, it is best to enable the FreeRTOS stack checking features.
*
* Usage:
*
* XT_USE_THREAD_SAFE_CLIB -- Define this to a nonzero value to enable thread-safe
* use of the C library. This will require extra stack
* space to be allocated for tasks that use the C library
* reentrant functions. See below for more information.
*
* NOTE: The Xtensa toolchain supports multiple C libraries and not all of them
* support thread safety. Check your core configuration to see which C library
* was chosen for your system.
*
* XT_STACK_MIN_SIZE -- The minimum stack size for any task. It is recommended
* that you do not use a stack smaller than this for any
* task. In case you want to use stacks smaller than this
* size, you must verify that the smaller size(s) will work
* under all operating conditions.
*
* XT_STACK_EXTRA -- The amount of extra stack space to allocate for a task
* that does not make C library reentrant calls. Add this
* to the amount of stack space required by the task itself.
*
* XT_STACK_EXTRA_CLIB -- The amount of space to allocate for C library state.
*
-----------------------------------------------------------------------------*/
* STACK REQUIREMENTS
*
* This section defines the minimum stack size, and the extra space required to
* be allocated for saving coprocessor state and/or C library state information
* (if thread safety is enabled for the C library). The sizes are in bytes.
*
* Stack sizes for individual tasks should be derived from these minima based on
* the maximum call depth of the task and the maximum level of interrupt nesting.
* A minimum stack size is defined by XT_STACK_MIN_SIZE. This minimum is based
* on the requirement for a task that calls nothing else but can be interrupted.
* This assumes that interrupt handlers do not call more than a few levels deep.
* If this is not true, i.e. one or more interrupt handlers make deep calls then
* the minimum must be increased.
*
* If the Xtensa processor configuration includes coprocessors, then space is
* allocated to save the coprocessor state on the stack.
*
* If thread safety is enabled for the C runtime library, (XT_USE_THREAD_SAFE_CLIB
* is defined) then space is allocated to save the C library context in the TCB.
*
* Allocating insufficient stack space is a common source of hard-to-find errors.
* During development, it is best to enable the FreeRTOS stack checking features.
*
* Usage:
*
* XT_USE_THREAD_SAFE_CLIB -- Define this to a nonzero value to enable thread-safe
* use of the C library. This will require extra stack
* space to be allocated for tasks that use the C library
* reentrant functions. See below for more information.
*
* NOTE: The Xtensa toolchain supports multiple C libraries and not all of them
* support thread safety. Check your core configuration to see which C library
* was chosen for your system.
*
* XT_STACK_MIN_SIZE -- The minimum stack size for any task. It is recommended
* that you do not use a stack smaller than this for any
* task. In case you want to use stacks smaller than this
* size, you must verify that the smaller size(s) will work
* under all operating conditions.
*
* XT_STACK_EXTRA -- The amount of extra stack space to allocate for a task
* that does not make C library reentrant calls. Add this
* to the amount of stack space required by the task itself.
*
* XT_STACK_EXTRA_CLIB -- The amount of space to allocate for C library state.
*
* -----------------------------------------------------------------------------*/
/* Extra space required for interrupt/exception hooks. */
#ifdef XT_INTEXC_HOOKS
#ifdef __XTENSA_CALL0_ABI__
#define STK_INTEXC_EXTRA 0x200
#else
#define STK_INTEXC_EXTRA 0x180
#endif
#ifdef __XTENSA_CALL0_ABI__
#define STK_INTEXC_EXTRA 0x200
#else
#define STK_INTEXC_EXTRA 0x180
#endif
#else
#define STK_INTEXC_EXTRA 0
#define STK_INTEXC_EXTRA 0
#endif
/* Check C library thread safety support and compute size of C library save area.
For the supported libraries, we enable thread safety by default, and this can
be overridden from the compiler/make command line. */
#if (XSHAL_CLIB == XTHAL_CLIB_NEWLIB) || (XSHAL_CLIB == XTHAL_CLIB_XCLIB)
#ifndef XT_USE_THREAD_SAFE_CLIB
#define XT_USE_THREAD_SAFE_CLIB 1
#endif
* For the supported libraries, we enable thread safety by default, and this can
* be overridden from the compiler/make command line. */
#if ( XSHAL_CLIB == XTHAL_CLIB_NEWLIB ) || ( XSHAL_CLIB == XTHAL_CLIB_XCLIB )
#ifndef XT_USE_THREAD_SAFE_CLIB
#define XT_USE_THREAD_SAFE_CLIB 1
#endif
#else
#define XT_USE_THREAD_SAFE_CLIB 0
#define XT_USE_THREAD_SAFE_CLIB 0
#endif
#if XT_USE_THREAD_SAFE_CLIB > 0u
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
#define XT_HAVE_THREAD_SAFE_CLIB 1
#if !defined __ASSEMBLER__
#include <sys/reent.h>
#define XT_CLIB_CONTEXT_AREA_SIZE ((sizeof(struct _reent) + 15) + (-16))
#define XT_CLIB_GLOBAL_PTR _reent_ptr
#define _REENT_INIT_PTR _init_reent
#define _impure_ptr _reent_ptr
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
#define XT_HAVE_THREAD_SAFE_CLIB 1
#if !defined __ASSEMBLER__
#include <sys/reent.h>
#define XT_CLIB_CONTEXT_AREA_SIZE ( ( sizeof( struct _reent ) + 15 ) + ( -16 ) )
#define XT_CLIB_GLOBAL_PTR _reent_ptr
#define _REENT_INIT_PTR _init_reent
#define _impure_ptr _reent_ptr
void _reclaim_reent(void * ptr);
#endif
#elif XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#define XT_HAVE_THREAD_SAFE_CLIB 1
#if !defined __ASSEMBLER__
#include <sys/reent.h>
#define XT_CLIB_CONTEXT_AREA_SIZE ((sizeof(struct _reent) + 15) + (-16))
#define XT_CLIB_GLOBAL_PTR _impure_ptr
#endif
#else
#define XT_HAVE_THREAD_SAFE_CLIB 0
#error The selected C runtime library is not thread safe.
#endif
void _reclaim_reent( void * ptr );
#endif
#elif XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#define XT_HAVE_THREAD_SAFE_CLIB 1
#if !defined __ASSEMBLER__
#include <sys/reent.h>
#define XT_CLIB_CONTEXT_AREA_SIZE ( ( sizeof( struct _reent ) + 15 ) + ( -16 ) )
#define XT_CLIB_GLOBAL_PTR _impure_ptr
#endif
#else /* if XSHAL_CLIB == XTHAL_CLIB_XCLIB */
#define XT_HAVE_THREAD_SAFE_CLIB 0
#error The selected C runtime library is not thread safe.
#endif /* if XSHAL_CLIB == XTHAL_CLIB_XCLIB */
#else /* if XT_USE_THREAD_SAFE_CLIB > 0u */
#define XT_CLIB_CONTEXT_AREA_SIZE 0
#endif /* if XT_USE_THREAD_SAFE_CLIB > 0u */
/*------------------------------------------------------------------------------
* Extra size -- interrupt frame plus coprocessor save area plus hook space.
* NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the hooks.
* ------------------------------------------------------------------------------*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE )
#else
#define XT_CLIB_CONTEXT_AREA_SIZE 0
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE )
#endif
/*------------------------------------------------------------------------------
Extra size -- interrupt frame plus coprocessor save area plus hook space.
NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the hooks.
------------------------------------------------------------------------------*/
* Space allocated for user code -- function calls and local variables.
* NOTE: This number can be adjusted to suit your needs. You must verify that the
* amount of space you reserve is adequate for the worst-case conditions in your
* application.
* NOTE: The windowed ABI requires more stack, since space has to be reserved
* for spilling register windows.
* ------------------------------------------------------------------------------*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE)
#define XT_USER_SIZE 0x200
#else
#define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE)
#endif
/*------------------------------------------------------------------------------
Space allocated for user code -- function calls and local variables.
NOTE: This number can be adjusted to suit your needs. You must verify that the
amount of space you reserve is adequate for the worst-case conditions in your
application.
NOTE: The windowed ABI requires more stack, since space has to be reserved
for spilling register windows.
------------------------------------------------------------------------------*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_USER_SIZE 0x200
#else
#define XT_USER_SIZE 0x400
#define XT_USER_SIZE 0x400
#endif
/* Minimum recommended stack size. */
#define XT_STACK_MIN_SIZE ((XT_XTRA_SIZE + XT_USER_SIZE) / sizeof(unsigned char))
#define XT_STACK_MIN_SIZE ( ( XT_XTRA_SIZE + XT_USER_SIZE ) / sizeof( unsigned char ) )
/* OS overhead with and without C library thread context. */
#define XT_STACK_EXTRA (XT_XTRA_SIZE)
#define XT_STACK_EXTRA_CLIB (XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE)
#define XT_STACK_EXTRA ( XT_XTRA_SIZE )
#define XT_STACK_EXTRA_CLIB ( XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE )
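As an illustration of how these macros combine (the 0x400 bytes of task-local usage is an assumed figure, not a recommendation): a task that avoids C library reentrant calls adds XT_STACK_EXTRA to its own requirement, a task that uses the reentrant C library adds XT_STACK_EXTRA_CLIB instead, and neither total should fall below XT_STACK_MIN_SIZE.

/* Hypothetical sizing, assuming the task's own calls/locals need about 0x400 bytes. */
#define myTASK_OWN_USAGE_BYTES     0x400
#define myTASK_STACK_BYTES         ( myTASK_OWN_USAGE_BYTES + XT_STACK_EXTRA )      /* no reentrant C lib calls */
#define myCLIB_TASK_STACK_BYTES    ( myTASK_OWN_USAGE_BYTES + XT_STACK_EXTRA_CLIB ) /* uses printf() etc. */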
/* *INDENT-OFF* */

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -44,7 +44,7 @@
#define XTENSA_CONTEXT_H
#ifdef __ASSEMBLER__
#include <xtensa/coreasm.h>
#include <xtensa/coreasm.h>
#endif
#include <xtensa/config/tie.h>
@ -53,303 +53,307 @@
/* Align a value up to nearest n-byte boundary, where n is a power of 2. */
#define ALIGNUP(n, val) (((val) + (n)-1) & -(n))
#define ALIGNUP( n, val ) ( ( ( val ) + ( n ) - 1 ) & -( n ) )
/*
-------------------------------------------------------------------------------
Macros that help define structures for both C and assembler.
-------------------------------------------------------------------------------
*/
#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
* -------------------------------------------------------------------------------
* Macros that help define structures for both C and assembler.
* -------------------------------------------------------------------------------
*/
#if defined( _ASMLANGUAGE ) || defined( __ASSEMBLER__ )
#define STRUCT_BEGIN .pushsection .text; .struct 0
#define STRUCT_FIELD(ctype,size,asname,name) asname: .space size
#define STRUCT_AFIELD(ctype,size,asname,name,n) asname: .space (size)*(n)
#define STRUCT_END(sname) sname##Size:; .popsection
#define STRUCT_BEGIN .pushsection .text; .struct 0
#define STRUCT_FIELD( ctype, size, asname, name ) asname: .space size
#define STRUCT_AFIELD( ctype, size, asname, name, n ) asname: .space ( size ) * ( n )
#define STRUCT_END( sname ) sname ## Size:; .popsection
#else
#define STRUCT_BEGIN typedef struct {
#define STRUCT_FIELD(ctype,size,asname,name) ctype name;
#define STRUCT_AFIELD(ctype,size,asname,name,n) ctype name[n];
#define STRUCT_END(sname) } sname;
#define STRUCT_BEGIN typedef struct {
#define STRUCT_FIELD( ctype, size, asname, name ) ctype name;
#define STRUCT_AFIELD( ctype, size, asname, name, n ) ctype name[ n ];
#define STRUCT_END( sname ) \
} \
sname;
#endif //_ASMLANGUAGE || __ASSEMBLER__
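To make the dual expansion concrete, a hedged sketch of what a small two-field frame would produce in each mode (ExampleFrame and its fields are illustrative):

/* Illustrative input:
 *     STRUCT_BEGIN
 *     STRUCT_FIELD( long, 4, XT_EX_PC, pc )
 *     STRUCT_FIELD( long, 4, XT_EX_PS, ps )
 *     STRUCT_END( ExampleFrame )
 *
 * Compiled as C this becomes:   typedef struct { long pc; long ps; } ExampleFrame;
 * Assembled, the fields become labelled .space directives in a scratch section
 * and ExampleFrameSize marks the total size, so C and assembly share a single
 * definition of the frame layout and its offsets. */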
/*
-------------------------------------------------------------------------------
INTERRUPT/EXCEPTION STACK FRAME FOR A THREAD OR NESTED INTERRUPT
* -------------------------------------------------------------------------------
* INTERRUPT/EXCEPTION STACK FRAME FOR A THREAD OR NESTED INTERRUPT
*
* A stack frame of this structure is allocated for any interrupt or exception.
* It goes on the current stack. If the RTOS has a system stack for handling
* interrupts, every thread stack must allow space for just one interrupt stack
* frame, then nested interrupt stack frames go on the system stack.
*
* The frame includes basic registers (explicit) and "extra" registers introduced
* by user TIE or the use of the MAC16 option in the user's Xtensa config.
* The frame size is minimized by omitting regs not applicable to user's config.
*
* For Windowed ABI, this stack frame includes the interruptee's base save area,
* another base save area to manage gcc nested functions, and a little temporary
* space to help manage the spilling of the register windows.
* -------------------------------------------------------------------------------
*/
A stack frame of this structure is allocated for any interrupt or exception.
It goes on the current stack. If the RTOS has a system stack for handling
interrupts, every thread stack must allow space for just one interrupt stack
frame, then nested interrupt stack frames go on the system stack.
The frame includes basic registers (explicit) and "extra" registers introduced
by user TIE or the use of the MAC16 option in the user's Xtensa config.
The frame size is minimized by omitting regs not applicable to user's config.
For Windowed ABI, this stack frame includes the interruptee's base save area,
another base save area to manage gcc nested functions, and a little temporary
space to help manage the spilling of the register windows.
-------------------------------------------------------------------------------
*/
STRUCT_BEGIN
STRUCT_FIELD (long, 4, XT_STK_EXIT, exit) /* exit point for dispatch */
STRUCT_FIELD (long, 4, XT_STK_PC, pc) /* return PC */
STRUCT_FIELD (long, 4, XT_STK_PS, ps) /* return PS */
STRUCT_FIELD (long, 4, XT_STK_A0, a0)
STRUCT_FIELD (long, 4, XT_STK_A1, a1) /* stack pointer before interrupt */
STRUCT_FIELD (long, 4, XT_STK_A2, a2)
STRUCT_FIELD (long, 4, XT_STK_A3, a3)
STRUCT_FIELD (long, 4, XT_STK_A4, a4)
STRUCT_FIELD (long, 4, XT_STK_A5, a5)
STRUCT_FIELD (long, 4, XT_STK_A6, a6)
STRUCT_FIELD (long, 4, XT_STK_A7, a7)
STRUCT_FIELD (long, 4, XT_STK_A8, a8)
STRUCT_FIELD (long, 4, XT_STK_A9, a9)
STRUCT_FIELD (long, 4, XT_STK_A10, a10)
STRUCT_FIELD (long, 4, XT_STK_A11, a11)
STRUCT_FIELD (long, 4, XT_STK_A12, a12)
STRUCT_FIELD (long, 4, XT_STK_A13, a13)
STRUCT_FIELD (long, 4, XT_STK_A14, a14)
STRUCT_FIELD (long, 4, XT_STK_A15, a15)
STRUCT_FIELD (long, 4, XT_STK_SAR, sar)
STRUCT_FIELD (long, 4, XT_STK_EXCCAUSE, exccause)
STRUCT_FIELD (long, 4, XT_STK_EXCVADDR, excvaddr)
STRUCT_BEGIN STRUCT_FIELD( long,
4,
XT_STK_EXIT,
exit ) /* exit point for dispatch */
STRUCT_FIELD( long, 4, XT_STK_PC, pc ) /* return PC */
STRUCT_FIELD( long, 4, XT_STK_PS, ps ) /* return PS */
STRUCT_FIELD( long, 4, XT_STK_A0, a0 )
STRUCT_FIELD( long, 4, XT_STK_A1, a1 ) /* stack pointer before interrupt */
STRUCT_FIELD( long, 4, XT_STK_A2, a2 )
STRUCT_FIELD( long, 4, XT_STK_A3, a3 )
STRUCT_FIELD( long, 4, XT_STK_A4, a4 )
STRUCT_FIELD( long, 4, XT_STK_A5, a5 )
STRUCT_FIELD( long, 4, XT_STK_A6, a6 )
STRUCT_FIELD( long, 4, XT_STK_A7, a7 )
STRUCT_FIELD( long, 4, XT_STK_A8, a8 )
STRUCT_FIELD( long, 4, XT_STK_A9, a9 )
STRUCT_FIELD( long, 4, XT_STK_A10, a10 )
STRUCT_FIELD( long, 4, XT_STK_A11, a11 )
STRUCT_FIELD( long, 4, XT_STK_A12, a12 )
STRUCT_FIELD( long, 4, XT_STK_A13, a13 )
STRUCT_FIELD( long, 4, XT_STK_A14, a14 )
STRUCT_FIELD( long, 4, XT_STK_A15, a15 )
STRUCT_FIELD( long, 4, XT_STK_SAR, sar )
STRUCT_FIELD( long, 4, XT_STK_EXCCAUSE, exccause )
STRUCT_FIELD( long, 4, XT_STK_EXCVADDR, excvaddr )
#if XCHAL_HAVE_LOOPS
STRUCT_FIELD (long, 4, XT_STK_LBEG, lbeg)
STRUCT_FIELD (long, 4, XT_STK_LEND, lend)
STRUCT_FIELD (long, 4, XT_STK_LCOUNT, lcount)
STRUCT_FIELD( long, 4, XT_STK_LBEG, lbeg )
STRUCT_FIELD( long, 4, XT_STK_LEND, lend )
STRUCT_FIELD( long, 4, XT_STK_LCOUNT, lcount )
#endif
#ifndef __XTENSA_CALL0_ABI__
/* Temporary space for saving stuff during window spill */
STRUCT_FIELD (long, 4, XT_STK_TMP0, tmp0)
STRUCT_FIELD (long, 4, XT_STK_TMP1, tmp1)
STRUCT_FIELD (long, 4, XT_STK_TMP2, tmp2)
STRUCT_FIELD( long, 4, XT_STK_TMP0, tmp0 )
STRUCT_FIELD( long, 4, XT_STK_TMP1, tmp1 )
STRUCT_FIELD( long, 4, XT_STK_TMP2, tmp2 )
#endif
#ifdef XT_USE_SWPRI
/* Storage for virtual priority mask */
STRUCT_FIELD (long, 4, XT_STK_VPRI, vpri)
STRUCT_FIELD( long, 4, XT_STK_VPRI, vpri )
#endif
#ifdef XT_USE_OVLY
/* Storage for overlay state */
STRUCT_FIELD (long, 4, XT_STK_OVLY, ovly)
STRUCT_FIELD( long, 4, XT_STK_OVLY, ovly )
#endif
STRUCT_END(XtExcFrame)
STRUCT_END( XtExcFrame )
#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
#define XT_STK_NEXT1 XtExcFrameSize
#if defined( _ASMLANGUAGE ) || defined( __ASSEMBLER__ )
#define XT_STK_NEXT1 XtExcFrameSize
#else
#define XT_STK_NEXT1 sizeof(XtExcFrame)
#define XT_STK_NEXT1 sizeof( XtExcFrame )
#endif
/* Allocate extra storage if needed */
#if XCHAL_EXTRA_SA_SIZE != 0
#if XCHAL_EXTRA_SA_ALIGN <= 16
#define XT_STK_EXTRA ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1)
#else
#if XCHAL_EXTRA_SA_ALIGN <= 16
#define XT_STK_EXTRA ALIGNUP( XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1 )
#else
/* If need more alignment than stack, add space for dynamic alignment */
#define XT_STK_EXTRA (ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1) + XCHAL_EXTRA_SA_ALIGN)
#endif
#define XT_STK_NEXT2 (XT_STK_EXTRA + XCHAL_EXTRA_SA_SIZE)
#define XT_STK_EXTRA ( ALIGNUP( XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1 ) + XCHAL_EXTRA_SA_ALIGN )
#endif
#define XT_STK_NEXT2 ( XT_STK_EXTRA + XCHAL_EXTRA_SA_SIZE )
#else
#define XT_STK_NEXT2 XT_STK_NEXT1
#define XT_STK_NEXT2 XT_STK_NEXT1
#endif
#endif /* if XCHAL_EXTRA_SA_SIZE != 0 */
/*
-------------------------------------------------------------------------------
This is the frame size. Add space for 4 registers (interruptee's base save
area) and some space for gcc nested functions if any.
-------------------------------------------------------------------------------
*/
#define XT_STK_FRMSZ (ALIGNUP(0x10, XT_STK_NEXT2) + 0x20)
* -------------------------------------------------------------------------------
* This is the frame size. Add space for 4 registers (interruptee's base save
* area) and some space for gcc nested functions if any.
* -------------------------------------------------------------------------------
*/
#define XT_STK_FRMSZ ( ALIGNUP( 0x10, XT_STK_NEXT2 ) + 0x20 )
/*
-------------------------------------------------------------------------------
SOLICITED STACK FRAME FOR A THREAD
A stack frame of this structure is allocated whenever a thread enters the
RTOS kernel intentionally (and synchronously) to submit to thread scheduling.
It goes on the current thread's stack.
The solicited frame only includes registers that are required to be preserved
by the callee according to the compiler's ABI conventions, some space to save
the return address for returning to the caller, and the caller's PS register.
For Windowed ABI, this stack frame includes the caller's base save area.
Note on XT_SOL_EXIT field:
It is necessary to distinguish a solicited from an interrupt stack frame.
This field corresponds to XT_STK_EXIT in the interrupt stack frame and is
always at the same offset (0). It can be written with a code (usually 0)
to distinguish a solicited frame from an interrupt frame. An RTOS port may
opt to ignore this field if it has another way of distinguishing frames.
-------------------------------------------------------------------------------
*/
* -------------------------------------------------------------------------------
* SOLICITED STACK FRAME FOR A THREAD
*
* A stack frame of this structure is allocated whenever a thread enters the
* RTOS kernel intentionally (and synchronously) to submit to thread scheduling.
* It goes on the current thread's stack.
*
* The solicited frame only includes registers that are required to be preserved
* by the callee according to the compiler's ABI conventions, some space to save
* the return address for returning to the caller, and the caller's PS register.
*
* For Windowed ABI, this stack frame includes the caller's base save area.
*
* Note on XT_SOL_EXIT field:
* It is necessary to distinguish a solicited from an interrupt stack frame.
* This field corresponds to XT_STK_EXIT in the interrupt stack frame and is
* always at the same offset (0). It can be written with a code (usually 0)
* to distinguish a solicited frame from an interrupt frame. An RTOS port may
* opt to ignore this field if it has another way of distinguishing frames.
* -------------------------------------------------------------------------------
*/
STRUCT_BEGIN
#ifdef __XTENSA_CALL0_ABI__
STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit)
STRUCT_FIELD (long, 4, XT_SOL_PC, pc)
STRUCT_FIELD (long, 4, XT_SOL_PS, ps)
STRUCT_FIELD (long, 4, XT_SOL_NEXT, next)
STRUCT_FIELD (long, 4, XT_SOL_A12, a12) /* should be on 16-byte alignment */
STRUCT_FIELD (long, 4, XT_SOL_A13, a13)
STRUCT_FIELD (long, 4, XT_SOL_A14, a14)
STRUCT_FIELD (long, 4, XT_SOL_A15, a15)
STRUCT_FIELD( long, 4, XT_SOL_EXIT, exit )
STRUCT_FIELD( long, 4, XT_SOL_PC, pc )
STRUCT_FIELD( long, 4, XT_SOL_PS, ps )
STRUCT_FIELD( long, 4, XT_SOL_NEXT, next )
STRUCT_FIELD( long, 4, XT_SOL_A12, a12 ) /* should be on 16-byte alignment */
STRUCT_FIELD( long, 4, XT_SOL_A13, a13 )
STRUCT_FIELD( long, 4, XT_SOL_A14, a14 )
STRUCT_FIELD( long, 4, XT_SOL_A15, a15 )
#else
STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit)
STRUCT_FIELD (long, 4, XT_SOL_PC, pc)
STRUCT_FIELD (long, 4, XT_SOL_PS, ps)
STRUCT_FIELD (long, 4, XT_SOL_NEXT, next)
STRUCT_FIELD (long, 4, XT_SOL_A0, a0) /* should be on 16-byte alignment */
STRUCT_FIELD (long, 4, XT_SOL_A1, a1)
STRUCT_FIELD (long, 4, XT_SOL_A2, a2)
STRUCT_FIELD (long, 4, XT_SOL_A3, a3)
#endif
STRUCT_END(XtSolFrame)
STRUCT_FIELD( long, 4, XT_SOL_EXIT, exit )
STRUCT_FIELD( long, 4, XT_SOL_PC, pc )
STRUCT_FIELD( long, 4, XT_SOL_PS, ps )
STRUCT_FIELD( long, 4, XT_SOL_NEXT, next )
STRUCT_FIELD( long, 4, XT_SOL_A0, a0 ) /* should be on 16-byte alignment */
STRUCT_FIELD( long, 4, XT_SOL_A1, a1 )
STRUCT_FIELD( long, 4, XT_SOL_A2, a2 )
STRUCT_FIELD( long, 4, XT_SOL_A3, a3 )
#endif /* ifdef __XTENSA_CALL0_ABI__ */
STRUCT_END( XtSolFrame )
/* Size of solicited stack frame */
#define XT_SOL_FRMSZ ALIGNUP(0x10, XtSolFrameSize)
#define XT_SOL_FRMSZ ALIGNUP( 0x10, XtSolFrameSize )
/*
-------------------------------------------------------------------------------
CO-PROCESSOR STATE SAVE AREA FOR A THREAD
The RTOS must provide an area per thread to save the state of co-processors
when that thread does not have control. Co-processors are context-switched
lazily (on demand) only when a new thread uses a co-processor instruction,
otherwise a thread retains ownership of the co-processor even when it loses
control of the processor. An Xtensa co-processor exception is triggered when
any co-processor instruction is executed by a thread that is not the owner,
and the context switch of that co-processor is then performed by the handler.
Ownership represents which thread's state is currently in the co-processor.
Co-processors may not be used by interrupt or exception handlers. If a
co-processor instruction is executed by an interrupt or exception handler,
the co-processor exception handler will trigger a kernel panic and freeze.
This restriction is introduced to reduce the overhead of saving and restoring
co-processor state (which can be quite large) and in particular remove that
overhead from interrupt handlers.
The co-processor state save area may be in any convenient per-thread location
such as in the thread control block or above the thread stack area. It need
not be in the interrupt stack frame since interrupts don't use co-processors.
Along with the save area for each co-processor, two bitmasks with flags per
co-processor (laid out as in the CPENABLE reg) help manage context-switching
co-processors as efficiently as possible:
XT_CPENABLE
The contents of a non-running thread's CPENABLE register.
It represents the co-processors owned (and whose state is still needed)
by the thread. When a thread is preempted, its CPENABLE is saved here.
When a thread solicits a context-switch, its CPENABLE is cleared - the
compiler has saved the (caller-saved) co-proc state if it needs to.
When a non-running thread loses ownership of a CP, its bit is cleared.
When a thread runs, its XT_CPENABLE is loaded into the CPENABLE reg.
Avoids co-processor exceptions when no change of ownership is needed.
XT_CPSTORED
A bitmask with the same layout as CPENABLE, a bit per co-processor.
Indicates whether the state of each co-processor is saved in the state
save area. When a thread enters the kernel, only the state of co-procs
still enabled in CPENABLE is saved. When the co-processor exception
handler assigns ownership of a co-processor to a thread, it restores
the saved state only if this bit is set, and clears this bit.
XT_CP_CS_ST
A bitmask with the same layout as CPENABLE, a bit per co-processor.
Indicates whether callee-saved state is saved in the state save area.
Callee-saved state is saved by itself on a solicited context switch,
and restored when needed by the coprocessor exception handler.
Unsolicited switches will cause the entire coprocessor to be saved
when necessary.
XT_CP_ASA
Pointer to the aligned save area. Allows it to be aligned more than
the overall save area (which might only be stack-aligned or TCB-aligned).
Especially relevant for Xtensa cores configured with a very large data
path that requires alignment greater than 16 bytes (ABI stack alignment).
-------------------------------------------------------------------------------
*/
* -------------------------------------------------------------------------------
* CO-PROCESSOR STATE SAVE AREA FOR A THREAD
*
* The RTOS must provide an area per thread to save the state of co-processors
* when that thread does not have control. Co-processors are context-switched
* lazily (on demand) only when a new thread uses a co-processor instruction,
* otherwise a thread retains ownership of the co-processor even when it loses
* control of the processor. An Xtensa co-processor exception is triggered when
* any co-processor instruction is executed by a thread that is not the owner,
* and the context switch of that co-processor is then performed by the handler.
* Ownership represents which thread's state is currently in the co-processor.
*
* Co-processors may not be used by interrupt or exception handlers. If a
* co-processor instruction is executed by an interrupt or exception handler,
* the co-processor exception handler will trigger a kernel panic and freeze.
* This restriction is introduced to reduce the overhead of saving and restoring
* co-processor state (which can be quite large) and in particular remove that
* overhead from interrupt handlers.
*
* The co-processor state save area may be in any convenient per-thread location
* such as in the thread control block or above the thread stack area. It need
* not be in the interrupt stack frame since interrupts don't use co-processors.
*
* Along with the save area for each co-processor, two bitmasks with flags per
* co-processor (laid out as in the CPENABLE reg) help manage context-switching
* co-processors as efficiently as possible:
*
* XT_CPENABLE
* The contents of a non-running thread's CPENABLE register.
* It represents the co-processors owned (and whose state is still needed)
* by the thread. When a thread is preempted, its CPENABLE is saved here.
* When a thread solicits a context-switch, its CPENABLE is cleared - the
* compiler has saved the (caller-saved) co-proc state if it needs to.
* When a non-running thread loses ownership of a CP, its bit is cleared.
* When a thread runs, its XT_CPENABLE is loaded into the CPENABLE reg.
* Avoids co-processor exceptions when no change of ownership is needed.
*
* XT_CPSTORED
* A bitmask with the same layout as CPENABLE, a bit per co-processor.
* Indicates whether the state of each co-processor is saved in the state
* save area. When a thread enters the kernel, only the state of co-procs
* still enabled in CPENABLE is saved. When the co-processor exception
* handler assigns ownership of a co-processor to a thread, it restores
* the saved state only if this bit is set, and clears this bit.
*
* XT_CP_CS_ST
* A bitmask with the same layout as CPENABLE, a bit per co-processor.
* Indicates whether callee-saved state is saved in the state save area.
* Callee-saved state is saved by itself on a solicited context switch,
* and restored when needed by the coprocessor exception handler.
* Unsolicited switches will cause the entire coprocessor to be saved
* when necessary.
*
* XT_CP_ASA
* Pointer to the aligned save area. Allows it to be aligned more than
* the overall save area (which might only be stack-aligned or TCB-aligned).
* Especially relevant for Xtensa cores configured with a very large data
* path that requires alignment greater than 16 bytes (ABI stack alignment).
* -------------------------------------------------------------------------------
*/
#if XCHAL_CP_NUM > 0
/* Offsets of each coprocessor save area within the 'aligned save area': */
#define XT_CP0_SA 0
#define XT_CP1_SA ALIGNUP(XCHAL_CP1_SA_ALIGN, XT_CP0_SA + XCHAL_CP0_SA_SIZE)
#define XT_CP2_SA ALIGNUP(XCHAL_CP2_SA_ALIGN, XT_CP1_SA + XCHAL_CP1_SA_SIZE)
#define XT_CP3_SA ALIGNUP(XCHAL_CP3_SA_ALIGN, XT_CP2_SA + XCHAL_CP2_SA_SIZE)
#define XT_CP4_SA ALIGNUP(XCHAL_CP4_SA_ALIGN, XT_CP3_SA + XCHAL_CP3_SA_SIZE)
#define XT_CP5_SA ALIGNUP(XCHAL_CP5_SA_ALIGN, XT_CP4_SA + XCHAL_CP4_SA_SIZE)
#define XT_CP6_SA ALIGNUP(XCHAL_CP6_SA_ALIGN, XT_CP5_SA + XCHAL_CP5_SA_SIZE)
#define XT_CP7_SA ALIGNUP(XCHAL_CP7_SA_ALIGN, XT_CP6_SA + XCHAL_CP6_SA_SIZE)
#define XT_CP_SA_SIZE ALIGNUP(16, XT_CP7_SA + XCHAL_CP7_SA_SIZE)
#define XT_CP0_SA 0
#define XT_CP1_SA ALIGNUP( XCHAL_CP1_SA_ALIGN, XT_CP0_SA + XCHAL_CP0_SA_SIZE )
#define XT_CP2_SA ALIGNUP( XCHAL_CP2_SA_ALIGN, XT_CP1_SA + XCHAL_CP1_SA_SIZE )
#define XT_CP3_SA ALIGNUP( XCHAL_CP3_SA_ALIGN, XT_CP2_SA + XCHAL_CP2_SA_SIZE )
#define XT_CP4_SA ALIGNUP( XCHAL_CP4_SA_ALIGN, XT_CP3_SA + XCHAL_CP3_SA_SIZE )
#define XT_CP5_SA ALIGNUP( XCHAL_CP5_SA_ALIGN, XT_CP4_SA + XCHAL_CP4_SA_SIZE )
#define XT_CP6_SA ALIGNUP( XCHAL_CP6_SA_ALIGN, XT_CP5_SA + XCHAL_CP5_SA_SIZE )
#define XT_CP7_SA ALIGNUP( XCHAL_CP7_SA_ALIGN, XT_CP6_SA + XCHAL_CP6_SA_SIZE )
#define XT_CP_SA_SIZE ALIGNUP( 16, XT_CP7_SA + XCHAL_CP7_SA_SIZE )
/* Offsets within the overall save area: */
#define XT_CPENABLE 0 /* (2 bytes) coprocessors active for this thread */
#define XT_CPSTORED 2 /* (2 bytes) coprocessors saved for this thread */
#define XT_CP_CS_ST 4 /* (2 bytes) coprocessor callee-saved regs stored for this thread */
#define XT_CP_ASA 8 /* (4 bytes) ptr to aligned save area */
#define XT_CPENABLE 0 /* (2 bytes) coprocessors active for this thread */
#define XT_CPSTORED 2 /* (2 bytes) coprocessors saved for this thread */
#define XT_CP_CS_ST 4 /* (2 bytes) coprocessor callee-saved regs stored for this thread */
#define XT_CP_ASA 8 /* (4 bytes) ptr to aligned save area */
/* Overall size allows for dynamic alignment: */
#define XT_CP_SIZE (12 + XT_CP_SA_SIZE + XCHAL_TOTAL_SA_ALIGN)
#else
#define XT_CP_SIZE 0
#endif
#define XT_CP_SIZE ( 12 + XT_CP_SA_SIZE + XCHAL_TOTAL_SA_ALIGN )
#else /* if XCHAL_CP_NUM > 0 */
#define XT_CP_SIZE 0
#endif /* if XCHAL_CP_NUM > 0 */
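A hedged summary of the per-thread save-area layout implied by the offsets above (the actual accesses are done in the port's assembly code; this sketch is for orientation only):

/*  base + XT_CPENABLE  (2 bytes)  CPs the thread owns; loaded into CPENABLE when it runs
 *  base + XT_CPSTORED  (2 bytes)  CPs whose full state is currently saved in the area
 *  base + XT_CP_CS_ST  (2 bytes)  CPs whose callee-saved state was saved on a solicited switch
 *  base + XT_CP_ASA    (4 bytes)  pointer to the dynamically aligned block holding the
 *                                 per-coprocessor areas at XT_CP0_SA .. XT_CP7_SA            */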
/*
-------------------------------------------------------------------------------
MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN
Convenient where the frame size requirements are the same for both ABIs.
ENTRY(sz), RET(sz) are for framed functions (have locals or make calls).
ENTRY0, RET0 are for frameless functions (no locals, no calls).
where size = size of stack frame in bytes (must be >0 and aligned to 16).
For framed functions the frame is created and the return address saved at
base of frame (Call0 ABI) or as determined by hardware (Windowed ABI).
For frameless functions, there is no frame and return address remains in a0.
Note: Because CPP macros expand to a single line, macros requiring multi-line
expansions are implemented as assembler macros.
-------------------------------------------------------------------------------
*/
* -------------------------------------------------------------------------------
* MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN
*
* Convenient where the frame size requirements are the same for both ABIs.
* ENTRY(sz), RET(sz) are for framed functions (have locals or make calls).
* ENTRY0, RET0 are for frameless functions (no locals, no calls).
*
* where size = size of stack frame in bytes (must be >0 and aligned to 16).
* For framed functions the frame is created and the return address saved at
* base of frame (Call0 ABI) or as determined by hardware (Windowed ABI).
* For frameless functions, there is no frame and return address remains in a0.
* Note: Because CPP macros expand to a single line, macros requiring multi-line
* expansions are implemented as assembler macros.
* -------------------------------------------------------------------------------
*/
#ifdef __ASSEMBLER__
#ifdef __XTENSA_CALL0_ABI__
/* Call0 */
#define ENTRY(sz) entry1 sz
.macro entry1 size=0x10
addi sp, sp, -\size
s32i a0, sp, 0
.endm
#define ENTRY0
#define RET(sz) ret1 sz
.macro ret1 size=0x10
l32i a0, sp, 0
addi sp, sp, \size
ret
.endm
#define RET0 ret
#else
/* Windowed */
#define ENTRY(sz) entry sp, sz
#define ENTRY0 entry sp, 0x10
#define RET(sz) retw
#define RET0 retw
#endif
#endif
#ifdef __XTENSA_CALL0_ABI__
/* Call0 */
#define ENTRY( sz ) entry1 sz
.macro entry1 size=0x10
addi sp, sp, -\size
s32i a0, sp, 0
.endm
#define ENTRY0
#define RET( sz ) ret1 sz
.macro ret1 size=0x10
l32i a0, sp, 0
addi sp, sp, \size
ret
.endm
#define RET0 ret
#else /* ifdef __XTENSA_CALL0_ABI__ */
/* Windowed */
#define ENTRY( sz ) entry sp, sz
#define ENTRY0 entry sp, 0x10
#define RET( sz ) retw
#define RET0 retw
#endif /* ifdef __XTENSA_CALL0_ABI__ */
#endif /* ifdef __ASSEMBLER__ */
#endif /* XTENSA_CONTEXT_H */

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -36,35 +36,31 @@
#ifdef XT_BOARD
#include <xtensa/xtbsp.h>
#include <xtensa/xtbsp.h>
#endif
#include "xtensa_rtos.h"
#ifdef XT_RTOS_TIMER_INT
unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */
unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */
/*
Compute and initialize at run-time the tick divisor (the number of
processor clock cycles in an RTOS tick, used to set the tick timer).
Called when the processor clock frequency is not known at compile-time.
*/
void _xt_tick_divisor_init(void)
{
#ifdef XT_CLOCK_FREQ
_xt_tick_divisor = (XT_CLOCK_FREQ / XT_TICK_PER_SEC);
#else
#ifdef XT_BOARD
_xt_tick_divisor = xtbsp_clock_freq_hz() / XT_TICK_PER_SEC;
#else
#error "No way to obtain processor clock frequency"
#endif /* XT_BOARD */
#endif /* XT_CLOCK_FREQ */
}
* Compute and initialize at run-time the tick divisor (the number of
* processor clock cycles in an RTOS tick, used to set the tick timer).
* Called when the processor clock frequency is not known at compile-time.
*/
void _xt_tick_divisor_init( void )
{
#ifdef XT_CLOCK_FREQ
_xt_tick_divisor = ( XT_CLOCK_FREQ / XT_TICK_PER_SEC );
#else
#ifdef XT_BOARD
_xt_tick_divisor = xtbsp_clock_freq_hz() / XT_TICK_PER_SEC;
#else
#error "No way to obtain processor clock frequency"
#endif /* XT_BOARD */
#endif /* XT_CLOCK_FREQ */
}
#endif /* XT_RTOS_TIMER_INT */
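A hedged sketch of how the cached divisor is typically consumed (this port actually programs the tick timer from assembly; the HAL-based version below is for illustration only): read the cycle counter and set the selected CCOMPARE register one tick into the future.

#ifdef XT_RTOS_TIMER_INT
    /* Illustrative only: arm the next tick using the divisor cached above.
     * xthal_get_ccount()/xthal_set_ccompare() are standard Xtensa HAL calls,
     * and XT_TIMER_INDEX selects which CCOMPARE register drives the tick. */
    static inline void prvArmNextTickSketch( void )
    {
        xthal_set_ccompare( XT_TIMER_INDEX, xthal_get_ccount() + _xt_tick_divisor );
    }
#endif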

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -43,95 +43,110 @@
/* Handler table is in xtensa_intr_asm.S */
extern xt_exc_handler _xt_exception_table[XCHAL_EXCCAUSE_NUM];
extern xt_exc_handler _xt_exception_table[ XCHAL_EXCCAUSE_NUM ];
/*
Default handler for unhandled exceptions.
*/
void xt_unhandled_exception(XtExcFrame *frame)
{
exit(-1);
}
* Default handler for unhandled exceptions.
*/
void xt_unhandled_exception( XtExcFrame * frame )
{
exit( -1 );
}
/*
This function registers a handler for the specified exception.
The function returns the address of the previous handler.
On error, it returns 0.
*/
xt_exc_handler xt_set_exception_handler(int n, xt_exc_handler f)
{
xt_exc_handler old;
* This function registers a handler for the specified exception.
* The function returns the address of the previous handler.
* On error, it returns 0.
*/
xt_exc_handler xt_set_exception_handler( int n,
xt_exc_handler f )
{
xt_exc_handler old;
if( n < 0 || n >= XCHAL_EXCCAUSE_NUM )
return 0; /* invalid exception number */
if( ( n < 0 ) || ( n >= XCHAL_EXCCAUSE_NUM ) )
{
return 0; /* invalid exception number */
}
old = _xt_exception_table[n];
old = _xt_exception_table[ n ];
if (f) {
_xt_exception_table[n] = f;
}
else {
_xt_exception_table[n] = &xt_unhandled_exception;
if( f )
{
_xt_exception_table[ n ] = f;
}
else
{
_xt_exception_table[ n ] = &xt_unhandled_exception;
}
return( ( old == &xt_unhandled_exception ) ? 0 : old );
}
return ((old == &xt_unhandled_exception) ? 0 : old);
}
#endif
#endif /* if XCHAL_HAVE_EXCEPTIONS */
#if XCHAL_HAVE_INTERRUPTS
/* Handler table is in xtensa_intr_asm.S */
typedef struct xt_handler_table_entry {
void * handler;
void * arg;
} xt_handler_table_entry;
typedef struct xt_handler_table_entry
{
void * handler;
void * arg;
} xt_handler_table_entry;
extern xt_handler_table_entry _xt_interrupt_table[XCHAL_NUM_INTERRUPTS];
extern xt_handler_table_entry _xt_interrupt_table[ XCHAL_NUM_INTERRUPTS ];
/*
Default handler for unhandled interrupts.
*/
void xt_unhandled_interrupt(void * arg)
{
exit(-1);
}
* Default handler for unhandled interrupts.
*/
void xt_unhandled_interrupt( void * arg )
{
exit( -1 );
}
/*
This function registers a handler for the specified interrupt. The "arg"
parameter specifies the argument to be passed to the handler when it is
invoked. The function returns the address of the previous handler.
On error, it returns 0.
*/
xt_handler xt_set_interrupt_handler(int n, xt_handler f, void * arg)
{
xt_handler_table_entry * entry;
xt_handler old;
* This function registers a handler for the specified interrupt. The "arg"
* parameter specifies the argument to be passed to the handler when it is
* invoked. The function returns the address of the previous handler.
* On error, it returns 0.
*/
xt_handler xt_set_interrupt_handler( int n,
xt_handler f,
void * arg )
{
xt_handler_table_entry * entry;
xt_handler old;
if( n < 0 || n >= XCHAL_NUM_INTERRUPTS )
return 0; /* invalid interrupt number */
if( Xthal_intlevel[n] > XCHAL_EXCM_LEVEL )
return 0; /* priority level too high to safely handle in C */
if( ( n < 0 ) || ( n >= XCHAL_NUM_INTERRUPTS ) )
{
return 0; /* invalid interrupt number */
}
entry = _xt_interrupt_table + n;
old = entry->handler;
if( Xthal_intlevel[ n ] > XCHAL_EXCM_LEVEL )
{
return 0; /* priority level too high to safely handle in C */
}
if (f) {
entry->handler = f;
entry->arg = arg;
entry = _xt_interrupt_table + n;
old = entry->handler;
if( f )
{
entry->handler = f;
entry->arg = arg;
}
else
{
entry->handler = &xt_unhandled_interrupt;
entry->arg = ( void * ) n;
}
return( ( old == &xt_unhandled_interrupt ) ? 0 : old );
}
else {
entry->handler = &xt_unhandled_interrupt;
entry->arg = (void*)n;
}
return ((old == &xt_unhandled_interrupt) ? 0 : old);
}
#endif /* XCHAL_HAVE_INTERRUPTS */

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -39,7 +39,7 @@
/* Mutex object that controls access to the overlay. Currently only one
* overlay region is supported so one mutex suffices.
*/
static SemaphoreHandle_t xt_overlay_mutex;
static SemaphoreHandle_t xt_overlay_mutex;
/* This function should be overridden to provide OS specific init such
@ -47,30 +47,30 @@ static SemaphoreHandle_t xt_overlay_mutex;
* Typically this mutex would be set up with priority inheritance. See
* overlay manager documentation for more details.
*/
void xt_overlay_init_os(void)
{
/* Create the mutex for overlay access. Priority inheritance is
* required.
*/
xt_overlay_mutex = xSemaphoreCreateMutex();
}
void xt_overlay_init_os( void )
{
/* Create the mutex for overlay access. Priority inheritance is
* required.
*/
xt_overlay_mutex = xSemaphoreCreateMutex();
}
/* This function locks access to shared overlay resources, typically
* by acquiring a mutex.
*/
void xt_overlay_lock(void)
{
xSemaphoreTake(xt_overlay_mutex, 0);
}
void xt_overlay_lock( void )
{
xSemaphoreTake( xt_overlay_mutex, 0 );
}
/* This function releases access to shared overlay resources, typically
* by unlocking a mutex.
*/
void xt_overlay_unlock(void)
{
xSemaphoreGive(xt_overlay_mutex);
}
void xt_overlay_unlock( void )
{
xSemaphoreGive( xt_overlay_mutex );
}
#endif
#endif /* if configUSE_MUTEX */

View file

@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@ -49,9 +49,9 @@
#define XTENSA_RTOS_H
#ifdef __ASSEMBLER__
#include <xtensa/coreasm.h>
#include <xtensa/coreasm.h>
#else
#include <xtensa/config/core.h>
#include <xtensa/config/core.h>
#endif
#include <xtensa/corebits.h>
@ -59,180 +59,180 @@
#include <xtensa/simcall.h>
/*
Include any RTOS specific definitions that are needed by this header.
*/
* Include any RTOS specific definitions that are needed by this header.
*/
#include <FreeRTOSConfig.h>
/*
Convert FreeRTOSConfig definitions to XTENSA definitions.
However these can still be overridden from the command line.
*/
* Convert FreeRTOSConfig definitions to XTENSA definitions.
* However these can still be overridden from the command line.
*/
#ifndef XT_SIMULATOR
#if configXT_SIMULATOR
#define XT_SIMULATOR 1 /* Simulator mode */
#endif
#if configXT_SIMULATOR
#define XT_SIMULATOR 1 /* Simulator mode */
#endif
#endif
#ifndef XT_BOARD
#if configXT_BOARD
#define XT_BOARD 1 /* Board mode */
#endif
#if configXT_BOARD
#define XT_BOARD 1 /* Board mode */
#endif
#endif
#ifndef XT_TIMER_INDEX
#if defined configXT_TIMER_INDEX
#define XT_TIMER_INDEX configXT_TIMER_INDEX /* Index of hardware timer to be used */
#endif
#if defined configXT_TIMER_INDEX
#define XT_TIMER_INDEX configXT_TIMER_INDEX /* Index of hardware timer to be used */
#endif
#endif
#ifndef XT_INTEXC_HOOKS
#if configXT_INTEXC_HOOKS
#define XT_INTEXC_HOOKS 1 /* Enables exception hooks */
#endif
#if configXT_INTEXC_HOOKS
#define XT_INTEXC_HOOKS 1 /* Enables exception hooks */
#endif
#endif
#if (!XT_SIMULATOR) && (!XT_BOARD)
#error Either XT_SIMULATOR or XT_BOARD must be defined.
#if ( !XT_SIMULATOR ) && ( !XT_BOARD )
#error Either XT_SIMULATOR or XT_BOARD must be defined.
#endif
/*
Name of RTOS (for messages).
*/
* Name of RTOS (for messages).
*/
#define XT_RTOS_NAME FreeRTOS
/*
Check some Xtensa configuration requirements and report error if not met.
Error messages can be customized for the RTOS port.
*/
* Check some Xtensa configuration requirements and report error if not met.
* Error messages can be customized for the RTOS port.
*/
#if !XCHAL_HAVE_XEA2
#error "FreeRTOS/Xtensa requires XEA2 (exception architecture 2)."
#error "FreeRTOS/Xtensa requires XEA2 (exception architecture 2)."
#endif
/*******************************************************************************
RTOS CALLOUT MACROS MAPPED TO RTOS PORT-SPECIFIC FUNCTIONS.
Define callout macros used in generic Xtensa code to interact with the RTOS.
The macros are simply the function names for use in calls from assembler code.
Some of these functions may call back to generic functions in xtensa_context.h .
*
* RTOS CALLOUT MACROS MAPPED TO RTOS PORT-SPECIFIC FUNCTIONS.
*
* Define callout macros used in generic Xtensa code to interact with the RTOS.
* The macros are simply the function names for use in calls from assembler code.
* Some of these functions may call back to generic functions in xtensa_context.h .
*
*******************************************************************************/
/*
Inform RTOS of entry into an interrupt handler that will affect it.
Allows RTOS to manage switch to any system stack and count nesting level.
Called after minimal context has been saved, with interrupts disabled.
RTOS port can call0 _xt_context_save to save the rest of the context.
May only be called from assembly code by the 'call0' instruction.
*/
// void XT_RTOS_INT_ENTER(void)
#define XT_RTOS_INT_ENTER _frxt_int_enter
* Inform RTOS of entry into an interrupt handler that will affect it.
* Allows RTOS to manage switch to any system stack and count nesting level.
* Called after minimal context has been saved, with interrupts disabled.
* RTOS port can call0 _xt_context_save to save the rest of the context.
* May only be called from assembly code by the 'call0' instruction.
*/
/* void XT_RTOS_INT_ENTER(void) */
#define XT_RTOS_INT_ENTER _frxt_int_enter
/*
Inform RTOS of completion of an interrupt handler, and give control to
RTOS to perform thread/task scheduling, switch back from any system stack
and restore the context, and return to the exit dispatcher saved in the
stack frame at XT_STK_EXIT. RTOS port can call0 _xt_context_restore
to restore the context saved in XT_RTOS_INT_ENTER via _xt_context_save,
leaving only a minimal part of the context to be restored by the exit
dispatcher. This function does not return to the place it was called from.
May only be called from assembly code by the 'call0' instruction.
*/
// void XT_RTOS_INT_EXIT(void)
* Inform RTOS of completion of an interrupt handler, and give control to
* RTOS to perform thread/task scheduling, switch back from any system stack
* and restore the context, and return to the exit dispatcher saved in the
* stack frame at XT_STK_EXIT. RTOS port can call0 _xt_context_restore
* to restore the context saved in XT_RTOS_INT_ENTER via _xt_context_save,
* leaving only a minimal part of the context to be restored by the exit
* dispatcher. This function does not return to the place it was called from.
* May only be called from assembly code by the 'call0' instruction.
*/
/* void XT_RTOS_INT_EXIT(void) */
#define XT_RTOS_INT_EXIT _frxt_int_exit
/*
Inform RTOS of the occurrence of a tick timer interrupt.
If RTOS has no tick timer, leave XT_RTOS_TIMER_INT undefined.
May be coded in or called from C or assembly, per ABI conventions.
RTOS may optionally define XT_TICK_PER_SEC in its own way (eg. macro).
*/
// void XT_RTOS_TIMER_INT(void)
#define XT_RTOS_TIMER_INT _frxt_timer_int
#define XT_TICK_PER_SEC configTICK_RATE_HZ
* Inform RTOS of the occurrence of a tick timer interrupt.
* If RTOS has no tick timer, leave XT_RTOS_TIMER_INT undefined.
* May be coded in or called from C or assembly, per ABI conventions.
* RTOS may optionally define XT_TICK_PER_SEC in its own way (e.g. macro).
*/
/* void XT_RTOS_TIMER_INT(void) */
#define XT_RTOS_TIMER_INT _frxt_timer_int
#define XT_TICK_PER_SEC configTICK_RATE_HZ
/*
Return in a15 the base address of the co-processor state save area for the
thread that triggered a co-processor exception, or 0 if no thread was running.
The state save area is structured as defined in xtensa_context.h and has size
XT_CP_SIZE. Co-processor instructions should only be used in thread code, never
in interrupt handlers or the RTOS kernel. May only be called from assembly code
and by the 'call0' instruction. A result of 0 indicates an unrecoverable error.
The implementation may use only a2-4, a15 (all other regs must be preserved).
*/
// void* XT_RTOS_CP_STATE(void)
* Return in a15 the base address of the co-processor state save area for the
* thread that triggered a co-processor exception, or 0 if no thread was running.
* The state save area is structured as defined in xtensa_context.h and has size
* XT_CP_SIZE. Co-processor instructions should only be used in thread code, never
* in interrupt handlers or the RTOS kernel. May only be called from assembly code
* and by the 'call0' instruction. A result of 0 indicates an unrecoverable error.
* The implementation may use only a2-4, a15 (all other regs must be preserved).
*/
/* void* XT_RTOS_CP_STATE(void) */
#define XT_RTOS_CP_STATE _frxt_task_coproc_state
/*******************************************************************************
HOOKS TO DYNAMICALLY INSTALL INTERRUPT AND EXCEPTION HANDLERS PER LEVEL.
This Xtensa RTOS port provides hooks for dynamically installing exception
and interrupt handlers to facilitate automated testing where each test
case can install its own handler for user exceptions and each interrupt
priority (level). This consists of an array of function pointers indexed
by interrupt priority, with index 0 being the user exception handler hook.
Each entry in the array is initially 0, and may be replaced by a function
pointer of type XT_INTEXC_HOOK. A handler may be uninstalled by installing 0.
The handler for low and medium priority obeys ABI conventions so may be coded
in C. For the exception handler, the cause is the contents of the EXCCAUSE
reg, and the result is -1 if handled, else the cause (still needs handling).
For interrupt handlers, the cause is a mask of pending enabled interrupts at
that level, and the result is the same mask with the bits for the handled
interrupts cleared (those not cleared still need handling). This allows a test
case to either pre-handle or override the default handling for the exception
or interrupt level (see xtensa_vectors.S).
High priority handlers (including NMI) must be coded in assembly, are always
called by 'call0' regardless of ABI, must preserve all registers except a0,
and must not use or modify the interrupted stack. The hook argument 'cause'
is not passed and the result is ignored, so as not to burden the caller with
saving and restoring a2 (it assumes only one interrupt per level - see the
discussion in high priority interrupts in xtensa_vectors.S). The handler
therefore should be coded to prototype 'void h(void)' even though it plugs
into an array of handlers of prototype 'unsigned h(unsigned)'.
To enable interrupt/exception hooks, compile the RTOS with '-DXT_INTEXC_HOOKS'.
*
* HOOKS TO DYNAMICALLY INSTALL INTERRUPT AND EXCEPTION HANDLERS PER LEVEL.
*
* This Xtensa RTOS port provides hooks for dynamically installing exception
* and interrupt handlers to facilitate automated testing where each test
* case can install its own handler for user exceptions and each interrupt
* priority (level). This consists of an array of function pointers indexed
* by interrupt priority, with index 0 being the user exception handler hook.
* Each entry in the array is initially 0, and may be replaced by a function
* pointer of type XT_INTEXC_HOOK. A handler may be uninstalled by installing 0.
*
* The handler for low and medium priority obeys ABI conventions so may be coded
* in C. For the exception handler, the cause is the contents of the EXCCAUSE
* reg, and the result is -1 if handled, else the cause (still needs handling).
* For interrupt handlers, the cause is a mask of pending enabled interrupts at
* that level, and the result is the same mask with the bits for the handled
* interrupts cleared (those not cleared still need handling). This allows a test
* case to either pre-handle or override the default handling for the exception
* or interrupt level (see xtensa_vectors.S).
*
* High priority handlers (including NMI) must be coded in assembly, are always
* called by 'call0' regardless of ABI, must preserve all registers except a0,
* and must not use or modify the interrupted stack. The hook argument 'cause'
* is not passed and the result is ignored, so as not to burden the caller with
* saving and restoring a2 (it assumes only one interrupt per level - see the
* discussion in high priority interrupts in xtensa_vectors.S). The handler
* therefore should be coded to prototype 'void h(void)' even though it plugs
* into an array of handlers of prototype 'unsigned h(unsigned)'.
*
* To enable interrupt/exception hooks, compile the RTOS with '-DXT_INTEXC_HOOKS'.
*
*******************************************************************************/
#define XT_INTEXC_HOOK_NUM (1 + XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI)
#define XT_INTEXC_HOOK_NUM ( 1 + XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI )
#ifndef __ASSEMBLER__
typedef unsigned (*XT_INTEXC_HOOK)(unsigned cause);
extern volatile XT_INTEXC_HOOK _xt_intexc_hooks[XT_INTEXC_HOOK_NUM];
typedef unsigned (* XT_INTEXC_HOOK)( unsigned cause );
extern volatile XT_INTEXC_HOOK _xt_intexc_hooks[ XT_INTEXC_HOOK_NUM ];
#endif
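As a minimal sketch of how a test case might use the hook array declared above (the RTOS must be compiled with '-DXT_INTEXC_HOOKS'; the handler name and the EXCCAUSE value checked here are hypothetical):
#include "xtensa_rtos.h"
/* Hypothetical low/medium priority hook for the user exception slot (index 0).
 * Returning -1 marks the exception as handled; returning the cause leaves it
 * to the default handling in xtensa_vectors.S. */
static unsigned prvTestExceptionHook( unsigned cause )
{
    if( cause == 20 )                      /* example EXCCAUSE value (assumption) */
    {
        /* ...record or recover from the fault... */
        return ( unsigned ) -1;            /* handled */
    }
    return cause;                          /* not handled */
}
void vInstallTestExceptionHook( void )
{
    _xt_intexc_hooks[ 0 ] = prvTestExceptionHook;   /* install */
}
void vRemoveTestExceptionHook( void )
{
    _xt_intexc_hooks[ 0 ] = 0;                      /* uninstall by installing 0 */
}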
/*******************************************************************************
CONVENIENCE INCLUSIONS.
Ensures RTOS specific files need only include this one Xtensa-generic header.
These headers are included last so they can use the RTOS definitions above.
*
* CONVENIENCE INCLUSIONS.
*
* Ensures RTOS specific files need only include this one Xtensa-generic header.
* These headers are included last so they can use the RTOS definitions above.
*
*******************************************************************************/
#include "xtensa_context.h"
#ifdef XT_RTOS_TIMER_INT
#include "xtensa_timer.h"
#include "xtensa_timer.h"
#endif
/*******************************************************************************
Xtensa Port Version.
*
* Xtensa Port Version.
*
*******************************************************************************/
#define XTENSA_PORT_VERSION 1.7
#define XTENSA_PORT_VERSION_STRING "1.7"
#define XTENSA_PORT_VERSION 1.7
#define XTENSA_PORT_VERSION_STRING "1.7"
#endif /* XTENSA_RTOS_H */


@@ -1,4 +1,4 @@
/*
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2015-2019 Cadence Design Systems, Inc.
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -47,118 +47,118 @@
#define XTENSA_TIMER_H
#ifdef __ASSEMBLER__
#include <xtensa/coreasm.h>
#include <xtensa/coreasm.h>
#endif
#include <xtensa/corebits.h>
#include <xtensa/config/system.h>
#include "xtensa_rtos.h" /* in case this wasn't included directly */
#include "xtensa_rtos.h" /* in case this wasn't included directly */
#include <FreeRTOSConfig.h>
/*
Select timer to use for periodic tick, and determine its interrupt number
and priority. User may specify a timer by defining XT_TIMER_INDEX with -D,
in which case its validity is checked (it must exist in this core and must
not be on a high priority interrupt - an error will be reported in invalid).
Otherwise select the first low or medium priority interrupt timer available.
*/
* Select timer to use for periodic tick, and determine its interrupt number
* and priority. User may specify a timer by defining XT_TIMER_INDEX with -D,
* in which case its validity is checked (it must exist in this core and must
 * not be on a high priority interrupt - an error will be reported if invalid).
* Otherwise select the first low or medium priority interrupt timer available.
*/
#if XCHAL_NUM_TIMERS == 0
#error "This Xtensa configuration is unsupported, it has no timers."
#error "This Xtensa configuration is unsupported, it has no timers."
#else
#ifndef XT_TIMER_INDEX
#if XCHAL_TIMER3_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL(XCHAL_TIMER3_INTERRUPT) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 3
#ifndef XT_TIMER_INDEX
#if XCHAL_TIMER3_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER3_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 3
#endif
#endif
#if XCHAL_TIMER2_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER2_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 2
#endif
#endif
#if XCHAL_TIMER1_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER1_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 1
#endif
#endif
#if XCHAL_TIMER0_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER0_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 0
#endif
#endif
#endif /* ifndef XT_TIMER_INDEX */
#ifndef XT_TIMER_INDEX
#error "There is no suitable timer in this Xtensa configuration."
#endif
#endif
#if XCHAL_TIMER2_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 2
#endif
#endif
#if XCHAL_TIMER1_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 1
#endif
#endif
#if XCHAL_TIMER0_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 0
#endif
#endif
#endif
#ifndef XT_TIMER_INDEX
#error "There is no suitable timer in this Xtensa configuration."
#endif
#define XT_CCOMPARE (CCOMPARE + XT_TIMER_INDEX)
#define XT_TIMER_INTNUM XCHAL_TIMER_INTERRUPT(XT_TIMER_INDEX)
#define XT_TIMER_INTPRI XCHAL_INT_LEVEL(XT_TIMER_INTNUM)
#define XT_TIMER_INTEN (1 << XT_TIMER_INTNUM)
#define XT_CCOMPARE ( CCOMPARE + XT_TIMER_INDEX )
#define XT_TIMER_INTNUM XCHAL_TIMER_INTERRUPT( XT_TIMER_INDEX )
#define XT_TIMER_INTPRI XCHAL_INT_LEVEL( XT_TIMER_INTNUM )
#define XT_TIMER_INTEN ( 1 << XT_TIMER_INTNUM )
#if XT_TIMER_INTNUM == XTHAL_TIMER_UNCONFIGURED
#error "The timer selected by XT_TIMER_INDEX does not exist in this core."
#elif XT_TIMER_INTPRI > XCHAL_EXCM_LEVEL
#error "The timer interrupt cannot be high priority (use medium or low)."
#endif
#if XT_TIMER_INTNUM == XTHAL_TIMER_UNCONFIGURED
#error "The timer selected by XT_TIMER_INDEX does not exist in this core."
#elif XT_TIMER_INTPRI > XCHAL_EXCM_LEVEL
#error "The timer interrupt cannot be high priority (use medium or low)."
#endif
#endif /* XCHAL_NUM_TIMERS */
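As a worked illustration of the derived macros (the numbers below are assumptions, not values from any particular core): if the cascade above selects timer 1, and that core's CCOMPARE1 interrupt happens to be interrupt number 3 at priority level 1, then:
/*
 * XT_TIMER_INDEX   == 1
 * XT_CCOMPARE      == CCOMPARE1
 * XT_TIMER_INTNUM  == 3
 * XT_TIMER_INTPRI  == 1              (<= XCHAL_EXCM_LEVEL, so accepted)
 * XT_TIMER_INTEN   == ( 1 << 3 )     == 0x8
 *
 * A specific timer can also be forced from the build line, bypassing the
 * cascade, e.g. xt-xcc -DXT_TIMER_INDEX=2 ...
 */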
/*
Set processor clock frequency, used to determine clock divisor for timer tick.
User should BE SURE TO ADJUST THIS for the Xtensa platform being used.
If using a supported board via the board-independent API defined in xtbsp.h,
this may be left undefined and frequency and tick divisor will be computed
and cached during run-time initialization.
NOTE ON SIMULATOR:
Under the Xtensa instruction set simulator, the frequency can only be estimated
because it depends on the speed of the host and the version of the simulator.
Also because it runs much slower than hardware, it is not possible to achieve
real-time performance for most applications under the simulator. A frequency
too low does not allow enough time between timer interrupts, starving threads.
To obtain a more convenient but non-real-time tick duration on the simulator,
compile with xt-xcc option "-DXT_SIMULATOR".
Adjust this frequency to taste (it's not real-time anyway!).
*/
#if defined(XT_SIMULATOR) && !defined(XT_CLOCK_FREQ)
#define XT_CLOCK_FREQ configCPU_CLOCK_HZ
* Set processor clock frequency, used to determine clock divisor for timer tick.
* User should BE SURE TO ADJUST THIS for the Xtensa platform being used.
* If using a supported board via the board-independent API defined in xtbsp.h,
* this may be left undefined and frequency and tick divisor will be computed
* and cached during run-time initialization.
*
* NOTE ON SIMULATOR:
* Under the Xtensa instruction set simulator, the frequency can only be estimated
* because it depends on the speed of the host and the version of the simulator.
* Also because it runs much slower than hardware, it is not possible to achieve
* real-time performance for most applications under the simulator. A frequency
* too low does not allow enough time between timer interrupts, starving threads.
* To obtain a more convenient but non-real-time tick duration on the simulator,
* compile with xt-xcc option "-DXT_SIMULATOR".
* Adjust this frequency to taste (it's not real-time anyway!).
*/
#if defined( XT_SIMULATOR ) && !defined( XT_CLOCK_FREQ )
#define XT_CLOCK_FREQ configCPU_CLOCK_HZ
#endif
#if !defined(XT_CLOCK_FREQ) && !defined(XT_BOARD)
#error "XT_CLOCK_FREQ must be defined for the target platform."
#if !defined( XT_CLOCK_FREQ ) && !defined( XT_BOARD )
#error "XT_CLOCK_FREQ must be defined for the target platform."
#endif
/*
Default number of timer "ticks" per second (default 100 for 10ms tick).
RTOS may define this in its own way (if applicable) in xtensa_rtos.h.
User may redefine this to an optimal value for the application, either by
editing this here or in xtensa_rtos.h, or compiling with xt-xcc option
"-DXT_TICK_PER_SEC=<value>" where <value> is a suitable number.
*/
* Default number of timer "ticks" per second (default 100 for 10ms tick).
* RTOS may define this in its own way (if applicable) in xtensa_rtos.h.
* User may redefine this to an optimal value for the application, either by
* editing this here or in xtensa_rtos.h, or compiling with xt-xcc option
* "-DXT_TICK_PER_SEC=<value>" where <value> is a suitable number.
*/
#ifndef XT_TICK_PER_SEC
#define XT_TICK_PER_SEC configTICK_RATE_HZ /* 10 ms tick = 100 ticks per second */
#define XT_TICK_PER_SEC configTICK_RATE_HZ /* 10 ms tick = 100 ticks per second */
#endif
/*
Derivation of clock divisor for timer tick and interrupt (one per tick).
*/
* Derivation of clock divisor for timer tick and interrupt (one per tick).
*/
#ifdef XT_CLOCK_FREQ
#define XT_TICK_DIVISOR (XT_CLOCK_FREQ / XT_TICK_PER_SEC)
#define XT_TICK_DIVISOR ( XT_CLOCK_FREQ / XT_TICK_PER_SEC )
#endif
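As a worked example of the divisor (the clock value is an assumption used only for illustration): an 80 MHz core clock with the default 100 Hz tick rate gives 80000000 / 100 = 800000 CCOUNT increments between tick interrupts. A compile-time check of that arithmetic might look like:
/* Hypothetical values, not taken from this port. */
#define EXAMPLE_CLOCK_FREQ      80000000UL
#define EXAMPLE_TICK_PER_SEC    100UL
#define EXAMPLE_TICK_DIVISOR    ( EXAMPLE_CLOCK_FREQ / EXAMPLE_TICK_PER_SEC )
/* Fails to compile if the arithmetic above does not give 800000. */
typedef char xExampleDivisorCheck[ ( EXAMPLE_TICK_DIVISOR == 800000UL ) ? 1 : -1 ];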
#ifndef __ASSEMBLER__
extern unsigned _xt_tick_divisor;
extern void _xt_tick_divisor_init(void);
extern unsigned _xt_tick_divisor;
extern void _xt_tick_divisor_init( void );
#endif
#endif /* XTENSA_TIMER_H */
#endif /* XTENSA_TIMER_H */


@@ -1,4 +1,4 @@
// Copyright (c) 2019, XMOS Ltd, All rights reserved
/* Copyright (c) 2019, XMOS Ltd, All rights reserved */
/* Scheduler includes. */
#include "FreeRTOS.h"
@@ -16,139 +16,144 @@ uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ] = { pdFALSE };
void vIntercoreInterruptISR( void )
{
int xCoreID;
int xCoreID;
// debug_printf( "In KCALL: %u\n", ulData );
xCoreID = rtos_core_id_get();
ulPortYieldRequired[ xCoreID ] = pdTRUE;
/* debug_printf( "In KCALL: %u\n", ulData ); */
xCoreID = rtos_core_id_get();
ulPortYieldRequired[ xCoreID ] = pdTRUE;
}
/*-----------------------------------------------------------*/
DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData )
{
uint32_t ulLastTrigger;
uint32_t ulNow;
int xCoreID;
uint32_t ulLastTrigger;
uint32_t ulNow;
int xCoreID;
UBaseType_t uxSavedInterruptStatus;
xCoreID = 0;
xCoreID = 0;
configASSERT( xCoreID == rtos_core_id_get() );
configASSERT( xCoreID == rtos_core_id_get() );
/* Need the next interrupt to be scheduled relative to
* the current trigger time, rather than the current
* time. */
ulLastTrigger = hwtimer_get_trigger_time( xKernelTimer );
/* Need the next interrupt to be scheduled relative to
* the current trigger time, rather than the current
* time. */
ulLastTrigger = hwtimer_get_trigger_time( xKernelTimer );
/* Check to see if the ISR is late. If it is, we don't
* want to schedule the next interrupt to be in the past. */
ulNow = hwtimer_get_time( xKernelTimer );
if( ulNow - ulLastTrigger >= configCPU_CLOCK_HZ / configTICK_RATE_HZ )
{
ulLastTrigger = ulNow;
}
/* Check to see if the ISR is late. If it is, we don't
* want to schedule the next interrupt to be in the past. */
ulNow = hwtimer_get_time( xKernelTimer );
ulLastTrigger += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
hwtimer_change_trigger_time( xKernelTimer, ulLastTrigger );
if( ulNow - ulLastTrigger >= configCPU_CLOCK_HZ / configTICK_RATE_HZ )
{
ulLastTrigger = ulNow;
}
#if configUPDATE_RTOS_TIME_FROM_TICK_ISR == 1
rtos_time_increment( RTOS_TICK_PERIOD( configTICK_RATE_HZ ) );
#endif
ulLastTrigger += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
hwtimer_change_trigger_time( xKernelTimer, ulLastTrigger );
#if configUPDATE_RTOS_TIME_FROM_TICK_ISR == 1
rtos_time_increment( RTOS_TICK_PERIOD( configTICK_RATE_HZ ) );
#endif
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
if( xTaskIncrementTick() != pdFALSE )
{
ulPortYieldRequired[ xCoreID ] = pdTRUE;
}
if( xTaskIncrementTick() != pdFALSE )
{
ulPortYieldRequired[ xCoreID ] = pdTRUE;
}
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
}
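The scheduling rule used by the ISR above can be summarised by the following sketch (the helper function and its name are illustrative only; it relies on the usual wrap-safe unsigned 32-bit subtraction):
static uint32_t prvNextTriggerTime( uint32_t ulLastTrigger,
                                    uint32_t ulNow,
                                    uint32_t ulTimerTicksPerRtosTick )
{
    /* If the ISR ran late, rebase on the current time so the next trigger
     * is never placed in the past. */
    if( ( ulNow - ulLastTrigger ) >= ulTimerTicksPerRtosTick )
    {
        ulLastTrigger = ulNow;
    }
    /* Otherwise schedule relative to the previous trigger so the tick
     * period does not accumulate drift. */
    return ulLastTrigger + ulTimerTicksPerRtosTick;
}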
/*-----------------------------------------------------------*/
void vPortYieldOtherCore( int xOtherCoreID )
{
int xCoreID;
int xCoreID;
/*
* This function must be called from within a critical section.
*/
/*
* This function must be called from within a critical section.
*/
xCoreID = rtos_core_id_get();
xCoreID = rtos_core_id_get();
// debug_printf("%d->%d\n", xCoreID, xOtherCoreID);
/* debug_printf("%d->%d\n", xCoreID, xOtherCoreID); */
// debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID );
/* debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID ); */
rtos_irq( xOtherCoreID, xCoreID );
rtos_irq( xOtherCoreID, xCoreID );
}
/*-----------------------------------------------------------*/
static int prvCoreInit( void )
{
int xCoreID;
int xCoreID;
xCoreID = rtos_core_register();
debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID );
xCoreID = rtos_core_register();
debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID );
asm volatile (
"ldap r11, kexcept\n\t"
"set kep, r11\n\t"
:
:
: "r11"
);
asm volatile (
"ldap r11, kexcept\n\t"
"set kep, r11\n\t"
:
:
: "r11"
);
rtos_irq_enable( configNUMBER_OF_CORES );
rtos_irq_enable( configNUMBER_OF_CORES );
/*
* All threads wait here until all have enabled IRQs
*/
while( rtos_irq_ready() == pdFALSE );
/*
* All threads wait here until all have enabled IRQs
*/
while( rtos_irq_ready() == pdFALSE )
{
}
if( xCoreID == 0 )
{
uint32_t ulNow;
ulNow = hwtimer_get_time( xKernelTimer );
// debug_printf( "The time is now (%u)\n", ulNow );
if( xCoreID == 0 )
{
uint32_t ulNow;
ulNow = hwtimer_get_time( xKernelTimer );
/* debug_printf( "The time is now (%u)\n", ulNow ); */
ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ;
triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) );
hwtimer_set_trigger_time( xKernelTimer, ulNow );
triggerable_enable_trigger( xKernelTimer );
}
triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) );
hwtimer_set_trigger_time( xKernelTimer, ulNow );
triggerable_enable_trigger( xKernelTimer );
}
return xCoreID;
return xCoreID;
}
/*-----------------------------------------------------------*/
DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void )
{
int xCoreID;
int xCoreID;
xCoreID = prvCoreInit();
xCoreID = prvCoreInit();
#if( configUSE_CORE_INIT_HOOK == 1 )
{
extern void vApplicationCoreInitHook( BaseType_t xCoreID );
#if ( configUSE_CORE_INIT_HOOK == 1 )
{
extern void vApplicationCoreInitHook( BaseType_t xCoreID );
vApplicationCoreInitHook( xCoreID );
}
#endif
vApplicationCoreInitHook( xCoreID );
}
#endif
debug_printf( "FreeRTOS Core %d initialized\n", xCoreID );
debug_printf( "FreeRTOS Core %d initialized\n", xCoreID );
/*
* Restore the context of the first thread
* to run and jump into it.
*/
asm volatile (
"mov r6, %0\n\t" /* R6 must be the FreeRTOS core ID*/
"ldaw r5, dp[pxCurrentTCBs]\n\t" /* R5 must be the TCB list which is indexed by R6 */
"bu _freertos_restore_ctx\n\t"
: /* no outputs */
: "r"(xCoreID)
: "r5", "r6"
);
/*
* Restore the context of the first thread
* to run and jump into it.
*/
asm volatile (
"mov r6, %0\n\t" /* R6 must be the FreeRTOS core ID*/
"ldaw r5, dp[pxCurrentTCBs]\n\t" /* R5 must be the TCB list which is indexed by R6 */
"bu _freertos_restore_ctx\n\t"
: /* no outputs */
: "r" ( xCoreID )
: "r5", "r6"
);
}
/*-----------------------------------------------------------*/
@@ -159,70 +164,73 @@ DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void )
/*
* See header file for description.
*/
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
{
//debug_printf( "Top of stack was %p for task %p\n", pxTopOfStack, pxCode );
/*
* Grow the thread's stack by portTHREAD_CONTEXT_STACK_GROWTH
* so we can push the context onto it.
*/
pxTopOfStack -= portTHREAD_CONTEXT_STACK_GROWTH;
/*debug_printf( "Top of stack was %p for task %p\n", pxTopOfStack, pxCode ); */
uint32_t dp;
uint32_t cp;
/*
* Grow the thread's stack by portTHREAD_CONTEXT_STACK_GROWTH
* so we can push the context onto it.
*/
pxTopOfStack -= portTHREAD_CONTEXT_STACK_GROWTH;
/*
* We need to get the current CP and DP pointers.
*/
asm volatile (
"ldaw r11, cp[0]\n\t" /* get CP into R11 */
"mov %0, r11\n\t" /* get R11 (CP) into cp */
"ldaw r11, dp[0]\n\t" /* get DP into R11 */
"mov %1, r11\n\t" /* get R11 (DP) into dp */
: "=r"(cp), "=r"(dp) /* output 0 is cp, output 1 is dp */
: /* there are no inputs */
: "r11" /* R11 gets clobbered */
);
uint32_t dp;
uint32_t cp;
/*
* Push the thread context onto the stack.
* Saved PC will point to the new thread's
* entry pointer.
* Interrupts will default to enabled.
* KEDI is also set to enable dual issue mode
* upon kernel entry.
*/
pxTopOfStack[ 1 ] = ( StackType_t ) pxCode; /* SP[1] := SPC */
/*
* We need to get the current CP and DP pointers.
*/
asm volatile (
"ldaw r11, cp[0]\n\t" /* get CP into R11 */
"mov %0, r11\n\t" /* get R11 (CP) into cp */
"ldaw r11, dp[0]\n\t" /* get DP into R11 */
"mov %1, r11\n\t" /* get R11 (DP) into dp */
: "=r" ( cp ), "=r" ( dp ) /* output 0 is cp, output 1 is dp */
: /* there are no inputs */
: "r11" /* R11 gets clobbered */
);
/*
* Push the thread context onto the stack.
* Saved PC will point to the new thread's
* entry pointer.
* Interrupts will default to enabled.
* KEDI is also set to enable dual issue mode
* upon kernel entry.
*/
pxTopOfStack[ 1 ] = ( StackType_t ) pxCode; /* SP[1] := SPC */
pxTopOfStack[ 2 ] = XS1_SR_IEBLE_MASK
| XS1_SR_KEDI_MASK; /* SP[2] := SSR */
pxTopOfStack[ 3 ] = 0x00000000; /* SP[3] := SED */
pxTopOfStack[ 4 ] = 0x00000000; /* SP[4] := ET */
pxTopOfStack[ 5 ] = dp; /* SP[5] := DP */
pxTopOfStack[ 6 ] = cp; /* SP[6] := CP */
pxTopOfStack[ 7 ] = 0x00000000; /* SP[7] := LR */
pxTopOfStack[ 8 ] = ( StackType_t ) pvParameters; /* SP[8] := R0 */
pxTopOfStack[ 9 ] = 0x01010101; /* SP[9] := R1 */
pxTopOfStack[ 10 ] = 0x02020202; /* SP[10] := R2 */
pxTopOfStack[ 11 ] = 0x03030303; /* SP[11] := R3 */
pxTopOfStack[ 12 ] = 0x04040404; /* SP[12] := R4 */
pxTopOfStack[ 13 ] = 0x05050505; /* SP[13] := R5 */
pxTopOfStack[ 14 ] = 0x06060606; /* SP[14] := R6 */
pxTopOfStack[ 15 ] = 0x07070707; /* SP[15] := R7 */
pxTopOfStack[ 16 ] = 0x08080808; /* SP[16] := R8 */
pxTopOfStack[ 17 ] = 0x09090909; /* SP[17] := R9 */
pxTopOfStack[ 18 ] = 0x10101010; /* SP[18] := R10 */
pxTopOfStack[ 19 ] = 0x11111111; /* SP[19] := R11 */
pxTopOfStack[ 20 ] = 0x00000000; /* SP[20] := vH and vSR */
memset(&pxTopOfStack[21], 0, 32); /* SP[21 - 28] := vR */
memset(&pxTopOfStack[29], 1, 32); /* SP[29 - 36] := vD */
memset(&pxTopOfStack[37], 2, 32); /* SP[37 - 44] := vC */
| XS1_SR_KEDI_MASK; /* SP[2] := SSR */
pxTopOfStack[ 3 ] = 0x00000000; /* SP[3] := SED */
pxTopOfStack[ 4 ] = 0x00000000; /* SP[4] := ET */
pxTopOfStack[ 5 ] = dp; /* SP[5] := DP */
pxTopOfStack[ 6 ] = cp; /* SP[6] := CP */
pxTopOfStack[ 7 ] = 0x00000000; /* SP[7] := LR */
pxTopOfStack[ 8 ] = ( StackType_t ) pvParameters; /* SP[8] := R0 */
pxTopOfStack[ 9 ] = 0x01010101; /* SP[9] := R1 */
pxTopOfStack[ 10 ] = 0x02020202; /* SP[10] := R2 */
pxTopOfStack[ 11 ] = 0x03030303; /* SP[11] := R3 */
pxTopOfStack[ 12 ] = 0x04040404; /* SP[12] := R4 */
pxTopOfStack[ 13 ] = 0x05050505; /* SP[13] := R5 */
pxTopOfStack[ 14 ] = 0x06060606; /* SP[14] := R6 */
pxTopOfStack[ 15 ] = 0x07070707; /* SP[15] := R7 */
pxTopOfStack[ 16 ] = 0x08080808; /* SP[16] := R8 */
pxTopOfStack[ 17 ] = 0x09090909; /* SP[17] := R9 */
pxTopOfStack[ 18 ] = 0x10101010; /* SP[18] := R10 */
pxTopOfStack[ 19 ] = 0x11111111; /* SP[19] := R11 */
pxTopOfStack[ 20 ] = 0x00000000; /* SP[20] := vH and vSR */
memset( &pxTopOfStack[ 21 ], 0, 32 ); /* SP[21 - 28] := vR */
memset( &pxTopOfStack[ 29 ], 1, 32 ); /* SP[29 - 36] := vD */
memset( &pxTopOfStack[ 37 ], 2, 32 ); /* SP[37 - 44] := vC */
//debug_printf( "Top of stack is now %p for task %p\n", pxTopOfStack, pxCode );
/*debug_printf( "Top of stack is now %p for task %p\n", pxTopOfStack, pxCode ); */
/*
* Returns the new top of the stack
*/
return pxTopOfStack;
/*
* Returns the new top of the stack
*/
return pxTopOfStack;
}
/*-----------------------------------------------------------*/
@@ -233,22 +241,22 @@ void vPortStartSMPScheduler( void );
*/
BaseType_t xPortStartScheduler( void )
{
if( ( configNUMBER_OF_CORES > portMAX_CORE_COUNT ) || ( configNUMBER_OF_CORES <= 0 ) )
{
return pdFAIL;
}
if( ( configNUMBER_OF_CORES > portMAX_CORE_COUNT ) || ( configNUMBER_OF_CORES <= 0 ) )
{
return pdFAIL;
}
rtos_locks_initialize();
xKernelTimer = hwtimer_alloc();
rtos_locks_initialize();
xKernelTimer = hwtimer_alloc();
vPortStartSMPScheduler();
vPortStartSMPScheduler();
return pdPASS;
return pdPASS;
}
/*-----------------------------------------------------------*/
void vPortEndScheduler( void )
{
/* Do not implement. */
/* Do not implement. */
}
/*-----------------------------------------------------------*/


@ -1,63 +1,63 @@
// Copyright (c) 2020, XMOS Ltd, All rights reserved
/* Copyright (c) 2020, XMOS Ltd, All rights reserved */
#ifndef PORTMACRO_H
#define PORTMACRO_H
#define PORTMACRO_H
#ifndef __ASSEMBLER__
#ifndef __ASSEMBLER__
/* Inclusion of xc1.h will result in clock being defined as a type.
* By default, FreeRTOS will require standard time.h, where clock is a function.
*/
#ifndef USE_XCORE_CLOCK_TYPE
#define _clock_defined
#endif
#ifndef USE_XCORE_CLOCK_TYPE
#define _clock_defined
#endif
#include <xs1.h>
#include "rtos_support.h"
#include <xs1.h>
#include "rtos_support.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Type definitions. */
#define portSTACK_TYPE uint32_t
typedef portSTACK_TYPE StackType_t;
typedef double portDOUBLE;
typedef int32_t BaseType_t;
typedef uint32_t UBaseType_t;
#define portSTACK_TYPE uint32_t
typedef portSTACK_TYPE StackType_t;
typedef double portDOUBLE;
typedef int32_t BaseType_t;
typedef uint32_t UBaseType_t;
#define portBASE_TYPE BaseType_t
#define portBASE_TYPE BaseType_t
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#if ( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
#endif /* __ASSEMBLER__ */
#endif /* __ASSEMBLER__ */
/* Architecture specifics. These can be used by assembly files as well. */
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portCRITICAL_NESTING_IN_TCB 1
#define portMAX_CORE_COUNT 8
#ifndef configNUMBER_OF_CORES
#define configNUMBER_OF_CORES 1
#endif
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portCRITICAL_NESTING_IN_TCB 1
#define portMAX_CORE_COUNT 8
#ifndef configNUMBER_OF_CORES
#define configNUMBER_OF_CORES 1
#endif
/* This may be set to zero in the config file if the rtos_time
functions are not needed or if it is incremented elsewhere. */
#ifndef configUPDATE_RTOS_TIME_FROM_TICK_ISR
#define configUPDATE_RTOS_TIME_FROM_TICK_ISR 1
#endif
* functions are not needed or if it is incremented elsewhere. */
#ifndef configUPDATE_RTOS_TIME_FROM_TICK_ISR
#define configUPDATE_RTOS_TIME_FROM_TICK_ISR 1
#endif
/*
* When entering an ISR we need to grow the stack by one more word than
@@ -66,150 +66,149 @@ functions are not needed or if it is incremented elsewhere. */
* it is OK to store words at SP[0]. Therefore the ISR must leave SP[0] alone
* even though it is normally not necessary to do so.
*/
#define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH
#define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH
#ifndef __ASSEMBLER__
#ifndef __ASSEMBLER__
/* Check validity of number of cores specified in config */
#if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES )
#error "Invalid number of cores specified in config!"
#endif
#if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES )
#error "Invalid number of cores specified in config!"
#endif
#define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER()
#define portTASK_STACK_DEPTH(pxTaskCode) RTOS_THREAD_STACK_SIZE(pxTaskCode)
#define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER()
#define portTASK_STACK_DEPTH( pxTaskCode ) RTOS_THREAD_STACK_SIZE( pxTaskCode )
/*-----------------------------------------------------------*/
/* Scheduler utilities. */
#define portYIELD() asm volatile( "KCALLI_lu6 0" ::: "memory" )
#define portYIELD() asm volatile ( "KCALLI_lu6 0" ::: "memory" )
#define portEND_SWITCHING_ISR( xSwitchRequired ) \
do \
{ \
if( xSwitchRequired != pdFALSE ) \
{ \
extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \
ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \
} \
} while( 0 )
#define portEND_SWITCHING_ISR( xSwitchRequired ) \
do \
{ \
if( xSwitchRequired != pdFALSE ) \
{ \
extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \
ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \
} \
} while( 0 )
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
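A minimal sketch of the deferred-yield pattern these macros support (the ISR name and the commented-out notification call are hypothetical):
void vExampleDeviceISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    /* e.g. vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken ); */
    /* Only request a context switch on exit if a higher priority task was
     * unblocked by this interrupt. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}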
/*-----------------------------------------------------------*/
/* SMP utilities. */
#define portGET_CORE_ID() rtos_core_id_get()
#define portGET_CORE_ID() rtos_core_id_get()
void vPortYieldOtherCore( int xOtherCoreID );
#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
void vPortYieldOtherCore( int xOtherCoreID );
#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
/*-----------------------------------------------------------*/
/* Architecture specific optimisations. */
#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
#endif
#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
#endif
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
/* Store/clear the ready priorities in a bit map. */
#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
/* Store/clear the ready priorities in a bit map. */
#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) )
#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) )
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
/*-----------------------------------------------------------*/
/* Critical section management. */
#define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get()
#define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get()
/*
* This differs from the standard portDISABLE_INTERRUPTS()
* in that it also returns what the interrupt state was
 * before disabling interrupts.
*/
#define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all()
#define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all()
#define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all()
#define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all()
/*
* Port set interrupt mask and clear interrupt mask.
*/
#define portSET_INTERRUPT_MASK() rtos_interrupt_mask_all()
#define portCLEAR_INTERRUPT_MASK( ulState ) rtos_interrupt_mask_set( ulState )
#define portSET_INTERRUPT_MASK() rtos_interrupt_mask_all()
#define portCLEAR_INTERRUPT_MASK( ulState ) rtos_interrupt_mask_set( ulState )
#define portSET_INTERRUPT_MASK_FROM_ISR() ( 0 )
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) ( (void) x )
#define portSET_INTERRUPT_MASK_FROM_ISR() ( 0 )
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) ( ( void ) x )
/*
* Will enable interrupts if ulState is non-zero.
*/
#define portRESTORE_INTERRUPTS(ulState) rtos_interrupt_mask_set(ulState)
#define portRESTORE_INTERRUPTS( ulState ) rtos_interrupt_mask_set( ulState )
/*
* Returns non-zero if currently running in an
* ISR or otherwise in kernel mode.
*/
#define portCHECK_IF_IN_ISR() rtos_isr_running()
#define portCHECK_IF_IN_ISR() rtos_isr_running()
#define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 )
#define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 )
#define portGET_ISR_LOCK() rtos_lock_acquire(0)
#define portRELEASE_ISR_LOCK() rtos_lock_release(0)
#define portGET_TASK_LOCK() rtos_lock_acquire(1)
#define portRELEASE_TASK_LOCK() rtos_lock_release(1)
#define portGET_ISR_LOCK() rtos_lock_acquire( 0 )
#define portRELEASE_ISR_LOCK() rtos_lock_release( 0 )
#define portGET_TASK_LOCK() rtos_lock_acquire( 1 )
#define portRELEASE_TASK_LOCK() rtos_lock_release( 1 )
void vTaskEnterCritical(void);
void vTaskExitCritical(void);
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
void vTaskEnterCritical( void );
void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
extern UBaseType_t vTaskEnterCriticalFromISR( void );
extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );
#define portENTER_CRITICAL_FROM_ISR vTaskEnterCriticalFromISR
#define portEXIT_CRITICAL_FROM_ISR vTaskExitCriticalFromISR
extern UBaseType_t vTaskEnterCriticalFromISR( void );
extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );
#define portENTER_CRITICAL_FROM_ISR vTaskEnterCriticalFromISR
#define portEXIT_CRITICAL_FROM_ISR vTaskExitCriticalFromISR
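Typical use of the critical section macros above, as a sketch (the shared counter is hypothetical):
static volatile uint32_t ulSharedCounter = 0;
void vIncrementSharedCounter( void )
{
    portENTER_CRITICAL();
    ulSharedCounter++;
    portEXIT_CRITICAL();
}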
/*-----------------------------------------------------------*/
/* Runtime stats support */
#if ( configGENERATE_RUN_TIME_STATS == 1 )
int xscope_gettime( void );
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* nothing needed here */
#define portGET_RUN_TIME_COUNTER_VALUE() xscope_gettime()
#endif
#if ( configGENERATE_RUN_TIME_STATS == 1 )
int xscope_gettime( void );
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* nothing needed here */
#define portGET_RUN_TIME_COUNTER_VALUE() xscope_gettime()
#endif
/*-----------------------------------------------------------*/
/* Maps sprintf and snprintf to the lite version in lib_rtos_support */
#if ( configUSE_DEBUG_SPRINTF == 1 )
#define sprintf(...) rtos_sprintf(__VA_ARGS__)
#define snprintf(...) rtos_snprintf(__VA_ARGS__)
#endif
#if ( configUSE_DEBUG_SPRINTF == 1 )
#define sprintf( ... ) rtos_sprintf( __VA_ARGS__ )
#define snprintf( ... ) rtos_snprintf( __VA_ARGS__ )
#endif
/* Attribute for the pxCallbackFunction member of the Timer_t struct.
Required by xcc to calculate stack usage. */
#define portTIMER_CALLBACK_ATTRIBUTE __attribute__((fptrgroup("timerCallbackGroup")))
* Required by xcc to calculate stack usage. */
#define portTIMER_CALLBACK_ATTRIBUTE __attribute__( ( fptrgroup( "timerCallbackGroup" ) ) )
/* Timer callback function macros. For xcc this ensures they get added to the timer callback
group so that stack usage for certain functions in timers.c can be calculated. */
#define portTIMER_CALLBACK_FUNCTION_PROTO( vFunction, xTimer ) void vFunction( TimerHandle_t xTimer )
#define portTIMER_CALLBACK_FUNCTION( vFunction, xTimer ) portTIMER_CALLBACK_ATTRIBUTE void vFunction( TimerHandle_t xTimer )
* group so that stack usage for certain functions in timers.c can be calculated. */
#define portTIMER_CALLBACK_FUNCTION_PROTO( vFunction, xTimer ) void vFunction( TimerHandle_t xTimer )
#define portTIMER_CALLBACK_FUNCTION( vFunction, xTimer ) portTIMER_CALLBACK_ATTRIBUTE void vFunction( TimerHandle_t xTimer )
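A sketch of declaring a software timer callback with the macros above, so that xcc can place it in the timer callback fptrgroup for stack usage analysis (the callback name is hypothetical):
#include "FreeRTOS.h"
#include "timers.h"
portTIMER_CALLBACK_FUNCTION_PROTO( vExampleTimerCallback, xTimer );
portTIMER_CALLBACK_FUNCTION( vExampleTimerCallback, xTimer )
{
    ( void ) xTimer;
    /* ...periodic work... */
}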
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. These are
not necessary for to use this port. They are defined so the common demo files
(which build with all the ports) will build. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
 * not necessary to use this port. They are defined so the common demo files
* (which build with all the ports) will build. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/*-----------------------------------------------------------*/
#ifdef __cplusplus
#ifdef __cplusplus
}
#endif
#endif
#endif /* __ASSEMBLER__ */
#endif /* __ASSEMBLER__ */
#endif /* PORTMACRO_H */


@@ -1,4 +1,4 @@
// Copyright (c) 2020, XMOS Ltd, All rights reserved
/* Copyright (c) 2020, XMOS Ltd, All rights reserved */
#ifndef RTOS_SUPPORT_RTOS_CONFIG_H_
#define RTOS_SUPPORT_RTOS_CONFIG_H_
@@ -6,7 +6,7 @@
/**
* Lets the application know that the RTOS in use is FreeRTOS.
*/
#define RTOS_FREERTOS 1
#define RTOS_FREERTOS 1
/**
* The number of words to extend the stack by when entering an ISR.
@@ -17,28 +17,29 @@
* it is OK to store words at SP[0]. Therefore the ISR must leave SP[0] alone
* even though it is normally not necessary to do so.
*/
#define RTOS_SUPPORT_INTERRUPT_STACK_GROWTH ( 44 + 1 )
#define RTOS_SUPPORT_INTERRUPT_STACK_GROWTH ( 44 + 1 )
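The ( 44 + 1 ) figure can be read against the thread context laid out in pxPortInitialiseStack(): SP[1] through SP[44] hold the saved registers and vector state (44 words), and one further word keeps the interrupted thread's SP[0] untouched, as described above. A sketch of that relationship (the names are illustrative only):
/* Illustrative only: 44 words of saved context plus 1 word reserved so the
 * ISR never writes the interrupted thread's SP[0]. */
#define EXAMPLE_CONTEXT_WORDS    44
#define EXAMPLE_RESERVED_SP0     1
#define EXAMPLE_STACK_GROWTH     ( EXAMPLE_CONTEXT_WORDS + EXAMPLE_RESERVED_SP0 )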
/**
* The word offset into the stack where R1 is to be stored after it
* is extended when saving a thread's context.
*/
#define RTOS_SUPPORT_INTERRUPT_R1_STACK_OFFSET 9
#define RTOS_SUPPORT_INTERRUPT_R1_STACK_OFFSET 9
/**
* The word offset into the stack where R11 is to be stored after it
* is extended when saving a thread's context.
*/
#define RTOS_SUPPORT_INTERRUPT_R11_STACK_OFFSET 19
#define RTOS_SUPPORT_INTERRUPT_R11_STACK_OFFSET 19
/**
* The RTOS provided handler that should run when a
* core receives an intercore interrupt request.
*/
#define RTOS_INTERCORE_INTERRUPT_ISR() do { \
void vIntercoreInterruptISR( void ); \
vIntercoreInterruptISR(); \
} while ( 0 )
#define RTOS_INTERCORE_INTERRUPT_ISR() \
do { \
void vIntercoreInterruptISR( void ); \
vIntercoreInterruptISR(); \
} while( 0 )
/**
* The number of hardware locks that the RTOS
@@ -48,48 +49,47 @@
* Note that the IRQ routines require a lock and
* will share the first one with the RTOS.
*/
#define RTOS_LOCK_COUNT 2
#define RTOS_LOCK_COUNT 2
/**
* Remaps all calls to debug_printf() to rtos_printf().
* When this is on, files should not include both rtos_support.h
* and debug_print.h.
*/
#define RTOS_DEBUG_PRINTF_REMAP 1
#define RTOS_DEBUG_PRINTF_REMAP 1
#ifdef configENABLE_DEBUG_PRINTF
#if configENABLE_DEBUG_PRINTF
#if configENABLE_DEBUG_PRINTF
/* ensure that debug_printf is enabled */
#ifdef DEBUG_PRINT_ENABLE
#undef DEBUG_PRINT_ENABLE
#endif
#define DEBUG_PRINT_ENABLE 1
/* ensure that debug_printf is enabled */
#ifdef DEBUG_PRINT_ENABLE
#undef DEBUG_PRINT_ENABLE
#endif
#define DEBUG_PRINT_ENABLE 1
#ifndef configTASKS_DEBUG
#define configTASKS_DEBUG 0
#endif
#if configTASKS_DEBUG == 1
#define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1
#else
#define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1
#endif
#ifndef configTASKS_DEBUG
#define configTASKS_DEBUG 0
#endif
#if configTASKS_DEBUG == 1
#define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1
#else
#define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1
#endif
#else /* configENABLE_DEBUG_PRINTF */
#else /* configENABLE_DEBUG_PRINTF */
/* ensure that debug_printf is disabled */
#ifdef DEBUG_UNIT
#undef DEBUG_UNIT
#endif
#ifdef DEBUG_PRINT_ENABLE
#undef DEBUG_PRINT_ENABLE
#endif
/* ensure that debug_printf is disabled */
#ifdef DEBUG_UNIT
#undef DEBUG_UNIT
#endif
#ifdef DEBUG_PRINT_ENABLE
#undef DEBUG_PRINT_ENABLE
#endif
#define DEBUG_PRINT_ENABLE 0
#define DEBUG_PRINT_ENABLE 0
#endif /* configENABLE_DEBUG_PRINTF */
#endif
#endif /* configENABLE_DEBUG_PRINTF */
#endif /* ifdef configENABLE_DEBUG_PRINTF */
#endif /* RTOS_SUPPORT_RTOS_CONFIG_H_ */