Make the ARM_AARCH64 port SMP-ready

This commit is contained in:
NikJen26 2024-09-07 11:23:19 +02:00
parent a045081f73
commit d30956e7da
3 changed files with 541 additions and 237 deletions


@@ -27,11 +27,14 @@
 */
 /* Standard includes. */
-#include <stdlib.h>
+#include <string.h>
 /* Scheduler includes. */
 #include "FreeRTOS.h"
 #include "task.h"
+#if ( configNUMBER_OF_CORES > 1 )
+#include "hardware_setup.h"
+#endif
 #ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS
 #error "configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. See www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html"
@@ -118,16 +121,6 @@
 /* The I bit in the DAIF bits. */
 #define portDAIF_I ( 0x80 )
-/* Macro to unmask all interrupt priorities. */
-#define portCLEAR_INTERRUPT_MASK()                            \
-{                                                             \
-    portDISABLE_INTERRUPTS();                                 \
-    portICCPMR_PRIORITY_MASK_REGISTER = portUNMASK_VALUE;     \
-    __asm volatile ( "DSB SY \n"                              \
-                     "ISB SY \n" );                           \
-    portENABLE_INTERRUPTS();                                  \
-}
 /* Hardware specifics used when sanity checking the configuration. */
 #define portINTERRUPT_PRIORITY_REGISTER_OFFSET 0x400UL
 #define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff )
@@ -152,18 +145,26 @@ extern void vPortRestoreTaskContext( void );
 * a non zero value to ensure interrupts don't inadvertently become unmasked before
 * the scheduler starts. As it is stored as part of the task context it will
 * automatically be set to 0 when the first task is started. */
-volatile uint64_t ullCriticalNesting = 9999ULL;
+#if ( configNUMBER_OF_CORES == 1 )
+volatile uint64_t ullCriticalNesting = 0ULL;
 /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
 * then floating point context must be saved and restored for the task. */
 uint64_t ullPortTaskHasFPUContext = pdFALSE;
 /* Set to 1 to pend a context switch from an ISR. */
 uint64_t ullPortYieldRequired = pdFALSE;
 /* Counts the interrupt nesting depth. A context switch is only performed if
 * if the nesting depth is 0. */
 uint64_t ullPortInterruptNesting = 0;
+#else /* #if ( configNUMBER_OF_CORES == 1 ) */
+#if ( portCRITICAL_NESTING_IN_TCB == 0 )
+volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0ULL };
+#endif
+/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
+* then floating point context must be saved and restored for the task. */
+uint64_t ullPortTaskHasFPUContext[configNUMBER_OF_CORES] = {pdFALSE};
+uint64_t ullPortYieldRequired[configNUMBER_OF_CORES] = {pdFALSE};
+uint64_t ullPortInterruptNestings[configNUMBER_OF_CORES] = {0};
+#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 /* Used in the ASM code. */
 __attribute__( ( used ) ) const uint64_t ullICCEOIR = portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS;
@@ -171,6 +172,80 @@ __attribute__( ( used ) ) const uint64_t ullICCIAR = portICCIAR_INTERRUPT_ACKNOW
 __attribute__( ( used ) ) const uint64_t ullICCPMR = portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS;
 __attribute__( ( used ) ) const uint64_t ullMaxAPIPriorityMask = ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
+#if ( configNUMBER_OF_CORES > 1 )
+/* A simple recursive spinlock: 'lock' is the exclusive-access flag, 'owner'
+ * the core currently holding it, 'count' the recursion depth. */
+typedef struct
+{
+    volatile uint32_t lock;
+    uint32_t owner;
+    uint32_t count;
+} recursive_spinlock_t;
+
+/* Index 0 is used as the ISR lock, index 1 as the task lock. */
+static recursive_spinlock_t rms[ 2 ] =
+{
+    { .lock = 0, .owner = 0xFFFFFFFF, .count = 0 },
+    { .lock = 0, .owner = 0xFFFFFFFF, .count = 0 }
+};
+
+/* Load-acquire exclusive. */
+static inline uint32_t ldaxr( volatile uint32_t * addr )
+{
+    uint32_t value;
+
+    asm volatile ( "ldaxr %w0, [%1]" : "=r" ( value ) : "r" ( addr ) : "memory" );
+    return value;
+}
+
+/* Store-release exclusive.  Returns 0 on success, non-zero if the exclusive
+ * monitor was lost and the store must be retried. */
+static inline uint32_t stlxr( volatile uint32_t * addr, uint32_t value )
+{
+    uint32_t success;
+
+    asm volatile (
+        "stlxr %w[success], %w[value], [%[address]]"
+        : [success] "=&r" ( success )
+        : [value] "r" ( value ), [address] "r" ( addr )
+        : "memory"
+    );
+    return success;
+}
+
+void vPortRecursiveLock( uint32_t ulLockNum )
+{
+    uint32_t ulCoreNum = portGET_CORE_ID();
+    recursive_spinlock_t * lock = &rms[ ulLockNum ];
+    uint32_t success;
+
+    configASSERT( ulLockNum < 2 );
+
+    /* Already owned by this core - just increase the recursion count. */
+    if( ldaxr( &lock->lock ) && ( lock->owner == ulCoreNum ) )
+    {
+        lock->count++;
+        return;
+    }
+
+    /* Spin until the lock is observed free, then claim it with an exclusive
+     * store.  Only wait for an event after seeing the lock held, so the
+     * first iteration cannot stall on an empty event register. */
+    do
+    {
+        while( ldaxr( &lock->lock ) != 0 )
+        {
+            asm volatile ( "wfe" );
+        }
+        success = stlxr( &lock->lock, 1 );
+    } while( success != 0 );
+
+    lock->owner = ulCoreNum;
+    lock->count = 1;
+}
+
+void vPortRecursiveUnlock( uint32_t ulLockNum )
+{
+    uint32_t ulCoreNum = portGET_CORE_ID();
+    recursive_spinlock_t * lock = &rms[ ulLockNum ];
+
+    configASSERT( ulLockNum < 2 );
+
+    /* Only the owning core may release, and only the outermost unlock
+     * actually frees the lock. */
+    if( ldaxr( &lock->lock ) && ( lock->owner == ulCoreNum ) )
+    {
+        if( --lock->count == 0 )
+        {
+            lock->owner = 0xFFFFFFFF;   /* No owner. */
+            asm volatile ( "stlr %w0, [%1]" : : "r" ( 0 ), "r" ( &lock->lock ) : "memory" );
+            asm volatile ( "sev" );     /* Wake cores waiting in WFE. */
+        }
+    }
+}
+#endif /* configNUMBER_OF_CORES > 1 */
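For reference, a minimal usage sketch of the recursive locks above (illustrative only; vExampleNestedLocking is not part of this port). The same core may take a lock it already holds, and only the outermost unlock releases it:

void vExampleNestedLocking( void )
{
    vPortRecursiveLock( 1u );     /* First acquisition: spins until free, count becomes 1. */
    vPortRecursiveLock( 1u );     /* Same core re-enters: count becomes 2, no spinning.    */
    vPortRecursiveUnlock( 1u );   /* Count back to 1, lock still held.                     */
    vPortRecursiveUnlock( 1u );   /* Count 0: lock released, SEV wakes cores in WFE.       */
}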
 /*-----------------------------------------------------------*/
 /*
@@ -282,7 +357,11 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
 pxTopOfStack--;
 *pxTopOfStack = pdTRUE;
+#if configNUMBER_OF_CORES == 1
 ullPortTaskHasFPUContext = pdTRUE;
+#else
+ullPortTaskHasFPUContext[portGET_CORE_ID()] = pdTRUE;
+#endif
 }
 #else /* if ( configUSE_TASK_FPU_SUPPORT == 1 ) */
 {
@@ -363,6 +442,13 @@ BaseType_t xPortStartScheduler( void )
 * executing. */
 portDISABLE_INTERRUPTS();
+#if ( configNUMBER_OF_CORES > 1 )
+/* Start all other cores and let them execute vPortRestoreTaskContext(). */
+for( int i = 1; i < configNUMBER_OF_CORES; i++ )
+{
+    vPortStartCore( i );
+}
+#endif
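vPortStartCore() is not defined in this commit; it is expected to come from the application's hardware_setup.h/BSP. As a heavily hedged illustration, on a PSCI-capable platform built for EL1 (GUEST) it might look like the sketch below; the vPortSecondaryEntry symbol and the PSCI mechanism are assumptions, and EL3 or spin-table platforms need a different approach:

/* Hypothetical sketch - not part of this commit.  Assumes PSCI firmware and
 * an EL1 (GUEST) build. */
extern void vPortSecondaryEntry( void );   /* Assumed entry point that ends in vPortRestoreTaskContext(). */

void vPortStartCore( int xCoreID )
{
    register uint64_t x0 __asm__ ( "x0" ) = 0xC4000003ULL;           /* PSCI CPU_ON (SMC64). */
    register uint64_t x1 __asm__ ( "x1" ) = ( uint64_t ) xCoreID;    /* Target core (MPIDR Aff0). */
    register uint64_t x2 __asm__ ( "x2" ) = ( uint64_t ) vPortSecondaryEntry;
    register uint64_t x3 __asm__ ( "x3" ) = 0ULL;                    /* Context ID, unused here. */

    __asm volatile ( "smc #0" : "+r" ( x0 ) : "r" ( x1 ), "r" ( x2 ), "r" ( x3 ) : "memory" );
}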
 /* Start the timer that generates the tick ISR. */
 configSETUP_TICK_INTERRUPT();
@@ -379,10 +465,15 @@ void vPortEndScheduler( void )
 {
 /* Not implemented in ports where there is nothing to return to.
 * Artificially force an assert. */
+#if ( configNUMBER_OF_CORES == 1 )
 configASSERT( ullCriticalNesting == 1000ULL );
+#else
+configASSERT( portGET_CRITICAL_NESTING_COUNT() == 1000ULL );
+#endif
 }
 /*-----------------------------------------------------------*/
+#if ( configNUMBER_OF_CORES == 1 )
 void vPortEnterCritical( void )
 {
 /* Mask interrupts up to the max syscall interrupt priority. */
@@ -419,11 +510,13 @@ void vPortExitCritical( void )
 {
 /* Critical nesting has reached zero so all interrupt priorities
 * should be unmasked. */
-portCLEAR_INTERRUPT_MASK();
+vPortClearInterruptMask(pdFALSE);
 }
 }
 }
 /*-----------------------------------------------------------*/
+#endif
 void FreeRTOS_Tick_Handler( void )
 {
@@ -434,6 +527,9 @@ void FreeRTOS_Tick_Handler( void )
 }
 #endif
+/* Ok to enable interrupts after the interrupt source has been cleared. */
+configCLEAR_TICK_INTERRUPT();
 /* Interrupts should not be enabled before this point. */
 #if ( configASSERT_DEFINED == 1 )
 {
@@ -444,50 +540,65 @@
 }
 #endif /* configASSERT_DEFINED */
-/* Set interrupt mask before altering scheduler structures. The tick
-* handler runs at the lowest priority, so interrupts cannot already be masked,
-* so there is no need to save and restore the current mask value. It is
-* necessary to turn off interrupts in the CPU itself while the ICCPMR is being
-* updated. */
-portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
-__asm volatile ( "dsb sy \n"
-                 "isb sy \n" ::: "memory" );
-/* Ok to enable interrupts after the interrupt source has been cleared. */
-configCLEAR_TICK_INTERRUPT();
-portENABLE_INTERRUPTS();
+uint32_t ulPreviousMask;
+ulPreviousMask = taskENTER_CRITICAL_FROM_ISR();
 /* Increment the RTOS tick. */
 if( xTaskIncrementTick() != pdFALSE )
 {
+#if ( configNUMBER_OF_CORES == 1 )
 ullPortYieldRequired = pdTRUE;
+#else
+ullPortYieldRequired[portGET_CORE_ID()] = pdTRUE;
+#endif
 }
-/* Ensure all interrupt priorities are active again. */
-portCLEAR_INTERRUPT_MASK();
+taskEXIT_CRITICAL_FROM_ISR( ulPreviousMask );
 }
 /*-----------------------------------------------------------*/
+uint32_t uxPortGetCoreID( void )
+{
+    uint64_t ullCoreID;
+
+    /* Aff0 of MPIDR_EL1 identifies the core.  Use a 64-bit temporary so MRS
+     * is given the X form of the register. */
+    __asm volatile ( "mrs %0, mpidr_el1" : "=r" ( ullCoreID ) : : );
+    return ( uint32_t ) ( ullCoreID & 0xff );
+}
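As a usage illustration only (the function below is not part of the port): the value returned here is what portGET_CORE_ID() expands to, and it is the index used for the per-core arrays declared above in the SMP build:

/* Illustrative only: pend a yield on whichever core this code is running on
 * (SMP build, where ullPortYieldRequired is a per-core array). */
void vExamplePendYieldOnThisCore( void )
{
    uint32_t ulCore = uxPortGetCoreID();        /* Aff0 of MPIDR_EL1: 0 .. configNUMBER_OF_CORES - 1. */

    ullPortYieldRequired[ ulCore ] = pdTRUE;    /* Serviced on this core's next interrupt exit. */
}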
 #if ( configUSE_TASK_FPU_SUPPORT != 2 )
 void vPortTaskUsesFPU( void )
 {
 /* A task is registering the fact that it needs an FPU context. Set the
 * FPU flag (which is saved as part of the task context). */
-ullPortTaskHasFPUContext = pdTRUE;
+#if ( configNUMBER_OF_CORES == 1 )
+ullPortTaskHasFPUContext = pdTRUE;
+#else
+ullPortTaskHasFPUContext[portGET_CORE_ID()] = pdTRUE;
+#endif
 /* Consider initialising the FPSR here - but probably not necessary in
 * AArch64. */
 }
 #endif /* configUSE_TASK_FPU_SUPPORT */
 /*-----------------------------------------------------------*/
+#define portCLEAR_INTERRUPT_MASK_P()                    \
+{                                                       \
+    portDISABLE_INTERRUPTS();                           \
+    portICCPMR_PRIORITY_MASK_REGISTER = 0xFFFF;         \
+    __asm volatile ( "DSB SY \n"                        \
+                     "ISB SY \n" );                     \
+    portENABLE_INTERRUPTS();                            \
+}
 void vPortClearInterruptMask( UBaseType_t uxNewMaskValue )
 {
 if( uxNewMaskValue == pdFALSE )
 {
-portCLEAR_INTERRUPT_MASK();
+portCLEAR_INTERRUPT_MASK_P();
 }
 }
 /*-----------------------------------------------------------*/
@@ -495,12 +606,13 @@ void vPortClearInterruptMask( UBaseType_t uxNewMaskValue )
 UBaseType_t uxPortSetInterruptMask( void )
 {
 uint32_t ulReturn;
+uint32_t mask;
 /* Interrupt in the CPU must be turned off while the ICCPMR is being
 * updated. */
 portDISABLE_INTERRUPTS();
-if( portICCPMR_PRIORITY_MASK_REGISTER == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
+mask = portICCPMR_PRIORITY_MASK_REGISTER;
+if( mask == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
 {
 /* Interrupts were already masked. */
 ulReturn = pdTRUE;


@@ -25,17 +25,24 @@
 * https://github.com/FreeRTOS
 *
 */
-.text
+#include "FreeRTOSConfig.h"
+.section .interrupt_handlers, "ax"
 /* Variables and functions. */
 .extern ullMaxAPIPriorityMask
+#if configNUMBER_OF_CORES == 1
 .extern pxCurrentTCB
+.extern ullCriticalNesting
+.extern ullPortInterruptNesting
+#else
+.extern pxCurrentTCBs
+.extern ullCriticalNestings
+.extern ullPortInterruptNestings
+#endif
 .extern vTaskSwitchContext
 .extern vApplicationIRQHandler
-.extern ullPortInterruptNesting
 .extern ullPortTaskHasFPUContext
-.extern ullCriticalNesting
 .extern ullPortYieldRequired
 .extern ullICCEOIR
 .extern ullICCIAR
@@ -45,12 +52,7 @@
 .global FreeRTOS_SWI_Handler
 .global vPortRestoreTaskContext
-.macro portSAVE_CONTEXT
-/* Switch to use the EL0 stack pointer. */
-MSR SPSEL, #0
+.macro saveallgpregisters
 /* Save the entire context. */
 STP X0, X1, [SP, #-0x10]!
 STP X2, X3, [SP, #-0x10]!
@@ -68,30 +70,56 @@
 STP X26, X27, [SP, #-0x10]!
 STP X28, X29, [SP, #-0x10]!
 STP X30, XZR, [SP, #-0x10]!
-/* Save the SPSR. */
-#if defined( GUEST )
-MRS X3, SPSR_EL1
-MRS X2, ELR_EL1
-#else
-MRS X3, SPSR_EL3
-/* Save the ELR. */
-MRS X2, ELR_EL3
-#endif
+.endm
+.macro restoreallgpregisters
+LDP X30, XZR, [SP], #0x10
+LDP X28, X29, [SP], #0x10
+LDP X26, X27, [SP], #0x10
+LDP X24, X25, [SP], #0x10
+LDP X22, X23, [SP], #0x10
+LDP X20, X21, [SP], #0x10
+LDP X18, X19, [SP], #0x10
+LDP X16, X17, [SP], #0x10
+LDP X14, X15, [SP], #0x10
+LDP X12, X13, [SP], #0x10
+LDP X10, X11, [SP], #0x10
+LDP X8, X9, [SP], #0x10
+LDP X6, X7, [SP], #0x10
+LDP X4, X5, [SP], #0x10
+LDP X2, X3, [SP], #0x10
+LDP X0, X1, [SP], #0x10
+.endm
+.macro savefuncontextgpregs
+STP X0, X1, [SP, #-0x10]!
 STP X2, X3, [SP, #-0x10]!
-/* Save the critical section nesting depth. */
-LDR X0, ullCriticalNestingConst
-LDR X3, [X0]
-/* Save the FPU context indicator. */
-LDR X0, ullPortTaskHasFPUContextConst
-LDR X2, [X0]
-/* Save the FPU context, if any (32 128-bit registers). */
-CMP X2, #0
-B.EQ 1f
+STP X4, X5, [SP, #-0x10]!
+STP X6, X7, [SP, #-0x10]!
+STP X8, X9, [SP, #-0x10]!
+STP X10, X11, [SP, #-0x10]!
+STP X12, X13, [SP, #-0x10]!
+STP X14, X15, [SP, #-0x10]!
+STP X16, X17, [SP, #-0x10]!
+STP X18, X19, [SP, #-0x10]!
+STP X29, X30, [SP, #-0x10]!
+.endm
+.macro restorefuncontextgpregs
+LDP X29, X30, [SP], #0x10
+LDP X18, X19, [SP], #0x10
+LDP X16, X17, [SP], #0x10
+LDP X14, X15, [SP], #0x10
+LDP X12, X13, [SP], #0x10
+LDP X10, X11, [SP], #0x10
+LDP X8, X9, [SP], #0x10
+LDP X6, X7, [SP], #0x10
+LDP X4, X5, [SP], #0x10
+LDP X2, X3, [SP], #0x10
+LDP X0, X1, [SP], #0x10
+.endm
+.macro savefloatregisters
 STP Q0, Q1, [SP,#-0x20]!
 STP Q2, Q3, [SP,#-0x20]!
 STP Q4, Q5, [SP,#-0x20]!
@@ -108,12 +136,86 @@
 STP Q26, Q27, [SP,#-0x20]!
 STP Q28, Q29, [SP,#-0x20]!
 STP Q30, Q31, [SP,#-0x20]!
+.endm
+.macro restorefloatregisters
+LDP Q30, Q31, [SP], #0x20
+LDP Q28, Q29, [SP], #0x20
+LDP Q26, Q27, [SP], #0x20
+LDP Q24, Q25, [SP], #0x20
+LDP Q22, Q23, [SP], #0x20
+LDP Q20, Q21, [SP], #0x20
+LDP Q18, Q19, [SP], #0x20
+LDP Q16, Q17, [SP], #0x20
+LDP Q14, Q15, [SP], #0x20
+LDP Q12, Q13, [SP], #0x20
+LDP Q10, Q11, [SP], #0x20
+LDP Q8, Q9, [SP], #0x20
+LDP Q6, Q7, [SP], #0x20
+LDP Q4, Q5, [SP], #0x20
+LDP Q2, Q3, [SP], #0x20
+LDP Q0, Q1, [SP], #0x20
+.endm
+.macro portSAVE_CONTEXT
+/* Switch to use the EL0 stack pointer. */
+MSR SPSEL, #0
+/* Save the entire context. */
+saveallgpregisters
+/* Save the SPSR. */
+#if defined( GUEST )
+MRS X3, SPSR_EL1
+MRS X2, ELR_EL1
+#else
+MRS X3, SPSR_EL3
+/* Save the ELR. */
+MRS X2, ELR_EL3
+#endif
+STP X2, X3, [SP, #-0x10]!
+/* Save the critical section nesting depth. */
+LDR X0, ullCriticalNestingsConst
+#if configNUMBER_OF_CORES > 1
+/* Calculate the correct index for ullCriticalNestings array based on core ID. */
+MRS X1, MPIDR_EL1 // Read the Multiprocessor Affinity Register
+AND X1, X1, #0xff // Extract Aff0 which contains the core ID
+/* Calculate offset to the correct critical nesting value based on the core ID */
+LSL X1, X1, #3 // Multiply core ID by 8 (size of a pointer on ARM64)
+ADD X0, X0, X1 // Add to the base of the critical nesting array
+#endif
+LDR X3, [X0]
+/* Save the FPU context indicator. */
+LDR X0, ullPortTaskHasFPUContextConst
+#if configNUMBER_OF_CORES > 1
+ADD X0, X0, X1 // Add to the base of the FPU array
+#endif
+LDR X2, [X0]
+/* Save the FPU context, if any (32 128-bit registers). */
+CMP X2, #0
+B.EQ 1f
+savefloatregisters
 1:
 /* Store the critical nesting count and FPU context indicator. */
 STP X2, X3, [SP, #-0x10]!
-LDR X0, pxCurrentTCBConst
+LDR X0, pxCurrentTCBsConst
+#if configNUMBER_OF_CORES > 1
+MRS X1, MPIDR_EL1 // Read the Multiprocessor Affinity Register
+AND X1, X1, #0xff // Extract Aff0 which contains the core ID
+/* Calculate offset to the correct TCB pointer based on the core ID */
+LSL X1, X1, #3 // Multiply core ID by 8 (size of a pointer on ARM64)
+ADD X0, X0, X1 // Add to the base of the TCB array
+#endif
+/* Load the address of the TCB for the current core */
 LDR X1, [X0]
 MOV X0, SP /* Move SP into X0 for saving. */
 STR X0, [X1]
@@ -131,16 +233,33 @@
 MSR SPSEL, #0
 /* Set the SP to point to the stack of the task being restored. */
-LDR X0, pxCurrentTCBConst
+LDR X0, pxCurrentTCBsConst
+#if configNUMBER_OF_CORES > 1
+/* Get the core ID to index the TCB correctly. */
+MRS X2, MPIDR_EL1 // Read the Multiprocessor Affinity Register
+AND X2, X2, #0xff // Extract Aff0 which contains the core ID
+LSL X2, X2, #3 // Scale the core ID to the size of a pointer (64-bit system)
+ADD X0, X0, X2 // Add the offset for the current core's TCB pointer
+#endif
 LDR X1, [X0]
 LDR X0, [X1]
 MOV SP, X0
 LDP X2, X3, [SP], #0x10 /* Critical nesting and FPU context. */
-/* Set the PMR register to be correct for the current critical nesting
-depth. */
-LDR X0, ullCriticalNestingConst /* X0 holds the address of ullCriticalNesting. */
+/* Calculate the address of the current core's ullCriticalNesting entry. */
+LDR X0, ullCriticalNestingsConst /* Load base address of the ullCriticalNesting array */
+#if configNUMBER_OF_CORES > 1
+/* Existing code to get core ID and scale to pointer size is reused. */
+MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
+AND X1, X1, #0xff /* Extract Aff0, which contains the core ID */
+LSL X1, X1, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
+ADD X0, X0, X1 /* X0 now holds the address of this core's critical nesting count,
+                  which the STR X3, [X0] below relies on - do not dereference it here. */
+#endif
 MOV X1, #255 /* X1 holds the unmask value. */
 LDR X4, ullICCPMRConst /* X4 holds the address of the ICCPMR constant. */
 CMP X3, #0
@@ -154,29 +273,22 @@
 ISB SY
 STR X3, [X0] /* Restore the task's critical nesting count. */
-/* Restore the FPU context indicator. */
 LDR X0, ullPortTaskHasFPUContextConst
+#if configNUMBER_OF_CORES > 1
+/* Existing code to get core ID and scale to pointer size is reused. */
+MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
+AND X1, X1, #0xff /* Extract Aff0, which contains the core ID */
+LSL X1, X1, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
+/* Restore the FPU context indicator. */
+ADD X0, X0, X1 // Add to the base of the FPU array
+#endif
 STR X2, [X0]
 /* Restore the FPU context, if any. */
 CMP X2, #0
 B.EQ 1f
-LDP Q30, Q31, [SP], #0x20
-LDP Q28, Q29, [SP], #0x20
-LDP Q26, Q27, [SP], #0x20
-LDP Q24, Q25, [SP], #0x20
-LDP Q22, Q23, [SP], #0x20
-LDP Q20, Q21, [SP], #0x20
-LDP Q18, Q19, [SP], #0x20
-LDP Q16, Q17, [SP], #0x20
-LDP Q14, Q15, [SP], #0x20
-LDP Q12, Q13, [SP], #0x20
-LDP Q10, Q11, [SP], #0x20
-LDP Q8, Q9, [SP], #0x20
-LDP Q6, Q7, [SP], #0x20
-LDP Q4, Q5, [SP], #0x20
-LDP Q2, Q3, [SP], #0x20
-LDP Q0, Q1, [SP], #0x20
+restorefloatregisters
 1:
 LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
@@ -192,22 +304,7 @@
 MSR ELR_EL3, X2
 #endif
-LDP X30, XZR, [SP], #0x10
-LDP X28, X29, [SP], #0x10
-LDP X26, X27, [SP], #0x10
-LDP X24, X25, [SP], #0x10
-LDP X22, X23, [SP], #0x10
-LDP X20, X21, [SP], #0x10
-LDP X18, X19, [SP], #0x10
-LDP X16, X17, [SP], #0x10
-LDP X14, X15, [SP], #0x10
-LDP X12, X13, [SP], #0x10
-LDP X10, X11, [SP], #0x10
-LDP X8, X9, [SP], #0x10
-LDP X6, X7, [SP], #0x10
-LDP X4, X5, [SP], #0x10
-LDP X2, X3, [SP], #0x10
-LDP X0, X1, [SP], #0x10
+restoreallgpregisters
 /* Switch to use the ELx stack pointer. _RB_ Might not be required. */
 MSR SPSEL, #1
@@ -239,6 +336,8 @@ FreeRTOS_SWI_Handler:
 CMP X1, #0x17 /* 0x17 = SMC instruction. */
 #endif
 B.NE FreeRTOS_Abort
+/* In an SMP build vTaskSwitchContext() takes the core ID as its argument. */
+MRS x0, mpidr_el1
+AND x0, x0, 255
 BL vTaskSwitchContext
 portRESTORE_CONTEXT
@@ -276,17 +375,8 @@ vPortRestoreTaskContext:
 .type FreeRTOS_IRQ_Handler, %function
 FreeRTOS_IRQ_Handler:
 /* Save volatile registers. */
-STP X0, X1, [SP, #-0x10]!
-STP X2, X3, [SP, #-0x10]!
-STP X4, X5, [SP, #-0x10]!
-STP X6, X7, [SP, #-0x10]!
-STP X8, X9, [SP, #-0x10]!
-STP X10, X11, [SP, #-0x10]!
-STP X12, X13, [SP, #-0x10]!
-STP X14, X15, [SP, #-0x10]!
-STP X16, X17, [SP, #-0x10]!
-STP X18, X19, [SP, #-0x10]!
-STP X29, X30, [SP, #-0x10]!
+savefuncontextgpregs
+savefloatregisters
 /* Save the SPSR and ELR. */
 #if defined( GUEST )
@@ -299,7 +389,16 @@ FreeRTOS_IRQ_Handler:
 STP X2, X3, [SP, #-0x10]!
 /* Increment the interrupt nesting counter. */
-LDR X5, ullPortInterruptNestingConst
+LDR X5, ullPortInterruptNestingsConst /* Load base address of the interrupt nesting array */
+#if configNUMBER_OF_CORES > 1
+/* Existing code to get core ID and scale to pointer size is reused. */
+MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
+AND X2, X2, #0xff /* Extract Aff0, which contains the core ID */
+LSL X2, X2, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
+/* Calculate the offset for the current core's interrupt nesting count. */
+ADD X5, X5, X2 /* Add offset for the current core's ullPortInterruptNestings entry */
+#endif
 LDR X1, [X5] /* Old nesting count in X1. */
 ADD X6, X1, #1
 STR X6, [X5] /* Address of nesting count variable in X5. */
@@ -313,6 +412,7 @@ FreeRTOS_IRQ_Handler:
 LDR X3, [X2]
 LDR W0, [X3] /* ICCIAR in W0 as parameter. */
+/* ICCIAR in W0 as parameter. */
 /* Maintain the ICCIAR value across the function call. */
 STP X0, X1, [SP, #-0x10]!
@@ -342,6 +442,16 @@ FreeRTOS_IRQ_Handler:
 /* Is a context switch required? */
 LDR X0, ullPortYieldRequiredConst
+#if configNUMBER_OF_CORES > 1
+/* Existing code to get core ID and scale to pointer size is reused. */
+MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
+AND X2, X2, #0xff /* Extract Aff0, which contains the core ID */
+LSL X2, X2, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
+/* Calculate offset for the current core's ullPortYieldRequired and load its address. */
+ADD X0, X0, X2 /* Add offset for the current core's ullPortYieldRequired */
+#endif
 LDR X1, [X0]
 CMP X1, #0
 B.EQ Exit_IRQ_No_Context_Switch
@@ -362,20 +472,13 @@ FreeRTOS_IRQ_Handler:
 DSB SY
 ISB SY
-LDP X29, X30, [SP], #0x10
-LDP X18, X19, [SP], #0x10
-LDP X16, X17, [SP], #0x10
-LDP X14, X15, [SP], #0x10
-LDP X12, X13, [SP], #0x10
-LDP X10, X11, [SP], #0x10
-LDP X8, X9, [SP], #0x10
-LDP X6, X7, [SP], #0x10
-LDP X4, X5, [SP], #0x10
-LDP X2, X3, [SP], #0x10
-LDP X0, X1, [SP], #0x10
+restorefloatregisters
+restorefuncontextgpregs
 /* Save the context of the current task and select a new task to run. */
 portSAVE_CONTEXT
+/* In an SMP build vTaskSwitchContext() takes the core ID as its argument. */
+MRS x0, mpidr_el1
+AND x0, x0, 255
 BL vTaskSwitchContext
 portRESTORE_CONTEXT
@@ -392,17 +495,8 @@ Exit_IRQ_No_Context_Switch:
 DSB SY
 ISB SY
-LDP X29, X30, [SP], #0x10
-LDP X18, X19, [SP], #0x10
-LDP X16, X17, [SP], #0x10
-LDP X14, X15, [SP], #0x10
-LDP X12, X13, [SP], #0x10
-LDP X10, X11, [SP], #0x10
-LDP X8, X9, [SP], #0x10
-LDP X6, X7, [SP], #0x10
-LDP X4, X5, [SP], #0x10
-LDP X2, X3, [SP], #0x10
-LDP X0, X1, [SP], #0x10
+restorefloatregisters
+restorefuncontextgpregs
 ERET
@@ -410,14 +504,22 @@ Exit_IRQ_No_Context_Switch:
 .align 8
-pxCurrentTCBConst: .dword pxCurrentTCB
-ullCriticalNestingConst: .dword ullCriticalNesting
+#if configNUMBER_OF_CORES == 1
+pxCurrentTCBsConst: .dword pxCurrentTCB
+ullCriticalNestingsConst: .dword ullCriticalNesting
+ullPortInterruptNestingsConst: .dword ullPortInterruptNesting
+ullPortYieldRequiredConst: .dword ullPortYieldRequired
 ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
+#else
+pxCurrentTCBsConst: .dword pxCurrentTCBs
+ullCriticalNestingsConst: .dword ullCriticalNestings
+ullPortInterruptNestingsConst: .dword ullPortInterruptNestings
+ullPortYieldRequiredConst: .dword ullPortYieldRequired
+ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
+#endif
 ullICCPMRConst: .dword ullICCPMR
 ullMaxAPIPriorityMaskConst: .dword ullMaxAPIPriorityMask
-ullPortInterruptNestingConst: .dword ullPortInterruptNesting
-ullPortYieldRequiredConst: .dword ullPortYieldRequired
 ullICCIARConst: .dword ullICCIAR
 ullICCEOIRConst: .dword ullICCEOIR
 vApplicationIRQHandlerConst: .word vApplicationIRQHandler


@@ -33,6 +33,9 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
+#include "FreeRTOS.h"
 /* *INDENT-ON* */
 /*-----------------------------------------------------------
@@ -65,6 +68,8 @@ typedef uint64_t TickType_t;
 * not need to be guarded with a critical section. */
 #define portTICK_TYPE_IS_ATOMIC 1
+#define portCRITICAL_NESTING_IN_TCB 0
 /*-----------------------------------------------------------*/
 /* Hardware specifics. */
@@ -78,6 +83,7 @@ typedef uint64_t TickType_t;
 /* Task utilities. */
 /* Called at the end of an ISR that can cause a context switch. */
+#if ( configNUMBER_OF_CORES == 1 )
 #define portEND_SWITCHING_ISR( xSwitchRequired ) \
 { \
 extern uint64_t ullPortYieldRequired; \
@@ -87,6 +93,17 @@ typedef uint64_t TickType_t;
 ullPortYieldRequired = pdTRUE; \
 } \
 }
+#else
+#define portEND_SWITCHING_ISR( xSwitchRequired )                     \
+{                                                                    \
+    extern uint64_t ullPortYieldRequired[configNUMBER_OF_CORES];     \
+                                                                     \
+    if( xSwitchRequired != pdFALSE )                                 \
+    {                                                                \
+        ullPortYieldRequired[portGET_CORE_ID()] = pdTRUE;            \
+    }                                                                \
+}
+#endif
 #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
 #if defined( GUEST )
@ -98,30 +115,89 @@ typedef uint64_t TickType_t;
/*----------------------------------------------------------- /*-----------------------------------------------------------
* Critical section control * Critical section control
*----------------------------------------------------------*/ *----------------------------------------------------------*/
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
extern UBaseType_t uxPortSetInterruptMask( void );
extern void vPortClearInterruptMask( UBaseType_t uxNewMaskValue );
extern void vPortInstallFreeRTOSVectorTable( void ); extern void vPortInstallFreeRTOSVectorTable( void );
-#define portDISABLE_INTERRUPTS()                           \
-    __asm volatile ( "MSR DAIFSET, #2" ::: "memory" );     \
-    __asm volatile ( "DSB SY" );                           \
-    __asm volatile ( "ISB SY" );
-#define portENABLE_INTERRUPTS()                            \
-    __asm volatile ( "MSR DAIFCLR, #2" ::: "memory" );     \
-    __asm volatile ( "DSB SY" );                           \
-    __asm volatile ( "ISB SY" );
+/* Mask IRQs at the CPU (DAIF.I) and return the previous DAIF value so it can
+ * be restored later. */
+static inline UBaseType_t uxDisableInterrupts( void )
+{
+    unsigned long flags;
+
+    __asm volatile (
+        "mrs %0, daif\n"
+        "msr daifset, #2\n"
+        "dsb sy\n"
+        "isb sy\n"
+        : "=r" ( flags )
+        :
+        : "memory"
+    );
+    return flags;
+}
+
+/* Unconditionally unmask IRQs at the CPU. */
+static inline void vEnableInterrupts( void )
+{
+    __asm volatile (
+        "msr daifclr, #2\n"
+        "dsb sy\n"
+        "isb sy\n"
+        :
+        :
+        : "memory"
+    );
+}
+
+/* Restore only the saved I bit from a value previously returned by
+ * uxDisableInterrupts(), leaving the other DAIF bits unchanged.  Done in a
+ * single asm block so the scratch registers cannot be clobbered between
+ * statements. */
+static inline void vRestoreInterrupts( UBaseType_t flags )
+{
+    __asm volatile (
+        "and x2, %0, #128\n"
+        "mrs x1, daif\n"
+        "bic x1, x1, #128\n"
+        "orr x1, x1, x2\n"
+        "msr daif, x1\n"
+        "dsb sy\n"
+        "isb sy\n"
+        :
+        : "r" ( flags )
+        : "x1", "x2", "memory"
+    );
+}
+
+#define portDISABLE_INTERRUPTS()    uxDisableInterrupts()
+#define portENABLE_INTERRUPTS()     vEnableInterrupts()
+#define portSET_INTERRUPT_MASK()    uxDisableInterrupts()
+#define portCLEAR_INTERRUPT_MASK( x )    vRestoreInterrupts( x )
+
+UBaseType_t uxPortSetInterruptMask( void );
+void vPortClearInterruptMask( UBaseType_t );
+
+#define portSET_INTERRUPT_MASK_FROM_ISR()       uxPortSetInterruptMask()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x )  vPortClearInterruptMask( x )
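A short sketch of how the save/restore pair above is meant to be used (illustrative; vExampleShortMaskedSection is not part of the port):

/* Illustrative only: save the current DAIF state, mask IRQs, then restore
 * exactly the saved I-bit state, so nested use is safe. */
static void vExampleShortMaskedSection( void )
{
    UBaseType_t uxSavedFlags = portSET_INTERRUPT_MASK();   /* Previous DAIF returned, IRQs now masked. */

    /* ...a few instructions that must not be interrupted... */

    portCLEAR_INTERRUPT_MASK( uxSavedFlags );              /* Puts the I bit back exactly as it was. */
}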
 /* These macros do not globally disable/enable interrupts. They do mask off
 * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */
-#define portENTER_CRITICAL() vPortEnterCritical();
-#define portEXIT_CRITICAL() vPortExitCritical();
-#define portSET_INTERRUPT_MASK_FROM_ISR() uxPortSetInterruptMask()
-#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x )
+#if ( configNUMBER_OF_CORES == 1 )
+extern void vPortEnterCritical( void );
+extern void vPortExitCritical( void );
+#define portENTER_CRITICAL() vPortEnterCritical()
+#define portEXIT_CRITICAL() vPortExitCritical()
+#else
+#define portENTER_CRITICAL() vTaskEnterCritical()
+#define portEXIT_CRITICAL() vTaskExitCritical()
+#endif
+#define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR()
+#define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x )
+/* Critical nesting count management. */
+#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 0 ) )
+extern volatile UBaseType_t ullCriticalNestings[ configNUMBER_OF_CORES ];
+#define portGET_CRITICAL_NESTING_COUNT() ( ullCriticalNestings[ portGET_CORE_ID() ] )
+#define portSET_CRITICAL_NESTING_COUNT( x ) ( ullCriticalNestings[ portGET_CORE_ID() ] = ( x ) )
+#define portINCREMENT_CRITICAL_NESTING_COUNT() ( ullCriticalNestings[ portGET_CORE_ID() ]++ )
+#define portDECREMENT_CRITICAL_NESTING_COUNT() ( ullCriticalNestings[ portGET_CORE_ID() ]-- )
+#endif /* ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 0 ) ) */
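For context, a minimal sketch of the SMP-related settings an application's FreeRTOSConfig.h would add when building this port with more than one core (the values shown are examples, not requirements):

/* Illustrative FreeRTOSConfig.h fragment - example values only. */
#define configNUMBER_OF_CORES            2    /* Build the kernel and this port in SMP mode.      */
#define configRUN_MULTIPLE_PRIORITIES    1    /* Let different priorities run on different cores. */
#define configUSE_CORE_AFFINITY          1    /* Enable vTaskCoreAffinitySet() and friends.       */
#define configUSE_PASSIVE_IDLE_HOOK      0    /* Optional hook for the passive idle tasks.        */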
 /*-----------------------------------------------------------*/
@@ -215,6 +291,20 @@ void FreeRTOS_Tick_Handler( void );
 #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
+#if ( configNUMBER_OF_CORES > 1 )
+extern uint32_t uxPortGetCoreID( void );
+#define portGET_CORE_ID() uxPortGetCoreID()
+extern void vPortYieldCore( uint8_t CoreID );
+#define portYIELD_CORE( xCoreID ) vPortYieldCore( xCoreID )
+extern void vPortRecursiveLock( uint32_t ulLockNum );
+extern void vPortRecursiveUnlock( uint32_t ulLockNum );
+#define portRELEASE_ISR_LOCK() vPortRecursiveUnlock( 0u )
+#define portGET_ISR_LOCK() vPortRecursiveLock( 0u )
+#define portRELEASE_TASK_LOCK() vPortRecursiveUnlock( 1u )
+#define portGET_TASK_LOCK() vPortRecursiveLock( 1u )
+#endif
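vPortYieldCore() is, like vPortStartCore(), left to the application/BSP. A hypothetical GICv2 implementation could raise a software generated interrupt (SGI) at the distributor and let the target core's SGI handler set its ullPortYieldRequired entry; the SGI number and the GICD_SGIR offset below are assumptions about the platform, not part of this commit:

/* Hypothetical sketch - raises SGI 0 on the target core via the GICv2
 * distributor (GICD_SGIR lives at distributor base + 0xF00). */
#define portGICD_SGIR    ( *( ( volatile uint32_t * ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + 0xF00UL ) ) )

void vPortYieldCore( uint8_t ucCoreID )
{
    /* Bits [23:16] select the target CPU list, bits [3:0] the SGI ID. */
    portGICD_SGIR = ( 1UL << ( 16UL + ucCoreID ) ) | 0UL;
    __asm volatile ( "dsb sy" ::: "memory" );
}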
 /* *INDENT-OFF* */
 #ifdef __cplusplus
 }