make ARM_AARCH64 Port SMP ready

This commit is contained in:
NikJen26 2024-09-07 11:23:19 +02:00
parent a045081f73
commit d30956e7da
3 changed files with 541 additions and 237 deletions

View file

@ -27,11 +27,14 @@
*/
/* Standard includes. */
#include <stdlib.h>
#include <string.h>
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#if ( configNUMBER_OF_CORES > 1 )
#include "hardware_setup.h"
#endif
#ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS
#error "configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. See www.FreeRTOS.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html"
@ -118,16 +121,6 @@
/* The I bit in the DAIF bits. */
#define portDAIF_I ( 0x80 )
/* Macro to unmask all interrupt priorities. */
#define portCLEAR_INTERRUPT_MASK() \
{ \
portDISABLE_INTERRUPTS(); \
portICCPMR_PRIORITY_MASK_REGISTER = portUNMASK_VALUE; \
__asm volatile ( "DSB SY \n" \
"ISB SY \n" ); \
portENABLE_INTERRUPTS(); \
}
/* Hardware specifics used when sanity checking the configuration. */
#define portINTERRUPT_PRIORITY_REGISTER_OFFSET 0x400UL
#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff )
@ -152,18 +145,26 @@ extern void vPortRestoreTaskContext( void );
* a non zero value to ensure interrupts don't inadvertently become unmasked before
* the scheduler starts. As it is stored as part of the task context it will
* automatically be set to 0 when the first task is started. */
volatile uint64_t ullCriticalNesting = 9999ULL;
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
#if ( configNUMBER_OF_CORES == 1 )
volatile uint64_t ullCriticalNesting = 0ULL;
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
* then floating point context must be saved and restored for the task. */
uint64_t ullPortTaskHasFPUContext = pdFALSE;
/* Set to 1 to pend a context switch from an ISR. */
uint64_t ullPortYieldRequired = pdFALSE;
/* Counts the interrupt nesting depth. A context switch is only performed if
uint64_t ullPortTaskHasFPUContext = pdFALSE;
/* Set to 1 to pend a context switch from an ISR. */
uint64_t ullPortYieldRequired = pdFALSE;
/* Counts the interrupt nesting depth. A context switch is only performed if
* if the nesting depth is 0. */
uint64_t ullPortInterruptNesting = 0;
uint64_t ullPortInterruptNesting = 0;
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
#if ( portCRITICAL_NESTING_IN_TCB == 0 )
volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0ULL };
#endif
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
* then floating point context must be saved and restored for the task. */
uint64_t ullPortTaskHasFPUContext[configNUMBER_OF_CORES] = {pdFALSE};
uint64_t ullPortYieldRequired[configNUMBER_OF_CORES] = {pdFALSE};
uint64_t ullPortInterruptNestings[configNUMBER_OF_CORES] = {0};
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
/* Used in the ASM code. */
__attribute__( ( used ) ) const uint64_t ullICCEOIR = portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS;
@ -171,6 +172,80 @@ __attribute__( ( used ) ) const uint64_t ullICCIAR = portICCIAR_INTERRUPT_ACKNOW
__attribute__( ( used ) ) const uint64_t ullICCPMR = portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS;
__attribute__( ( used ) ) const uint64_t ullMaxAPIPriorityMask = ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
#if ( configNUMBER_OF_CORES > 1 )
/* Minimal recursive spinlock used to implement the FreeRTOS SMP ISR/task
 * locks.  'lock' is the spin flag (0 = free, non-zero = held), 'owner' is the
 * ID of the holding core (0xFFFFFFFF when free) and 'count' is the recursion
 * depth of the owning core. */
typedef struct {
volatile uint32_t lock;
uint32_t owner;
uint32_t count;
} recursive_spinlock_t;
/* Two locks: index 0 is used as the ISR lock, index 1 as the task lock
 * (see portGET_ISR_LOCK()/portGET_TASK_LOCK() in portmacro.h). */
static recursive_spinlock_t rms[2] = {{.lock=0,.owner = 0xFFFFFFFF,.count=0},{.lock=0,.owner = 0xFFFFFFFF,.count=0}};
/* Load-acquire exclusive: read *addr and set the exclusive monitor for it.
 * Must be paired with a store-exclusive (stlxr) or an explicit CLREX. */
static inline uint32_t ldaxr(volatile uint32_t *addr) {
uint32_t value;
asm volatile ("ldaxr %w0, [%1]" : "=r" (value) : "r" (addr) : "memory");
return value;
}
/* Store-release exclusive: write value to *addr.  Returns 0 on success,
 * non-zero if the exclusive monitor was lost since the matching LDAXR. */
static inline uint32_t stlxr(volatile uint32_t *addr, uint32_t value) {
uint32_t success;
asm volatile(
"stlxr %w[success], %w[value], [%[address]]"
: [success] "=&r" (success)
: [value] "r" (value), [address] "r" (addr)
: "memory"
);
return success;
}
/* Acquire (recursively) the spinlock identified by ulLockNum (0 = ISR lock,
 * 1 = task lock).  If the calling core already owns the lock only the
 * recursion count is incremented; otherwise the core spins, sleeping in WFE
 * between attempts, until the lock is taken.
 *
 * Fixes vs. previous version:
 *  - The recursive-take test used ldaxr(), leaving the exclusive monitor set
 *    with no matching store-exclusive.  A plain read suffices: 'owner' can
 *    only equal this core's ID while this core holds the lock.
 *  - WFE was executed before the first acquisition attempt, so a free lock
 *    was not taken until some event arrived.  The lock is now tried first and
 *    WFE is only entered after an attempt fails (the failed LDAXR arms the
 *    exclusive monitor, whose clearing by the owner's release generates the
 *    wake-up event). */
void vPortRecursiveLock( uint32_t ulLockNum )
{
    configASSERT( ulLockNum < 2 );

    uint32_t ulCoreNum = portGET_CORE_ID();
    recursive_spinlock_t * lock = &rms[ ulLockNum ];

    /* Recursive take by the current owner - no atomicity required. */
    if( ( lock->lock != 0 ) && ( lock->owner == ulCoreNum ) )
    {
        lock->count++;
        return;
    }

    for( ; ; )
    {
        /* Attempt the exclusive acquire; both loads and the store must
         * succeed for the lock to be taken. */
        if( ( ldaxr( &lock->lock ) == 0 ) && ( stlxr( &lock->lock, 1 ) == 0 ) )
        {
            break;
        }

        /* Lock held (or monitor lost) - wait for the release event. */
        asm volatile ( "wfe" );
    }

    lock->owner = ulCoreNum;
    lock->count = 1;
}
/* Release one recursion level of the spinlock identified by ulLockNum.
 * Only the owning core releases; an unlock by a non-owner is silently
 * ignored (NOTE(review): consider asserting on that instead).  When the
 * recursion count reaches zero the lock is freed with a store-release and
 * SEV wakes any cores sleeping in WFE inside vPortRecursiveLock().
 *
 * Fix vs. previous version: the owner test used ldaxr(), which set the
 * exclusive monitor with no matching store-exclusive.  A plain read is
 * sufficient because only the holding core can observe its own ID in
 * 'owner'. */
void vPortRecursiveUnlock( uint32_t ulLockNum )
{
    configASSERT( ulLockNum < 2 );

    uint32_t ulCoreNum = portGET_CORE_ID();
    recursive_spinlock_t * lock = &rms[ ulLockNum ];

    if( ( lock->lock != 0 ) && ( lock->owner == ulCoreNum ) )
    {
        if( --lock->count == 0 )
        {
            lock->owner = 0xFFFFFFFF; /* Mark as un-owned before release. */

            /* Store-release clears the flag and orders prior writes. */
            asm volatile ( "stlr %w0, [%1]" : : "r" ( 0 ), "r" ( &lock->lock ) : "memory" );
            asm volatile ( "sev" ); /* Wake waiters. */
        }
    }
}
#endif
/*-----------------------------------------------------------*/
/*
@ -282,7 +357,11 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
pxTopOfStack--;
*pxTopOfStack = pdTRUE;
#if configNUMBER_OF_CORES == 1
ullPortTaskHasFPUContext = pdTRUE;
#else
ullPortTaskHasFPUContext[portGET_CORE_ID()] = pdTRUE;
#endif
}
#else /* if ( configUSE_TASK_FPU_SUPPORT == 1 ) */
{
@ -363,6 +442,13 @@ BaseType_t xPortStartScheduler( void )
* executing. */
portDISABLE_INTERRUPTS();
#if ( configNUMBER_OF_CORES > 1 )
/* Start all other Cores and let them execute vPortRestoreTaskContext()*/
for (int i = 1; i < configNUMBER_OF_CORES; i++) {
vPortStartCore(i);
}
#endif
/* Start the timer that generates the tick ISR. */
configSETUP_TICK_INTERRUPT();
@ -379,12 +465,17 @@ void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
 * Artificially force an assert: the critical nesting count is never
 * 1000, so reaching this function always trips configASSERT(). */
#if ( configNUMBER_OF_CORES == 1 )
configASSERT( ullCriticalNesting == 1000ULL );
#else
/* SMP build: read the per-core nesting count via the port macro. */
configASSERT( portGET_CRITICAL_NESTING_COUNT() == 1000ULL );
#endif
}
/*-----------------------------------------------------------*/
void vPortEnterCritical( void )
{
#if ( configNUMBER_OF_CORES == 1 )
void vPortEnterCritical( void )
{
/* Mask interrupts up to the max syscall interrupt priority. */
uxPortSetInterruptMask();
@ -402,11 +493,11 @@ void vPortEnterCritical( void )
{
configASSERT( ullPortInterruptNesting == 0 );
}
}
/*-----------------------------------------------------------*/
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
void vPortExitCritical( void )
{
if( ullCriticalNesting > portNO_CRITICAL_NESTING )
{
/* Decrement the nesting count as the critical section is being
@ -419,11 +510,13 @@ void vPortExitCritical( void )
{
/* Critical nesting has reached zero so all interrupt priorities
* should be unmasked. */
portCLEAR_INTERRUPT_MASK();
vPortClearInterruptMask(pdFALSE);
}
}
}
/*-----------------------------------------------------------*/
}
/*-----------------------------------------------------------*/
#endif
void FreeRTOS_Tick_Handler( void )
{
@ -434,6 +527,9 @@ void FreeRTOS_Tick_Handler( void )
}
#endif
/* Ok to enable interrupts after the interrupt source has been cleared. */
configCLEAR_TICK_INTERRUPT();
/* Interrupts should not be enabled before this point. */
#if ( configASSERT_DEFINED == 1 )
{
@ -444,63 +540,79 @@ void FreeRTOS_Tick_Handler( void )
}
#endif /* configASSERT_DEFINED */
/* Set interrupt mask before altering scheduler structures. The tick
* handler runs at the lowest priority, so interrupts cannot already be masked,
* so there is no need to save and restore the current mask value. It is
* necessary to turn off interrupts in the CPU itself while the ICCPMR is being
* updated. */
portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
__asm volatile ( "dsb sy \n"
"isb sy \n" ::: "memory" );
uint32_t ulPreviousMask;
ulPreviousMask = taskENTER_CRITICAL_FROM_ISR();
/* Ok to enable interrupts after the interrupt source has been cleared. */
configCLEAR_TICK_INTERRUPT();
portENABLE_INTERRUPTS();
/* Increment the RTOS tick. */
if( xTaskIncrementTick() != pdFALSE )
{
#if ( configNUMBER_OF_CORES == 1 )
ullPortYieldRequired = pdTRUE;
#else
ullPortYieldRequired[portGET_CORE_ID()] = pdTRUE;
#endif
}
/* Ensure all interrupt priorities are active again. */
portCLEAR_INTERRUPT_MASK();
taskEXIT_CRITICAL_FROM_ISR( ulPreviousMask );
}
/*-----------------------------------------------------------*/
#if ( configUSE_TASK_FPU_SUPPORT != 2 )
/* Return the ID of the core executing this function, taken from the Aff0
 * field (bits [7:0]) of MPIDR_EL1.
 *
 * Fixes vs. previous version:
 *  - MRS requires a 64-bit (Xn) destination register, but the result was
 *    read into an 'unsigned int' operand, making GCC substitute an invalid
 *    Wn register into the template.  A 64-bit temporary is used instead.
 *  - '__core_id' encroached on the implementation-reserved identifier
 *    namespace (leading double underscore). */
uint32_t uxPortGetCoreID ( void ) {
    uint64_t ullMPIDR;

    __asm volatile ( "mrs %0, mpidr_el1"
                     : "=r" ( ullMPIDR )
                     : /* no input operands */
                     : /* no clobbers */ );

    return ( uint32_t ) ( ullMPIDR & 0xff );
}
#if ( configUSE_TASK_FPU_SUPPORT != 2 )
/* Called by a task to register that it uses the FPU, so its FPU registers
 * are saved/restored as part of its context from now on. */
void vPortTaskUsesFPU( void )
{
/* A task is registering the fact that it needs an FPU context. Set the
 * FPU flag (which is saved as part of the task context). */
/* NOTE(review): the next line duplicates the assignment inside the #if
 * below and, in an SMP build where the flag is an array, would not even
 * compile - it looks like a leftover from the pre-SMP code; verify. */
ullPortTaskHasFPUContext = pdTRUE;
#if ( configNUMBER_OF_CORES == 1 )
ullPortTaskHasFPUContext = pdTRUE;
#else
/* SMP: one flag per core, indexed by the calling core's ID. */
ullPortTaskHasFPUContext[portGET_CORE_ID()] = pdTRUE;
#endif
/* Consider initialising the FPSR here - but probably not necessary in
 * AArch64. */
}
#endif /* configUSE_TASK_FPU_SUPPORT */
/*-----------------------------------------------------------*/
/* Unmask all interrupt priorities by raising the GIC priority-mask register
 * (ICCPMR) to its maximum, with IRQs globally disabled around the update and
 * DSB/ISB to make the new mask take effect before IRQs are re-enabled.
 * NOTE(review): 0xFFFF is written although the GIC priority mask is an 8-bit
 * field (the removed single-core macro wrote portUNMASK_VALUE) - confirm the
 * upper bits are ignored by this GIC implementation. */
#define portCLEAR_INTERRUPT_MASK_P() \
{ \
portDISABLE_INTERRUPTS(); \
portICCPMR_PRIORITY_MASK_REGISTER = 0xFFFF; \
__asm volatile ( "DSB SY \n" \
"ISB SY \n" ); \
portENABLE_INTERRUPTS(); \
}
/* Restore the interrupt-mask state previously returned by
 * uxPortSetInterruptMask().  Only acts when the caller indicates the mask
 * was not already raised on entry (uxNewMaskValue == pdFALSE). */
void vPortClearInterruptMask( UBaseType_t uxNewMaskValue )
{
    if( pdFALSE == uxNewMaskValue )
    {
        /* The mask was raised by the matching set call - lower it again. */
        portCLEAR_INTERRUPT_MASK_P();
    }
}
/*-----------------------------------------------------------*/
UBaseType_t uxPortSetInterruptMask( void )
{
uint32_t ulReturn;
uint32_t ulReturn;
uint32_t mask;
/* Interrupt in the CPU must be turned off while the ICCPMR is being
* updated. */
updated. */
portDISABLE_INTERRUPTS();
mask = portICCPMR_PRIORITY_MASK_REGISTER;
if( portICCPMR_PRIORITY_MASK_REGISTER == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
if( mask == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
{
/* Interrupts were already masked. */
ulReturn = pdTRUE;

View file

@ -25,17 +25,24 @@
* https://github.com/FreeRTOS
*
*/
#include "FreeRTOSConfig.h"
.text
.section .interrupt_handlers, "ax"
/* Variables and functions. */
.extern ullMaxAPIPriorityMask
#if configNUMBER_OF_CORES == 1
.extern pxCurrentTCB
.extern ullCriticalNesting
.extern ullPortInterruptNesting
#else
.extern pxCurrentTCBs
.extern ullCriticalNestings
.extern ullPortInterruptNestings
#endif
.extern vTaskSwitchContext
.extern vApplicationIRQHandler
.extern ullPortInterruptNesting
.extern ullPortTaskHasFPUContext
.extern ullCriticalNesting
.extern ullPortYieldRequired
.extern ullICCEOIR
.extern ullICCIAR
@ -45,12 +52,7 @@
.global FreeRTOS_SWI_Handler
.global vPortRestoreTaskContext
.macro portSAVE_CONTEXT
/* Switch to use the EL0 stack pointer. */
MSR SPSEL, #0
.macro saveallgpregisters
/* Save the entire context. */
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
@ -68,30 +70,56 @@
STP X26, X27, [SP, #-0x10]!
STP X28, X29, [SP, #-0x10]!
STP X30, XZR, [SP, #-0x10]!
.endm
/* Save the SPSR. */
#if defined( GUEST )
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
#else
MRS X3, SPSR_EL3
/* Save the ELR. */
MRS X2, ELR_EL3
#endif
/* Pop X0-X30 from the stack in the exact reverse order of
 * saveallgpregisters (the XZR slot only pads X30 to a 16-byte pair). */
.macro restoreallgpregisters
LDP X30, XZR, [SP], #0x10
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
.endm
/* Push only the registers that a called C function may clobber under the
 * AAPCS64 (X0-X18) plus the frame pair X29/X30 - used around the call into
 * the C IRQ handler, where the callee preserves the remaining registers. */
.macro savefuncontextgpregs
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X29, X30, [SP, #-0x10]!
.endm
/* Save the critical section nesting depth. */
LDR X0, ullCriticalNestingConst
LDR X3, [X0]
/* Pop the registers pushed by savefuncontextgpregs, in exact reverse
 * order (X29/X30 first, X0/X1 last). */
.macro restorefuncontextgpregs
LDP X29, X30, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
.endm
/* Save the FPU context indicator. */
LDR X0, ullPortTaskHasFPUContextConst
LDR X2, [X0]
/* Save the FPU context, if any (32 128-bit registers). */
CMP X2, #0
B.EQ 1f
.macro savefloatregisters
STP Q0, Q1, [SP,#-0x20]!
STP Q2, Q3, [SP,#-0x20]!
STP Q4, Q5, [SP,#-0x20]!
@ -108,12 +136,86 @@
STP Q26, Q27, [SP,#-0x20]!
STP Q28, Q29, [SP,#-0x20]!
STP Q30, Q31, [SP,#-0x20]!
.endm
/* Pop the 32 128-bit FPU/SIMD registers Q0-Q31 in the exact reverse order
 * of savefloatregisters. */
.macro restorefloatregisters
LDP Q30, Q31, [SP], #0x20
LDP Q28, Q29, [SP], #0x20
LDP Q26, Q27, [SP], #0x20
LDP Q24, Q25, [SP], #0x20
LDP Q22, Q23, [SP], #0x20
LDP Q20, Q21, [SP], #0x20
LDP Q18, Q19, [SP], #0x20
LDP Q16, Q17, [SP], #0x20
LDP Q14, Q15, [SP], #0x20
LDP Q12, Q13, [SP], #0x20
LDP Q10, Q11, [SP], #0x20
LDP Q8, Q9, [SP], #0x20
LDP Q6, Q7, [SP], #0x20
LDP Q4, Q5, [SP], #0x20
LDP Q2, Q3, [SP], #0x20
LDP Q0, Q1, [SP], #0x20
.endm
.macro portSAVE_CONTEXT
/* Switch to use the EL0 stack pointer. */
MSR SPSEL, #0
/* Save the entire context. */
saveallgpregisters
/* Save the SPSR. */
#if defined( GUEST )
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
#else
MRS X3, SPSR_EL3
/* Save the ELR. */
MRS X2, ELR_EL3
#endif
STP X2, X3, [SP, #-0x10]!
/* Save the critical section nesting depth. */
LDR X0, ullCriticalNestingsConst
#if configNUMBER_OF_CORES > 1
/* Calculate the correct index for ullCriticalNestings array based on core ID. */
MRS X1, MPIDR_EL1 // Read the Multiprocessor Affinity Register
AND X1, X1, #0xff // Extract Aff0 which contains the core ID
/* Calculate offset to the correct critical nesting value based on the core ID */
LSL X1, X1, #3 // Multiply core ID by 8 (size of a pointer on ARM64)
ADD X0, X0, X1 // Add to the base of the critical nesting array
#endif
LDR X3, [X0]
/* Save the FPU context indicator. */
LDR X0, ullPortTaskHasFPUContextConst
#if configNUMBER_OF_CORES > 1
ADD X0, X0, X1 // Add to the base of the FPU array
#endif
LDR X2, [X0]
/* Save the FPU context, if any (32 128-bit registers). */
CMP X2, #0
B.EQ 1f
savefloatregisters
1:
/* Store the critical nesting count and FPU context indicator. */
STP X2, X3, [SP, #-0x10]!
LDR X0, pxCurrentTCBConst
LDR X0, pxCurrentTCBsConst
#if configNUMBER_OF_CORES > 1
MRS X1, MPIDR_EL1 // Read the Multiprocessor Affinity Register
AND X1, X1, #0xff // Extract Aff0 which contains the core ID
/* Calculate offset to the correct TCB pointer based on the core ID */
LSL X1, X1, #3 // Multiply core ID by 8 (size of a pointer on ARM64)
ADD X0, X0, X1 // Add to the base of the TCB array
#endif
/* Load the address of the TCB for the current core */
LDR X1, [X0]
MOV X0, SP /* Move SP into X0 for saving. */
STR X0, [X1]
@ -131,16 +233,33 @@
MSR SPSEL, #0
/* Set the SP to point to the stack of the task being restored. */
LDR X0, pxCurrentTCBConst
LDR X0, pxCurrentTCBsConst
#if configNUMBER_OF_CORES > 1
/* Get the core ID to index the TCB correctly. */
MRS X2, MPIDR_EL1 // Read the Multiprocessor Affinity Register
AND X2, X2, #0xff // Extract Aff0 which contains the core ID
LSL X2, X2, #3 // Scale the core ID to the size of a pointer (64-bit system)
ADD X0, X0, X2 // Add the offset for the current core's TCB pointer
#endif
LDR X1, [X0]
LDR X0, [X1]
MOV SP, X0
LDP X2, X3, [SP], #0x10 /* Critical nesting and FPU context. */
/* Set the PMR register to be correct for the current critical nesting
depth. */
LDR X0, ullCriticalNestingConst /* X0 holds the address of ullCriticalNesting. */
/* Calculate offset for the current core's ullCriticalNesting and load its address. */
LDR X0, ullCriticalNestingsConst /* Load base address of the ullCriticalNesting array */
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X1, X1, #0xff /* Extract Aff0, which contains the core ID */
LSL X1, X1, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
ADD X0, X0, X1 /* Add offset for the current core's ullCriticalNesting */
LDR X0, [X0] /* Load the address of the ullCriticalNesting for the current core */
#endif
MOV X1, #255 /* X1 holds the unmask value. */
LDR X4, ullICCPMRConst /* X4 holds the address of the ICCPMR constant. */
CMP X3, #0
@ -154,29 +273,22 @@
ISB SY
STR X3, [X0] /* Restore the task's critical nesting count. */
/* Restore the FPU context indicator. */
LDR X0, ullPortTaskHasFPUContextConst
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X1, X1, #0xff /* Extract Aff0, which contains the core ID */
LSL X1, X1, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
/* Restore the FPU context indicator. */
ADD X0, X0, X1 // Add to the base of the FPU array
#endif
STR X2, [X0]
/* Restore the FPU context, if any. */
CMP X2, #0
B.EQ 1f
LDP Q30, Q31, [SP], #0x20
LDP Q28, Q29, [SP], #0x20
LDP Q26, Q27, [SP], #0x20
LDP Q24, Q25, [SP], #0x20
LDP Q22, Q23, [SP], #0x20
LDP Q20, Q21, [SP], #0x20
LDP Q18, Q19, [SP], #0x20
LDP Q16, Q17, [SP], #0x20
LDP Q14, Q15, [SP], #0x20
LDP Q12, Q13, [SP], #0x20
LDP Q10, Q11, [SP], #0x20
LDP Q8, Q9, [SP], #0x20
LDP Q6, Q7, [SP], #0x20
LDP Q4, Q5, [SP], #0x20
LDP Q2, Q3, [SP], #0x20
LDP Q0, Q1, [SP], #0x20
restorefloatregisters
1:
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
@ -192,22 +304,7 @@
MSR ELR_EL3, X2
#endif
LDP X30, XZR, [SP], #0x10
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
restoreallgpregisters
/* Switch to use the ELx stack pointer. _RB_ Might not be required. */
MSR SPSEL, #1
@ -239,6 +336,8 @@ FreeRTOS_SWI_Handler:
CMP X1, #0x17 /* 0x17 = SMC instruction. */
#endif
B.NE FreeRTOS_Abort
MRS x0, mpidr_el1
AND x0, x0, 255
BL vTaskSwitchContext
portRESTORE_CONTEXT
@ -276,17 +375,8 @@ vPortRestoreTaskContext:
.type FreeRTOS_IRQ_Handler, %function
FreeRTOS_IRQ_Handler:
/* Save volatile registers. */
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X29, X30, [SP, #-0x10]!
savefuncontextgpregs
savefloatregisters
/* Save the SPSR and ELR. */
#if defined( GUEST )
@ -299,7 +389,16 @@ FreeRTOS_IRQ_Handler:
STP X2, X3, [SP, #-0x10]!
/* Increment the interrupt nesting counter. */
LDR X5, ullPortInterruptNestingConst
LDR X5, ullPortInterruptNestingsConst /* Load base address of the ullPortYieldRequired array */
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X2, X2, #0xff /* Extract Aff0, which contains the core ID */
LSL X2, X2, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
/* Calculate offset for the current core's ullPortYieldRequired and load its address. */
ADD X5, X5, X2 /* Add offset for the current core's ullPortYieldRequired */
#endif
LDR X1, [X5] /* Old nesting count in X1. */
ADD X6, X1, #1
STR X6, [X5] /* Address of nesting count variable in X5. */
@ -313,6 +412,7 @@ FreeRTOS_IRQ_Handler:
LDR X3, [X2]
LDR W0, [X3] /* ICCIAR in W0 as parameter. */
/* ICCIAR in W0 as parameter. */
/* Maintain the ICCIAR value across the function call. */
STP X0, X1, [SP, #-0x10]!
@ -342,6 +442,16 @@ FreeRTOS_IRQ_Handler:
/* Is a context switch required? */
LDR X0, ullPortYieldRequiredConst
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X2, X2, #0xff /* Extract Aff0, which contains the core ID */
LSL X2, X2, #3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
/* Calculate offset for the current core's ullPortYieldRequired and load its address. */
ADD X0, X0, X2 /* Add offset for the current core's ullPortYieldRequired */
#endif
LDR X1, [X0]
CMP X1, #0
B.EQ Exit_IRQ_No_Context_Switch
@ -362,20 +472,13 @@ FreeRTOS_IRQ_Handler:
DSB SY
ISB SY
LDP X29, X30, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
restorefloatregisters
restorefuncontextgpregs
/* Save the context of the current task and select a new task to run. */
portSAVE_CONTEXT
MRS x0, mpidr_el1
AND x0, x0, 255
BL vTaskSwitchContext
portRESTORE_CONTEXT
@ -392,17 +495,8 @@ Exit_IRQ_No_Context_Switch:
DSB SY
ISB SY
LDP X29, X30, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
restorefloatregisters
restorefuncontextgpregs
ERET
@ -410,14 +504,22 @@ Exit_IRQ_No_Context_Switch:
.align 8
pxCurrentTCBConst: .dword pxCurrentTCB
ullCriticalNestingConst: .dword ullCriticalNesting
ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
#if configNUMBER_OF_CORES == 1
pxCurrentTCBsConst: .dword pxCurrentTCB
ullCriticalNestingsConst: .dword ullCriticalNesting
ullPortInterruptNestingsConst: .dword ullPortInterruptNesting
ullPortYieldRequiredConst: .dword ullPortYieldRequired
ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
#else
pxCurrentTCBsConst: .dword pxCurrentTCBs
ullCriticalNestingsConst: .dword ullCriticalNestings
ullPortInterruptNestingsConst: .dword ullPortInterruptNestings
ullPortYieldRequiredConst: .dword ullPortYieldRequired
ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
#endif
ullICCPMRConst: .dword ullICCPMR
ullMaxAPIPriorityMaskConst: .dword ullMaxAPIPriorityMask
ullPortInterruptNestingConst: .dword ullPortInterruptNesting
ullPortYieldRequiredConst: .dword ullPortYieldRequired
ullICCIARConst: .dword ullICCIAR
ullICCEOIRConst: .dword ullICCEOIR
vApplicationIRQHandlerConst: .word vApplicationIRQHandler

View file

@ -33,6 +33,9 @@
#ifdef __cplusplus
extern "C" {
#endif
#include "FreeRTOS.h"
/* *INDENT-ON* */
/*-----------------------------------------------------------
@ -65,6 +68,8 @@ typedef uint64_t TickType_t;
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#define portCRITICAL_NESTING_IN_TCB 0
/*-----------------------------------------------------------*/
/* Hardware specifics. */
@ -78,6 +83,7 @@ typedef uint64_t TickType_t;
/* Task utilities. */
/* Called at the end of an ISR that can cause a context switch. */
#if ( configNUMBER_OF_CORES == 1 )
#define portEND_SWITCHING_ISR( xSwitchRequired ) \
{ \
extern uint64_t ullPortYieldRequired; \
@ -87,6 +93,17 @@ typedef uint64_t TickType_t;
ullPortYieldRequired = pdTRUE; \
} \
}
#else
#define portEND_SWITCHING_ISR( xSwitchRequired ) \
{ \
extern uint64_t ullPortYieldRequired[configNUMBER_OF_CORES]; \
\
if( xSwitchRequired != pdFALSE ) \
{ \
ullPortYieldRequired[portGET_CORE_ID()] = pdTRUE; \
} \
}
#endif
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
#if defined( GUEST )
@ -98,30 +115,89 @@ typedef uint64_t TickType_t;
/*-----------------------------------------------------------
* Critical section control
*----------------------------------------------------------*/
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
extern UBaseType_t uxPortSetInterruptMask( void );
extern void vPortClearInterruptMask( UBaseType_t uxNewMaskValue );
extern void vPortInstallFreeRTOSVectorTable( void );
#define portDISABLE_INTERRUPTS() \
__asm volatile ( "MSR DAIFSET, #2" ::: "memory" ); \
__asm volatile ( "DSB SY" ); \
__asm volatile ( "ISB SY" );
static inline UBaseType_t uxDisableInterrupts()
{
unsigned long flags;
#define portENABLE_INTERRUPTS() \
__asm volatile ( "MSR DAIFCLR, #2" ::: "memory" ); \
__asm volatile ( "DSB SY" ); \
__asm volatile ( "ISB SY" );
__asm volatile (
"mrs %0, daif\n"
"msr daifset, #2\n"
"dsb sy\n"
"isb sy\n"
: "=r" (flags)
:
: "memory"
);
return flags;
}
/* Globally unmask IRQs by clearing the I bit (bit 7) in DAIF, then
 * synchronise so the change takes effect before the next instruction.
 *
 * Fix vs. previous version: the old body executed a pointless
 * 'mrs x0, daif' whose result was never used, clobbering X0 without
 * declaring it to the compiler - undefined behaviour that could corrupt a
 * live value held in X0.  The read has been removed. */
static inline void vEnableInterrupts( void )
{
    __asm volatile (
        "msr daifclr, #2\n"
        "dsb sy\n"
        "isb sy\n"
        :
        :
        : "memory"
    );
}
/* Restore the IRQ-mask state (DAIF.I, bit 7) captured earlier by
 * uxDisableInterrupts().  All other DAIF bits are left unchanged.
 *
 * Fix vs. previous version: the old code split the sequence across several
 * independent asm statements and passed intermediate values between them in
 * X1/X2 without telling the compiler - undefined behaviour, as the compiler
 * may use those registers between the statements.  The whole read-modify-
 * write is now a single asm block using compiler-allocated registers. */
static inline void vRestoreInterrupts( UBaseType_t flags )
{
    unsigned long tmp;

    __asm volatile (
        "mrs %0, daif\n"        /* Current DAIF.               */
        "bic %0, %0, #128\n"    /* Clear the current I bit.    */
        "and %1, %1, #128\n"    /* Isolate the saved I bit.    */
        "orr %0, %0, %1\n"      /* Merge the saved I bit in.   */
        "msr daif, %0\n"
        "dsb sy\n"
        "isb sy\n"
        : "=&r" ( tmp ), "+&r" ( flags )
        :
        : "memory"
    );
}
#define portDISABLE_INTERRUPTS() uxDisableInterrupts()
#define portENABLE_INTERRUPTS() vEnableInterrupts()
#define portSET_INTERRUPT_MASK() uxDisableInterrupts()
#define portCLEAR_INTERRUPT_MASK(x) vRestoreInterrupts(x)
UBaseType_t uxPortSetInterruptMask( void );
void vPortClearInterruptMask( UBaseType_t );
#define portSET_INTERRUPT_MASK_FROM_ISR() uxPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortClearInterruptMask(x)
/* These macros do not globally disable/enable interrupts. They do mask off
* interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */
#define portENTER_CRITICAL() vPortEnterCritical();
#define portEXIT_CRITICAL() vPortExitCritical();
#define portSET_INTERRUPT_MASK_FROM_ISR() uxPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x )
#if ( configNUMBER_OF_CORES == 1 )
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#else
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
#endif
#define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR()
#define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x )
/* Critical nesting count management. */
/* Per-core critical nesting counters, used by the SMP kernel when the count
 * is kept in the port rather than in the TCB.  Each macro operates on the
 * calling core's own slot, so no cross-core synchronisation is needed.
 * NOTE(review): the array is declared here as 'volatile UBaseType_t' but
 * defined in the port .c file as 'volatile uint64_t' - this assumes
 * UBaseType_t is uint64_t on AArch64; confirm. */
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 0 ) )
extern volatile UBaseType_t ullCriticalNestings[ configNUMBER_OF_CORES ];
#define portGET_CRITICAL_NESTING_COUNT() ( ullCriticalNestings[ portGET_CORE_ID() ] )
#define portSET_CRITICAL_NESTING_COUNT( x ) ( ullCriticalNestings[ portGET_CORE_ID() ] = ( x ) )
#define portINCREMENT_CRITICAL_NESTING_COUNT() ( ullCriticalNestings[ portGET_CORE_ID() ]++ )
#define portDECREMENT_CRITICAL_NESTING_COUNT() ( ullCriticalNestings[ portGET_CORE_ID() ]-- )
#endif /* ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 0 ) ) */
/*-----------------------------------------------------------*/
@ -215,6 +291,20 @@ void FreeRTOS_Tick_Handler( void );
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
#if ( configNUMBER_OF_CORES > 1 )
/* ID of the core executing the call (MPIDR_EL1 Aff0 in the port .c). */
extern uint32_t uxPortGetCoreID ( void );
#define portGET_CORE_ID() uxPortGetCoreID()
/* Request a context switch on another core (implemented by the application
 * port layer, typically via a software-generated interrupt). */
extern void vPortYieldCore ( uint8_t CoreID );
#define portYIELD_CORE( xCoreID ) vPortYieldCore (xCoreID)
/* Recursive spinlocks used by the SMP kernel: lock 0 is the ISR lock,
 * lock 1 is the task lock. */
extern void vPortRecursiveLock( uint32_t ulLockNum );
extern void vPortRecursiveUnlock( uint32_t ulLockNum );
#define portRELEASE_ISR_LOCK() vPortRecursiveUnlock(0u)
#define portGET_ISR_LOCK() vPortRecursiveLock(0u)
#define portRELEASE_TASK_LOCK() vPortRecursiveUnlock(1u)
#define portGET_TASK_LOCK() vPortRecursiveLock(1u)
#endif
/* *INDENT-OFF* */
#ifdef __cplusplus
}