Compare commits

...

2 commits

Author SHA1 Message Date
Ahmed Ismail e0bdef952c
Merge 255b9b642b into 7225fbcbb9 2025-07-03 16:17:56 +00:00
Ahmed Ismail 255b9b642b armv8-r: Add Arm Cortex-R82 non-MPU port
This commit adds the GCC/ARMClang non-MPU port variant for the
ARM Cortex-R82 processor, which is ARMv8-R AArch64 based.
The work done is inspired by the GCC ARM_AARCH64 FreeRTOS port.

This port has the following features:
* Uses a single security state (no TrustZone).
* Supports SMP (symmetric multi-processing).
* Does not support the Hypervisor exception level (EL2).
* Supports neither PMSA (MPU) nor VMSA (MMU).

Signed-off-by: Ahmed Ismail <Ahmed.Ismail@arm.com>
2025-07-03 17:04:06 +01:00
7 changed files with 1622 additions and 0 deletions

View file

@@ -430,6 +430,7 @@ ldrbs
LDRBS
LDRNE
ldsr
ldxr
lidt
LINKR
LJMP
@@ -505,6 +506,7 @@ movs
movw
MOVWF
movx
MPIDR
MPLAB
MPUCTRL
MQTT
@@ -809,6 +811,7 @@ STTBRK
STTDLY
STTOUT
STTTO
stxr
SVACC
svcne
SVDIS

View file

@@ -114,6 +114,7 @@ KERNEL_ARM_COLLAB_FILES_PATTERNS = [
r'.*portable/.*/ARM_CM35*',
r'.*portable/.*/ARM_CM55*',
r'.*portable/.*/ARM_CM85*',
r'.*portable/.*/ARM_CR82*',
]
KERNEL_HEADER = [

View file

@@ -87,6 +87,7 @@ if(NOT FREERTOS_PORT)
" GCC_ARM_CR5 - Compiler: GCC Target: ARM Cortex-R5\n"
" GCC_ARM_CRX_MPU - Compiler: GCC Target: ARM Cortex-Rx with MPU\n"
" GCC_ARM_CRX_NOGIC - Compiler: GCC Target: ARM Cortex-Rx no GIC\n"
" GCC_ARM_CR82 - Compiler: GCC Target: ARM Cortex-R82\n"
" GCC_ARM7_AT91FR40008 - Compiler: GCC Target: ARM7 Atmel AT91R40008\n"
" GCC_ARM7_AT91SAM7S - Compiler: GCC Target: ARM7 Atmel AT91SAM7S\n"
" GCC_ARM7_LPC2000 - Compiler: GCC Target: ARM7 LPC2000\n"

View file

@@ -218,6 +218,11 @@ add_library(freertos_kernel_port OBJECT
GCC/ARM_CRx_No_GIC/port.c
GCC/ARM_CRx_No_GIC/portASM.S>
# ARMv8-R ports for GCC
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CR82>:
GCC/ARM_CR82/port.c
GCC/ARM_CR82/portASM.S>
# ARMv4T ARM7TDMI ports for GCC
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM7_AT91FR40008>:
GCC/ARM7_AT91FR40008/port.c
@@ -963,6 +968,9 @@ target_include_directories(freertos_kernel_port_headers INTERFACE
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CRX_MPU>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CRx_MPU>
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CRX_NOGIC>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CRx_No_GIC>
# ARMv8-R ports for GCC
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CR82>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CR82>
# ARMv4T ARM7TDMI ports for GCC
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM7_AT91FR40008>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM7_AT91FR40008>
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM7_AT91SAM7S>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM7_AT91SAM7S>

View file

@@ -0,0 +1,811 @@
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright 2025 Arm Limited and/or its affiliates
* <open-source-office@arm.com>
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
/* Standard includes. */
#include <stdlib.h>
#include <string.h>
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS
#error configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
#endif
#ifndef configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET
#error configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
#endif
#ifndef configUNIQUE_INTERRUPT_PRIORITIES
#error configUNIQUE_INTERRUPT_PRIORITIES must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
#endif
#ifndef configSETUP_TICK_INTERRUPT
#error configSETUP_TICK_INTERRUPT() must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
#endif /* configSETUP_TICK_INTERRUPT */
#ifndef configMAX_API_CALL_INTERRUPT_PRIORITY
#error configMAX_API_CALL_INTERRUPT_PRIORITY must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
#endif
#if configMAX_API_CALL_INTERRUPT_PRIORITY == 0
#error "configMAX_API_CALL_INTERRUPT_PRIORITY must not be set to 0"
#endif
#if configMAX_API_CALL_INTERRUPT_PRIORITY > configUNIQUE_INTERRUPT_PRIORITIES
#error "configMAX_API_CALL_INTERRUPT_PRIORITY must be less than or equal to configUNIQUE_INTERRUPT_PRIORITIES as the lower the numeric priority value the higher the logical interrupt priority"
#endif
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
/* Check the configuration. */
#if ( configMAX_PRIORITIES > 32 )
#error "configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice."
#endif
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
/* In case security extensions are implemented. */
#if configMAX_API_CALL_INTERRUPT_PRIORITY <= ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
#error "configMAX_API_CALL_INTERRUPT_PRIORITY must be greater than ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )"
#endif
#ifndef configCLEAR_TICK_INTERRUPT
#error configCLEAR_TICK_INTERRUPT must be defined in FreeRTOSConfig.h to clear which ever interrupt was used to generate the tick interrupt.
#endif
#if configNUMBER_OF_CORES < 1
#error configNUMBER_OF_CORES must be set to 1 or greater. If the application is not using multiple cores then set configNUMBER_OF_CORES to 1.
#endif /* configNUMBER_OF_CORES < 1 */
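/* The checks above spell out the application supplied configuration this
 * port expects. A minimal FreeRTOSConfig.h excerpt is sketched below for
 * illustration only - the GIC base address, priority counts and tick helper
 * names are placeholders for a hypothetical platform. */
/*
 * #define configINTERRUPT_CONTROLLER_BASE_ADDRESS            ( 0xAF000000UL )        // Hypothetical GIC base address.
 * #define configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET    ( 0x10000UL )           // Hypothetical CPU interface offset.
 * #define configUNIQUE_INTERRUPT_PRIORITIES                  32                      // Must match the GIC's implemented priority levels.
 * #define configMAX_API_CALL_INTERRUPT_PRIORITY              25                      // Non-zero, greater than configUNIQUE_INTERRUPT_PRIORITIES / 2.
 * #define configSETUP_TICK_INTERRUPT()                       vSetupTickInterrupt()   // Hypothetical application function.
 * #define configCLEAR_TICK_INTERRUPT()                       vClearTickInterrupt()   // Hypothetical application function.
 * #define configNUMBER_OF_CORES                              1
 */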
/* A critical section is exited when the critical section nesting count reaches
* this value. */
#define portNO_CRITICAL_NESTING ( ( size_t ) 0 )
/* In all GICs 255 can be written to the priority mask register to unmask all
* (but the lowest) interrupt priority. */
#define portUNMASK_VALUE ( 0xFFUL )
/* Macro to unmask all interrupt priorities. */
#define portCLEAR_INTERRUPT_PRIORITIES_MASK() \
{ \
__asm volatile ( \
"MSR DAIFSET, #2 \n" \
"DSB SY \n" \
"ISB SY \n" \
"MSR ICC_PMR_EL1, %0 \n" \
"DSB SY \n" \
"ISB SY \n" \
"MSR DAIFCLR, #2 \n" \
"DSB SY \n" \
"ISB SY \n" \
: \
: "r" ( portUNMASK_VALUE ) \
); \
}
/* Tasks are not created with a floating point context, but can be given a
* floating point context after they have been created. A variable is stored as
* part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task
* does not have an FPU context, or any other value if the task does have an FPU
* context. */
#define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 )
/* Constants required to setup the initial task context. */
#define portSP_ELx ( ( StackType_t ) 0x01 )
#define portSP_EL0 ( ( StackType_t ) 0x00 )
#define portEL1 ( ( StackType_t ) 0x04 )
#define portINITIAL_PSTATE ( portEL1 | portSP_EL0 )
/* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary
* point is zero. */
#define portBINARY_POINT_BITS ( ( uint8_t ) 0x03 )
/* Masks all bits in the APSR other than the mode bits. */
#define portAPSR_MODE_BITS_MASK ( 0x0C )
/* The I bit in the DAIF bits. */
#define portDAIF_I ( 0x80 )
#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff )
#define portBIT_0_SET ( ( uint8_t ) 0x01 )
/* The space on the stack required to hold the FPU registers.
* There are 32 128-bit plus 2 64-bit status registers.*/
#define portFPU_REGISTER_WORDS ( ( 32 * 2 ) + 2 )
/*-----------------------------------------------------------*/
/*
* Starts the first task executing. This function is necessarily written in
* assembly code so is implemented in portASM.s.
*/
extern void vPortRestoreTaskContext( void );
extern void vGIC_EnableIRQ( uint32_t ulInterruptID );
extern void vGIC_SetPriority( uint32_t ulInterruptID, uint32_t ulPriority );
extern void vGIC_PowerUpRedistributor( void );
extern void vGIC_EnableCPUInterface( void );
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES == 1 )
volatile uint64_t ullCriticalNesting = 0ULL;
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
* then floating point context must be saved and restored for the task. */
uint64_t ullPortTaskHasFPUContext = pdFALSE;
/* Set to 1 to pend a context switch from an ISR. */
uint64_t ullPortYieldRequired = pdFALSE;
/* Counts the interrupt nesting depth. A context switch is only performed
 * if the nesting depth is 0. */
uint64_t ullPortInterruptNesting = 0;
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
* then floating point context must be saved and restored for the task. */
uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE };
uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE };
uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 };
/* Flag to control tick ISR handling; set to pdTRUE just before the scheduler starts. */
__attribute__((section(".shared_ram")))
uint8_t ucPortSchedulerRunning = pdFALSE;
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
/* Used in the ASM code. */
__attribute__( ( used ) ) const uint64_t ullMaxAPIPriorityMask = ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT );
/*-----------------------------------------------------------*/
/*
* See header file for description.
*/
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
{
/* Setup the initial stack of the task. The stack is set exactly as
* expected by the portRESTORE_CONTEXT() macro. */
/* First all the general purpose registers. */
pxTopOfStack--;
*pxTopOfStack = 0x0101010101010101ULL; /* R1 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
pxTopOfStack--;
*pxTopOfStack = 0x0303030303030303ULL; /* R3 */
pxTopOfStack--;
*pxTopOfStack = 0x0202020202020202ULL; /* R2 */
pxTopOfStack--;
*pxTopOfStack = 0x0505050505050505ULL; /* R5 */
pxTopOfStack--;
*pxTopOfStack = 0x0404040404040404ULL; /* R4 */
pxTopOfStack--;
*pxTopOfStack = 0x0707070707070707ULL; /* R7 */
pxTopOfStack--;
*pxTopOfStack = 0x0606060606060606ULL; /* R6 */
pxTopOfStack--;
*pxTopOfStack = 0x0909090909090909ULL; /* R9 */
pxTopOfStack--;
*pxTopOfStack = 0x0808080808080808ULL; /* R8 */
pxTopOfStack--;
*pxTopOfStack = 0x1111111111111111ULL; /* R11 */
pxTopOfStack--;
*pxTopOfStack = 0x1010101010101010ULL; /* R10 */
pxTopOfStack--;
*pxTopOfStack = 0x1313131313131313ULL; /* R13 */
pxTopOfStack--;
*pxTopOfStack = 0x1212121212121212ULL; /* R12 */
pxTopOfStack--;
*pxTopOfStack = 0x1515151515151515ULL; /* R15 */
pxTopOfStack--;
*pxTopOfStack = 0x1414141414141414ULL; /* R14 */
pxTopOfStack--;
*pxTopOfStack = 0x1717171717171717ULL; /* R17 */
pxTopOfStack--;
*pxTopOfStack = 0x1616161616161616ULL; /* R16 */
pxTopOfStack--;
*pxTopOfStack = 0x1919191919191919ULL; /* R19 */
pxTopOfStack--;
*pxTopOfStack = 0x1818181818181818ULL; /* R18 */
pxTopOfStack--;
*pxTopOfStack = 0x2121212121212121ULL; /* R21 */
pxTopOfStack--;
*pxTopOfStack = 0x2020202020202020ULL; /* R20 */
pxTopOfStack--;
*pxTopOfStack = 0x2323232323232323ULL; /* R23 */
pxTopOfStack--;
*pxTopOfStack = 0x2222222222222222ULL; /* R22 */
pxTopOfStack--;
*pxTopOfStack = 0x2525252525252525ULL; /* R25 */
pxTopOfStack--;
*pxTopOfStack = 0x2424242424242424ULL; /* R24 */
pxTopOfStack--;
*pxTopOfStack = 0x2727272727272727ULL; /* R27 */
pxTopOfStack--;
*pxTopOfStack = 0x2626262626262626ULL; /* R26 */
pxTopOfStack--;
*pxTopOfStack = 0x2929292929292929ULL; /* R29 */
pxTopOfStack--;
*pxTopOfStack = 0x2828282828282828ULL; /* R28 */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x00; /* XZR - has no effect, used so there are an even number of registers. */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) 0x00; /* R30 - procedure call link register. */
pxTopOfStack--;
*pxTopOfStack = portINITIAL_PSTATE;
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) pxCode; /* Exception return address. */
#if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT )
{
/* The task will start with a critical nesting count of 0 as interrupts are
* enabled. */
pxTopOfStack--;
*pxTopOfStack = portNO_CRITICAL_NESTING;
/* The task will start without a floating point context. A task that
* uses the floating point hardware must call vPortTaskUsesFPU() before
* executing any floating point instructions. */
pxTopOfStack--;
*pxTopOfStack = portNO_FLOATING_POINT_CONTEXT;
}
#elif ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT )
{
/* The task will start with a floating point context. Leave enough
* space for the registers - and ensure they are initialised to 0. */
pxTopOfStack -= portFPU_REGISTER_WORDS;
memset( pxTopOfStack, 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) );
/* The task will start with a critical nesting count of 0 as interrupts are
* enabled. */
pxTopOfStack--;
*pxTopOfStack = portNO_CRITICAL_NESTING;
pxTopOfStack--;
*pxTopOfStack = pdTRUE;
#if ( configNUMBER_OF_CORES == 1 )
ullPortTaskHasFPUContext = pdTRUE;
#else
ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE;
#endif
}
#else /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */
{
#error "Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 1, 2, or left undefined."
}
#endif /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */
return pxTopOfStack;
}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void )
{
uint64_t ullAPSR;
#if ( configASSERT_DEFINED == 1 )
{
if ( portGET_CORE_ID() == 0 )
{
volatile uint8_t ucOriginalPriority;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET );
volatile uint8_t ucMaxPriorityValue;
/* Determine how many priority bits are implemented in the GIC.
*
* Save the interrupt priority value that is about to be clobbered. */
ucOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to
* all possible bits. */
*pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
/* Read the value back to see how many bits stuck. */
ucMaxPriorityValue = *pucFirstUserPriorityRegister;
/* Shift to the least significant bits. */
while( ( ucMaxPriorityValue & portBIT_0_SET ) != portBIT_0_SET )
{
ucMaxPriorityValue >>= ( uint8_t ) 0x01;
}
/* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
* value. */
configASSERT( ucMaxPriorityValue >= portLOWEST_INTERRUPT_PRIORITY );
/* Restore the clobbered interrupt priority register to its original
* value. */
*pucFirstUserPriorityRegister = ucOriginalPriority;
}
}
#endif /* configASSERT_DEFINED */
__asm volatile ( "MRS %0, CurrentEL" : "=r" ( ullAPSR ) );
ullAPSR &= portAPSR_MODE_BITS_MASK;
configASSERT( ullAPSR == portEL1 );
/* Interrupts are turned off in the CPU itself to ensure a tick does
* not execute while the scheduler is being started. Interrupts are
* automatically turned back on in the CPU when the first task starts
* executing. */
portDISABLE_INTERRUPTS();
#if ( configNUMBER_OF_CORES > 1 )
if (0 == portGET_CORE_ID())
{
/* Start the timer that generates the tick ISR. */
configSETUP_TICK_INTERRUPT();
ucPortSchedulerRunning = pdTRUE;
__asm__ volatile ("dsb sy");
/* Start all other cores and let them execute vPortRestoreTaskContext(). */
__asm__ volatile ("sev");
}
else
{
portSETUP_SGI_INTERRUPT();
}
#else /* if ( configNUMBER_OF_CORES > 1 ) */
/* Start the timer that generates the tick ISR. */
configSETUP_TICK_INTERRUPT();
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
/* Start the first task executing. */
vPortRestoreTaskContext();
return 0;
}
/*-----------------------------------------------------------*/
void vPortEndScheduler( void )
{
/* Stub implementation for ports where there is nothing to return to
* Artificially force an assert. */
configASSERT( NULL );
}
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES == 1 )
void vPortEnterCritical( void )
{
/* Mask interrupts up to the max syscall interrupt priority. */
uxPortSetInterruptMask();
/* Now interrupts are disabled ullCriticalNesting can be accessed
* directly. Increment ullCriticalNesting to keep a count of how many times
* portENTER_CRITICAL() has been called. */
ullCriticalNesting++;
/* This is not the interrupt safe version of the enter critical function so
* assert() if it is being called from an interrupt context. Only API
* functions that end in "FromISR" can be used in an interrupt. Only assert if
* the critical nesting count is 1 to protect against recursive calls if the
* assert function also uses a critical section. */
if( ullCriticalNesting == 1ULL )
{
configASSERT( ullPortInterruptNesting == 0 );
}
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
if( ullCriticalNesting > portNO_CRITICAL_NESTING )
{
/* Decrement the nesting count as the critical section is being
* exited. */
ullCriticalNesting--;
/* If the nesting level has reached zero then all interrupt
* priorities must be re-enabled. */
if( ullCriticalNesting == portNO_CRITICAL_NESTING )
{
/* Critical nesting has reached zero so all interrupt priorities
* should be unmasked. */
portCLEAR_INTERRUPT_PRIORITIES_MASK();
}
}
}
#endif /* if ( configNUMBER_OF_CORES == 1 ) */
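/* Example usage sketch (illustrative, not part of this file): task code
 * reaches the functions above through the critical section macros. The
 * shared counter below is hypothetical. */
/*
 * taskENTER_CRITICAL();
 * {
 *     ulSharedCounter++;   // Access data shared with other tasks or interrupts.
 * }
 * taskEXIT_CRITICAL();
 */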
/*-----------------------------------------------------------*/
void FreeRTOS_Tick_Handler( void )
{
/* Must be the lowest possible priority. */
uint64_t ullRunningInterruptPriority;
__asm volatile ( "MRS %0, ICC_RPR_EL1" : "=r" ( ullRunningInterruptPriority ) );
configASSERT( ullRunningInterruptPriority == ( portLOWEST_USABLE_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );
/* Interrupts should not be enabled before this point. */
#if ( configASSERT_DEFINED == 1 )
{
uint64_t ullMaskBits;
__asm volatile ( "mrs %0, DAIF" : "=r" ( ullMaskBits )::"memory" );
configASSERT( ( ullMaskBits & portDAIF_I ) != 0 );
}
#endif /* configASSERT_DEFINED */
/* Set interrupt mask before altering scheduler structures. The tick
* handler runs at the lowest priority, so interrupts cannot already be masked,
* so there is no need to save and restore the current mask value. It is
* necessary to turn off interrupts in the CPU itself while the ICCPMR is being
* updated. */
__asm volatile ( "MSR ICC_PMR_EL1, %0 \n"
"DSB SY \n"
"ISB SY \n"
::"r" ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) : "memory" );
/* Ok to enable interrupts after the interrupt source has been cleared. */
configCLEAR_TICK_INTERRUPT();
portENABLE_INTERRUPTS();
#if ( configNUMBER_OF_CORES > 1 )
UBaseType_t x = portENTER_CRITICAL_FROM_ISR();
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
/* Increment the RTOS tick. */
if( xTaskIncrementTick() != pdFALSE )
{
#if ( configNUMBER_OF_CORES == 1 )
ullPortYieldRequired = pdTRUE;
#else
ullPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE;
#endif
}
#if ( configNUMBER_OF_CORES > 1 )
portEXIT_CRITICAL_FROM_ISR(x);
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
/* Ensure all interrupt priorities are active again. */
portCLEAR_INTERRUPT_PRIORITIES_MASK();
}
/*-----------------------------------------------------------*/
#if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT )
void vPortTaskUsesFPU( void )
{
/* A task is registering the fact that it needs an FPU context. Set the
* FPU flag (which is saved as part of the task context). */
#if ( configNUMBER_OF_CORES == 1 )
ullPortTaskHasFPUContext = pdTRUE;
#else
ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE;
#endif
/* Consider initialising the FPSR here - but probably not necessary in
* AArch64. */
}
#endif /* configUSE_TASK_FPU_SUPPORT */
/*-----------------------------------------------------------*/
void vPortClearInterruptMask( UBaseType_t uxNewMaskValue )
{
if( uxNewMaskValue == pdFALSE )
{
portCLEAR_INTERRUPT_PRIORITIES_MASK();
}
}
/*-----------------------------------------------------------*/
UBaseType_t uxPortSetInterruptMask( void )
{
uint32_t ulReturn;
uint64_t ullPMRValue;
/* Interrupts in the CPU must be turned off while the ICCPMR is being
 * updated. */
portDISABLE_INTERRUPTS();
__asm volatile ( "MRS %0, ICC_PMR_EL1" : "=r" ( ullPMRValue ) );
if( ullPMRValue == ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
{
/* Interrupts were already masked. */
ulReturn = pdTRUE;
}
else
{
ulReturn = pdFALSE;
__asm volatile ( "MSR ICC_PMR_EL1, %0 \n"
"DSB SY \n"
"ISB SY \n"
::"r" ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) : "memory" );
}
portENABLE_INTERRUPTS();
return ulReturn;
}
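/* Example usage sketch (illustrative, not part of this file): the mask and
 * unmask functions above are normally reached through the FromISR wrappers
 * defined in portmacro.h. The function name below is hypothetical. */
/*
 * void vExampleFromISR( void )
 * {
 *     UBaseType_t uxSavedMask;
 *
 *     uxSavedMask = portSET_INTERRUPT_MASK_FROM_ISR();
 *     {
 *         // Access data shared with tasks here.
 *     }
 *     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedMask );
 * }
 */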
/*-----------------------------------------------------------*/
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
{
/* The following assertion will fail if a service routine (ISR) for
* an interrupt that has been assigned a priority above
* configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
* function. ISR safe FreeRTOS API functions must *only* be called
* from interrupts that have been assigned a priority at or below
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
* Numerically low interrupt priority numbers represent logically high
* interrupt priorities, therefore the priority of the interrupt must
* be set to a value equal to or numerically *higher* than
* configMAX_SYSCALL_INTERRUPT_PRIORITY.
*
* FreeRTOS maintains separate thread and ISR API functions to ensure
* interrupt entry is as fast and simple as possible. */
uint64_t ullRunningInterruptPriority;
__asm volatile ( "MRS %0, ICC_RPR_EL1" : "=r" ( ullRunningInterruptPriority ) );
configASSERT( ullRunningInterruptPriority >= ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );
}
#endif /* configASSERT_DEFINED */
/*-----------------------------------------------------------*/
/*
* If the application provides an implementation of vApplicationIRQHandler(),
* then it will get called directly without saving the FPU registers on
* interrupt entry, and this weak implementation of
* vApplicationFPUSafeIRQHandler() is just provided to remove linkage errors -
* it should never actually get called so its implementation contains a
* call to configASSERT() that will always fail.
*
* If the application provides its own implementation of
* vApplicationFPUSafeIRQHandler() then the implementation of
* vApplicationIRQHandler() provided in portASM.S will save the FPU registers
* before calling it.
*
* Therefore, if the application writer wants FPU registers to be saved on
* interrupt entry their IRQ handler must be called
* vApplicationFPUSafeIRQHandler(), and if the application writer does not want
* FPU registers to be saved on interrupt entry their IRQ handler must be
* called vApplicationIRQHandler().
*/
__attribute__( ( weak ) ) void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR )
{
( void ) ulICCIAR;
configASSERT( ( volatile void * ) NULL );
}
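/* Example usage sketch (illustrative, not part of this file): an application
 * supplied vApplicationFPUSafeIRQHandler() that forwards the tick interrupt
 * to FreeRTOS_Tick_Handler(). The interrupt ID below is a placeholder for
 * whichever peripheral the application uses to generate the tick. */
/*
 * #define appTICK_TIMER_INTERRUPT_ID    ( 30UL )   // Hypothetical tick timer INTID.
 *
 * void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR )
 * {
 *     if( ulICCIAR == appTICK_TIMER_INTERRUPT_ID )
 *     {
 *         FreeRTOS_Tick_Handler();
 *     }
 *     else
 *     {
 *         // Dispatch any other application interrupts here.
 *     }
 * }
 */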
#if ( configNUMBER_OF_CORES > 1 )
/* Which core owns the lock */
volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ];
/* Lock count a core owns */
volatile uint64_t ucRecursionCountByLock[ eLockCount ];
/* Index 0 is used for ISR lock and Index 1 is used for task lock */
uint32_t ulGateWord[ eLockCount ];
void vInterruptCore(uint32_t ulInterruptID, uint32_t ulCoreID)
{
uint64_t ulRegVal = 0;
uint32_t ulCoreMask = (1UL << ulCoreID);
ulRegVal |= ( (ulCoreMask & 0xFFFF) | ( ( ulInterruptID & 0xF ) << 24U ) );
__asm__ volatile ( "msr ICC_SGI1R_EL1, %0" : : "r" ( ulRegVal ) );
__asm__ volatile ( "dsb sy");
__asm__ volatile ( "isb sy");
}
static inline void prvSpinUnlock( uint32_t * ulLock )
{
__asm volatile (
"dmb sy\n"
"mov w1, #0\n"
"str w1, [%x0]\n"
"dsb sy\n"
"sev\n"
:
: "r" ( ulLock )
: "memory", "w1"
);
}
static inline uint32_t prvSpinTrylock( uint32_t * ulLock )
{
register uint32_t ulRet;
/* Try to acquire spinlock; caller is responsible for further barriers. */
__asm volatile (
"1:\n"
"ldxr w1, [%x1]\n"
"cmp w1, #1\n"
"beq 2f\n"
"mov w2, #1\n"
"stxr w1, w2, [%x1]\n"
"cmp w1, #0\n"
"bne 1b\n"
"2:\n"
"mov %w0, w1\n"
: "=r" ( ulRet )
: "r" ( ulLock )
: "memory", "w1", "w2"
);
return ulRet;
}
/* Read 64b value shared between cores */
static inline uint64_t prvGet64( volatile uint64_t * x )
{
__asm( "dsb sy" );
return *x;
}
/* Write 64b value shared between cores */
static inline void prvSet64( volatile uint64_t * x,
uint64_t value )
{
*x = value;
__asm( "dsb sy" );
}
void vPortRecursiveLock( BaseType_t xCoreID,
ePortRTOSLock eLockNum,
BaseType_t uxAcquire )
{
/* Validate the core ID and lock number */
configASSERT( xCoreID < portMAX_CORE_COUNT );
configASSERT( eLockNum < eLockCount );
uint32_t ulLockBit = 1u << eLockNum;
/* Lock acquire */
if( uxAcquire )
{
/* Check if spinlock is available */
/* If spinlock is not available check if the core owns the lock */
/* If the core owns the lock, increment its lock count */
/* If core does not own the lock wait for the spinlock */
if( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
{
/* Check if the core owns the spinlock */
if( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit )
{
configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 255u );
prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) + 1 ) );
return;
}
/* Preload the gate word into the cache */
uint32_t dummy = ulGateWord[ eLockNum ];
dummy++;
while( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
{
__asm volatile ( "wfe" );
}
}
/* Add barrier to ensure lock is taken before we proceed */
__asm__ __volatile__ ( "dmb sy" ::: "memory" );
/* Assert the lock count is 0 when the spinlock is free and is acquired */
configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) == 0 );
/* Set lock count as 1 */
prvSet64( &ucRecursionCountByLock[ eLockNum ], 1 );
/* Set ucOwnedByCore */
prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) | ulLockBit ) );
}
/* Lock release */
else
{
/* Assert the lock is not free already */
configASSERT( ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 );
configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 0 );
/* Reduce ucRecursionCountByLock by 1 */
prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) - 1 ) );
if( !prvGet64( &ucRecursionCountByLock[ eLockNum ] ) )
{
prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ~ulLockBit ) );
prvSpinUnlock( &ulGateWord[ eLockNum ] );
/* Add barrier to ensure lock status is reflected before we proceed */
__asm__ __volatile__ ( "dmb sy" ::: "memory" );
}
}
}
BaseType_t xPortGetCoreID( void )
{
register BaseType_t xCoreID;
__asm volatile (
"mrs x0, MPIDR_EL1\n"
"and %0, x0, #0xFF\n"
: "=r" ( xCoreID )
:
: "memory", "x0"
);
return xCoreID;
}
void FreeRTOS_SGI_Handler( void )
{
/* Must be the lowest possible priority. */
uint64_t ullRunningInterruptPriority;
__asm volatile ( "MRS %0, ICC_RPR_EL1" : "=r" ( ullRunningInterruptPriority ) );
configASSERT( ullRunningInterruptPriority == ( portLOWEST_USABLE_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );
/* Interrupts should not be enabled before this point. */
#if ( configASSERT_DEFINED == 1 )
{
uint64_t ullMaskBits;
__asm volatile ( "mrs %0, DAIF" : "=r" ( ullMaskBits )::"memory" );
configASSERT( ( ullMaskBits & portDAIF_I ) != 0 );
}
#endif /* configASSERT_DEFINED */
/* Set interrupt mask before altering scheduler structures. The SGI
* handler runs at the lowest priority, so interrupts cannot already be masked,
* so there is no need to save and restore the current mask value. It is
* necessary to turn off interrupts in the CPU itself while the ICCPMR is being
* updated. */
__asm volatile ( "MSR ICC_PMR_EL1, %0 \n"
"DSB SY \n"
"ISB SY \n"
::"r" ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) : "memory" );
/* Ok to enable interrupts after the interrupt source has been cleared. */
portENABLE_INTERRUPTS();
#if ( configNUMBER_OF_CORES == 1 )
ullPortYieldRequired = pdTRUE;
#else
ullPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE;
#endif
/* Ensure all interrupt priorities are active again. */
portCLEAR_INTERRUPT_PRIORITIES_MASK();
}
#endif /* if( configNUMBER_OF_CORES > 1 ) */

View file

@@ -0,0 +1,521 @@
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright 2025 Arm Limited and/or its affiliates
* <open-source-office@arm.com>
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
/*
* This file is tailored for ARM Cortex-R82 with SMP enabled.
* It includes macros and functions for saving/restoring task context,
* handling interrupts, and supporting multi-core operations.
*/
#include "FreeRTOSConfig.h"
.text
/* Variables and functions. */
.extern ullMaxAPIPriorityMask
#if ( configNUMBER_OF_CORES == 1 )
.extern pxCurrentTCB
.extern ullCriticalNesting
.extern ullPortInterruptNesting
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
.extern pxCurrentTCBs
.extern ullCriticalNestings
.extern ullPortInterruptNestings
#endif
.extern vTaskSwitchContext
.extern vApplicationIRQHandler
.extern ullPortTaskHasFPUContext
.extern ullPortYieldRequired
.extern _freertos_vector_table
.global FreeRTOS_IRQ_Handler
.global FreeRTOS_SWI_Handler
.global vPortRestoreTaskContext
.macro saveallgpregisters
/* Save all general-purpose registers on stack. */
STP X0, X1, [ SP, # - 0x10 ] !
STP X2, X3, [ SP, # - 0x10 ] !
STP X4, X5, [ SP, # - 0x10 ] !
STP X6, X7, [ SP, # - 0x10 ] !
STP X8, X9, [ SP, # - 0x10 ] !
STP X10, X11, [ SP, # - 0x10 ] !
STP X12, X13, [ SP, # - 0x10 ] !
STP X14, X15, [ SP, # - 0x10 ] !
STP X16, X17, [ SP, # - 0x10 ] !
STP X18, X19, [ SP, # - 0x10 ] !
STP X20, X21, [ SP, # - 0x10 ] !
STP X22, X23, [ SP, # - 0x10 ] !
STP X24, X25, [ SP, # - 0x10 ] !
STP X26, X27, [ SP, # - 0x10 ] !
STP X28, X29, [ SP, # - 0x10 ] !
STR X30, [ SP, # - 0x10 ] !
.endm
.macro restoreallgpregisters
/* Restore all general-purpose registers from stack. */
LDR X30, [ SP ], # 0x10
LDP X28, X29, [ SP ], # 0x10
LDP X26, X27, [ SP ], # 0x10
LDP X24, X25, [ SP ], # 0x10
LDP X22, X23, [ SP ], # 0x10
LDP X20, X21, [ SP ], # 0x10
LDP X18, X19, [ SP ], # 0x10
LDP X16, X17, [ SP ], # 0x10
LDP X14, X15, [ SP ], # 0x10
LDP X12, X13, [ SP ], # 0x10
LDP X10, X11, [ SP ], # 0x10
LDP X8, X9, [ SP ], # 0x10
LDP X6, X7, [ SP ], # 0x10
LDP X4, X5, [ SP ], # 0x10
LDP X2, X3, [ SP ], # 0x10
LDP X0, X1, [ SP ], # 0x10
.endm
.macro savefuncontextgpregs
/* Save function context general-purpose registers. */
STP X0, X1, [ SP, # - 0x10 ] !
STP X2, X3, [ SP, # - 0x10 ] !
STP X4, X5, [ SP, # - 0x10 ] !
STP X6, X7, [ SP, # - 0x10 ] !
STP X8, X9, [ SP, # - 0x10 ] !
STP X10, X11, [ SP, # - 0x10 ] !
STP X12, X13, [ SP, # - 0x10 ] !
STP X14, X15, [ SP, # - 0x10 ] !
STP X16, X17, [ SP, # - 0x10 ] !
STP X18, X29, [ SP, # - 0x10 ] !
STR X30, [ SP, # - 0x10 ] !
.endm
.macro restorefuncontextgpregs
/* Restore function context general-purpose registers. */
LDR X30, [ SP ], # 0x10
LDP X18, X29, [ SP ], # 0x10
LDP X16, X17, [ SP ], # 0x10
LDP X14, X15, [ SP ], # 0x10
LDP X12, X13, [ SP ], # 0x10
LDP X10, X11, [ SP ], # 0x10
LDP X8, X9, [ SP ], # 0x10
LDP X6, X7, [ SP ], # 0x10
LDP X4, X5, [ SP ], # 0x10
LDP X2, X3, [ SP ], # 0x10
LDP X0, X1, [ SP ], # 0x10
.endm
.macro savefloatregisters
/* Save floating-point registers and configuration/status registers. */
STP Q0, Q1, [ SP, # - 0x20 ] !
STP Q2, Q3, [ SP, # - 0x20 ] !
STP Q4, Q5, [ SP, # - 0x20 ] !
STP Q6, Q7, [ SP, # - 0x20 ] !
STP Q8, Q9, [ SP, # - 0x20 ] !
STP Q10, Q11, [ SP, # - 0x20 ] !
STP Q12, Q13, [ SP, # - 0x20 ] !
STP Q14, Q15, [ SP, # - 0x20 ] !
STP Q16, Q17, [ SP, # - 0x20 ] !
STP Q18, Q19, [ SP, # - 0x20 ] !
STP Q20, Q21, [ SP, # - 0x20 ] !
STP Q22, Q23, [ SP, # - 0x20 ] !
STP Q24, Q25, [ SP, # - 0x20 ] !
STP Q26, Q27, [ SP, # - 0x20 ] !
STP Q28, Q29, [ SP, # - 0x20 ] !
STP Q30, Q31, [ SP, # - 0x20 ] !
MRS X9, FPSR
MRS X10, FPCR
STP W9, W10, [ SP, # - 0x10 ] !
.endm
.macro restorefloatregisters
/* Restore floating-point registers and configuration/status registers. */
LDP W9, W10, [ SP ], # 0x10
MSR FPSR, X9
MSR FPCR, X10
LDP Q30, Q31, [ SP ], # 0x20
LDP Q28, Q29, [ SP ], # 0x20
LDP Q26, Q27, [ SP ], # 0x20
LDP Q24, Q25, [ SP ], # 0x20
LDP Q22, Q23, [ SP ], # 0x20
LDP Q20, Q21, [ SP ], # 0x20
LDP Q18, Q19, [ SP ], # 0x20
LDP Q16, Q17, [ SP ], # 0x20
LDP Q14, Q15, [ SP ], # 0x20
LDP Q12, Q13, [ SP ], # 0x20
LDP Q10, Q11, [ SP ], # 0x20
LDP Q8, Q9, [ SP ], # 0x20
LDP Q6, Q7, [ SP ], # 0x20
LDP Q4, Q5, [ SP ], # 0x20
LDP Q2, Q3, [ SP ], # 0x20
LDP Q0, Q1, [ SP ], # 0x20
.endm
.macro portSAVE_CONTEXT
/* Switch to use the EL0 stack pointer. */
MSR SPSEL, # 0
/* Save the entire context. */
saveallgpregisters
/* Save the SPSR and ELR values. */
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
STP X2, X3, [ SP, # - 0x10 ] !
/* Save the critical section nesting depth. */
LDR X0, ullCriticalNestingsConst
#if configNUMBER_OF_CORES > 1
/* Calculate per-core index using MPIDR_EL1 for SMP support. */
MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */
AND X1, X1, # 0xff /* Extract Aff0 (core ID) */
LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes) */
ADD X0, X0, X1 /* Add offset to base address */
#endif
LDR X3, [ X0 ]
/* Save the FPU context indicator. */
LDR X0, ullPortTaskHasFPUContextConst
#if configNUMBER_OF_CORES > 1
ADD X0, X0, X1 /* Add to the base of the FPU array */
#endif
LDR X2, [ X0 ]
/* Save the FPU context, if any (32 128-bit registers). */
CMP X2, # 0
B.EQ 1f /* FPU context not present, skip saving FPU registers */
savefloatregisters
1 :
/* Store the critical nesting count and FPU context indicator. */
STP X2, X3, [ SP, # - 0x10 ] !
LDR X0, pxCurrentTCBsConst
#if ( configNUMBER_OF_CORES > 1 )
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X1, X1, # 0xff /* Extract core ID */
LSL X1, X1, # 3 /* Multiply core ID by pointer size */
ADD X0, X0, X1 /* Offset for current core's TCB pointer */
#endif
LDR X1, [ X0 ]
MOV X0, SP /* Save current stack pointer */
STR X0, [ X1 ]
/* Switch to use the ELx stack pointer. */
MSR SPSEL, # 1
.endm
.macro portRESTORE_CONTEXT
/* Switch to use the EL0 stack pointer. */
MSR SPSEL, # 0
/* Set the SP to point to the stack of the task being restored. */
LDR X0, pxCurrentTCBsConst
#if configNUMBER_OF_CORES > 1
/* Get the core ID to index the TCB correctly. */
MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */
AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */
LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */
ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */
#endif
LDR X1, [ X0 ]
LDR X0, [ X1 ]
MOV SP, X0
LDP X2, X3, [ SP ], # 0x10 /* Retrieve critical nesting and FPU indicator */
LDR X0, ullCriticalNestingsConst
/* Calculate offset for current core's ullCriticalNesting */
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID */
LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
ADD X0, X0, X1 /* Add offset for the current core's ullCriticalNesting */
#endif
MOV X1, # 255 /* Default mask */
CMP X3, # 0
B.EQ 1f
LDR X6, ullMaxAPIPriorityMaskConst
LDR X1, [ X6 ] /* Use computed mask value */
1 :
MSR ICC_PMR_EL1, X1 /* Set interrupt mask */
DSB SY
ISB SY
STR X3, [ X0 ] /* Restore critical nesting */
/* Restore the FPU context indicator. */
LDR X0, ullPortTaskHasFPUContextConst
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID */
LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
/* Restore the FPU context indicator. */
ADD X0, X0, X1 /* Add to the base of the FPU array */
#endif
STR X2, [ X0 ]
/* Restore the FPU context, if any. */
CMP X2, # 0
B.EQ 1f
restorefloatregisters
1 :
LDP X2, X3, [ SP ], # 0x10 /* Restore SPSR and ELR */
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
restoreallgpregisters
/* Switch to use the ELx stack pointer. */
MSR SPSEL, # 1
ERET
.endm
/******************************************************************************
* FreeRTOS_SWI_Handler handler is used to perform a context switch.
*****************************************************************************/
.align 8
.type FreeRTOS_SWI_Handler, % function
FreeRTOS_SWI_Handler:
/* Save the context of the current task and select a new task to run. */
portSAVE_CONTEXT
MRS X0, ESR_EL1
LSR X1, X0, # 26
CMP X1, # 0x15 /* 0x15 = SVC instruction. */
B.NE FreeRTOS_Abort
#if configNUMBER_OF_CORES > 1
MRS x0, mpidr_el1
AND x0, x0, 255
#endif
BL vTaskSwitchContext
portRESTORE_CONTEXT
FreeRTOS_Abort:
/* Full ESR is in X0, exception class code is in X1. */
B .
/******************************************************************************
* vPortRestoreTaskContext is used to start the scheduler.
*****************************************************************************/
.align 8
.type vPortRestoreTaskContext, % function
vPortRestoreTaskContext:
.set freertos_vector_base, _freertos_vector_table
/* Install the FreeRTOS interrupt handlers. */
LDR X1, = freertos_vector_base
MSR VBAR_EL1, X1
DSB SY
ISB SY
/* Start the first task. */
portRESTORE_CONTEXT
/******************************************************************************
* FreeRTOS_IRQ_Handler handles IRQ entry and exit.
*
* This handler is supposed to be used only for IRQs and never for FIQs. Per ARM
* GIC documentation [1], Group 0 interrupts are always signaled as FIQs. Since
* this handler is only for IRQs, we can safely assume Group 1 when accessing
* the Interrupt Acknowledge and End Of Interrupt registers and therefore use
* ICC_IAR1_EL1 and ICC_EOIR1_EL1.
*
* [1] https://developer.arm.com/documentation/198123/0300/Arm-CoreLink-GIC-fundamentals
*****************************************************************************/
.align 8
.type FreeRTOS_IRQ_Handler, % function
FreeRTOS_IRQ_Handler:
/* Save volatile registers. */
savefuncontextgpregs
savefloatregisters
/* Save the SPSR and ELR. */
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
STP X2, X3, [ SP, # - 0x10 ] !
/* Increment the interrupt nesting counter. */
LDR X5, ullPortInterruptNestingsConst /* Load base address of the interrupt nesting count(s). */
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X2, X2, # 0xff /* Extract Aff0, which contains the core ID */
LSL X2, X2, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
/* Calculate the offset of the current core's interrupt nesting count and load its address. */
ADD X5, X5, X2 /* Add offset for the current core's ullPortInterruptNestings entry */
#endif
LDR X1, [ X5 ] /* Old nesting count in X1. */
ADD X6, X1, # 1
STR X6, [ X5 ] /* Address of nesting count variable in X5. */
/* Maintain the interrupt nesting information across the function call. */
STP X1, X5, [ SP, # - 0x10 ] !
/* Read interrupt ID from the interrupt acknowledge register and store it
* in X0 for future parameter and interrupt clearing use. */
MRS X0, ICC_IAR1_EL1
/* Maintain the interrupt ID value across the function call. */
STP X0, X1, [ SP, # - 0x10 ] !
/* Call the C handler. */
BL vApplicationIRQHandler
/* Disable interrupts. */
MSR DAIFSET, # 2
DSB SY
ISB SY
/* Restore the interrupt ID value. */
LDP X0, X1, [ SP ], # 0x10
/* End IRQ processing by writing interrupt ID value to the EOI register. */
MSR ICC_EOIR1_EL1, X0
/* Restore the interrupt nesting count. */
LDP X1, X5, [ SP ], # 0x10
STR X1, [ X5 ]
/* Has interrupt nesting unwound? */
CMP X1, # 0
B.NE Exit_IRQ_No_Context_Switch
/* Is a context switch required? */
LDR X0, ullPortYieldRequiredConst
#if configNUMBER_OF_CORES > 1
/* Existing code to get core ID and scale to pointer size is reused. */
MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
AND X2, X2, # 0xff /* Extract Aff0, which contains the core ID */
LSL X2, X2, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
/* Calculate offset for the current core's ullPortYieldRequired and load its address. */
ADD X0, X0, X2 /* Add offset for the current core's ullPortYieldRequired */
#endif
LDR X1, [ X0 ]
CMP X1, # 0
B.EQ Exit_IRQ_No_Context_Switch
/* Reset ullPortYieldRequired to 0. */
MOV X2, # 0
STR X2, [ X0 ]
/* Restore volatile registers. */
LDP X4, X5, [ SP ], # 0x10 /* SPSR and ELR. */
MSR SPSR_EL1, X5
MSR ELR_EL1, X4
DSB SY
ISB SY
restorefloatregisters
restorefuncontextgpregs
/* Save the context of the current task and select a new task to run. */
portSAVE_CONTEXT
#if configNUMBER_OF_CORES > 1
MRS x0, mpidr_el1
AND x0, x0, 255
#endif
BL vTaskSwitchContext
portRESTORE_CONTEXT
Exit_IRQ_No_Context_Switch:
/* Restore volatile registers. */
LDP X4, X5, [ SP ], # 0x10 /* SPSR and ELR. */
MSR SPSR_EL1, X5
MSR ELR_EL1, X4
DSB SY
ISB SY
restorefloatregisters
restorefuncontextgpregs
ERET
/******************************************************************************
* If the application provides an implementation of vApplicationIRQHandler(),
* then it will get called directly without saving the FPU registers on
* interrupt entry, and this weak implementation of
* vApplicationIRQHandler() will not get called.
*
* If the application provides its own implementation of
* vApplicationFPUSafeIRQHandler() then this implementation of
* vApplicationIRQHandler() will be called, save the FPU registers, and then
* call vApplicationFPUSafeIRQHandler().
*
* Therefore, if the application writer wants FPU registers to be saved on
* interrupt entry their IRQ handler must be called
* vApplicationFPUSafeIRQHandler(), and if the application writer does not want
* FPU registers to be saved on interrupt entry their IRQ handler must be
* called vApplicationIRQHandler().
*****************************************************************************/
.align 8
.weak vApplicationIRQHandler
.type vApplicationIRQHandler, % function
vApplicationIRQHandler:
/* Save LR and FP on the stack */
STP X29, X30, [ SP, # - 0x10 ] !
/* Save FPU registers (32 128-bits + 2 64-bits configuration and status registers) */
savefloatregisters
/* Call the C handler. */
BL vApplicationFPUSafeIRQHandler
/* Restore FPU registers */
restorefloatregisters
/* Restore FP and LR */
LDP X29, X30, [ SP ], # 0x10
RET
.align 8
#if ( configNUMBER_OF_CORES == 1 )
pxCurrentTCBsConst:.dword pxCurrentTCB
ullCriticalNestingsConst:.dword ullCriticalNesting
ullPortInterruptNestingsConst:.dword ullPortInterruptNesting
ullPortYieldRequiredConst:.dword ullPortYieldRequired
ullPortTaskHasFPUContextConst:.dword ullPortTaskHasFPUContext
#else
pxCurrentTCBsConst:.dword pxCurrentTCBs
ullCriticalNestingsConst:.dword ullCriticalNestings
ullPortInterruptNestingsConst:.dword ullPortInterruptNestings
ullPortYieldRequiredConst:.dword ullPortYieldRequired
ullPortTaskHasFPUContextConst:.dword ullPortTaskHasFPUContext
#endif /* if ( configNUMBER_OF_CORES == 1 ) */
ullMaxAPIPriorityMaskConst:.dword ullMaxAPIPriorityMask
vApplicationIRQHandlerConst:.word vApplicationIRQHandler
.end

View file

@@ -0,0 +1,277 @@
/*
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright 2025 Arm Limited and/or its affiliates
* <open-source-office@arm.com>
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
#ifndef PORTMACRO_H
#define PORTMACRO_H
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
/*-----------------------------------------------------------
* Port specific definitions.
*
* The settings in this file configure FreeRTOS correctly for the given hardware
* and compiler.
*
* These settings should not be altered.
*-----------------------------------------------------------
*/
/* Type definitions. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE size_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef portBASE_TYPE BaseType_t;
typedef uint64_t UBaseType_t;
typedef uint64_t TickType_t;
#define portMAX_DELAY ( ( TickType_t ) 0xffffffffffffffff )
/* 64-bit tick type on a 64-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
/*-----------------------------------------------------------*/
/* Hardware specifics. */
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 16
#define portPOINTER_SIZE_TYPE uint64_t
/*-----------------------------------------------------------*/
/* Task utilities. */
/* Called at the end of an ISR that can cause a context switch. */
#if ( configNUMBER_OF_CORES == 1 )
#define portEND_SWITCHING_ISR( xSwitchRequired ) \
{ \
extern uint64_t ullPortYieldRequired; \
\
if( xSwitchRequired != pdFALSE ) \
{ \
ullPortYieldRequired = pdTRUE; \
} \
}
#else
#define portEND_SWITCHING_ISR( xSwitchRequired ) \
{ \
extern uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ]; \
\
if( xSwitchRequired != pdFALSE ) \
{ \
ullPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \
} \
}
#endif /* if ( configNUMBER_OF_CORES == 1 ) */
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
#define portYIELD() __asm volatile ( "SVC 0" ::: "memory" )
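/* Example usage sketch (illustrative, not part of this header): deferring
 * work from an interrupt to a task and requesting a context switch on
 * interrupt exit. The handler name and task handle are hypothetical. */
/*
 * void vExampleDeferredIRQHandler( void )
 * {
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *     // Unblock the task that performs the deferred processing.
 *     vTaskNotifyGiveFromISR( xDeferredHandlerTask, &xHigherPriorityTaskWoken );
 *
 *     // Request a switch on exit if a higher priority task was woken.
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 * }
 */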
/*-----------------------------------------------------------
* Critical section control
*----------------------------------------------------------*/
extern UBaseType_t vTaskEnterCriticalFromISR( void );
extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );
extern UBaseType_t uxPortSetInterruptMask( void );
extern void vPortClearInterruptMask( UBaseType_t uxNewMaskValue );
extern void vInterruptCore(uint32_t ulInterruptID, uint32_t ulCoreID);
#define portDISABLE_INTERRUPTS() \
__asm volatile ( "MSR DAIFSET, #2" ::: "memory" ); \
__asm volatile ( "DSB SY" ); \
__asm volatile ( "ISB SY" );
#define portENABLE_INTERRUPTS() \
__asm volatile ( "MSR DAIFCLR, #2" ::: "memory" ); \
__asm volatile ( "DSB SY" ); \
__asm volatile ( "ISB SY" );
/* These macros do not globally disable/enable interrupts. They do mask off
* interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */
#if ( configNUMBER_OF_CORES == 1 )
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#else
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
#endif
#define portSET_INTERRUPT_MASK_FROM_ISR() uxPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x )
#define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR()
#define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x )
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. These are
* not required for this port but included in case common demo code that uses these
* macros is used. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/* Prototype of the FreeRTOS tick handler. This must be installed as the
* handler for whichever peripheral is used to generate the RTOS tick. */
void FreeRTOS_Tick_Handler( void );
#define portTASK_NO_FPU_CONTEXT_BY_DEFAULT (1U)
#define portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT (2U)
/* If configUSE_TASK_FPU_SUPPORT is set to portTASK_NO_FPU_CONTEXT_BY_DEFAULT (1U)
* (or left undefined) then tasks are created without an FPU context and
* must call vPortTaskUsesFPU() to give themselves an FPU context before
* using any FPU instructions. If configUSE_TASK_FPU_SUPPORT is set to
* portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT (2U) then all tasks will have an FPU context
* by default. */
#if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT )
void vPortTaskUsesFPU( void );
#else
/* Each task has an FPU context already, so define this function away to
* nothing to prevent it from being called accidentally. */
#define vPortTaskUsesFPU()
#endif
#define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU()
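/* Example usage sketch (illustrative, not part of this header): with the
 * default portTASK_NO_FPU_CONTEXT_BY_DEFAULT setting a task must register
 * for an FPU context before executing any floating point instructions. The
 * task below is hypothetical. */
/*
 * static void prvFilteringTask( void * pvParameters )
 * {
 *     ( void ) pvParameters;
 *
 *     // Give this task an FPU context before any floating point is used.
 *     vPortTaskUsesFPU();
 *
 *     for( ; ; )
 *     {
 *         // Floating point calculations are now safe in this task.
 *     }
 * }
 */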
#define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL )
#define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL )
/* Architecture specific optimisations. */
#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
#endif
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
/* Store/clear the ready priorities in a bit map. */
#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
/*-----------------------------------------------------------*/
#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) )
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
#if ( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void );
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
#endif /* configASSERT */
#define portNOP() __asm volatile ( "NOP" )
#define portINLINE __inline
/* The number of bits to shift for an interrupt priority is dependent on the
* number of bits implemented by the interrupt controller. */
#if configUNIQUE_INTERRUPT_PRIORITIES == 16
#define portPRIORITY_SHIFT 4
#define portMAX_BINARY_POINT_VALUE 3
#elif configUNIQUE_INTERRUPT_PRIORITIES == 32
#define portPRIORITY_SHIFT 3
#define portMAX_BINARY_POINT_VALUE 2
#elif configUNIQUE_INTERRUPT_PRIORITIES == 64
#define portPRIORITY_SHIFT 2
#define portMAX_BINARY_POINT_VALUE 1
#elif configUNIQUE_INTERRUPT_PRIORITIES == 128
#define portPRIORITY_SHIFT 1
#define portMAX_BINARY_POINT_VALUE 0
#elif configUNIQUE_INTERRUPT_PRIORITIES == 256
#define portPRIORITY_SHIFT 0
#define portMAX_BINARY_POINT_VALUE 0
#else /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */
#error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware
#endif /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */
#define portINTERRUPT_PRIORITY_REGISTER_OFFSET ( 0x400U )
#if ( configNUMBER_OF_CORES > 1 )
typedef enum
{
eIsrLock = 0,
eTaskLock,
eLockCount
} ePortRTOSLock;
extern volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ];
extern void vPortRecursiveLock( BaseType_t xCoreID,
ePortRTOSLock eLockNum,
BaseType_t uxAcquire );
extern BaseType_t xPortGetCoreID( void );
#define portSET_INTERRUPT_MASK() uxPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK( x ) vPortClearInterruptMask( ( x ) )
#define portMAX_CORE_COUNT configNUMBER_OF_CORES
#define portGET_CORE_ID() xPortGetCoreID()
/* Use SGI 0 as the yield core interrupt. */
#define portYIELD_CORE_INT_ID 0
#define portYIELD_CORE( xCoreID ) vInterruptCore(portYIELD_CORE_INT_ID, (uint32_t)xCoreID)
#define portRELEASE_ISR_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), eIsrLock, pdFALSE )
#define portGET_ISR_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), eIsrLock, pdTRUE )
#define portRELEASE_TASK_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), eTaskLock, pdFALSE )
#define portGET_TASK_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), eTaskLock, pdTRUE )
#define portGET_CRITICAL_NESTING_COUNT( xCoreID ) ( ullCriticalNestings[ ( xCoreID ) ] )
#define portSET_CRITICAL_NESTING_COUNT( xCoreID, x ) ( ullCriticalNestings[ ( xCoreID ) ] = ( x ) )
#define portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ) ( ullCriticalNestings[ ( xCoreID ) ]++ )
#define portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ) ( ullCriticalNestings[ ( xCoreID ) ]-- )
#define portSETUP_SGI_INTERRUPT() vGIC_PowerUpRedistributor(); \
vGIC_SetPriority( portYIELD_CORE_INT_ID, ( portLOWEST_USABLE_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) ); \
vGIC_EnableIRQ( portYIELD_CORE_INT_ID ); \
vGIC_EnableCPUInterface()
#endif /* configNUMBER_OF_CORES > 1 */
#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" )
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* PORTMACRO_H */