cortex-r82: Minor code improvements

This commit makes minor improvements to the readability and
maintainability of the Cortex-R82 port files: variables are renamed to
match their types, comments are clarified and moved next to the code
they describe, and the code structure is tidied without altering
functionality.

Signed-off-by: Ahmed Ismail <Ahmed.Ismail@arm.com>
Author: Ahmed Ismail
Date:   2025-10-09 14:45:09 +01:00
Parent: 3cca1ec00e
Commit: 8e8d4152e3

3 changed files with 124 additions and 105 deletions

Changed file 1 of 3:

@@ -229,7 +229,6 @@
  * assembly code so is implemented in portASM.s.
  */
 extern void vPortRestoreTaskContext( void );
 extern void vGIC_EnableIRQ( uint32_t ulInterruptID );
 extern void vGIC_SetPriority( uint32_t ulInterruptID, uint32_t ulPriority );
 extern void vGIC_PowerUpRedistributor( void );
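The GIC helpers declared here are used together when routing an interrupt to a core. A minimal bring-up sketch, assuming a hypothetical interrupt ID ulIrqId and priority ulPriority (neither is defined in this diff), with priority configured before the ID is unmasked:

    void vIrqBringUpSketch( uint32_t ulIrqId, uint32_t ulPriority )
    {
        vGIC_PowerUpRedistributor();             /* Power up this core's redistributor. */
        vGIC_EnableCPUInterface();               /* Enable the CPU interface (ICC_* regs). */
        vGIC_SetPriority( ulIrqId, ulPriority ); /* Configure priority before unmasking. */
        vGIC_EnableIRQ( ulIrqId );               /* Finally enable the interrupt ID. */
    }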
@@ -238,28 +237,38 @@ extern void vGIC_EnableCPUInterface( void );
 /*-----------------------------------------------------------*/
 #if ( configNUMBER_OF_CORES == 1 )
 PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL;
 /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
  * then floating point context must be saved and restored for the task. */
 PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE;
 /* Set to 1 to pend a context switch from an ISR. */
 PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE;
 /* Counts the interrupt nesting depth. A context switch is only performed
  * if the nesting depth is 0. */
 PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0;
 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
 PRIVILEGED_DATA volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
 /* Flags to check if the secondary cores are ready. */
 PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 };
+/* Flag to signal that the primary core has done all the shared initialisations. */
 PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0;
 /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
  * then floating point context must be saved and restored for the task. */
 PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE };
+/* Set to 1 to pend a context switch from an ISR. */
 PRIVILEGED_DATA uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE };
+/* Counts the interrupt nesting depth. A context switch is only performed
+ * if the nesting depth is 0. */
 PRIVILEGED_DATA uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 };
 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
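The single-core build keeps scalar globals; the SMP build keeps one slot per core. An illustrative accessor (not part of the port), assuming portGET_CORE_ID() from portmacro.h:

    static inline uint64_t ullGetInterruptNestingSketch( void )
    {
        #if ( configNUMBER_OF_CORES == 1 )
            return ullPortInterruptNesting;                       /* Single scalar. */
        #else
            return ullPortInterruptNestings[ portGET_CORE_ID() ]; /* Per-core slot. */
        #endif
    }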
@@ -1523,9 +1532,9 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 #if ( configNUMBER_OF_CORES > 1 )
 /* Which core owns the lock? Keep in privileged, shareable RAM. */
-PRIVILEGED_DATA volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ];
+PRIVILEGED_DATA volatile uint64_t ullOwnedByCore[ portMAX_CORE_COUNT ];
 /* Lock count a core owns. */
-PRIVILEGED_DATA volatile uint64_t ucRecursionCountByLock[ eLockCount ];
+PRIVILEGED_DATA volatile uint64_t ullRecursionCountByLock[ eLockCount ];
 /* Index 0 is used for ISR lock and Index 1 is used for task lock. */
 PRIVILEGED_DATA uint32_t ulGateWord[ eLockCount ];
@@ -1549,13 +1558,14 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 static inline void prvSpinUnlock( uint32_t * ulLock )
 {
+/* Conservative unlock: preserve original barriers for broad HW/FVP. */
 __asm volatile (
-"dmb sy\n"
-"mov w1, #0\n"
-"str w1, [%x0]\n"
-"sev\n"
-"dsb sy\n"
-"isb sy\n"
+"dmb sy \n"
+"mov w1, #0 \n"
+"str w1, [%x0] \n"
+"sev \n"
+"dsb sy \n"
+"isb sy \n"
 :
 : "r" ( ulLock )
 : "memory", "w1"
@@ -1566,22 +1576,30 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 static inline uint32_t prvSpinTrylock( uint32_t * ulLock )
 {
+/*
+ * Conservative LDXR/STXR trylock:
+ * - Return 1 immediately if busy, clearing exclusive state (CLREX).
+ * - Retry STXR only on spurious failure when observed free.
+ * - DMB on success to preserve expected acquire semantics.
+ */
 register uint32_t ulRet;
-/* Try to acquire spinlock; caller is responsible for further barriers. */
 __asm volatile (
-"1:\n"
-"ldxr w1, [%x1]\n"
-"cmp w1, #1\n"
-"beq 2f\n"
-"mov w2, #1\n"
-"stxr w1, w2, [%x1]\n"
-"cmp w1, #0\n"
-"bne 1b\n"
-"2:\n"
-"mov %w0, w1\n"
+"1: \n"
+"ldxr w1, [%x1] \n"
+"cbnz w1, 2f \n"        /* Busy -> return 1 */
+"mov w2, #1 \n"
+"stxr w3, w2, [%x1] \n" /* w3 = status */
+"cbnz w3, 1b \n"        /* Retry on STXR failure */
+"dmb sy \n"             /* Acquire barrier on success */
+"mov %w0, #0 \n"        /* Success */
+"b 3f \n"
+"2: \n"
+"clrex \n"              /* Clear monitor when busy */
+"mov %w0, #1 \n"        /* Busy */
+"3: \n"
 : "=r" ( ulRet )
 : "r" ( ulLock )
-: "memory", "w1", "w2"
+: "memory", "w1", "w2", "w3"
 );
 return ulRet;
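prvSpinTrylock() returns 0 on success and 1 when the gate is busy, so a blocking acquire can be layered on top of it. A sketch of the usual WFE pairing with the SEV issued by prvSpinUnlock() (hypothetical helper, not in this diff):

    static inline void vSpinLockSketch( uint32_t * pulLock )
    {
        while( prvSpinTrylock( pulLock ) != 0 )
        {
            __asm volatile ( "wfe" ::: "memory" ); /* Doze until an event (e.g. SEV) arrives. */
        }
    }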
@@ -1629,10 +1647,10 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 if( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
 {
 /* Check if the core owns the spinlock. */
-if( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit )
+if( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ulLockBit )
 {
-configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 255u );
-prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) + 1 ) );
+configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 255u );
+prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) + 1 ) );
 return;
 }
@@ -1656,26 +1674,26 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 __asm__ __volatile__ ( "dmb sy" ::: "memory" );
 /* Assert the lock count is 0 when the spinlock is free and is acquired. */
-configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) == 0 );
+configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) == 0 );
 /* Set lock count as 1. */
-prvSet64( &ucRecursionCountByLock[ eLockNum ], 1 );
+prvSet64( &ullRecursionCountByLock[ eLockNum ], 1 );
-/* Set ucOwnedByCore. */
+/* Set ullOwnedByCore. */
-prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) | ulLockBit ) );
+prvSet64( &ullOwnedByCore[ xCoreID ], ( prvGet64( &ullOwnedByCore[ xCoreID ] ) | ulLockBit ) );
 }
 /* Lock release. */
 else
 {
 /* Assert the lock is not free already. */
-configASSERT( ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 );
-configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 0 );
+configASSERT( ( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 );
+configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 0 );
-/* Reduce ucRecursionCountByLock by 1. */
+/* Reduce ullRecursionCountByLock by 1. */
-prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) - 1 ) );
+prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) - 1 ) );
-if( !prvGet64( &ucRecursionCountByLock[ eLockNum ] ) )
+if( !prvGet64( &ullRecursionCountByLock[ eLockNum ] ) )
 {
-prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ~ulLockBit ) );
+prvSet64( &ullOwnedByCore[ xCoreID ], ( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ~ulLockBit ) );
 prvSpinUnlock( &ulGateWord[ eLockNum ] );
 /* Add barrier to ensure lock status is reflected before we proceed. */
 __asm__ __volatile__ ( "dmb sy" ::: "memory" );
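For orientation: ullOwnedByCore[ xCoreID ] holds one ownership bit per lock and ullRecursionCountByLock[ eLockNum ] counts nested takes, so a core may hold the ISR and task locks at the same time. A sketch of the ownership test, assuming ulLockBit maps a lock to one bit (the exact encoding is not shown in this diff) and using direct accesses where the port uses prvGet64()/prvSet64():

    /* Hypothetical helper: does xCoreID currently own lock eLockNum? */
    static inline BaseType_t xCoreOwnsLockSketch( uint64_t xCoreID, uint32_t eLockNum )
    {
        uint64_t ulLockBit = 1ULL << eLockNum; /* Assumed mapping: bit 0 ISR lock, bit 1 task lock. */

        return ( ( ullOwnedByCore[ xCoreID ] & ulLockBit ) != 0 ) ? pdTRUE : pdFALSE;
    }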

Changed file 2 of 3:

@@ -369,8 +369,10 @@ LDP Q0, Q1, [ SP ], #0x20
 #endif /* #if ( configENABLE_MPU == 1 ) */
 MSR SPSEL, #0
 /* Save the entire context. */
 saveallgpregisters
+/* Save the SPSR and ELR values. */
 MRS X3, SPSR_EL1
 MRS X2, ELR_EL1
@@ -379,24 +381,25 @@ STP X2, X3, [ SP, #-0x10 ]!
 /* Save the critical section nesting depth. */
 #if ( configNUMBER_OF_CORES == 1 )
 adrp X0, ullCriticalNesting
 add X0, X0, :lo12:ullCriticalNesting /* X0 = &ullCriticalNesting */
 #else
 adrp X0, ullCriticalNestings
 add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */
 /* Calculate per-core index using MPIDR_EL1 for SMP support. */
 MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */
 AND X1, X1, #0xff /* Extract Aff0 (core ID). */
 LSL X1, X1, #3 /* Multiply core ID by pointer size (8 bytes). */
 ADD X0, X0, X1 /* Add offset to base address. */
 #endif
 LDR X3, [ X0 ]
+/* Save the FPU context indicator. */
 adrp X0, ullPortTaskHasFPUContext
 add X0, X0, :lo12:ullPortTaskHasFPUContext /* X0 = &ullPortTaskHasFPUContext */
-#if configNUMBER_OF_CORES > 1
+#if ( configNUMBER_OF_CORES > 1 )
 ADD X0, X0, X1 /* Add to the base of the FPU array. */
 #endif
 LDR X2, [ X0 ]
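The ADRP/ADD plus MPIDR_EL1 sequence is the assembly form of indexing a per-core array. The same lookup in C, with an illustrative helper name:

    static inline uint64_t ullCoreIndexSketch( void )
    {
        uint64_t ullMpidr;

        __asm volatile ( "mrs %0, mpidr_el1" : "=r" ( ullMpidr ) );
        return ullMpidr & 0xFFULL; /* Aff0: core number, matching AND X1, X1, #0xff. */
    }

so the value saved above is effectively ullCriticalNestings[ ullCoreIndexSketch() ].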
@@ -926,8 +929,8 @@ LDP X0, X1, [ SP ], #0x10
 portSAVE_CONTEXT
 savefuncontextgpregs
 #if ( configNUMBER_OF_CORES > 1 )
 MRS x0, mpidr_el1
 AND x0, x0, 255
 #endif
 BL vTaskSwitchContext
 restorefuncontextgpregs
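The MPIDR_EL1 read feeds x0 because the SMP kernel's context-switch entry takes the core ID as an argument, while the single-core kernel takes none:

    #if ( configNUMBER_OF_CORES > 1 )
        void vTaskSwitchContext( BaseType_t xCoreID ); /* SMP kernel signature. */
    #else
        void vTaskSwitchContext( void );               /* Single-core kernel signature. */
    #endif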
@@ -1039,12 +1042,12 @@ ISB SY
 /* Restore the interrupt ID value. */
 LDP X0, X1, [ SP ], #0x10
 /* End IRQ processing by writing interrupt ID value to the EOI register. */
 MSR ICC_EOIR1_EL1, X0
 /* Restore the critical nesting count. */
 LDP X1, X5, [ SP ], #0x10
 STR X1, [ X5 ]
 /* Has interrupt nesting unwound? */
@@ -1116,7 +1119,7 @@ ISB SY
 restorefloatregisters
 restoreallgpregisters
 ERET
 /******************************************************************************
 * If the application provides an implementation of vApplicationIRQHandler(),

Changed file 3 of 3:

@@ -142,12 +142,11 @@
 extern void vInterruptCore( uint32_t ulInterruptID, uint32_t ulCoreID );
 #endif /* if !defined(__ASSEMBLER__) */
-/* Use SVC so this is safe from EL0. EL1 sites in the port use direct MSR. */\
+/* Use SVC so this is safe from EL0. EL1 sites in the port use direct MSR. */
 #define portDISABLE_INTERRUPTS() __asm volatile ( "SVC %0" : : "i" ( portSVC_DISABLE_INTERRUPTS ) : "memory" )
 #define portENABLE_INTERRUPTS() __asm volatile ( "SVC %0" : : "i" ( portSVC_ENABLE_INTERRUPTS ) : "memory" )
 /* In all GICs 255 can be written to the priority mask register to unmask all
  * (but the lowest) interrupt priority. */
 #define portUNMASK_VALUE ( 0xFFUL )
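The comment's EL0/EL1 distinction matters: unprivileged code cannot write the DAIF interrupt masks, so tasks trap to EL1 via SVC, while privileged port code can mask IRQs directly. A sketch of what a direct-MSR site looks like (illustrative names; the port's actual EL1 sites are not shown in this diff):

    /* EL1 only: set/clear the IRQ mask bit (I) in DAIF. */
    #define EL1_DISABLE_IRQ_SKETCH() __asm volatile ( "msr daifset, #2" ::: "memory" )
    #define EL1_ENABLE_IRQ_SKETCH()  __asm volatile ( "msr daifclr, #2" ::: "memory" )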
@@ -200,6 +199,7 @@
  * nothing to prevent it from being called accidentally. */
 #define vPortTaskUsesFPU()
 #endif
 #define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU()
 #define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL )
@@ -212,12 +212,10 @@
 #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
 /* Store/clear the ready priorities in a bit map. */
 #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
 #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
-/*-----------------------------------------------------------*/
 #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) )
 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
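A quick worked example of the CLZ-based selection: with priorities 1, 2 and 5 ready, the bitmap is 0x26; __builtin_clz( 0x26 ) is 26 for a 32-bit word, and 31 - 26 = 5 picks the highest ready priority:

    #include <assert.h>

    int main( void )
    {
        unsigned int uxReadyPriorities = 0x26U; /* Bits 1, 2 and 5 set. */
        unsigned int uxTopPriority = 31U - ( unsigned int ) __builtin_clz( uxReadyPriorities );

        assert( uxTopPriority == 5U ); /* Highest set bit wins. */
        return 0;
    }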
@@ -280,7 +278,7 @@
 #define portGET_CORE_ID() ucPortGetCoreID()
 #define portGET_CORE_ID_FROM_ISR() ucPortGetCoreIDFromIsr()
 /* Use SGI 0 as the yield core interrupt. */
 #define portYIELD_CORE( xCoreID ) vInterruptCore( portYIELD_CORE_INT_ID, ( uint32_t ) xCoreID )
 #define portRELEASE_ISR_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), eIsrLock, pdFALSE )