mirror of
https://github.com/FreeRTOS/FreeRTOS-Kernel.git
synced 2026-01-21 09:10:37 -05:00
cortex-r82: Minor code improvements
This commit includes minor code improvements to enhance the readability and maintainability of the Cortex-R82 port files. Changes include refactoring variable names, optimizing comments, and improving code structure without altering functionality.

Signed-off-by: Ahmed Ismail <Ahmed.Ismail@arm.com>
parent 3cca1ec00e
commit 8e8d4152e3
3 changed files with 124 additions and 105 deletions
@@ -229,7 +229,6 @@
  * assembly code so is implemented in portASM.s.
  */
 extern void vPortRestoreTaskContext( void );
-
 extern void vGIC_EnableIRQ( uint32_t ulInterruptID );
 extern void vGIC_SetPriority( uint32_t ulInterruptID, uint32_t ulPriority );
 extern void vGIC_PowerUpRedistributor( void );
@@ -238,28 +237,38 @@ extern void vGIC_EnableCPUInterface( void );
 /*-----------------------------------------------------------*/
 
 #if ( configNUMBER_OF_CORES == 1 )
 
     PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL;
 
     /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
      * then floating point context must be saved and restored for the task. */
     PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE;
 
     /* Set to 1 to pend a context switch from an ISR. */
     PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE;
 
     /* Counts the interrupt nesting depth. A context switch is only performed
      * if the nesting depth is 0. */
     PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0;
 
 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
 
     PRIVILEGED_DATA volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
 
     /* Flags to check if the secondary cores are ready. */
     PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 };
 
     /* Flag to signal that the primary core has done all the shared initialisations. */
     PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0;
 
     /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
      * then floating point context must be saved and restored for the task. */
     PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE };
 
     /* Set to 1 to pend a context switch from an ISR. */
     PRIVILEGED_DATA uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE };
 
     /* Counts the interrupt nesting depth. A context switch is only performed
      * if the nesting depth is 0. */
     PRIVILEGED_DATA uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 };
 
 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
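A note on the SMP arrays above: each core finds its slot using Aff0 of its MPIDR_EL1 register, as the assembly hunks later in this commit show. A minimal C sketch of that indexing (the helper name is illustrative, not part of the port):

    #include <stdint.h>

    /* Illustrative only: mirrors the MRS/AND/LSL sequence in portASM.S. Aff0
     * (bits [7:0]) of MPIDR_EL1 carries the core ID; the assembly scales it
     * by 8 (LSL #3) to index arrays of 64-bit per-core variables. */
    static inline uint64_t ullCoreIndex( uint64_t ullMpidrEl1 )
    {
        return ullMpidrEl1 & 0xFFULL; /* Aff0 = core ID. */
    }

    /* Usage sketch: ullCriticalNestings[ ullCoreIndex( ullMpidr ) ] plays the
     * role the scalar ullCriticalNesting plays in single-core builds. */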
@@ -1157,12 +1166,12 @@ BaseType_t xPortStartScheduler( void )
         volatile uint8_t ucMaxPriorityValue;
 
         /* Determine how many priority bits are implemented in the GIC.
          *
          * Save the interrupt priority value that is about to be clobbered. */
         ucOriginalPriority = *pucFirstUserPriorityRegister;
 
         /* Determine the number of priority bits available. First write to
          * all possible bits. */
         *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
 
         /* Read the value back to see how many bits stuck. */
@@ -1175,12 +1184,12 @@ BaseType_t xPortStartScheduler( void )
         }
 
         /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
          * value. */
         configASSERT( ucMaxPriorityValue >= portLOWEST_INTERRUPT_PRIORITY );
 
         /* Restore the clobbered interrupt priority register to its original
          * value. */
         *pucFirstUserPriorityRegister = ucOriginalPriority;
     }
 #endif /* configASSERT_DEFINED */
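For context on the probe in these two hunks: a GIC implements an implementation-defined number of the most-significant priority bits, so writing 0xFF to a priority register and reading it back reveals how many bits stuck. A host-runnable sketch of the same idea, with the register modelled as a plain byte (names are illustrative):

    #include <stdint.h>

    static uint8_t ucCountImplementedPriorityBits( volatile uint8_t * pucPriorityReg )
    {
        uint8_t ucOriginal = *pucPriorityReg; /* Save the value about to be clobbered. */
        uint8_t ucReadBack;
        uint8_t ucBits = 0;

        *pucPriorityReg = 0xFFU;              /* Write to all possible bits. */
        ucReadBack = *pucPriorityReg;         /* See how many bits stuck. */

        while( ( ucReadBack & 0x80U ) != 0U ) /* GICs implement the top bits first. */
        {
            ucBits++;
            ucReadBack <<= 1;
        }

        *pucPriorityReg = ucOriginal;         /* Restore the clobbered register. */
        return ucBits;
    }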
@@ -1523,9 +1532,9 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 #if ( configNUMBER_OF_CORES > 1 )
 
 /* Which core owns the lock? Keep in privileged, shareable RAM. */
-PRIVILEGED_DATA volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ];
+PRIVILEGED_DATA volatile uint64_t ullOwnedByCore[ portMAX_CORE_COUNT ];
 /* Lock count a core owns. */
-PRIVILEGED_DATA volatile uint64_t ucRecursionCountByLock[ eLockCount ];
+PRIVILEGED_DATA volatile uint64_t ullRecursionCountByLock[ eLockCount ];
 /* Index 0 is used for ISR lock and Index 1 is used for task lock. */
 PRIVILEGED_DATA uint32_t ulGateWord[ eLockCount ];
@@ -1549,13 +1558,14 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 
 static inline void prvSpinUnlock( uint32_t * ulLock )
 {
+    /* Conservative unlock: preserve original barriers for broad HW/FVP. */
     __asm volatile (
-        "dmb sy\n"
-        "mov w1, #0\n"
-        "str w1, [%x0]\n"
-        "sev\n"
-        "dsb sy\n"
-        "isb sy\n"
+        "dmb sy        \n"
+        "mov w1, #0    \n"
+        "str w1, [%x0] \n"
+        "sev           \n"
+        "dsb sy        \n"
+        "isb sy        \n"
         :
         : "r" ( ulLock )
         : "memory", "w1"
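The barrier placement here is, at heart, release ordering: make all prior accesses visible before the store that frees the lock. A rough portable analogue using the GCC/Clang atomic builtins (illustrative only; it has no equivalent of the SEV/DSB/ISB steps, which wake WFE waiters and synchronise the pipeline):

    #include <stdint.h>

    static inline void vUnlockSketch( volatile uint32_t * pulLock )
    {
        __atomic_store_n( pulLock, 0U, __ATOMIC_RELEASE ); /* Barrier, then store. */
    }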
@@ -1566,22 +1576,30 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 
 static inline uint32_t prvSpinTrylock( uint32_t * ulLock )
 {
+    /*
+     * Conservative LDXR/STXR trylock:
+     * - Return 1 immediately if busy, clearing exclusive state (CLREX).
+     * - Retry STXR only on spurious failure when observed free.
+     * - DMB on success to preserve expected acquire semantics.
+     */
     register uint32_t ulRet;
 
-    /* Try to acquire spinlock; caller is responsible for further barriers. */
     __asm volatile (
-        "1:\n"
-        "ldxr w1, [%x1]\n"
-        "cmp w1, #1\n"
-        "beq 2f\n"
-        "mov w2, #1\n"
-        "stxr w1, w2, [%x1]\n"
-        "cmp w1, #0\n"
-        "bne 1b\n"
-        "2:\n"
-        "mov %w0, w1\n"
+        "1:                  \n"
+        "ldxr w1, [%x1]      \n"
+        "cbnz w1, 2f         \n" /* Busy -> return 1. */
+        "mov w2, #1          \n"
+        "stxr w3, w2, [%x1]  \n" /* w3 = status. */
+        "cbnz w3, 1b         \n" /* Retry on STXR failure. */
+        "dmb sy              \n" /* Acquire barrier on success. */
+        "mov %w0, #0         \n" /* Success. */
+        "b 3f                \n"
+        "2:                  \n"
+        "clrex               \n" /* Clear monitor when busy. */
+        "mov %w0, #1         \n" /* Busy. */
+        "3:                  \n"
         : "=r" ( ulRet )
         : "r" ( ulLock )
-        : "memory", "w1", "w2"
+        : "memory", "w1", "w2", "w3"
     );
 
     return ulRet;
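Functionally, the LDXR/STXR sequence is a compare-and-swap: it returns 0 when the gate word was atomically changed from 0 to 1, and 1 when the lock was already held. A portable sketch with the GCC/Clang builtins (assumed toolchain; the name is hypothetical):

    #include <stdint.h>

    static inline uint32_t ulTrylockSketch( uint32_t * pulLock )
    {
        uint32_t ulExpected = 0U;

        /* __ATOMIC_ACQUIRE mirrors the "dmb sy" the assembly issues on
         * success; the failure ordering can stay relaxed. */
        return __atomic_compare_exchange_n( pulLock, &ulExpected, 1U,
                                            0 /* strong */, __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED ) ? 0U : 1U;
    }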
@@ -1629,10 +1647,10 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
     if( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
     {
         /* Check if the core owns the spinlock. */
-        if( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit )
+        if( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ulLockBit )
         {
-            configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 255u );
-            prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) + 1 ) );
+            configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 255u );
+            prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) + 1 ) );
             return;
         }
@@ -1656,26 +1674,26 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
         __asm__ __volatile__ ( "dmb sy" ::: "memory" );
 
         /* Assert the lock count is 0 when the spinlock is free and is acquired. */
-        configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) == 0 );
+        configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) == 0 );
 
         /* Set lock count as 1. */
-        prvSet64( &ucRecursionCountByLock[ eLockNum ], 1 );
-        /* Set ucOwnedByCore. */
-        prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) | ulLockBit ) );
+        prvSet64( &ullRecursionCountByLock[ eLockNum ], 1 );
+        /* Set ullOwnedByCore. */
+        prvSet64( &ullOwnedByCore[ xCoreID ], ( prvGet64( &ullOwnedByCore[ xCoreID ] ) | ulLockBit ) );
     }
     /* Lock release. */
     else
     {
         /* Assert the lock is not free already. */
-        configASSERT( ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 );
-        configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 0 );
+        configASSERT( ( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 );
+        configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 0 );
 
-        /* Reduce ucRecursionCountByLock by 1. */
-        prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) - 1 ) );
+        /* Reduce ullRecursionCountByLock by 1. */
+        prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) - 1 ) );
 
-        if( !prvGet64( &ucRecursionCountByLock[ eLockNum ] ) )
+        if( !prvGet64( &ullRecursionCountByLock[ eLockNum ] ) )
         {
-            prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ~ulLockBit ) );
+            prvSet64( &ullOwnedByCore[ xCoreID ], ( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ~ulLockBit ) );
             prvSpinUnlock( &ulGateWord[ eLockNum ] );
             /* Add barrier to ensure lock status is reflected before we proceed. */
             __asm__ __volatile__ ( "dmb sy" ::: "memory" );
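Taken together, the two branches implement a per-core recursive lock: a bitmap records which locks a core owns, and a per-lock counter tracks nesting. A minimal single-threaded model of that bookkeeping (hypothetical names; the spinlock and barriers are elided):

    #include <assert.h>
    #include <stdint.h>

    typedef struct
    {
        uint64_t ullOwnerBits; /* One bit per lock this core owns. */
        uint64_t ullRecursion; /* Nesting depth for the lock. */
    } xRecursiveLockModel;

    static void vModelAcquire( xRecursiveLockModel * pxL, uint64_t ullLockBit )
    {
        if( ( pxL->ullOwnerBits & ullLockBit ) != 0U )
        {
            pxL->ullRecursion++; /* Already owned: just deepen the nesting. */
        }
        else
        {
            assert( pxL->ullRecursion == 0U );
            pxL->ullRecursion = 1U; /* Fresh acquisition. */
            pxL->ullOwnerBits |= ullLockBit;
        }
    }

    static void vModelRelease( xRecursiveLockModel * pxL, uint64_t ullLockBit )
    {
        assert( ( pxL->ullOwnerBits & ullLockBit ) != 0U );
        assert( pxL->ullRecursion != 0U );

        if( --pxL->ullRecursion == 0U )
        {
            pxL->ullOwnerBits &= ~ullLockBit; /* Last release frees the lock. */
        }
    }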
@@ -52,13 +52,13 @@
 /* Variables and functions. */
 #if ( configNUMBER_OF_CORES == 1 )
     .extern pxCurrentTCB
    .extern ullCriticalNesting
     .extern ullPortInterruptNesting
 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
     .extern pxCurrentTCBs
     .extern ullCriticalNestings
     .extern ullPortInterruptNestings
 #endif
 .extern vTaskSwitchContext
 .extern vApplicationIRQHandler
@@ -308,16 +308,16 @@ LDP Q0, Q1, [ SP ], # 0x20
 /* Store user allocated task stack and use ullContext as the SP */
 #if ( configNUMBER_OF_CORES == 1 )
     adrp X0, pxCurrentTCB
     add X0, X0, :lo12:pxCurrentTCB      /* X0 = &pxCurrentTCB */
 #else
     adrp X0, pxCurrentTCBs
     add X0, X0, :lo12:pxCurrentTCBs     /* X0 = &pxCurrentTCBs */
     /* Get the core ID to index the TCB correctly. */
     MRS X2, MPIDR_EL1                   /* Read the Multiprocessor Affinity Register. */
     AND X2, X2, # 0xff                  /* Extract Aff0 which contains the core ID. */
     LSL X2, X2, # 3                     /* Scale the core ID to the size of a pointer (64-bit system). */
     ADD X0, X0, X2                      /* Add the offset for the current core's TCB pointer. */
 #endif
 LDR X1, [ X0 ]
 ADD X1, X1, # 8                        /* X1 = X1 + 8, X1 now points to ullTaskUnprivilegedSP in TCB. */
@@ -339,16 +339,16 @@ LDP Q0, Q1, [ SP ], # 0x20
 #endif
 CBNZ X0, 3f                             /* If task is privileged, skip saving MPU context. */
 #if ( configNUMBER_OF_CORES == 1 )
     adrp X0, pxCurrentTCB
     add X0, X0, :lo12:pxCurrentTCB      /* X0 = &pxCurrentTCB */
 #else
     adrp X0, pxCurrentTCBs
     add X0, X0, :lo12:pxCurrentTCBs     /* X0 = &pxCurrentTCBs */
     /* Get the core ID to index the TCB correctly. */
     MRS X2, MPIDR_EL1                   /* Read the Multiprocessor Affinity Register. */
     AND X2, X2, # 0xff                  /* Extract Aff0 which contains the core ID. */
     LSL X2, X2, # 3                     /* Scale the core ID to the size of a pointer (64-bit system). */
     ADD X0, X0, X2                      /* Add the offset for the current core's TCB pointer. */
 #endif
 LDR X0, [ X0 ]
@@ -369,8 +369,10 @@ LDP Q0, Q1, [ SP ], # 0x20
 #endif /* #if ( configENABLE_MPU == 1 ) */
 
 MSR SPSEL, # 0
+
+/* Save the entire context. */
 saveallgpregisters
 
 /* Save the SPSR and ELR values. */
 MRS X3, SPSR_EL1
 MRS X2, ELR_EL1
@@ -379,24 +381,25 @@ STP X2, X3, [ SP, # - 0x10 ] !
 
 /* Save the critical section nesting depth. */
 #if ( configNUMBER_OF_CORES == 1 )
     adrp X0, ullCriticalNesting
     add X0, X0, :lo12:ullCriticalNesting    /* X0 = &ullCriticalNesting */
 #else
     adrp X0, ullCriticalNestings
     add X0, X0, :lo12:ullCriticalNestings   /* X0 = &ullCriticalNestings */
     /* Calculate per-core index using MPIDR_EL1 for SMP support. */
     MRS X1, MPIDR_EL1                       /* Read the Multiprocessor Affinity Register. */
     AND X1, X1, # 0xff                      /* Extract Aff0 (core ID). */
     LSL X1, X1, # 3                         /* Multiply core ID by pointer size (8 bytes). */
     ADD X0, X0, X1                          /* Add offset to base address. */
 #endif
 
 LDR X3, [ X0 ]
 
 /* Save the FPU context indicator. */
 adrp X0, ullPortTaskHasFPUContext
 add X0, X0, :lo12:ullPortTaskHasFPUContext  /* X0 = &ullPortTaskHasFPUContext */
 
-#if configNUMBER_OF_CORES > 1
+#if ( configNUMBER_OF_CORES > 1 )
     ADD X0, X0, X1                          /* Add to the base of the FPU array. */
 #endif
 LDR X2, [ X0 ]
@@ -547,16 +550,16 @@ MSR SPSEL, # 1
 STP X8, X9, [ SP, # - 0x10 ] !
 STR X10, [ SP, # - 0x10 ] !
 #if ( configNUMBER_OF_CORES == 1 )
     adrp X8, pxCurrentTCB
     add X8, X8, :lo12:pxCurrentTCB      /* X8 = &pxCurrentTCB */
 #else
     adrp X8, pxCurrentTCBs
     add X8, X8, :lo12:pxCurrentTCBs     /* X8 = &pxCurrentTCBs */
     /* Get the core ID to index the TCB correctly. */
     MRS X10, MPIDR_EL1                  /* Read the Multiprocessor Affinity Register. */
     AND X10, X10, # 0xff                /* Extract Aff0 which contains the core ID. */
     LSL X10, X10, # 3                   /* Scale the core ID to the size of a pointer (64-bit system). */
     ADD X8, X8, X10                     /* Add the offset for the current core's TCB pointer. */
 #endif
 LDR X9, [ X8 ]
 MRS X8, SP_EL0
@@ -926,8 +929,8 @@ LDP X0, X1, [SP], #0x10
 portSAVE_CONTEXT
 savefuncontextgpregs
 #if ( configNUMBER_OF_CORES > 1 )
     MRS x0, mpidr_el1
     AND x0, x0, 255
 #endif
 BL vTaskSwitchContext
 restorefuncontextgpregs
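The MRS/AND pair above loads the core ID into x0 because, in SMP builds, the scheduler entry point takes the calling core as its first argument, passed in x0 under the AArch64 procedure call standard:

    void vTaskSwitchContext( BaseType_t xCoreID );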
@@ -1039,12 +1042,12 @@ ISB SY
 /* Restore the interrupt ID value. */
 LDP X0, X1, [ SP ], # 0x10
 
 /* End IRQ processing by writing interrupt ID value to the EOI register. */
 MSR ICC_EOIR1_EL1, X0
 
 /* Restore the critical nesting count. */
 LDP X1, X5, [ SP ], # 0x10
 
 STR X1, [ X5 ]
 
 /* Has interrupt nesting unwound? */
@@ -1095,8 +1098,8 @@ restoreallgpregisters
 /* Save the context of the current task and select a new task to run. */
 portSAVE_CONTEXT
 #if configNUMBER_OF_CORES > 1
     MRS x0, mpidr_el1
     AND x0, x0, 255
 #endif
 savefuncontextgpregs
 BL vTaskSwitchContext
@@ -1116,7 +1119,7 @@ ISB SY
 restorefloatregisters
 restoreallgpregisters
 
 ERET
 
 /******************************************************************************
 * If the application provides an implementation of vApplicationIRQHandler(),
@@ -142,19 +142,18 @@
 extern void vInterruptCore( uint32_t ulInterruptID, uint32_t ulCoreID );
 #endif /* if !defined(__ASSEMBLER__) */
 
-/* Use SVC so this is safe from EL0. EL1 sites in the port use direct MSR. */\
+/* Use SVC so this is safe from EL0. EL1 sites in the port use direct MSR. */
 #define portDISABLE_INTERRUPTS()    __asm volatile ( "SVC %0" : : "i" ( portSVC_DISABLE_INTERRUPTS ) : "memory" )
 
 #define portENABLE_INTERRUPTS()     __asm volatile ( "SVC %0" : : "i" ( portSVC_ENABLE_INTERRUPTS ) : "memory" )
-
 
 /* In all GICs 255 can be written to the priority mask register to unmask all
  * (but the lowest) interrupt priority. */
 #define portUNMASK_VALUE    ( 0xFFUL )
 
 #if !defined(__ASSEMBLER__)
 /* These macros do not globally disable/enable interrupts. They do mask off
  * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */
 #if ( configNUMBER_OF_CORES == 1 )
     extern void vPortEnterCritical( void );
     extern void vPortExitCritical( void );
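These macros trap to EL1 with an SVC so that unprivileged (EL0) code can still mask and unmask interrupts; privileged call sites in the port use a direct MSR instead, as the comment notes. A hypothetical sketch of the EL1-side dispatch, assuming the DAIF I bit is the masking mechanism (the port's actual handler lives in its assembly and may use the GIC priority mask instead):

    #include <stdint.h>

    /* Hypothetical dispatch for the two service numbers used above. */
    void vSvcInterruptControlSketch( uint32_t ulSvcNumber )
    {
        if( ulSvcNumber == portSVC_DISABLE_INTERRUPTS )
        {
            __asm volatile ( "MSR DAIFSet, #2" ::: "memory" ); /* Mask IRQs. */
        }
        else if( ulSvcNumber == portSVC_ENABLE_INTERRUPTS )
        {
            __asm volatile ( "MSR DAIFClr, #2" ::: "memory" ); /* Unmask IRQs. */
        }
    }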
@@ -180,7 +179,7 @@
 
 #if !defined(__ASSEMBLER__)
 /* Prototype of the FreeRTOS tick handler. This must be installed as the
  * handler for whichever peripheral is used to generate the RTOS tick. */
 void FreeRTOS_Tick_Handler( void );
 #endif /* if !defined(__ASSEMBLER__) */
@@ -200,7 +199,8 @@
  * nothing to prevent it from being called accidentally. */
     #define vPortTaskUsesFPU()
 #endif
+
 #define portTASK_USES_FLOATING_POINT()    vPortTaskUsesFPU()
 
 #define portLOWEST_INTERRUPT_PRIORITY           ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL )
 #define portLOWEST_USABLE_INTERRUPT_PRIORITY    ( portLOWEST_INTERRUPT_PRIORITY - 1UL )
@@ -212,12 +212,10 @@
 #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
 
     /* Store/clear the ready priorities in a bit map. */
     #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities )    ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
     #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities )     ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
 
-    /*-----------------------------------------------------------*/
-
     #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities )    uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) )
 
 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
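A quick worked example of the bit-map selection: recording priorities 1 and 5 sets bits 1 and 5 of the map, and 31 - __builtin_clz() recovers the highest set bit in constant time (host-runnable sketch):

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t uxReadyPriorities = 0U;

        uxReadyPriorities |= ( 1UL << 1 ); /* portRECORD_READY_PRIORITY( 1, ... ) */
        uxReadyPriorities |= ( 1UL << 5 ); /* portRECORD_READY_PRIORITY( 5, ... ) */
        printf( "%d\n", 31 - __builtin_clz( uxReadyPriorities ) ); /* Prints 5. */

        uxReadyPriorities &= ~( 1UL << 5 ); /* portRESET_READY_PRIORITY( 5, ... ) */
        printf( "%d\n", 31 - __builtin_clz( uxReadyPriorities ) ); /* Prints 1. */

        return 0;
    }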
@@ -227,7 +225,7 @@
 #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()    vPortValidateInterruptPriority()
 #endif /* configASSERT */
 
 #define portNOP()       __asm volatile ( "NOP" )
 #define portINLINE      __inline
 
 /* The number of bits to shift for an interrupt priority is dependent on the
@@ -280,8 +278,8 @@
 #define portGET_CORE_ID()             ucPortGetCoreID()
 #define portGET_CORE_ID_FROM_ISR()    ucPortGetCoreIDFromIsr()
 
 /* Use SGI 0 as the yield core interrupt. */
 #define portYIELD_CORE( xCoreID )    vInterruptCore( portYIELD_CORE_INT_ID, ( uint32_t ) xCoreID )
 
 #define portRELEASE_ISR_LOCK( xCoreID )    vPortRecursiveLock( ( xCoreID ), eIsrLock, pdFALSE )
 #define portGET_ISR_LOCK( xCoreID )        vPortRecursiveLock( ( xCoreID ), eIsrLock, pdTRUE )