Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git (synced 2025-04-19 21:11:57 -04:00)

Pass core ID to port lock macros (#1212)

Pass core ID to task/ISR lock functions.

commit f05244a8d5 (parent f63bc2b5cc)
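The change is mechanical but touches every port and the core kernel: the task/ISR lock macros now receive the calling core's ID as a parameter instead of looking it up themselves. A minimal sketch of the updated macro contract, with vPortExampleLockAcquire/vPortExampleLockRelease as hypothetical stand-ins for a port's real lock routine:

    /* Single-core builds: the locks are not needed, so the macros stay empty
     * but still accept (and ignore) the core ID. */
    #if ( configNUMBER_OF_CORES == 1 )
        #define portGET_TASK_LOCK( xCoreID )
        #define portRELEASE_TASK_LOCK( xCoreID )
    #else
        /* SMP builds: forward the caller-supplied core ID to a recursive,
         * core-aware lock routine (hypothetical names shown here). */
        #define portGET_TASK_LOCK( xCoreID )        vPortExampleLockAcquire( ( xCoreID ), pdFALSE )
        #define portRELEASE_TASK_LOCK( xCoreID )    vPortExampleLockRelease( ( xCoreID ), pdFALSE )
    #endif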
@@ -445,7 +445,7 @@
 #ifndef portRELEASE_TASK_LOCK
 
 #if ( configNUMBER_OF_CORES == 1 )
-#define portRELEASE_TASK_LOCK()
+#define portRELEASE_TASK_LOCK( xCoreID )
 #else
 #error portRELEASE_TASK_LOCK is required in SMP
 #endif

@@ -455,7 +455,7 @@
 #ifndef portGET_TASK_LOCK
 
 #if ( configNUMBER_OF_CORES == 1 )
-#define portGET_TASK_LOCK()
+#define portGET_TASK_LOCK( xCoreID )
 #else
 #error portGET_TASK_LOCK is required in SMP
 #endif

@@ -465,7 +465,7 @@
 #ifndef portRELEASE_ISR_LOCK
 
 #if ( configNUMBER_OF_CORES == 1 )
-#define portRELEASE_ISR_LOCK()
+#define portRELEASE_ISR_LOCK( xCoreID )
 #else
 #error portRELEASE_ISR_LOCK is required in SMP
 #endif

@@ -475,7 +475,7 @@
 #ifndef portGET_ISR_LOCK
 
 #if ( configNUMBER_OF_CORES == 1 )
-#define portGET_ISR_LOCK()
+#define portGET_ISR_LOCK( xCoreID )
 #else
 #error portGET_ISR_LOCK is required in SMP
 #endif
@@ -258,8 +258,8 @@ void vPortTickISR( void );
  * already had lock can acquire lock without waiting. This function could be
  * call from task and interrupt context, the critical section is called
  * as in ISR */
-void vPortRecursiveLockAcquire( BaseType_t xFromIsr );
-void vPortRecursiveLockRelease( BaseType_t xFromIsr );
+void vPortRecursiveLockAcquire( BaseType_t xCoreID, BaseType_t xFromIsr );
+void vPortRecursiveLockRelease( BaseType_t xCoreID, BaseType_t xFromIsr );
 
 #endif /* (configNUMBER_OF_CORES > 1) */
 

@@ -688,10 +688,9 @@ prvExclusiveLock_Lock_success:
 }
 
 /*-----------------------------------------------------------*/
-void vPortRecursiveLockAcquire( BaseType_t xFromIsr )
+void vPortRecursiveLockAcquire( BaseType_t xCoreID, BaseType_t xFromIsr )
 {
 BaseType_t xSavedInterruptStatus;
-BaseType_t xCoreID = xPortGET_CORE_ID();
 BaseType_t xBitPosition = ( xFromIsr == pdTRUE );
 
 xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();

@@ -705,10 +704,9 @@ prvExclusiveLock_Lock_success:
 portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus );
 }
 
-void vPortRecursiveLockRelease( BaseType_t xFromIsr )
+void vPortRecursiveLockRelease( BaseType_t xCoreID, BaseType_t xFromIsr )
 {
 BaseType_t xSavedInterruptStatus;
-BaseType_t xCoreID = xPortGET_CORE_ID();
 BaseType_t xBitPosition = ( xFromIsr == pdTRUE );
 
 xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
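Callers now pass in the core ID they already hold rather than having the lock routine read it again with xPortGET_CORE_ID() inside the interrupt-masked region. A rough sketch of the resulting calling pattern, assuming the macro mappings shown elsewhere in this diff (portGET_TASK_LOCK expands to vPortRecursiveLockAcquire( xCoreID, pdFALSE ) and portGET_ISR_LOCK to vPortRecursiveLockAcquire( xCoreID, pdTRUE ) on SMP ports):

    /* Sketch only: the caller reads the core ID once and reuses it for the
     * lock calls and any per-core bookkeeping. */
    BaseType_t xCoreID;

    portDISABLE_INTERRUPTS();
    xCoreID = ( BaseType_t ) portGET_CORE_ID();

    portGET_TASK_LOCK( xCoreID );    /* recursive task lock, keyed by core */
    portGET_ISR_LOCK( xCoreID );     /* recursive ISR lock, keyed by core */

    /* ... work that must not be preempted or interrupted ... */

    portRELEASE_ISR_LOCK( xCoreID );
    portRELEASE_TASK_LOCK( xCoreID );
    portENABLE_INTERRUPTS();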
@@ -141,18 +141,18 @@
 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
 
 #if ( configNUMBER_OF_CORES == 1 )
-#define portGET_ISR_LOCK()
-#define portRELEASE_ISR_LOCK()
-#define portGET_TASK_LOCK()
-#define portRELEASE_TASK_LOCK()
+#define portGET_ISR_LOCK( xCoreID )
+#define portRELEASE_ISR_LOCK( xCoreID )
+#define portGET_TASK_LOCK( xCoreID )
+#define portRELEASE_TASK_LOCK( xCoreID )
 #else
-extern void vPortRecursiveLockAcquire( BaseType_t xFromIsr );
-extern void vPortRecursiveLockRelease( BaseType_t xFromIsr );
+extern void vPortRecursiveLockAcquire( BaseType_t xCoreID, BaseType_t xFromIsr );
+extern void vPortRecursiveLockRelease( BaseType_t xCoreID, BaseType_t xFromIsr );
 
-#define portGET_ISR_LOCK() vPortRecursiveLockAcquire( pdTRUE )
-#define portRELEASE_ISR_LOCK() vPortRecursiveLockRelease( pdTRUE )
-#define portGET_TASK_LOCK() vPortRecursiveLockAcquire( pdFALSE )
-#define portRELEASE_TASK_LOCK() vPortRecursiveLockRelease( pdFALSE )
+#define portGET_ISR_LOCK( xCoreID ) vPortRecursiveLockAcquire( ( xCoreID ), pdTRUE )
+#define portRELEASE_ISR_LOCK( xCoreID ) vPortRecursiveLockRelease( ( xCoreID ), pdTRUE )
+#define portGET_TASK_LOCK( xCoreID ) vPortRecursiveLockAcquire( ( xCoreID ), pdFALSE )
+#define portRELEASE_TASK_LOCK( xCoreID ) vPortRecursiveLockRelease( ( xCoreID ), pdFALSE )
 #endif /* if ( configNUMBER_OF_CORES == 1 ) */
 
 /*-----------------------------------------------------------*/
@@ -211,7 +211,8 @@ __force_inline static bool spin_try_lock_unsafe(spin_lock_t *lock) {
 /* Note this is a single method with uxAcquire parameter since we have
  * static vars, the method is always called with a compile time constant for
  * uxAcquire, and the compiler should do the right thing! */
-static inline void vPortRecursiveLock( uint32_t ulLockNum,
+static inline void vPortRecursiveLock( BaseType_t xCoreID,
+                                       uint32_t ulLockNum,
                                        spin_lock_t * pxSpinLock,
                                        BaseType_t uxAcquire )
 {

@@ -219,12 +220,11 @@ static inline void vPortRecursiveLock( uint32_t ulLockNum,
 static volatile uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ];
 
 configASSERT( ulLockNum < portRTOS_SPINLOCK_COUNT );
-uint32_t ulCoreNum = get_core_num();
 
 if( uxAcquire )
 {
 if (!spin_try_lock_unsafe(pxSpinLock)) {
-if( ucOwnedByCore[ ulCoreNum ][ ulLockNum ] )
+if( ucOwnedByCore[ xCoreID ][ ulLockNum ] )
 {
 configASSERT( ucRecursionCountByLock[ ulLockNum ] != 255u );
 ucRecursionCountByLock[ ulLockNum ]++;

@@ -234,31 +234,31 @@ static inline void vPortRecursiveLock( uint32_t ulLockNum,
 }
 configASSERT( ucRecursionCountByLock[ ulLockNum ] == 0 );
 ucRecursionCountByLock[ ulLockNum ] = 1;
-ucOwnedByCore[ ulCoreNum ][ ulLockNum ] = 1;
+ucOwnedByCore[ xCoreID ][ ulLockNum ] = 1;
 }
 else
 {
-configASSERT( ( ucOwnedByCore[ ulCoreNum ] [ulLockNum ] ) != 0 );
+configASSERT( ( ucOwnedByCore[ xCoreID ] [ulLockNum ] ) != 0 );
 configASSERT( ucRecursionCountByLock[ ulLockNum ] != 0 );
 
 if( !--ucRecursionCountByLock[ ulLockNum ] )
 {
-ucOwnedByCore[ ulCoreNum ] [ ulLockNum ] = 0;
+ucOwnedByCore[ xCoreID ] [ ulLockNum ] = 0;
 spin_unlock_unsafe(pxSpinLock);
 }
 }
 }
 
 #if ( configNUMBER_OF_CORES == 1 )
-#define portGET_ISR_LOCK()
-#define portRELEASE_ISR_LOCK()
-#define portGET_TASK_LOCK()
-#define portRELEASE_TASK_LOCK()
+#define portGET_ISR_LOCK( xCoreID )
+#define portRELEASE_ISR_LOCK( xCoreID )
+#define portGET_TASK_LOCK( xCoreID )
+#define portRELEASE_TASK_LOCK( xCoreID )
 #else
-#define portGET_ISR_LOCK() vPortRecursiveLock( 0, spin_lock_instance( configSMP_SPINLOCK_0 ), pdTRUE )
-#define portRELEASE_ISR_LOCK() vPortRecursiveLock( 0, spin_lock_instance( configSMP_SPINLOCK_0 ), pdFALSE )
-#define portGET_TASK_LOCK() vPortRecursiveLock( 1, spin_lock_instance( configSMP_SPINLOCK_1 ), pdTRUE )
-#define portRELEASE_TASK_LOCK() vPortRecursiveLock( 1, spin_lock_instance( configSMP_SPINLOCK_1 ), pdFALSE )
+#define portGET_ISR_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), 0, spin_lock_instance( configSMP_SPINLOCK_0 ), pdTRUE )
+#define portRELEASE_ISR_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), 0, spin_lock_instance( configSMP_SPINLOCK_0 ), pdFALSE )
+#define portGET_TASK_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), 1, spin_lock_instance( configSMP_SPINLOCK_1 ), pdTRUE )
+#define portRELEASE_TASK_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), 1, spin_lock_instance( configSMP_SPINLOCK_1 ), pdFALSE )
 #endif
 
 /*-----------------------------------------------------------*/
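The comment above is the key design point: vPortRecursiveLock() is one routine that both acquires and releases, with per-core ownership flags and a per-lock recursion count doing the bookkeeping. A simplified, self-contained sketch of that bookkeeping, with hypothetical names and plain C stubs standing in for the SDK spin-lock calls (spin_try_lock_unsafe()/spin_unlock_unsafe() in the real port):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define EXAMPLE_NUM_CORES    2
    #define EXAMPLE_NUM_LOCKS    2

    /* Stand-ins for the hardware spin-lock primitives. */
    static volatile bool xHardwareLock[ EXAMPLE_NUM_LOCKS ];
    static bool xExampleTryLock( volatile bool * pxLock ) { if( *pxLock ) { return false; } *pxLock = true; return true; }
    static void vExampleUnlock( volatile bool * pxLock )  { *pxLock = false; }

    static volatile uint8_t ucOwnedByCore[ EXAMPLE_NUM_CORES ][ EXAMPLE_NUM_LOCKS ];
    static volatile uint8_t ucRecursionCount[ EXAMPLE_NUM_LOCKS ];

    static void vExampleRecursiveLock( uint32_t xCoreID, uint32_t ulLockNum, bool xAcquire )
    {
        if( xAcquire )
        {
            if( !xExampleTryLock( &xHardwareLock[ ulLockNum ] ) )
            {
                if( ucOwnedByCore[ xCoreID ][ ulLockNum ] )
                {
                    /* This core already holds the lock: just nest. */
                    ucRecursionCount[ ulLockNum ]++;
                    return;
                }

                /* Another core holds it: spin until the hardware lock is free. */
                while( !xExampleTryLock( &xHardwareLock[ ulLockNum ] ) )
                {
                }
            }

            ucRecursionCount[ ulLockNum ] = 1;
            ucOwnedByCore[ xCoreID ][ ulLockNum ] = 1;
        }
        else
        {
            assert( ucOwnedByCore[ xCoreID ][ ulLockNum ] && ucRecursionCount[ ulLockNum ] );

            if( --ucRecursionCount[ ulLockNum ] == 0 )
            {
                ucOwnedByCore[ xCoreID ][ ulLockNum ] = 0;
                vExampleUnlock( &xHardwareLock[ ulLockNum ] );
            }
        }
    }

Passing xCoreID in from the caller removes the get_core_num() lookup the old code made on every acquire and release.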
@@ -152,10 +152,11 @@
 
 #define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 )
 
-#define portGET_ISR_LOCK() rtos_lock_acquire( 0 )
-#define portRELEASE_ISR_LOCK() rtos_lock_release( 0 )
-#define portGET_TASK_LOCK() rtos_lock_acquire( 1 )
-#define portRELEASE_TASK_LOCK() rtos_lock_release( 1 )
+#define portGET_ISR_LOCK( xCoreID ) do{ ( void )( xCoreID ); rtos_lock_acquire( 0 ); } while( 0 )
+#define portRELEASE_ISR_LOCK( xCoreID ) do{ ( void )( xCoreID ); rtos_lock_release( 0 ); } while( 0 )
+#define portGET_TASK_LOCK( xCoreID ) do{ ( void )( xCoreID ); rtos_lock_acquire( 1 ); } while( 0 )
+#define portRELEASE_TASK_LOCK( xCoreID ) do{ ( void )( xCoreID ); rtos_lock_release( 1 ); } while( 0 )
+
 
 void vTaskEnterCritical( void );
 void vTaskExitCritical( void );
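Two idioms in the new macro bodies are worth calling out: do { ... } while( 0 ) keeps the multi-statement expansion a single statement, and ( void )( xCoreID ) consumes the parameter so this port, which does not need the core ID, compiles without unused-argument warnings. A small self-contained illustration (stub function and macro names here are hypothetical, not from the port):

    #include <stdio.h>

    static void rtos_lock_acquire_stub( int lock ) { printf( "acquire lock %d\n", lock ); }
    static void vOtherWork( void )                 { printf( "other work\n" ); }

    /* Mirrors the shape of the new port macros: consume the core ID, make the
     * real call, and wrap it all so the expansion is exactly one statement. */
    #define exampleGET_ISR_LOCK( xCoreID )    do { ( void )( xCoreID ); rtos_lock_acquire_stub( 0 ); } while( 0 )

    int main( void )
    {
        int xCoreID = 0;

        /* Safe even in an unbraced if/else, because the macro expands to a
         * single statement. */
        if( xCoreID == 0 )
            exampleGET_ISR_LOCK( xCoreID );
        else
            vOtherWork();

        return 0;
    }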
@@ -123,19 +123,19 @@ extern void vPortYield( void );
 
 /* Acquire the TASK lock. TASK lock is a recursive lock.
  * It should be able to be locked by the same core multiple times. */
-#define portGET_TASK_LOCK() do {} while( 0 )
+#define portGET_TASK_LOCK( xCoreID ) do {} while( 0 )
 
 /* Release the TASK lock. If a TASK lock is locked by the same core multiple times,
  * it should be released as many times as it is locked. */
-#define portRELEASE_TASK_LOCK() do {} while( 0 )
+#define portRELEASE_TASK_LOCK( xCoreID ) do {} while( 0 )
 
 /* Acquire the ISR lock. ISR lock is a recursive lock.
  * It should be able to be locked by the same core multiple times. */
-#define portGET_ISR_LOCK() do {} while( 0 )
+#define portGET_ISR_LOCK( xCoreID ) do {} while( 0 )
 
 /* Release the ISR lock. If a ISR lock is locked by the same core multiple times, \
  * it should be released as many times as it is locked. */
-#define portRELEASE_ISR_LOCK() do {} while( 0 )
+#define portRELEASE_ISR_LOCK( xCoreID ) do {} while( 0 )
 
 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
 
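The comments above define the behaviour every implementation has to provide (this particular port only supplies stubs): the locks are recursive per core, so nested acquisitions on the same core must not deadlock and must be balanced by the same number of releases before the lock actually becomes free. A short usage sketch of that contract, assuming an SMP port:

    /* All calls below are made by the same core, identified by xCoreID. */
    portGET_TASK_LOCK( xCoreID );       /* first acquisition: this core now owns the lock */
    portGET_TASK_LOCK( xCoreID );       /* nested acquisition: must succeed immediately */

    /* ... */

    portRELEASE_TASK_LOCK( xCoreID );   /* still owned: nesting depth drops to one */
    portRELEASE_TASK_LOCK( xCoreID );   /* final release: the lock becomes free here */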
tasks.c (49 changes)
@@ -831,7 +831,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 if( uxPrevCriticalNesting > 0U )
 {
 portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U );
-portRELEASE_ISR_LOCK();
+portRELEASE_ISR_LOCK( xCoreID );
 }
 else
 {

@@ -840,7 +840,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 mtCOVERAGE_TEST_MARKER();
 }
 
-portRELEASE_TASK_LOCK();
+portRELEASE_TASK_LOCK( xCoreID );
 portMEMORY_BARRIER();
 configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD );
 
@@ -853,15 +853,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 * its run state. */
 
 portDISABLE_INTERRUPTS();
-portGET_TASK_LOCK();
-portGET_ISR_LOCK();
 xCoreID = ( BaseType_t ) portGET_CORE_ID();
+portGET_TASK_LOCK( xCoreID );
+portGET_ISR_LOCK( xCoreID );
 
 portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting );
 
 if( uxPrevCriticalNesting == 0U )
 {
-portRELEASE_ISR_LOCK();
+portRELEASE_ISR_LOCK( xCoreID );
 }
 }
 }
@@ -3854,6 +3855,7 @@ void vTaskSuspendAll( void )
 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
 {
 UBaseType_t ulState;
+BaseType_t xCoreID;
 
 /* This must only be called from within a task. */
 portASSERT_IF_IN_ISR();

@@ -3867,14 +3869,16 @@ void vTaskSuspendAll( void )
 * uxSchedulerSuspended since that will prevent context switches. */
 ulState = portSET_INTERRUPT_MASK();
 
+xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
 /* This must never be called from inside a critical section. */
-configASSERT( portGET_CRITICAL_NESTING_COUNT( portGET_CORE_ID() ) == 0 );
+configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 );
 
 /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
  * do not otherwise exhibit real time behaviour. */
 portSOFTWARE_BARRIER();
 
-portGET_TASK_LOCK();
+portGET_TASK_LOCK( xCoreID );
 
 /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The
  * purpose is to prevent altering the variable when fromISR APIs are readying

@@ -3888,12 +3892,17 @@ void vTaskSuspendAll( void )
 mtCOVERAGE_TEST_MARKER();
 }
 
-portGET_ISR_LOCK();
+/* Query the coreID again as prvCheckForRunStateChange may have
+ * caused the task to get scheduled on a different core. The correct
+ * task lock for the core is acquired in prvCheckForRunStateChange. */
+xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+portGET_ISR_LOCK( xCoreID );
 
 /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
  * is used to allow calls to vTaskSuspendAll() to nest. */
 ++uxSchedulerSuspended;
-portRELEASE_ISR_LOCK();
+portRELEASE_ISR_LOCK( xCoreID );
 
 portCLEAR_INTERRUPT_MASK( ulState );
 }
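The new comment is the subtle part of this change: between taking the task lock and taking the ISR lock, prvCheckForRunStateChange() can block, and the task may resume on a different core, so the core ID has to be read again before it is used for the ISR lock. A condensed sketch of the resulting sequence in vTaskSuspendAll(), paraphrased from the hunks above (the conditional call into prvCheckForRunStateChange() sits in lines this diff does not show):

    ulState = portSET_INTERRUPT_MASK();

    xCoreID = ( BaseType_t ) portGET_CORE_ID();
    configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 );

    portGET_TASK_LOCK( xCoreID );

    /* (The kernel may call prvCheckForRunStateChange() here; if the task is
     * moved to another core, that function takes the correct task lock for
     * the new core itself.) */

    xCoreID = ( BaseType_t ) portGET_CORE_ID();   /* re-read: the core may have changed */
    portGET_ISR_LOCK( xCoreID );
    ++uxSchedulerSuspended;
    portRELEASE_ISR_LOCK( xCoreID );

    portCLEAR_INTERRUPT_MASK( ulState );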
@@ -3998,7 +4007,7 @@ BaseType_t xTaskResumeAll( void )
 configASSERT( uxSchedulerSuspended != 0U );
 
 uxSchedulerSuspended = ( UBaseType_t ) ( uxSchedulerSuspended - 1U );
-portRELEASE_TASK_LOCK();
+portRELEASE_TASK_LOCK( xCoreID );
 
 if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
 {
@@ -5168,8 +5177,8 @@ BaseType_t xTaskIncrementTick( void )
 * and move on if another core suspended the scheduler. We should only
 * do that if the current core has suspended the scheduler. */
 
-portGET_TASK_LOCK(); /* Must always acquire the task lock first. */
-portGET_ISR_LOCK();
+portGET_TASK_LOCK( xCoreID ); /* Must always acquire the task lock first. */
+portGET_ISR_LOCK( xCoreID );
 {
 /* vTaskSwitchContext() must never be called from within a critical section.
  * This is not necessarily true for single core FreeRTOS, but it is for this

@@ -5250,8 +5259,8 @@ BaseType_t xTaskIncrementTick( void )
 #endif
 }
 }
-portRELEASE_ISR_LOCK();
-portRELEASE_TASK_LOCK();
+portRELEASE_ISR_LOCK( xCoreID );
+portRELEASE_TASK_LOCK( xCoreID );
 
 traceRETURN_vTaskSwitchContext();
 }
@@ -6997,8 +7006,8 @@ static void prvResetNextTaskUnblockTime( void )
 {
 if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
 {
-portGET_TASK_LOCK();
-portGET_ISR_LOCK();
+portGET_TASK_LOCK( xCoreID );
+portGET_ISR_LOCK( xCoreID );
 }
 
 portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );

@@ -7051,7 +7060,7 @@ static void prvResetNextTaskUnblockTime( void )
 
 if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
 {
-portGET_ISR_LOCK();
+portGET_ISR_LOCK( xCoreID );
 }
 
 portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
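Both hunks above follow the same pattern: the port-level locks are taken only on the outermost entry into a critical section (nesting count zero); nested entries just bump the per-core counter. A condensed sketch of the task-level path, simplified from the first hunk and not the verbatim kernel code:

    /* Entering a critical section from a task on an SMP build. */
    portDISABLE_INTERRUPTS();
    xCoreID = ( BaseType_t ) portGET_CORE_ID();

    if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
    {
        portGET_TASK_LOCK( xCoreID );   /* the task lock is always taken first */
        portGET_ISR_LOCK( xCoreID );
    }

    portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );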
@@ -7143,8 +7152,8 @@ static void prvResetNextTaskUnblockTime( void )
 /* Get the xYieldPending stats inside the critical section. */
 xYieldCurrentTask = xYieldPendings[ xCoreID ];
 
-portRELEASE_ISR_LOCK();
-portRELEASE_TASK_LOCK();
+portRELEASE_ISR_LOCK( xCoreID );
+portRELEASE_TASK_LOCK( xCoreID );
 portENABLE_INTERRUPTS();
 
 /* When a task yields in a critical section it just sets

@@ -7199,7 +7208,7 @@ static void prvResetNextTaskUnblockTime( void )
 
 if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
 {
-portRELEASE_ISR_LOCK();
+portRELEASE_ISR_LOCK( xCoreID );
 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
 }
 else