fix(freertos-smp): Miscellaneous fixes for granular locks

Sudeep Mohanty 2025-08-15 17:03:39 +02:00
parent e3d92dd5fd
commit 35025858cf
7 changed files with 302 additions and 359 deletions

event_groups.c

@@ -87,24 +87,6 @@
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*
* Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
* ISRs to pend access to the event group. Caller cannot be preempted by other tasks
* after locking the event group, thus allowing the caller to execute non-deterministic
* operations.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*
* Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables
* preemption for the caller.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/* /*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met. * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@@ -129,8 +111,22 @@
* When the task unlocks the event group, all pended access attempts are handled. * When the task unlocks the event group, all pended access attempts are handled.
*/ */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits ) #define event_groupsLOCK( pxEventBits ) taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) )
#define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits ); #define event_groupsUNLOCK( pxEventBits ) \
( { \
taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) ); \
BaseType_t xAlreadyYielded; \
if( xTaskUnlockCanYield() == pdTRUE ) \
{ \
taskYIELD_WITHIN_API(); \
xAlreadyYielded = pdTRUE; \
} \
else \
{ \
xAlreadyYielded = pdFALSE; \
} \
xAlreadyYielded; \
} )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define event_groupsLOCK( pxEventBits ) vTaskSuspendAll() #define event_groupsLOCK( pxEventBits ) vTaskSuspendAll()
#define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll() #define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll()
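Note: event_groupsLOCK()/event_groupsUNLOCK() now expand directly to the generic taskDATA_GROUP_LOCK()/taskDATA_GROUP_UNLOCK() macros instead of the removed prvLockEventGroupForTasks()/prvUnlockEventGroupForTasks() helpers. The intended call pattern is sketched below; the function and its body are illustrative only and are not part of this commit.

/* Illustrative sketch, not code from this commit: how the lock/unlock pair
 * wraps a non-deterministic walk of the event group's wait list. */
EventBits_t xEventGroupSetBitsSketch( EventGroupHandle_t xEventGroup,
                                      const EventBits_t uxBitsToSet )
{
    EventGroup_t * pxEventBits = xEventGroup;
    EventBits_t uxReturnBits;

    /* In the granular-locks SMP build this disables preemption and takes the
     * group's task spinlock; ISRs stay enabled and pend their accesses. */
    event_groupsLOCK( pxEventBits );
    {
        pxEventBits->uxEventBits |= uxBitsToSet;

        /* ... walk xTasksWaitingForBits and unblock any task whose wait
         * condition is now met ... */

        uxReturnBits = pxEventBits->uxEventBits;
    }
    /* Releases the spinlock, re-enables preemption and, in the SMP
     * granular-locks build, yields if xTaskUnlockCanYield() says a yield is
     * due. The macro evaluates to pdTRUE when that yield happened. */
    ( void ) event_groupsUNLOCK( pxEventBits );

    return uxReturnBits;
}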
@@ -867,48 +863,6 @@
traceRETURN_vEventGroupClearBitsCallback(); traceRETURN_vEventGroupClearBitsCallback();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Disable preemption so that the current task cannot be preempted by another task */
vTaskPreemptionDisable( NULL );
/* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
* the event group while it is suspended. */
portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
{
BaseType_t xReturn = pdFALSE;
/* Release the previously held task spinlock */
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
/* Re-enable preemption */
vTaskPreemptionEnable( NULL );
/* Yield if preemption was re-enabled*/
if( xTaskUnlockCanYield() == pdTRUE )
{
taskYIELD_WITHIN_API();
/* Return true as the task was preempted */
xReturn = pdTRUE;
}
else
{
/* Return false as the task was not preempted */
xReturn = pdFALSE;
}
return xReturn;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
const EventBits_t uxBitsToWaitFor, const EventBits_t uxBitsToWaitFor,

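The prototype above is truncated by the hunk boundary; it belongs to prvTestWaitCondition(), whose behaviour (per the comment earlier in the file) is sketched below for reference. The sketch is a simplification and is not part of this commit.

/* Simplified sketch of the wait-condition test, not part of this diff. */
static BaseType_t prvTestWaitConditionSketch( const EventBits_t uxCurrentEventBits,
                                              const EventBits_t uxBitsToWaitFor,
                                              const BaseType_t xWaitForAllBits )
{
    BaseType_t xWaitConditionMet = pdFALSE;

    if( xWaitForAllBits == pdFALSE )
    {
        /* Waiting for ANY bit: met if at least one requested bit is set. */
        if( ( uxCurrentEventBits & uxBitsToWaitFor ) != ( EventBits_t ) 0 )
        {
            xWaitConditionMet = pdTRUE;
        }
    }
    else
    {
        /* Waiting for ALL bits: met only if every requested bit is set. */
        if( ( uxCurrentEventBits & uxBitsToWaitFor ) == uxBitsToWaitFor )
        {
            xWaitConditionMet = pdTRUE;
        }
    }

    return xWaitConditionMet;
}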

@@ -2972,8 +2972,8 @@
* portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when * portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when
* the tick count is returned to the standard critical section macros. */ * the tick count is returned to the standard critical section macros. */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) #define portTICK_TYPE_ENTER_CRITICAL() kernelENTER_CRITICAL()
#define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) #define portTICK_TYPE_EXIT_CRITICAL() kernelEXIT_CRITICAL()
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL()
#define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL()
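Note: with this change the tick-type critical section in the granular-locks SMP build maps onto the kernel data group critical section (kernelENTER_CRITICAL()/kernelEXIT_CRITICAL()) rather than the previous explicit spinlock pair. These macros only matter when portTICK_TYPE_IS_ATOMIC is 0, i.e. when the tick count cannot be read atomically; the usage pattern is sketched below (simplified, not part of this commit).

/* Sketch: reading a non-atomic tick count under the tick-type critical
 * section. xTickCount is the kernel's tick counter. */
TickType_t xGetTickCountSketch( void )
{
    TickType_t xTicks;

    portTICK_TYPE_ENTER_CRITICAL();
    {
        xTicks = xTickCount;
    }
    portTICK_TYPE_EXIT_CRITICAL();

    return xTicks;
}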

task.h

@@ -299,11 +299,11 @@ typedef enum
{ \ { \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
/* Task spinlock is always taken first */ \ /* Task spinlock is always taken first */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \ portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Disable interrupts */ \ /* Disable interrupts */ \
portDISABLE_INTERRUPTS(); \ portDISABLE_INTERRUPTS(); \
/* Take the ISR spinlock next */ \ /* Take the ISR spinlock next */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
/* Increment the critical nesting count */ \ /* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
} \ } \
@@ -322,11 +322,13 @@ typedef enum
#define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \ #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \
do { \ do { \
*( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ { \
/* Take the ISR spinlock */ \ const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ /* Take the ISR spinlock */ \
/* Increment the critical nesting count */ \ portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ /* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
} \
} while( 0 ) } while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
@@ -339,27 +341,27 @@ typedef enum
* \ingroup GranularLocks * \ingroup GranularLocks
*/ */
#if ( portUSING_GRANULAR_LOCKS == 1 ) #if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \ #define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
do { \ do { \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \ configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
/* Release the ISR spinlock */ \ /* Release the ISR spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
/* Release the task spinlock */ \ /* Release the task spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \ portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Decrement the critical nesting count */ \ /* Decrement the critical nesting count */ \
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
/* Enable interrupts only if the critical nesting count is 0 */ \ /* Enable interrupts only if the critical nesting count is 0 */ \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
{ \ { \
portENABLE_INTERRUPTS(); \ portENABLE_INTERRUPTS(); \
} \ } \
else \ else \
{ \ { \
mtCOVERAGE_TEST_MARKER(); \ mtCOVERAGE_TEST_MARKER(); \
} \ } \
/* Re-enable preemption */ \ /* Re-enable preemption */ \
vTaskPreemptionEnable( NULL ); \ vTaskPreemptionEnable( NULL ); \
} while( 0 ) } while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
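Note: the extra parentheses around the spinlock parameters make the macros safe for arguments such as &( pxQueue->xTaskSpinlock ). A sketch of a data group using the enter/exit pair from task level is shown below; the structure and function are illustrative only (the real users in this commit are queue.c, stream_buffer.c and timers.c).

/* Illustrative data group guarded by its own pair of spinlocks. Not part of
 * this commit; in real code the spinlocks would be initialised with
 * portINIT_SPINLOCK_STATIC. */
typedef struct xEXAMPLE_DATA_GROUP
{
    uint32_t ulSharedCounter;
    portSPINLOCK_TYPE xTaskSpinlock;
    portSPINLOCK_TYPE xISRSpinlock;
} ExampleDataGroup_t;

void vExampleIncrementFromTask( ExampleDataGroup_t * pxGroup )
{
    /* Takes the task spinlock, disables interrupts, takes the ISR spinlock
     * and increments the critical nesting count, as in the macro above. */
    taskDATA_GROUP_ENTER_CRITICAL( &( pxGroup->xTaskSpinlock ), &( pxGroup->xISRSpinlock ) );
    {
        pxGroup->ulSharedCounter++;
    }
    /* Releases both spinlocks, re-enables interrupts once the nesting count
     * reaches zero, then re-enables preemption. */
    taskDATA_GROUP_EXIT_CRITICAL( &( pxGroup->xTaskSpinlock ), &( pxGroup->xISRSpinlock ) );
}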
@@ -379,7 +381,7 @@ typedef enum
/* Decrement the critical nesting count */ \ /* Decrement the critical nesting count */ \
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
/* Release the ISR spinlock */ \ /* Release the ISR spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
{ \ { \
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
@@ -387,6 +389,44 @@ typedef enum
} while( 0 ) } while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
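The interrupt-safe variants follow the same shape; a sketch using the illustrative ExampleDataGroup_t from the previous example:

/* Illustrative only: accessing the same data group from an ISR. Only the
 * ISR spinlock is taken, and the previous interrupt mask is preserved. */
void vExampleIncrementFromISR( ExampleDataGroup_t * pxGroup )
{
    UBaseType_t uxSavedInterruptStatus;

    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &( pxGroup->xISRSpinlock ), &uxSavedInterruptStatus );
    {
        pxGroup->ulSharedCounter++;
    }
    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &( pxGroup->xISRSpinlock ) );
}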
/**
* task. h
*
* Macros to lock a data group (task-level lock only).
*
* \defgroup taskDATA_GROUP_LOCK taskDATA_GROUP_LOCK
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \
do { \
/* Disable preemption while holding the task spinlock. */ \
vTaskPreemptionDisable( NULL ); \
{ \
portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
} \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/**
* task. h
*
* Macros to unlock a data group (task-level lock only).
*
* \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \
do { \
{ \
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
} \
/* Re-enable preemption after releasing the task spinlock. */ \
vTaskPreemptionEnable( NULL ); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
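Unlike the critical section pair, the new lock/unlock pair leaves interrupts enabled: it only disables preemption and holds the task spinlock, which is what the event group, queue and stream buffer code in this commit now builds on for long, non-deterministic operations. A sketch using the illustrative data group from the earlier example:

/* Illustrative only: locking a data group for an operation whose duration
 * is not bounded. ISRs remain enabled and pend their accesses; other tasks
 * are excluded by the task spinlock and by preemption being disabled. */
void vExampleLongOperation( ExampleDataGroup_t * pxGroup )
{
    taskDATA_GROUP_LOCK( &( pxGroup->xTaskSpinlock ) );
    {
        /* ... traverse lists, unblock tasks, or copy large amounts of
         * data ... */
    }
    taskDATA_GROUP_UNLOCK( &( pxGroup->xTaskSpinlock ) );
}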
/*----------------------------------------------------------- /*-----------------------------------------------------------
* TASK CREATION API * TASK CREATION API
*----------------------------------------------------------*/ *----------------------------------------------------------*/

queue.c

@@ -328,25 +328,23 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* When the tasks unlocks the queue, all pended access attempts are handled. * When the tasks unlocks the queue, all pended access attempts are handled.
*/ */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define queueLOCK( pxQueue ) \ #define queueLOCK( pxQueue ) \
do { \ do { \
vTaskPreemptionDisable( NULL ); \ taskDATA_GROUP_LOCK( &( ( pxQueue )->xTaskSpinlock ) ); \
prvLockQueue( ( pxQueue ) ); \ prvLockQueue( ( pxQueue ) ); \
portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \
} while( 0 ) } while( 0 )
#define queueUNLOCK( pxQueue, xYieldAPI ) \ #define queueUNLOCK( pxQueue, xYieldAPI ) \
do { \ do { \
prvUnlockQueue( ( pxQueue ) ); \ prvUnlockQueue( ( pxQueue ) ); \
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \ taskDATA_GROUP_UNLOCK( &( ( pxQueue )->xTaskSpinlock ) ); \
vTaskPreemptionEnable( NULL ); \ if( ( xYieldAPI ) == pdTRUE ) \
if( ( xYieldAPI ) == pdTRUE ) \ { \
{ \ taskYIELD_WITHIN_API(); \
taskYIELD_WITHIN_API(); \ } \
} \ else \
else \ { \
{ \ mtCOVERAGE_TEST_MARKER(); \
mtCOVERAGE_TEST_MARKER(); \ } \
} \
} while( 0 ) } while( 0 )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define queueLOCK( pxQueue ) \ #define queueLOCK( pxQueue ) \

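Note: queueLOCK()/queueUNLOCK() now delegate preemption control and spinlock handling to taskDATA_GROUP_LOCK()/taskDATA_GROUP_UNLOCK(), keeping only the queue-specific prvLockQueue()/prvUnlockQueue() calls. The call pattern in the blocking send/receive paths looks roughly as sketched below (heavily simplified, not code from this commit).

/* Illustrative sketch of a blocking queue operation around the reworked
 * lock/unlock pair. The real copy, wait and timeout logic is omitted. */
BaseType_t xQueueOperationSketch( Queue_t * const pxQueue )
{
    BaseType_t xYieldRequired = pdFALSE;

    /* Disables preemption, takes the queue's task spinlock and marks the
     * queue locked so that ISRs pend their wake-ups instead of touching the
     * event lists directly. */
    queueLOCK( pxQueue );
    {
        /* ... try the operation, or place the calling task on an event list
         * to wait; set xYieldRequired if a higher priority task was woken. */
    }
    /* Handles any accesses pended by ISRs, releases the spinlock, re-enables
     * preemption and yields when xYieldRequired is pdTRUE. */
    queueUNLOCK( pxQueue, xYieldRequired );

    return pdPASS;
}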
stream_buffer.c

@@ -63,10 +63,10 @@
* Macros to mark the start and end of a critical code region. * Macros to mark the start and end of a critical code region.
*/ */
#if ( portUSING_GRANULAR_LOCKS == 1 ) #if ( portUSING_GRANULAR_LOCKS == 1 )
#define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( &pxStreamBuffer->xTaskSpinlock, &pxStreamBuffer->xISRSpinlock ) #define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( &( ( pxStreamBuffer )->xTaskSpinlock ), &( ( pxStreamBuffer )->xISRSpinlock ) )
#define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &pxStreamBuffer->xISRSpinlock, puxSavedInterruptStatus ) #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &( ( pxStreamBuffer )->xISRSpinlock ), puxSavedInterruptStatus )
#define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( &pxStreamBuffer->xTaskSpinlock, &pxStreamBuffer->xISRSpinlock ) #define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( &( ( pxStreamBuffer )->xTaskSpinlock ), &( ( pxStreamBuffer )->xISRSpinlock ) )
#define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &pxStreamBuffer->xISRSpinlock ) #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &( ( pxStreamBuffer )->xISRSpinlock ) )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define sbENTER_CRITICAL( pxStreamBuffer ) taskENTER_CRITICAL(); #define sbENTER_CRITICAL( pxStreamBuffer ) taskENTER_CRITICAL();
#define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 ) #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 )
@@ -84,8 +84,8 @@
* When the task unlocks the stream buffer, all pended access attempts are handled. * When the task unlocks the stream buffer, all pended access attempts are handled.
*/ */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define sbLOCK( pxStreamBuffer ) prvLockStreamBufferForTasks( pxStreamBuffer ) #define sbLOCK( pxStreamBuffer ) taskDATA_GROUP_LOCK( &( ( pxStreamBuffer )->xTaskSpinlock ) )
#define sbUNLOCK( pxStreamBuffer ) prvUnlockStreamBufferForTasks( pxStreamBuffer ) #define sbUNLOCK( pxStreamBuffer ) taskDATA_GROUP_UNLOCK( &( ( pxStreamBuffer )->xTaskSpinlock ) )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define sbLOCK( pxStreamBuffer ) vTaskSuspendAll() #define sbLOCK( pxStreamBuffer ) vTaskSuspendAll()
#define sbUNLOCK( pxStreamBuffer ) ( void ) xTaskResumeAll() #define sbUNLOCK( pxStreamBuffer ) ( void ) xTaskResumeAll()
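Note: sbLOCK()/sbUNLOCK() now forward to the generic data group lock instead of the removed prvLockStreamBufferForTasks()/prvUnlockStreamBufferForTasks() helpers (deleted further down in this file). A sketch of a caller that needs to keep other tasks out of the buffer during a non-deterministic operation (illustrative only, not part of this commit):

/* Illustrative only: holding the stream buffer's data group lock across an
 * operation of unbounded length while leaving interrupts enabled. */
static void prvStreamBufferLockedOperationSketch( StreamBuffer_t * const pxStreamBuffer )
{
    sbLOCK( pxStreamBuffer );
    {
        /* ... for example, notify and clear xTaskWaitingToSend or
         * xTaskWaitingToReceive, as the sbRECEIVE_COMPLETED() and
         * sbSEND_COMPLETED() macros in the following hunks do ... */
    }
    sbUNLOCK( pxStreamBuffer );
}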
@@ -109,7 +109,7 @@
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \ } \
} \ } \
( void ) sbUNLOCK( pxStreamBuffer ); \ sbUNLOCK( pxStreamBuffer ); \
} while( 0 ) } while( 0 )
#endif /* sbRECEIVE_COMPLETED */ #endif /* sbRECEIVE_COMPLETED */
@@ -189,7 +189,7 @@
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \ } \
} \ } \
( void ) sbUNLOCK( pxStreamBuffer ) sbUNLOCK( pxStreamBuffer )
#endif /* sbSEND_COMPLETED */ #endif /* sbSEND_COMPLETED */
/* If user has provided a per-instance send completed callback, then /* If user has provided a per-instance send completed callback, then
@@ -288,24 +288,6 @@ typedef struct StreamBufferDef_t
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} StreamBuffer_t; } StreamBuffer_t;
/*
* Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer
* but allows ISRs to pend access to the stream buffer. Caller cannot be preempted
* by other tasks after locking the stream buffer, thus allowing the caller to
* execute non-deterministic operations.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*
* Unlocks a stream buffer for tasks. Handles all pended access from ISRs, then reenables preemption
* for the caller.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/* /*
* The number of bytes available to be read from the buffer. * The number of bytes available to be read from the buffer.
*/ */
@@ -381,31 +363,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
StreamBufferCallbackFunction_t pxSendCompletedCallback, StreamBufferCallbackFunction_t pxSendCompletedCallback,
StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
{
/* Disable preemption so that the current task cannot be preempted by another task */
vTaskPreemptionDisable( NULL );
/* Keep holding xTaskSpinlock after unlocking the data group to prevent tasks
* on other cores from accessing the stream buffer while it is suspended. */
portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
{
/* Release the previously held task spinlock */
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
/* Re-enable preemption */
vTaskPreemptionEnable( NULL );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

tasks.c

@@ -630,6 +630,7 @@ static BaseType_t prvCreateIdleTasks( void );
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
/* /*
* Checks to see if another task moved the current task out of the ready * Checks to see if another task moved the current task out of the ready
* list while it was waiting to enter a lightweight critical section and yields, if so. * list while it was waiting to enter a lightweight critical section and yields, if so.
@@ -971,7 +972,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
static void prvLightWeightCheckForRunStateChange( void ) static void prvLightWeightCheckForRunStateChange( void )
{ {
const TCB_t * pxThisTCB; const TCB_t * pxThisTCB;
BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
@@ -1024,8 +1024,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
kernelGET_ISR_LOCK( xCoreID ); kernelGET_ISR_LOCK( xCoreID );
portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting ); portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting );
}; }
} }
#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ #endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@@ -1096,12 +1096,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
xYieldPendings[ xCoreID ] = pdTRUE; xYieldPendings[ xCoreID ] = pdTRUE;
} }
} }
#else #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{ {
xLowestPriorityToPreempt = xCurrentCoreTaskPriority; xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
xLowestPriorityCore = xCoreID; xLowestPriorityCore = xCoreID;
} }
#endif #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
} }
} }
else else
@@ -1413,12 +1413,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
xYieldPendings[ uxCore ] = pdTRUE; xYieldPendings[ uxCore ] = pdTRUE;
} }
} }
#else #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
{ {
xLowestPriority = xTaskPriority; xLowestPriority = xTaskPriority;
xLowestPriorityCore = ( BaseType_t ) uxCore; xLowestPriorityCore = ( BaseType_t ) uxCore;
} }
#endif #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
} }
} }
} }
@@ -2840,7 +2840,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
traceENTER_uxTaskPriorityGet( xTask ); traceENTER_uxTaskPriorityGet( xTask );
kernelENTER_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
kernelENTER_CRITICAL();
}
#else
{
portBASE_TYPE_ENTER_CRITICAL();
}
#endif
{ {
/* If null is passed in here then it is the priority of the task /* If null is passed in here then it is the priority of the task
* that called uxTaskPriorityGet() that is being queried. */ * that called uxTaskPriorityGet() that is being queried. */
@@ -2849,7 +2857,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
uxReturn = pxTCB->uxPriority; uxReturn = pxTCB->uxPriority;
} }
kernelEXIT_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
kernelEXIT_CRITICAL();
}
#else
{
portBASE_TYPE_EXIT_CRITICAL();
}
#endif
traceRETURN_uxTaskPriorityGet( uxReturn ); traceRETURN_uxTaskPriorityGet( uxReturn );
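The same #if/#else pattern around kernelENTER_CRITICAL()/portBASE_TYPE_ENTER_CRITICAL() repeats in uxTaskBasePriorityGet() and vTaskCoreAffinityGet() below. Purely as an illustration (this helper does not exist in the commit), the choice could be expressed once:

/* Hypothetical consolidation of the repeated pattern above, NOT part of
 * this commit. On SMP builds the kernel data group critical section is
 * required; on single-core builds the lighter BaseType_t critical section
 * is sufficient for reading a single word. */
#if ( configNUMBER_OF_CORES > 1 )
    #define tasksENTER_CRITICAL_FOR_WORD_READ()    kernelENTER_CRITICAL()
    #define tasksEXIT_CRITICAL_FOR_WORD_READ()     kernelEXIT_CRITICAL()
#else
    #define tasksENTER_CRITICAL_FOR_WORD_READ()    portBASE_TYPE_ENTER_CRITICAL()
    #define tasksEXIT_CRITICAL_FOR_WORD_READ()     portBASE_TYPE_EXIT_CRITICAL()
#endif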
@@ -2918,7 +2934,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
traceENTER_uxTaskBasePriorityGet( xTask ); traceENTER_uxTaskBasePriorityGet( xTask );
kernelENTER_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
kernelENTER_CRITICAL();
}
#else
{
portBASE_TYPE_ENTER_CRITICAL();
}
#endif
{ {
/* If null is passed in here then it is the base priority of the task /* If null is passed in here then it is the base priority of the task
* that called uxTaskBasePriorityGet() that is being queried. */ * that called uxTaskBasePriorityGet() that is being queried. */
@@ -2927,7 +2951,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
uxReturn = pxTCB->uxBasePriority; uxReturn = pxTCB->uxBasePriority;
} }
kernelEXIT_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
kernelEXIT_CRITICAL();
}
#else
{
portBASE_TYPE_EXIT_CRITICAL();
}
#endif
traceRETURN_uxTaskBasePriorityGet( uxReturn ); traceRETURN_uxTaskBasePriorityGet( uxReturn );
@@ -3262,14 +3294,30 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
traceENTER_vTaskCoreAffinityGet( xTask ); traceENTER_vTaskCoreAffinityGet( xTask );
kernelENTER_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
kernelENTER_CRITICAL();
}
#else
{
portBASE_TYPE_ENTER_CRITICAL();
}
#endif
{ {
pxTCB = prvGetTCBFromHandle( xTask ); pxTCB = prvGetTCBFromHandle( xTask );
configASSERT( pxTCB != NULL ); configASSERT( pxTCB != NULL );
uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
} }
kernelEXIT_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
kernelEXIT_CRITICAL();
}
#else
{
portBASE_TYPE_EXIT_CRITICAL();
}
#endif
traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask ); traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask );
@@ -3351,7 +3399,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
} }
else else
{ {
if( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) ) if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
{ {
prvYieldCore( pxTCB->xTaskRunState ); prvYieldCore( pxTCB->xTaskRunState );
} }
@@ -4443,11 +4491,7 @@ BaseType_t xTaskResumeAll( void )
} }
} }
if( xYieldPendings[ xCoreID ] != pdFALSE if( xYieldPendings[ xCoreID ] != pdFALSE )
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
#endif
)
{ {
#if ( configUSE_PREEMPTION != 0 ) #if ( configUSE_PREEMPTION != 0 )
{ {
@@ -5052,10 +5096,14 @@ BaseType_t xTaskIncrementTick( void )
TickType_t xItemValue; TickType_t xItemValue;
BaseType_t xSwitchRequired = pdFALSE; BaseType_t xSwitchRequired = pdFALSE;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
UBaseType_t uxSavedInterruptStatus;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceENTER_xTaskIncrementTick(); traceENTER_xTaskIncrementTick();
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/* Called by the portable layer each time a tick interrupt occurs. /* Called by the portable layer each time a tick interrupt occurs.
@@ -5191,32 +5239,14 @@ BaseType_t xTaskIncrementTick( void )
{ {
#if ( configNUMBER_OF_CORES == 1 ) #if ( configNUMBER_OF_CORES == 1 )
{ {
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U )
if( pxCurrentTCB->uxPreemptionDisable != 0U ) {
{ xSwitchRequired = pdTRUE;
mtCOVERAGE_TEST_MARKER(); }
} else
else {
{ mtCOVERAGE_TEST_MARKER();
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) }
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#else /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
} }
#else /* #if ( configNUMBER_OF_CORES == 1 ) */ #else /* #if ( configNUMBER_OF_CORES == 1 ) */
{ {
@@ -5789,6 +5819,10 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
TCB_t * pxUnblockedTCB; TCB_t * pxUnblockedTCB;
BaseType_t xReturn; BaseType_t xReturn;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
UBaseType_t uxSavedInterruptStatus;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceENTER_xTaskRemoveFromEventList( pxEventList ); traceENTER_xTaskRemoveFromEventList( pxEventList );
#if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
@@ -5797,8 +5831,6 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
* called from a critical section within an ISR. */ * called from a critical section within an ISR. */
#else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
/* Lock the kernel data group as we are about to access its members */ /* Lock the kernel data group as we are about to access its members */
UBaseType_t uxSavedInterruptStatus;
if( portCHECK_IF_IN_ISR() == pdTRUE ) if( portCHECK_IF_IN_ISR() == pdTRUE )
{ {
uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
@@ -7098,9 +7130,7 @@ static void prvResetNextTaskUnblockTime( void )
traceENTER_xTaskPriorityInherit( pxMutexHolder ); traceENTER_xTaskPriorityInherit( pxMutexHolder );
#if ( portUSING_GRANULAR_LOCKS == 1 ) kernelENTER_CRITICAL();
kernelENTER_CRITICAL();
#endif
{ {
/* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority
* inheritance is not applied in this scenario. */ * inheritance is not applied in this scenario. */
@@ -7188,9 +7218,7 @@ static void prvResetNextTaskUnblockTime( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
#if ( portUSING_GRANULAR_LOCKS == 1 ) kernelEXIT_CRITICAL();
kernelEXIT_CRITICAL();
#endif
traceRETURN_xTaskPriorityInherit( xReturn ); traceRETURN_xTaskPriorityInherit( xReturn );
@@ -7499,7 +7527,7 @@ static void prvResetNextTaskUnblockTime( void )
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) #if ( configNUMBER_OF_CORES > 1 )
void vTaskEnterCritical( void ) void vTaskEnterCritical( void )
{ {
@@ -7511,11 +7539,24 @@ static void prvResetNextTaskUnblockTime( void )
if( xSchedulerRunning != pdFALSE ) if( xSchedulerRunning != pdFALSE )
{ {
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) /* When using granular locks, the critical section nesting count
* might have already been incremented if this call is a nested
* call from a data group critical section. Hence, we have to
* acquire the kernel task and ISR locks unconditionally. */
#if ( portUSING_GRANULAR_LOCKS == 1 )
{ {
kernelGET_TASK_LOCK( xCoreID ); kernelGET_TASK_LOCK( xCoreID );
kernelGET_ISR_LOCK( xCoreID ); kernelGET_ISR_LOCK( xCoreID );
} }
#else /* portUSING_GRANULAR_LOCKS */
{
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{
kernelGET_TASK_LOCK( xCoreID );
kernelGET_ISR_LOCK( xCoreID );
}
}
#endif /* portUSING_GRANULAR_LOCKS */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
@@ -7548,56 +7589,7 @@ static void prvResetNextTaskUnblockTime( void )
traceRETURN_vTaskEnterCritical(); traceRETURN_vTaskEnterCritical();
} }
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
void vTaskEnterCritical( void )
{
traceENTER_vTaskEnterCritical();
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
if( xSchedulerRunning != pdFALSE )
{
kernelGET_TASK_LOCK( xCoreID );
kernelGET_ISR_LOCK( xCoreID );
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* This is not the interrupt safe version of the enter critical
* function so assert() if it is being called from an interrupt
* context. Only API functions that end in "FromISR" can be used in an
* interrupt. Only assert if the critical nesting count is 1 to
* protect against recursive calls if the assert function also uses a
* critical section. */
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
{
portASSERT_IF_IN_ISR();
if( uxSchedulerSuspended == 0U )
{
/* The only time there would be a problem is if this is called
* before a context switch and vTaskExitCritical() is called
* after pxCurrentTCB changes. Therefore this should not be
* used within vTaskSwitchContext(). */
prvCheckForRunStateChange();
}
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
traceRETURN_vTaskEnterCritical();
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
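The removed block above was a granular-locks-specific copy of vTaskEnterCritical(); it is now folded into the single implementation earlier in this hunk series. The comment added there explains why the kernel locks are taken unconditionally under granular locks: the critical nesting count can already be non-zero when the kernel critical section is entered from inside a data group critical section. An illustrative sequence follows; the spinlock variables are made up.

/* Illustration only, not code from this commit. xMyTaskSpinlock and
 * xMyISRSpinlock are assumed to be portSPINLOCK_TYPE variables belonging to
 * some data group. */
void vNestedCriticalSectionIllustration( void )
{
    taskDATA_GROUP_ENTER_CRITICAL( &xMyTaskSpinlock, &xMyISRSpinlock );  /* nesting count 0 -> 1 */
    {
        vTaskEnterCritical();  /* nesting count 1 -> 2; the kernel task and
                                * ISR locks are still taken even though the
                                * count was already non-zero. */
        {
            /* ... access kernel data group members ... */
        }
        vTaskExitCritical();   /* releases the kernel locks, count 2 -> 1 */
    }
    taskDATA_GROUP_EXIT_CRITICAL( &xMyTaskSpinlock, &xMyISRSpinlock );   /* count 1 -> 0, interrupts back on */
}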
#if ( configNUMBER_OF_CORES > 1 ) #if ( configNUMBER_OF_CORES > 1 )
@@ -7678,7 +7670,7 @@ static void prvResetNextTaskUnblockTime( void )
#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) #if ( configNUMBER_OF_CORES > 1 )
void vTaskExitCritical( void ) void vTaskExitCritical( void )
{ {
@@ -7698,97 +7690,67 @@ static void prvResetNextTaskUnblockTime( void )
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
{ {
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); #if ( portUSING_GRANULAR_LOCKS == 1 )
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{
BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */
xYieldCurrentTask = xYieldPendings[ xCoreID ];
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
portENABLE_INTERRUPTS();
/* When a task yields in a critical section it just sets
* xYieldPending to true. So now that we have exited the
* critical section check if xYieldPending is true, and
* if so yield. */
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceRETURN_vTaskExitCritical();
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) )
void vTaskExitCritical( void )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
traceENTER_vTaskExitCritical();
if( xSchedulerRunning != pdFALSE )
{
/* If critical nesting count is zero then this function
* does not match a previous call to vTaskEnterCritical(). */
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* This function should not be called in ISR. Use vTaskExitCriticalFromISR
* to exit critical section from ISR. */
portASSERT_IF_IN_ISR();
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
{
/* Release the ISR and task locks */
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* If the critical nesting count is 0, enable interrupts */
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{ {
BaseType_t xYieldCurrentTask; BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */ /* Get the xYieldPending stats inside the critical section. */
xYieldCurrentTask = xTaskUnlockCanYield(); xYieldCurrentTask = xTaskUnlockCanYield();
portENABLE_INTERRUPTS(); /* Release the ISR and task locks first when using granular locks. */
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* When a task yields in a critical section it just sets if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
* xYieldPending to true. So now that we have exited the
* critical section check if xYieldPending is true, and
* if so yield. */
if( xYieldCurrentTask != pdFALSE )
{ {
portYIELD(); portENABLE_INTERRUPTS();
/* When a task yields in a critical section it just sets
* xYieldPending to true. So now that we have exited the
* critical section check if xYieldPending is true, and
* if so yield. */
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
} }
} }
else #else /* portUSING_GRANULAR_LOCKS */
{ {
mtCOVERAGE_TEST_MARKER(); /* Decrement first; release locks and enable interrupts when count reaches zero. */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{
BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */
xYieldCurrentTask = xYieldPendings[ xCoreID ];
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
portENABLE_INTERRUPTS();
/* When a task yields in a critical section it just sets
* xYieldPending to true. So now that we have exited the
* critical section check if xYieldPending is true, and
* if so yield. */
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
} }
#endif /* portUSING_GRANULAR_LOCKS */
} }
else else
{ {
@@ -7803,7 +7765,7 @@ static void prvResetNextTaskUnblockTime( void )
traceRETURN_vTaskExitCritical(); traceRETURN_vTaskExitCritical();
} }
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 ) #if ( configNUMBER_OF_CORES > 1 )
@@ -7923,8 +7885,8 @@ static void prvResetNextTaskUnblockTime( void )
if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
&& ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) &&
&& ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U ) ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U )
#endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
) )
{ {

timers.c

@@ -83,8 +83,8 @@
* Macros to mark the start and end of a critical code region. * Macros to mark the start and end of a critical code region.
*/ */
#if ( portUSING_GRANULAR_LOCKS == 1 ) #if ( portUSING_GRANULAR_LOCKS == 1 )
#define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock )
#define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define tmrENTER_CRITICAL() taskENTER_CRITICAL() #define tmrENTER_CRITICAL() taskENTER_CRITICAL()
#define tmrEXIT_CRITICAL() taskEXIT_CRITICAL() #define tmrEXIT_CRITICAL() taskEXIT_CRITICAL()
@@ -161,8 +161,8 @@
PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC;
PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
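Note: the timer module's spinlocks are renamed so they no longer share the names of the kernel's own xTaskSpinlock/xISRSpinlock, presumably to avoid confusion between the two data groups. A sketch of how the timer service task uses the critical section built on these spinlocks (illustrative only, not part of this commit):

/* Illustrative only: guarding access to the active timer lists through
 * tmrENTER_CRITICAL(), which now uses xTimerTaskSpinlock/xTimerISRSpinlock. */
static TickType_t prvPeekNextExpireTimeSketch( BaseType_t * pxListWasEmpty )
{
    TickType_t xNextExpireTime = ( TickType_t ) 0;

    tmrENTER_CRITICAL();
    {
        /* ... examine the head of the current timer list and record whether
         * it is empty ... */
        *pxListWasEmpty = pdTRUE;
    }
    tmrEXIT_CRITICAL();

    return xNextExpireTime;
}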
@@ -613,7 +613,15 @@
traceENTER_xTimerGetReloadMode( xTimer ); traceENTER_xTimerGetReloadMode( xTimer );
configASSERT( xTimer ); configASSERT( xTimer );
tmrENTER_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
tmrENTER_CRITICAL();
}
#else
{
portBASE_TYPE_ENTER_CRITICAL();
}
#endif
{ {
if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0U ) if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0U )
{ {
@@ -626,7 +634,15 @@
xReturn = pdTRUE; xReturn = pdTRUE;
} }
} }
tmrEXIT_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
tmrEXIT_CRITICAL();
}
#else
{
portBASE_TYPE_EXIT_CRITICAL();
}
#endif
traceRETURN_xTimerGetReloadMode( xReturn ); traceRETURN_xTimerGetReloadMode( xReturn );
@@ -1188,7 +1204,15 @@
configASSERT( xTimer ); configASSERT( xTimer );
/* Is the timer in the list of active timers? */ /* Is the timer in the list of active timers? */
tmrENTER_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
tmrENTER_CRITICAL();
}
#else
{
portBASE_TYPE_ENTER_CRITICAL();
}
#endif
{ {
if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0U ) if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0U )
{ {
@@ -1199,7 +1223,15 @@
xReturn = pdTRUE; xReturn = pdTRUE;
} }
} }
tmrEXIT_CRITICAL(); #if ( ( configNUMBER_OF_CORES > 1 ) )
{
tmrEXIT_CRITICAL();
}
#else
{
portBASE_TYPE_EXIT_CRITICAL();
}
#endif
traceRETURN_xTimerIsTimerActive( xReturn ); traceRETURN_xTimerIsTimerActive( xReturn );