change(freertos/smp): Update queue.c locking

Updated queue.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock members to the queue data structure
- Replaced the task-level critical section macros (taskENTER/EXIT_CRITICAL()
  and taskENTER/EXIT_CRITICAL_FROM_ISR()) with data group critical section
  macros (queueENTER/EXIT_CRITICAL() and queueENTER/EXIT_CRITICAL_FROM_ISR())
- Added vQueueEnterCritical(), uxQueueEnterCriticalFromISR(),
  vQueueExitCritical() and vQueueExitCriticalFromISR(), which the data group
  critical section macros map to
- Added queueLOCK() and queueUNLOCK() as the granular locking equivalents of
  prvLockQueue() and prvUnlockQueue() respectively (see the sketch below the
  commit details)

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
Darian Leung 2024-06-16 00:53:03 +08:00 committed by Sudeep Mohanty
parent 6ac7cc844b
commit f7d7a117f1
2 changed files with 277 additions and 124 deletions
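
For orientation, the sketch below condenses the locking model this commit introduces when portUSING_GRANULAR_LOCKS is 1 and configNUMBER_OF_CORES > 1. It is illustrative only and is not part of the commit; the names mirror the diff that follows, and the port-layer primitives (portGET_SPINLOCK(), portGET_CORE_ID(), portDISABLE_INTERRUPTS(), portINCREMENT_CRITICAL_NESTING_COUNT()) are assumed to be supplied by the SMP port.

/* Illustrative condensation of the scheme in the diff below; not a drop-in
 * implementation. Assumes the port supplies the spinlock and critical-nesting
 * primitives referenced here. */

/* Data group critical section: short, deterministic accesses. Interrupts are
 * disabled on the local core, and taking both spinlocks excludes tasks and
 * ISRs running on other cores. */
static void vQueueEnterCritical( Queue_t * pxQueue )
{
    portDISABLE_INTERRUPTS();
    {
        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

        portGET_SPINLOCK( xCoreID, &( pxQueue->xTaskSpinlock ) ); /* Task spinlock first... */
        portGET_SPINLOCK( xCoreID, &( pxQueue->xISRSpinlock ) );  /* ...then the ISR spinlock. */
        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
    }
}

/* Queue lock: longer, non-deterministic access from a task. Other tasks spin
 * on xTaskSpinlock, while ISR accesses are pended via cTxLock/cRxLock and
 * replayed by prvUnlockQueue() when queueUNLOCK() is called. */
#define queueLOCK( pxQueue )                                                \
    do {                                                                    \
        vTaskPreemptionDisable( NULL );                                     \
        prvLockQueue( ( pxQueue ) );                                        \
        portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \
    } while( 0 )

The task spinlock is always taken before the ISR spinlock, so the critical section path and the queue lock path acquire locks in a consistent order.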


include/FreeRTOS.h

@@ -3318,6 +3318,10 @@ typedef struct xSTATIC_QUEUE
        UBaseType_t uxDummy8;
        uint8_t ucDummy9;
    #endif
+   #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+       portSPINLOCK_TYPE xDummySpinlock[ 2 ];
+   #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} StaticQueue_t;

typedef StaticQueue_t StaticSemaphore_t;

queue.c

@@ -133,6 +133,11 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif
+   #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+       portSPINLOCK_TYPE xTaskSpinlock;
+       portSPINLOCK_TYPE xISRSpinlock;
+   #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
@@ -251,12 +256,55 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
    #endif
/*-----------------------------------------------------------*/

+/*
+ * Macros to mark the start and end of a critical code region.
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define queueENTER_CRITICAL( pxQueue )                                   vQueueEnterCritical( pxQueue )
+    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )                          uxQueueEnterCriticalFromISR( pxQueue )
+    #define queueEXIT_CRITICAL( pxQueue )                                    vQueueExitCritical( pxQueue )
+    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )   vQueueExitCriticalFromISR( uxSavedInterruptStatus, pxQueue )
+#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+    #define queueENTER_CRITICAL( pxQueue )                                   taskENTER_CRITICAL();
+    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )                          taskENTER_CRITICAL_FROM_ISR();
+    #define queueEXIT_CRITICAL( pxQueue )                                    taskEXIT_CRITICAL();
+    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )   taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+/*
+ * Enters a critical section for a queue. Disables interrupts and takes
+ * both task and ISR spinlocks to ensure thread safety.
+ */
+    static void vQueueEnterCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Enters a critical section for a queue from an ISR context. Takes the ISR
+ * spinlock and returns the previous interrupt state.
+ */
+    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Exits a critical section for a queue. Releases spinlocks in reverse order
+ * and conditionally re-enables interrupts and yields if required.
+ */
+    static void vQueueExitCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Exits a critical section for a queue from an ISR context. Releases the ISR
+ * spinlock and conditionally restores the previous interrupt state.
+ */
+    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                           const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
/*
 * Macro to mark a queue as locked. Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
 #define prvLockQueue( pxQueue )                       \
-    taskENTER_CRITICAL();                             \
+    queueENTER_CRITICAL( pxQueue );                   \
     {                                                 \
         if( ( pxQueue )->cRxLock == queueUNLOCKED )   \
         {                                             \

@@ -267,7 +315,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;   \
         }                                                    \
     }                                                        \
-    taskEXIT_CRITICAL()
+    queueEXIT_CRITICAL( pxQueue )

/*
 * Macro to increment cTxLock member of the queue data structure. It is
@@ -298,6 +346,49 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
            ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )

+/*
+ * Macro used to lock and unlock a queue. When a task locks a queue, the
+ * task will have thread safe non-deterministic access to the queue.
+ * - Concurrent access from other tasks will be blocked by the xTaskSpinlock
+ * - Concurrent access from ISRs will be pended
+ *
+ * When the tasks unlocks the queue, all pended access attempts are handled.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    #define queueLOCK( pxQueue )                                                \
+    do {                                                                        \
+        vTaskPreemptionDisable( NULL );                                         \
+        prvLockQueue( ( pxQueue ) );                                            \
+        portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) );     \
+    } while( 0 )
+    #define queueUNLOCK( pxQueue, xYieldAPI )                                   \
+    do {                                                                        \
+        prvUnlockQueue( ( pxQueue ) );                                          \
+        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \
+        vTaskPreemptionEnable( NULL );                                          \
+    } while( 0 )
+#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+    #define queueLOCK( pxQueue )                                                \
+    do {                                                                        \
+        vTaskSuspendAll();                                                      \
+        prvLockQueue( ( pxQueue ) );                                            \
+    } while( 0 )
+    #define queueUNLOCK( pxQueue, xYieldAPI )                                   \
+    do {                                                                        \
+        BaseType_t xAlreadyYielded;                                             \
+        prvUnlockQueue( ( pxQueue ) );                                          \
+        xAlreadyYielded = xTaskResumeAll();                                     \
+        if( ( xAlreadyYielded == pdFALSE ) && ( ( xYieldAPI ) == pdTRUE ) )     \
+        {                                                                       \
+            taskYIELD_WITHIN_API();                                             \
+        }                                                                       \
+        else                                                                    \
+        {                                                                       \
+            mtCOVERAGE_TEST_MARKER();                                           \
+        }                                                                       \
+    } while( 0 )
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -310,12 +401,22 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
    configASSERT( pxQueue );

+   #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+   {
+       if( xNewQueue == pdTRUE )
+       {
+           portINIT_SPINLOCK( &( pxQueue->xTaskSpinlock ) );
+           portINIT_SPINLOCK( &( pxQueue->xISRSpinlock ) );
+       }
+   }
+   #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
    if( ( pxQueue != NULL ) &&
        ( pxQueue->uxLength >= 1U ) &&
        /* Check for multiplication overflow. */
        ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
    {
-       taskENTER_CRITICAL();
+       queueENTER_CRITICAL( pxQueue );
        {
            pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;

@@ -354,7 +455,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
            }
        }
-       taskEXIT_CRITICAL();
+       queueEXIT_CRITICAL( pxQueue );
    }
    else
    {
@@ -703,7 +804,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
         * calling task is the mutex holder, but not a good way of determining the
         * identity of the mutex holder, as the holder may change between the
         * following critical section exiting and the function returning. */
-       taskENTER_CRITICAL();
+       queueENTER_CRITICAL( pxSemaphore );
        {
            if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
            {

@@ -714,7 +815,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                pxReturn = NULL;
            }
        }
-       taskEXIT_CRITICAL();
+       queueEXIT_CRITICAL( pxSemaphore );

        traceRETURN_xQueueGetMutexHolder( pxReturn );
@@ -968,7 +1069,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,

    for( ; ; )
    {
-       taskENTER_CRITICAL();
+       queueENTER_CRITICAL( pxQueue );
        {
            /* Is there room on the queue now? The running task must be the
             * highest priority task wanting to access the queue. If the head item

@@ -1074,7 +1175,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                }
                #endif /* configUSE_QUEUE_SETS */

-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                traceRETURN_xQueueGenericSend( pdPASS );

@@ -1086,7 +1187,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
            {
                /* The queue was full and no block time is specified (or
                 * the block time has expired) so leave now. */
-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                /* Return to the original privilege level before exiting
                 * the function. */

@@ -1109,13 +1210,12 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                }
            }
        }
-       taskEXIT_CRITICAL();
+       queueEXIT_CRITICAL( pxQueue );

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */
-       vTaskSuspendAll();
-       prvLockQueue( pxQueue );
+       queueLOCK( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1125,35 +1225,18 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

-               /* Unlocking the queue means queue events can effect the
-                * event list. It is possible that interrupts occurring now
-                * remove this task from the event list again - but as the
-                * scheduler is suspended the task will go onto the pending
-                * ready list instead of the actual ready list. */
-               prvUnlockQueue( pxQueue );
-
-               /* Resuming the scheduler will move tasks from the pending
-                * ready list into the ready list - so it is feasible that this
-                * task is already in the ready list before it yields - in which
-                * case the yield will not cause a context switch unless there
-                * is also a higher priority task in the pending ready list. */
-               if( xTaskResumeAll() == pdFALSE )
-               {
-                   taskYIELD_WITHIN_API();
-               }
+               queueUNLOCK( pxQueue, pdTRUE );
            }
            else
            {
                /* Try again. */
-               prvUnlockQueue( pxQueue );
-               ( void ) xTaskResumeAll();
+               queueUNLOCK( pxQueue, pdFALSE );
            }
        }
        else
        {
            /* The timeout has expired. */
-           prvUnlockQueue( pxQueue );
-           ( void ) xTaskResumeAll();
+           queueUNLOCK( pxQueue, pdFALSE );

            traceQUEUE_SEND_FAILED( pxQueue );
            traceRETURN_xQueueGenericSend( errQUEUE_FULL );
@@ -1203,7 +1286,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
        /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
        /* coverity[misra_c_2012_directive_4_7_violation] */
-       uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+       uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
        {
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {

@@ -1328,7 +1411,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                xReturn = errQUEUE_FULL;
            }
        }
-       taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+       queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );

        traceRETURN_xQueueGenericSendFromISR( xReturn );

@@ -1381,7 +1464,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
        /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
        /* coverity[misra_c_2012_directive_4_7_violation] */
-       uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+       uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@@ -1501,7 +1584,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                xReturn = errQUEUE_FULL;
            }
        }
-       taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+       queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );

        traceRETURN_xQueueGiveFromISR( xReturn );
@@ -1535,7 +1618,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,

    for( ; ; )
    {
-       taskENTER_CRITICAL();
+       queueENTER_CRITICAL( pxQueue );
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@@ -1567,7 +1650,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                    mtCOVERAGE_TEST_MARKER();
                }

-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                traceRETURN_xQueueReceive( pdPASS );

@@ -1579,7 +1662,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
            {
                /* The queue was empty and no block time is specified (or
                 * the block time has expired) so leave now. */
-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                traceQUEUE_RECEIVE_FAILED( pxQueue );
                traceRETURN_xQueueReceive( errQUEUE_EMPTY );

@@ -1600,13 +1683,12 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                }
            }
        }
-       taskEXIT_CRITICAL();
+       queueEXIT_CRITICAL( pxQueue );

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */
-       vTaskSuspendAll();
-       prvLockQueue( pxQueue );
+       queueLOCK( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1617,31 +1699,20 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
-               prvUnlockQueue( pxQueue );
-
-               if( xTaskResumeAll() == pdFALSE )
-               {
-                   taskYIELD_WITHIN_API();
-               }
-               else
-               {
-                   mtCOVERAGE_TEST_MARKER();
-               }
+               queueUNLOCK( pxQueue, pdTRUE );
            }
            else
            {
                /* The queue contains data again. Loop back to try and read the
                 * data. */
-               prvUnlockQueue( pxQueue );
-               ( void ) xTaskResumeAll();
+               queueUNLOCK( pxQueue, pdFALSE );
            }
        }
        else
        {
            /* Timed out. If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
-           prvUnlockQueue( pxQueue );
-           ( void ) xTaskResumeAll();
+           queueUNLOCK( pxQueue, pdFALSE );

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
@@ -1688,7 +1759,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,

    for( ; ; )
    {
-       taskENTER_CRITICAL();
+       queueENTER_CRITICAL( pxQueue );
        {
            /* Semaphores are queues with an item size of 0, and where the
             * number of messages in the queue is the semaphore's count value. */

@@ -1737,7 +1808,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                    mtCOVERAGE_TEST_MARKER();
                }

-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                traceRETURN_xQueueSemaphoreTake( pdPASS );

@@ -1749,7 +1820,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
            {
                /* The semaphore count was 0 and no block time is specified
                 * (or the block time has expired) so exit now. */
-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                traceQUEUE_RECEIVE_FAILED( pxQueue );
                traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );

@@ -1770,13 +1841,12 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                }
            }
        }
-       taskEXIT_CRITICAL();
+       queueEXIT_CRITICAL( pxQueue );

        /* Interrupts and other tasks can give to and take from the semaphore
         * now the critical section has been exited. */
-       vTaskSuspendAll();
-       prvLockQueue( pxQueue );
+       queueLOCK( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1803,30 +1873,19 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
-               prvUnlockQueue( pxQueue );
-
-               if( xTaskResumeAll() == pdFALSE )
-               {
-                   taskYIELD_WITHIN_API();
-               }
-               else
-               {
-                   mtCOVERAGE_TEST_MARKER();
-               }
+               queueUNLOCK( pxQueue, pdTRUE );
            }
            else
            {
                /* There was no timeout and the semaphore count was not 0, so
                 * attempt to take the semaphore again. */
-               prvUnlockQueue( pxQueue );
-               ( void ) xTaskResumeAll();
+               queueUNLOCK( pxQueue, pdFALSE );
            }
        }
        else
        {
            /* Timed out. */
-           prvUnlockQueue( pxQueue );
-           ( void ) xTaskResumeAll();
+           queueUNLOCK( pxQueue, pdFALSE );

            /* If the semaphore count is 0 exit now as the timeout has
             * expired. Otherwise return to attempt to take the semaphore that is
@@ -1841,7 +1900,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
             * test the mutex type again to check it is actually a mutex. */
            if( xInheritanceOccurred != pdFALSE )
            {
-               taskENTER_CRITICAL();
+               queueENTER_CRITICAL( pxQueue );
                {
                    UBaseType_t uxHighestWaitingPriority;

@@ -1861,7 +1920,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                    /* coverity[overrun] */
                    vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
                }
-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );
            }
        }
        #endif /* configUSE_MUTEXES */
@@ -1907,7 +1966,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,

    for( ; ; )
    {
-       taskENTER_CRITICAL();
+       queueENTER_CRITICAL( pxQueue );
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@@ -1945,7 +2004,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                    mtCOVERAGE_TEST_MARKER();
                }

-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                traceRETURN_xQueuePeek( pdPASS );

@@ -1957,7 +2016,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
            {
                /* The queue was empty and no block time is specified (or
                 * the block time has expired) so leave now. */
-               taskEXIT_CRITICAL();
+               queueEXIT_CRITICAL( pxQueue );

                traceQUEUE_PEEK_FAILED( pxQueue );
                traceRETURN_xQueuePeek( errQUEUE_EMPTY );

@@ -1979,13 +2038,12 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                }
            }
        }
-       taskEXIT_CRITICAL();
+       queueEXIT_CRITICAL( pxQueue );

        /* Interrupts and other tasks can send to and receive from the queue
         * now that the critical section has been exited. */
-       vTaskSuspendAll();
-       prvLockQueue( pxQueue );
+       queueLOCK( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1996,31 +2054,20 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
            {
                traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
-               prvUnlockQueue( pxQueue );
-
-               if( xTaskResumeAll() == pdFALSE )
-               {
-                   taskYIELD_WITHIN_API();
-               }
-               else
-               {
-                   mtCOVERAGE_TEST_MARKER();
-               }
+               queueUNLOCK( pxQueue, pdTRUE );
            }
            else
            {
                /* There is data in the queue now, so don't enter the blocked
                 * state, instead return to try and obtain the data. */
-               prvUnlockQueue( pxQueue );
-               ( void ) xTaskResumeAll();
+               queueUNLOCK( pxQueue, pdFALSE );
            }
        }
        else
        {
            /* The timeout has expired. If there is still no data in the queue
             * exit, otherwise go back and try to read the data again. */
-           prvUnlockQueue( pxQueue );
-           ( void ) xTaskResumeAll();
+           queueUNLOCK( pxQueue, pdFALSE );

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
@@ -2070,7 +2117,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
        /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
        /* coverity[misra_c_2012_directive_4_7_violation] */
-       uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+       uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@@ -2130,7 +2177,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
                traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
            }
        }
-       taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+       queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );

        traceRETURN_xQueueReceiveFromISR( xReturn );

@@ -2171,7 +2218,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
        /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
        /* coverity[misra_c_2012_directive_4_7_violation] */
-       uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+       uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
        {
            /* Cannot block in an ISR, so check there is data available. */
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )

@@ -2192,7 +2239,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
                traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
            }
        }
-       taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+       queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );

        traceRETURN_xQueuePeekFromISR( xReturn );
@@ -2208,11 +2255,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
    configASSERT( xQueue );

-   portBASE_TYPE_ENTER_CRITICAL();
+   queueENTER_CRITICAL( xQueue );
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
-   portBASE_TYPE_EXIT_CRITICAL();
+   queueEXIT_CRITICAL( xQueue );

    traceRETURN_uxQueueMessagesWaiting( uxReturn );

@@ -2229,11 +2276,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
    configASSERT( pxQueue );

-   portBASE_TYPE_ENTER_CRITICAL();
+   queueENTER_CRITICAL( pxQueue );
    {
        uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
    }
-   portBASE_TYPE_EXIT_CRITICAL();
+   queueEXIT_CRITICAL( pxQueue );

    traceRETURN_uxQueueSpacesAvailable( uxReturn );
@@ -2499,13 +2546,14 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
static void prvUnlockQueue( Queue_t * const pxQueue )
{
-   /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
+   /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED WHEN portUSING_GRANULAR_LOCKS IS 0.
+    * IT MUST BE CALLED WITH TASK PREEMTION DISABLED WHEN portUSING_GRANULAR_LOCKS IS 1. */

    /* The lock counts contains the number of extra data items placed or
     * removed from the queue while the queue was locked. When a queue is
     * locked items can be added or removed, but the event lists cannot be
     * updated. */
-   taskENTER_CRITICAL();
+   queueENTER_CRITICAL( pxQueue );
    {
        int8_t cTxLock = pxQueue->cTxLock;

@@ -2583,10 +2631,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
        pxQueue->cTxLock = queueUNLOCKED;
    }
-   taskEXIT_CRITICAL();
+   queueEXIT_CRITICAL( pxQueue );

    /* Do the same for the Rx lock. */
-   taskENTER_CRITICAL();
+   queueENTER_CRITICAL( pxQueue );
    {
        int8_t cRxLock = pxQueue->cRxLock;
@@ -2613,15 +2661,115 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
        pxQueue->cRxLock = queueUNLOCKED;
    }
-   taskEXIT_CRITICAL();
+   queueEXIT_CRITICAL( pxQueue );
}
/*-----------------------------------------------------------*/

+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vQueueEnterCritical( const Queue_t * pxQueue )
+    {
+        portDISABLE_INTERRUPTS();
+        {
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+            /* Task spinlock is always taken first */
+            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
+            /* Take the ISR spinlock next */
+            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+            /* Increment the critical nesting count */
+            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue )
+    {
+        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+        /* Take the ISR spinlock */
+        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+        /* Increment the critical nesting count */
+        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+        return uxSavedInterruptStatus;
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vQueueExitCritical( const Queue_t * pxQueue )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+        /* Get the xYieldPending status inside the critical section. */
+        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+        /* Release the task spinlock */
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portENABLE_INTERRUPTS();
+
+            if( xYieldCurrentTask != pdFALSE )
+            {
+                portYIELD();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                           const Queue_t * pxQueue )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
    BaseType_t xReturn;

-   taskENTER_CRITICAL();
+   queueENTER_CRITICAL( pxQueue );
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {

@@ -2632,7 +2780,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
            xReturn = pdFALSE;
        }
    }
-   taskEXIT_CRITICAL();
+   queueEXIT_CRITICAL( pxQueue );

    return xReturn;
}

@@ -2666,7 +2814,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
{
    BaseType_t xReturn;

-   taskENTER_CRITICAL();
+   queueENTER_CRITICAL( pxQueue );
    {
        if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
        {

@@ -2677,7 +2825,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
            xReturn = pdFALSE;
        }
    }
-   taskEXIT_CRITICAL();
+   queueEXIT_CRITICAL( pxQueue );

    return xReturn;
}
@@ -3157,7 +3305,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
     * time a yield will be performed. If an item is added to the queue while
     * the queue is locked, and the calling task blocks on the queue, then the
     * calling task will be immediately unblocked when the queue is unlocked. */
-   prvLockQueue( pxQueue );
+   queueLOCK( pxQueue );

    if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
    {

@@ -3169,7 +3317,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
        mtCOVERAGE_TEST_MARKER();
    }

-   prvUnlockQueue( pxQueue );
+   queueUNLOCK( pxQueue, pdFALSE );

    traceRETURN_vQueueWaitForMessageRestricted();
}
@@ -3221,17 +3369,18 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                               QueueSetHandle_t xQueueSet )
    {
        BaseType_t xReturn;
+       Queue_t * const pxQueue = xQueueOrSemaphore;

        traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );

-       taskENTER_CRITICAL();
+       queueENTER_CRITICAL( pxQueue );
        {
-           if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
+           if( pxQueue->pxQueueSetContainer != NULL )
            {
                /* Cannot add a queue/semaphore to more than one queue set. */
                xReturn = pdFAIL;
            }
-           else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
+           else if( pxQueue->uxMessagesWaiting != ( UBaseType_t ) 0 )
            {
                /* Cannot add a queue/semaphore to a queue set if there are already
                 * items in the queue/semaphore. */

@@ -3239,11 +3388,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
            }
            else
            {
-               ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
+               pxQueue->pxQueueSetContainer = xQueueSet;
                xReturn = pdPASS;
            }
        }
-       taskEXIT_CRITICAL();
+       queueEXIT_CRITICAL( pxQueue );

        traceRETURN_xQueueAddToSet( xReturn );

@@ -3277,12 +3426,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
        }
        else
        {
-           taskENTER_CRITICAL();
+           queueENTER_CRITICAL( pxQueueOrSemaphore );
            {
                /* The queue is no longer contained in the set. */
                pxQueueOrSemaphore->pxQueueSetContainer = NULL;
            }
-           taskEXIT_CRITICAL();
+           queueEXIT_CRITICAL( pxQueueOrSemaphore );

            xReturn = pdPASS;
        }