Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git
Synced 2025-04-19 21:11:57 -04:00
change(freertos/smp): Update queue.c locking
Updated queue.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock to the queue data structure.
- Replaced critical section macros with data group critical section macros, e.g. taskENTER/EXIT_CRITICAL() and taskENTER/EXIT_CRITICAL_FROM_ISR() become queueENTER/EXIT_CRITICAL() and queueENTER/EXIT_CRITICAL_FROM_ISR().
- Added vQueueEnterCritical(), uxQueueEnterCriticalFromISR(), vQueueExitCritical() and vQueueExitCriticalFromISR(), which back the data group critical section macros.
- Added queueLOCK() and queueUNLOCK() as the granular locking equivalents of prvLockQueue() and prvUnlockQueue() respectively.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
This commit is contained in:
parent: 6ac7cc844b
commit: f7d7a117f1
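To make the shape of the change concrete, here is a minimal sketch (not part of the commit) of the data group critical section pattern as it reads inside queue.c after this change. The helper name uxExampleDepth is hypothetical; Queue_t, queueENTER_CRITICAL() and queueEXIT_CRITICAL() are private to queue.c, so this only illustrates kernel-internal usage:

```c
/* Sketch only: per-queue critical section instead of the old global one.
 * Assumes portUSING_GRANULAR_LOCKS == 1 and configNUMBER_OF_CORES > 1. */
static UBaseType_t uxExampleDepth( Queue_t * pxQueue ) /* hypothetical helper */
{
    UBaseType_t uxCount;

    /* Disables interrupts, then takes pxQueue->xTaskSpinlock followed by
     * pxQueue->xISRSpinlock, so only this core can touch this queue. */
    queueENTER_CRITICAL( pxQueue );
    {
        uxCount = pxQueue->uxMessagesWaiting;
    }
    queueEXIT_CRITICAL( pxQueue );

    return uxCount;
}
```

Two different queues can now be accessed from different cores concurrently, where the old kernel-wide critical section would have serialized them.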
@@ -3318,6 +3318,10 @@ typedef struct xSTATIC_QUEUE
         UBaseType_t uxDummy8;
         uint8_t ucDummy9;
     #endif
+
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        portSPINLOCK_TYPE xDummySpinlock[ 2 ];
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } StaticQueue_t;
 typedef StaticQueue_t StaticSemaphore_t;
 
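The dummy spinlock array mirrors the two real spinlocks added to xQUEUE below, keeping StaticQueue_t the same size as the private structure. A minimal sketch of the size check static creation relies on (this mirrors the existing volatile-size pattern in xQueueGenericCreateStatic(); it is not part of this diff):

```c
/* Sketch only: StaticQueue_t must stay in lock-step with the private
 * Queue_t, otherwise statically allocated queues would be undersized.
 * queue.c guards this with a check along these lines. */
volatile size_t xSize = sizeof( StaticQueue_t );

configASSERT( xSize == sizeof( Queue_t ) );
```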
queue.c (397 changed lines)
@@ -133,6 +133,11 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
         UBaseType_t uxQueueNumber;
         uint8_t ucQueueType;
     #endif
+
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        portSPINLOCK_TYPE xTaskSpinlock;
+        portSPINLOCK_TYPE xISRSpinlock;
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } xQUEUE;
 
 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
@@ -251,12 +256,55 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
 #endif
 /*-----------------------------------------------------------*/
+
+/*
+ * Macros to mark the start and end of a critical code region.
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define queueENTER_CRITICAL( pxQueue )                                    vQueueEnterCritical( pxQueue )
+    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )                           uxQueueEnterCriticalFromISR( pxQueue )
+    #define queueEXIT_CRITICAL( pxQueue )                                     vQueueExitCritical( pxQueue )
+    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    vQueueExitCriticalFromISR( uxSavedInterruptStatus, pxQueue )
+#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+    #define queueENTER_CRITICAL( pxQueue )                                    taskENTER_CRITICAL();
+    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )                           taskENTER_CRITICAL_FROM_ISR();
+    #define queueEXIT_CRITICAL( pxQueue )                                     taskEXIT_CRITICAL();
+    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+
+/*
+ * Enters a critical section for a queue. Disables interrupts and takes
+ * both task and ISR spinlocks to ensure thread safety.
+ */
+    static void vQueueEnterCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Enters a critical section for a queue from an ISR context. Takes the ISR
+ * spinlock and returns the previous interrupt state.
+ */
+    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Exits a critical section for a queue. Releases spinlocks in reverse order
+ * and conditionally re-enables interrupts and yields if required.
+ */
+    static void vQueueExitCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Exits a critical section for a queue from an ISR context. Releases the ISR
+ * spinlock and conditionally restores the previous interrupt state.
+ */
+    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                           const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 
 /*
  * Macro to mark a queue as locked. Locking a queue prevents an ISR from
  * accessing the queue event lists.
  */
 #define prvLockQueue( pxQueue )                            \
-    taskENTER_CRITICAL();                                  \
+    queueENTER_CRITICAL( pxQueue );                        \
     {                                                      \
         if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
         {                                                  \
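As a usage sketch (queue.c-internal, not part of this diff): the _FROM_ISR variants hand back the previous interrupt mask, which the caller must pass to the matching exit macro. The helper below is hypothetical; its body mirrors how xQueuePeekFromISR() uses the pair later in this diff.

```c
/* Sketch only: hypothetical queue.c-internal helper showing the
 * enter/exit-from-ISR pairing. */
static BaseType_t prvExampleIsrHasData( Queue_t * pxQueue )
{
    UBaseType_t uxSavedInterruptStatus;
    BaseType_t xHasData;

    /* Takes only the ISR spinlock and masks interrupts, returning the
     * previous mask so nested use is safe. */
    uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
    {
        xHasData = ( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) ? pdTRUE : pdFALSE;
    }
    queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );

    return xHasData;
}
```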
@@ -267,7 +315,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
         }                                                  \
     }                                                      \
-    taskEXIT_CRITICAL()
+    queueEXIT_CRITICAL( pxQueue )
 
 /*
  * Macro to increment cTxLock member of the queue data structure. It is
@@ -298,6 +346,49 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
         }                                                                     \
     } while( 0 )
+
+/*
+ * Macros used to lock and unlock a queue. When a task locks a queue, the
+ * task will have thread safe non-deterministic access to the queue.
+ * - Concurrent access from other tasks will be blocked by the xTaskSpinlock
+ * - Concurrent access from ISRs will be pended
+ *
+ * When the task unlocks the queue, all pended access attempts are handled.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    #define queueLOCK( pxQueue )                                                \
+    do {                                                                        \
+        vTaskPreemptionDisable( NULL );                                         \
+        prvLockQueue( ( pxQueue ) );                                            \
+        portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) );     \
+    } while( 0 )
+    #define queueUNLOCK( pxQueue, xYieldAPI )                                   \
+    do {                                                                        \
+        prvUnlockQueue( ( pxQueue ) );                                          \
+        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \
+        vTaskPreemptionEnable( NULL );                                          \
+    } while( 0 )
+#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+    #define queueLOCK( pxQueue )                                                \
+    do {                                                                        \
+        vTaskSuspendAll();                                                      \
+        prvLockQueue( ( pxQueue ) );                                            \
+    } while( 0 )
+    #define queueUNLOCK( pxQueue, xYieldAPI )                                   \
+    do {                                                                        \
+        BaseType_t xAlreadyYielded;                                             \
+        prvUnlockQueue( ( pxQueue ) );                                          \
+        xAlreadyYielded = xTaskResumeAll();                                     \
+        if( ( xAlreadyYielded == pdFALSE ) && ( ( xYieldAPI ) == pdTRUE ) )     \
+        {                                                                       \
+            taskYIELD_WITHIN_API();                                             \
+        }                                                                       \
+        else                                                                    \
+        {                                                                       \
+            mtCOVERAGE_TEST_MARKER();                                           \
+        }                                                                       \
+    } while( 0 )
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 /*-----------------------------------------------------------*/
 
 BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
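A sketch of the calling pattern these macros expect (not part of the diff; compare the xQueueGenericSend and xQueueReceive changes below). The helper name is hypothetical; xYieldAPI selects whether queueUNLOCK() should yield when the unlock itself did not already cause one:

```c
/* Sketch only: shape of a blocking queue API after this commit. */
static void prvExampleBlockUntilTimeout( Queue_t * pxQueue,
                                         TimeOut_t * pxTimeOut,
                                         TickType_t xTicksToWait )
{
    /* Granular-locks SMP build: disables preemption, locks the queue and
     * takes xTaskSpinlock. Single core: suspends the scheduler instead. */
    queueLOCK( pxQueue );

    if( xTaskCheckForTimeOut( pxTimeOut, &xTicksToWait ) == pdFALSE )
    {
        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );

        /* pdTRUE: request a yield while unlocking so the task actually
         * blocks on the event list. */
        queueUNLOCK( pxQueue, pdTRUE );
    }
    else
    {
        /* Timed out: unlock without requesting a yield. */
        queueUNLOCK( pxQueue, pdFALSE );
    }
}
```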
@@ -310,12 +401,22 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
 
     configASSERT( pxQueue );
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    {
+        if( xNewQueue == pdTRUE )
+        {
+            portINIT_SPINLOCK( &( pxQueue->xTaskSpinlock ) );
+            portINIT_SPINLOCK( &( pxQueue->xISRSpinlock ) );
+        }
+    }
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     if( ( pxQueue != NULL ) &&
         ( pxQueue->uxLength >= 1U ) &&
         /* Check for multiplication overflow. */
         ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
     {
-        taskENTER_CRITICAL();
+        queueENTER_CRITICAL( pxQueue );
         {
             pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
             pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
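For orientation (not part of the diff): the spinlocks are initialised only on the xNewQueue == pdTRUE path, which queue creation takes; an application-level reset leaves live spinlocks alone. A sketch, assuming the standard xQueueGenericReset() call sites:

```c
/* Sketch only: the two ways xQueueGenericReset() is reached. */
void vExampleResetPaths( QueueHandle_t xQueue ) /* hypothetical */
{
    /* Creation: prvInitialiseNewQueue() resets with xNewQueue == pdTRUE,
     * so the spinlocks above are initialised exactly once per queue. */
    ( void ) xQueueGenericReset( xQueue, pdTRUE );

    /* Application reset (what the xQueueReset() macro expands to):
     * xNewQueue == pdFALSE, so the spinlocks are not re-initialised. */
    ( void ) xQueueGenericReset( xQueue, pdFALSE );
}
```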
@@ -354,7 +455,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
             }
         }
-        taskEXIT_CRITICAL();
+        queueEXIT_CRITICAL( pxQueue );
     }
     else
     {
@@ -703,7 +804,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
      * calling task is the mutex holder, but not a good way of determining the
      * identity of the mutex holder, as the holder may change between the
      * following critical section exiting and the function returning. */
-    taskENTER_CRITICAL();
+    queueENTER_CRITICAL( pxSemaphore );
     {
         if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
         {
@@ -714,7 +815,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             pxReturn = NULL;
         }
     }
-    taskEXIT_CRITICAL();
+    queueEXIT_CRITICAL( pxSemaphore );
 
     traceRETURN_xQueueGetMutexHolder( pxReturn );
 
@@ -968,7 +1069,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        queueENTER_CRITICAL( pxQueue );
         {
             /* Is there room on the queue now? The running task must be the
              * highest priority task wanting to access the queue. If the head item
@@ -1074,7 +1175,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 }
                 #endif /* configUSE_QUEUE_SETS */
 
-                taskEXIT_CRITICAL();
+                queueEXIT_CRITICAL( pxQueue );
 
                 traceRETURN_xQueueGenericSend( pdPASS );
 
@@ -1086,7 +1187,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 {
                     /* The queue was full and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    queueEXIT_CRITICAL( pxQueue );
 
                     /* Return to the original privilege level before exiting
                      * the function. */
@@ -1109,13 +1210,12 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        queueEXIT_CRITICAL( pxQueue );
 
         /* Interrupts and other tasks can send to and receive from the queue
          * now the critical section has been exited. */
 
-        vTaskSuspendAll();
-        prvLockQueue( pxQueue );
+        queueLOCK( pxQueue );
 
         /* Update the timeout state to see if it has expired yet. */
         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1125,35 +1225,18 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
 
-                /* Unlocking the queue means queue events can effect the
-                 * event list. It is possible that interrupts occurring now
-                 * remove this task from the event list again - but as the
-                 * scheduler is suspended the task will go onto the pending
-                 * ready list instead of the actual ready list. */
-                prvUnlockQueue( pxQueue );
-
-                /* Resuming the scheduler will move tasks from the pending
-                 * ready list into the ready list - so it is feasible that this
-                 * task is already in the ready list before it yields - in which
-                 * case the yield will not cause a context switch unless there
-                 * is also a higher priority task in the pending ready list. */
-                if( xTaskResumeAll() == pdFALSE )
-                {
-                    taskYIELD_WITHIN_API();
-                }
+                queueUNLOCK( pxQueue, pdTRUE );
             }
             else
             {
                 /* Try again. */
-                prvUnlockQueue( pxQueue );
-                ( void ) xTaskResumeAll();
+                queueUNLOCK( pxQueue, pdFALSE );
             }
         }
         else
         {
             /* The timeout has expired. */
-            prvUnlockQueue( pxQueue );
-            ( void ) xTaskResumeAll();
+            queueUNLOCK( pxQueue, pdFALSE );
 
             traceQUEUE_SEND_FAILED( pxQueue );
             traceRETURN_xQueueGenericSend( errQUEUE_FULL );
@@ -1203,7 +1286,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
     /* MISRA Ref 4.7.1 [Return value shall be checked] */
     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
     /* coverity[misra_c_2012_directive_4_7_violation] */
-    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
     {
         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
         {
@@ -1328,7 +1411,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
             xReturn = errQUEUE_FULL;
         }
     }
-    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+    queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
 
     traceRETURN_xQueueGenericSendFromISR( xReturn );
 
@@ -1381,7 +1464,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
     /* MISRA Ref 4.7.1 [Return value shall be checked] */
     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
     /* coverity[misra_c_2012_directive_4_7_violation] */
-    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -1501,7 +1584,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
             xReturn = errQUEUE_FULL;
         }
     }
-    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+    queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
 
     traceRETURN_xQueueGiveFromISR( xReturn );
 
@@ -1535,7 +1618,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
 
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        queueENTER_CRITICAL( pxQueue );
         {
             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -1567,7 +1650,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
 
-                taskEXIT_CRITICAL();
+                queueEXIT_CRITICAL( pxQueue );
 
                 traceRETURN_xQueueReceive( pdPASS );
 
@@ -1579,7 +1662,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                 {
                     /* The queue was empty and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    queueEXIT_CRITICAL( pxQueue );
 
                     traceQUEUE_RECEIVE_FAILED( pxQueue );
                     traceRETURN_xQueueReceive( errQUEUE_EMPTY );
@@ -1600,13 +1683,12 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        queueEXIT_CRITICAL( pxQueue );
 
         /* Interrupts and other tasks can send to and receive from the queue
          * now the critical section has been exited. */
 
-        vTaskSuspendAll();
-        prvLockQueue( pxQueue );
+        queueLOCK( pxQueue );
 
         /* Update the timeout state to see if it has expired yet. */
         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1617,31 +1699,20 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
             {
                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
-                prvUnlockQueue( pxQueue );
-
-                if( xTaskResumeAll() == pdFALSE )
-                {
-                    taskYIELD_WITHIN_API();
-                }
-                else
-                {
-                    mtCOVERAGE_TEST_MARKER();
-                }
+                queueUNLOCK( pxQueue, pdTRUE );
             }
             else
             {
                 /* The queue contains data again. Loop back to try and read the
                  * data. */
-                prvUnlockQueue( pxQueue );
-                ( void ) xTaskResumeAll();
+                queueUNLOCK( pxQueue, pdFALSE );
             }
         }
         else
         {
             /* Timed out. If there is no data in the queue exit, otherwise loop
              * back and attempt to read the data. */
-            prvUnlockQueue( pxQueue );
-            ( void ) xTaskResumeAll();
+            queueUNLOCK( pxQueue, pdFALSE );
 
             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
             {
@@ -1688,7 +1759,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
 
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        queueENTER_CRITICAL( pxQueue );
         {
             /* Semaphores are queues with an item size of 0, and where the
              * number of messages in the queue is the semaphore's count value. */
@@ -1737,7 +1808,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
 
-                taskEXIT_CRITICAL();
+                queueEXIT_CRITICAL( pxQueue );
 
                 traceRETURN_xQueueSemaphoreTake( pdPASS );
 
@@ -1749,7 +1820,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                 {
                     /* The semaphore count was 0 and no block time is specified
                      * (or the block time has expired) so exit now. */
-                    taskEXIT_CRITICAL();
+                    queueEXIT_CRITICAL( pxQueue );
 
                     traceQUEUE_RECEIVE_FAILED( pxQueue );
                     traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
@@ -1770,13 +1841,12 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        queueEXIT_CRITICAL( pxQueue );
 
         /* Interrupts and other tasks can give to and take from the semaphore
          * now the critical section has been exited. */
 
-        vTaskSuspendAll();
-        prvLockQueue( pxQueue );
+        queueLOCK( pxQueue );
 
         /* Update the timeout state to see if it has expired yet. */
         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1803,30 +1873,19 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
             #endif /* if ( configUSE_MUTEXES == 1 ) */
 
             vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
-            prvUnlockQueue( pxQueue );
-
-            if( xTaskResumeAll() == pdFALSE )
-            {
-                taskYIELD_WITHIN_API();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
+            queueUNLOCK( pxQueue, pdTRUE );
         }
         else
        {
             /* There was no timeout and the semaphore count was not 0, so
              * attempt to take the semaphore again. */
-            prvUnlockQueue( pxQueue );
-            ( void ) xTaskResumeAll();
+            queueUNLOCK( pxQueue, pdFALSE );
         }
     }
     else
     {
         /* Timed out. */
-        prvUnlockQueue( pxQueue );
-        ( void ) xTaskResumeAll();
+        queueUNLOCK( pxQueue, pdFALSE );
 
         /* If the semaphore count is 0 exit now as the timeout has
          * expired. Otherwise return to attempt to take the semaphore that is
@@ -1841,7 +1900,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
              * test the mutex type again to check it is actually a mutex. */
             if( xInheritanceOccurred != pdFALSE )
             {
-                taskENTER_CRITICAL();
+                queueENTER_CRITICAL( pxQueue );
                 {
                     UBaseType_t uxHighestWaitingPriority;
 
@@ -1861,7 +1920,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                     /* coverity[overrun] */
                     vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
                 }
-                taskEXIT_CRITICAL();
+                queueEXIT_CRITICAL( pxQueue );
             }
         }
     #endif /* configUSE_MUTEXES */
@@ -1907,7 +1966,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
 
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        queueENTER_CRITICAL( pxQueue );
         {
             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -1945,7 +2004,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
 
-                taskEXIT_CRITICAL();
+                queueEXIT_CRITICAL( pxQueue );
 
                 traceRETURN_xQueuePeek( pdPASS );
 
@@ -1957,7 +2016,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                 {
                     /* The queue was empty and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    queueEXIT_CRITICAL( pxQueue );
 
                     traceQUEUE_PEEK_FAILED( pxQueue );
                     traceRETURN_xQueuePeek( errQUEUE_EMPTY );
@@ -1979,13 +2038,12 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        queueEXIT_CRITICAL( pxQueue );
 
         /* Interrupts and other tasks can send to and receive from the queue
          * now that the critical section has been exited. */
 
-        vTaskSuspendAll();
-        prvLockQueue( pxQueue );
+        queueLOCK( pxQueue );
 
         /* Update the timeout state to see if it has expired yet. */
         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@@ -1996,31 +2054,20 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
             {
                 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
-                prvUnlockQueue( pxQueue );
-
-                if( xTaskResumeAll() == pdFALSE )
-                {
-                    taskYIELD_WITHIN_API();
-                }
-                else
-                {
-                    mtCOVERAGE_TEST_MARKER();
-                }
+                queueUNLOCK( pxQueue, pdTRUE );
             }
             else
             {
                 /* There is data in the queue now, so don't enter the blocked
                  * state, instead return to try and obtain the data. */
-                prvUnlockQueue( pxQueue );
-                ( void ) xTaskResumeAll();
+                queueUNLOCK( pxQueue, pdFALSE );
             }
         }
         else
        {
             /* The timeout has expired. If there is still no data in the queue
              * exit, otherwise go back and try to read the data again. */
-            prvUnlockQueue( pxQueue );
-            ( void ) xTaskResumeAll();
+            queueUNLOCK( pxQueue, pdFALSE );
 
             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
             {
@@ -2070,7 +2117,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
     /* MISRA Ref 4.7.1 [Return value shall be checked] */
     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
     /* coverity[misra_c_2012_directive_4_7_violation] */
-    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -2130,7 +2177,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
         }
     }
-    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+    queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
 
     traceRETURN_xQueueReceiveFromISR( xReturn );
 
@@ -2171,7 +2218,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
     /* MISRA Ref 4.7.1 [Return value shall be checked] */
     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
     /* coverity[misra_c_2012_directive_4_7_violation] */
-    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
     {
         /* Cannot block in an ISR, so check there is data available. */
         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@@ -2192,7 +2239,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
         }
     }
-    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+    queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
 
     traceRETURN_xQueuePeekFromISR( xReturn );
 
@@ -2208,11 +2255,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
 
     configASSERT( xQueue );
 
-    portBASE_TYPE_ENTER_CRITICAL();
+    queueENTER_CRITICAL( xQueue );
     {
         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
     }
-    portBASE_TYPE_EXIT_CRITICAL();
+    queueEXIT_CRITICAL( xQueue );
 
     traceRETURN_uxQueueMessagesWaiting( uxReturn );
 
@@ -2229,11 +2276,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
 
     configASSERT( pxQueue );
 
-    portBASE_TYPE_ENTER_CRITICAL();
+    queueENTER_CRITICAL( pxQueue );
     {
         uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
     }
-    portBASE_TYPE_EXIT_CRITICAL();
+    queueEXIT_CRITICAL( pxQueue );
 
     traceRETURN_uxQueueSpacesAvailable( uxReturn );
 
@@ -2499,13 +2546,14 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
 
 static void prvUnlockQueue( Queue_t * const pxQueue )
 {
-    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
+    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED WHEN portUSING_GRANULAR_LOCKS IS 0.
+     * IT MUST BE CALLED WITH TASK PREEMPTION DISABLED WHEN portUSING_GRANULAR_LOCKS IS 1. */
 
     /* The lock counts contains the number of extra data items placed or
      * removed from the queue while the queue was locked. When a queue is
      * locked items can be added or removed, but the event lists cannot be
      * updated. */
-    taskENTER_CRITICAL();
+    queueENTER_CRITICAL( pxQueue );
     {
         int8_t cTxLock = pxQueue->cTxLock;
 
@@ -2583,10 +2631,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
 
         pxQueue->cTxLock = queueUNLOCKED;
     }
-    taskEXIT_CRITICAL();
+    queueEXIT_CRITICAL( pxQueue );
 
     /* Do the same for the Rx lock. */
-    taskENTER_CRITICAL();
+    queueENTER_CRITICAL( pxQueue );
     {
         int8_t cRxLock = pxQueue->cRxLock;
 
@@ -2613,15 +2661,115 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
 
         pxQueue->cRxLock = queueUNLOCKED;
     }
-    taskEXIT_CRITICAL();
+    queueEXIT_CRITICAL( pxQueue );
 }
 /*-----------------------------------------------------------*/
 
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vQueueEnterCritical( const Queue_t * pxQueue )
+    {
+        portDISABLE_INTERRUPTS();
+        {
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+            /* Task spinlock is always taken first */
+            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
+
+            /* Take the ISR spinlock next */
+            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+
+            /* Increment the critical nesting count */
+            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue )
+    {
+        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        /* Take the ISR spinlock */
+        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+
+        /* Increment the critical nesting count */
+        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        return uxSavedInterruptStatus;
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vQueueExitCritical( const Queue_t * pxQueue )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+
+        /* Get the xYieldPending status inside the critical section. */
+        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
+
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+
+        /* Release the task spinlock */
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portENABLE_INTERRUPTS();
+
+            if( xYieldCurrentTask != pdFALSE )
+            {
+                portYIELD();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                           const Queue_t * pxQueue )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;
 
-    taskENTER_CRITICAL();
+    queueENTER_CRITICAL( pxQueue );
     {
         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {
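A brief note on the locking discipline the four functions above establish (an illustrative summary, not part of the diff; the helper below is hypothetical):

```c
/* Sketch only: the acquire/release order used above.
 *
 *   Task context enter:  disable interrupts -> xTaskSpinlock -> xISRSpinlock -> nesting++
 *   Task context exit:   nesting-- -> release xISRSpinlock -> release xTaskSpinlock
 *                        -> enable interrupts and maybe yield (outermost exit only)
 *   ISR context:         mask interrupts -> xISRSpinlock only
 *
 * Because an ISR only ever needs xISRSpinlock, a task that holds just
 * xTaskSpinlock via queueLOCK() does not block interrupts; ISR-side queue
 * events are pended through cTxLock/cRxLock until queueUNLOCK() runs. */
static void vExampleCriticalPair( Queue_t * pxQueue )
{
    queueENTER_CRITICAL( pxQueue );
    {
        /* Queue state may be read or written safely here. */
    }
    queueEXIT_CRITICAL( pxQueue );
}
```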
@@ -2632,7 +2780,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    taskEXIT_CRITICAL();
+    queueEXIT_CRITICAL( pxQueue );
 
     return xReturn;
 }
@@ -2666,7 +2814,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;
 
-    taskENTER_CRITICAL();
+    queueENTER_CRITICAL( pxQueue );
     {
         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
         {
@@ -2677,7 +2825,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    taskEXIT_CRITICAL();
+    queueEXIT_CRITICAL( pxQueue );
 
     return xReturn;
 }
@@ -3157,7 +3305,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
      * time a yield will be performed. If an item is added to the queue while
      * the queue is locked, and the calling task blocks on the queue, then the
      * calling task will be immediately unblocked when the queue is unlocked. */
-    prvLockQueue( pxQueue );
+    queueLOCK( pxQueue );
 
     if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
     {
@@ -3169,7 +3317,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         mtCOVERAGE_TEST_MARKER();
     }
 
-    prvUnlockQueue( pxQueue );
+    queueUNLOCK( pxQueue, pdFALSE );
 
     traceRETURN_vQueueWaitForMessageRestricted();
 }
@@ -3221,17 +3369,18 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                               QueueSetHandle_t xQueueSet )
     {
         BaseType_t xReturn;
+        Queue_t * const pxQueue = xQueueOrSemaphore;
 
         traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
 
-        taskENTER_CRITICAL();
+        queueENTER_CRITICAL( pxQueue );
         {
-            if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
+            if( pxQueue->pxQueueSetContainer != NULL )
             {
                 /* Cannot add a queue/semaphore to more than one queue set. */
                 xReturn = pdFAIL;
             }
-            else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
+            else if( pxQueue->uxMessagesWaiting != ( UBaseType_t ) 0 )
             {
                 /* Cannot add a queue/semaphore to a queue set if there are already
                  * items in the queue/semaphore. */
@@ -3239,11 +3388,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
             else
             {
-                ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
+                pxQueue->pxQueueSetContainer = xQueueSet;
                 xReturn = pdPASS;
             }
         }
-        taskEXIT_CRITICAL();
+        queueEXIT_CRITICAL( pxQueue );
 
         traceRETURN_xQueueAddToSet( xReturn );
 
@@ -3277,12 +3426,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
         else
         {
-            taskENTER_CRITICAL();
+            queueENTER_CRITICAL( pxQueueOrSemaphore );
             {
                 /* The queue is no longer contained in the set. */
                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
             }
-            taskEXIT_CRITICAL();
+            queueEXIT_CRITICAL( pxQueueOrSemaphore );
             xReturn = pdPASS;
         }
 