Add traceQUEUE_SEND{_FROM_ISR,}_EXT and traceQUEUE_RESET hooks.

The existing hooks did not let a tracer track the number of items in a
queue: they gave no way to differentiate between a queueOVERWRITE,
queueSEND_TO_BACK, and queueSEND_TO_FRONT send (an overwrite of a full
queue leaves the item count unchanged, while the other two add one),
and no way to (efficiently) trace a queue reset.

For sends, this adds extended tracing macros that receive the copy
position as an extra argument and, if left undefined, fall back on the
original tracing macros for backwards compatibility. It also introduces
a new traceQUEUE_RESET hook.
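
As an illustration of what the extra argument enables, a tracer could now
maintain an exact per-queue item count from FreeRTOSConfig.h. A minimal
sketch, assuming a hypothetical tracer function vTracerSetQueueCount()
(not part of this change); the hooks fire before the data is copied, so
uxMessagesWaiting still holds the pre-send count:

/* Trace macros expand inside queue.c, so they may read the queue's
 * private uxMessagesWaiting member. queueOVERWRITE is only valid for
 * length-1 queues: overwriting a full queue leaves the count at 1, so
 * only a send to an empty queue raises the count. */
#define traceQUEUE_SEND_EXT( pxQueue, xCopyPosition )                \
    do {                                                             \
        UBaseType_t uxNewCount = ( pxQueue )->uxMessagesWaiting;     \
        if( ( ( xCopyPosition ) != queueOVERWRITE ) ||               \
            ( uxNewCount == ( UBaseType_t ) 0 ) )                    \
        {                                                            \
            uxNewCount += ( UBaseType_t ) 1;                         \
        }                                                            \
        vTracerSetQueueCount( pxQueue, uxNewCount );                 \
    } while( 0 )

#define traceQUEUE_SEND_FROM_ISR_EXT( pxQueue, xCopyPosition ) \
    traceQUEUE_SEND_EXT( pxQueue, xCopyPosition )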

Discussed here: https://forums.freertos.org/t/queue-tracing/20054/4
Author: schilkp
Date:   2024-05-17 18:47:03 +02:00
Commit: 20575a2986 (parent 0801c91bc6)
2 changed files with 21 additions and 3 deletions

--- a/include/FreeRTOS.h
+++ b/include/FreeRTOS.h

@@ -769,6 +769,12 @@
     #define traceQUEUE_SEND( pxQueue )
 #endif

+/* Extended version of traceQUEUE_SEND that also reports the copy position
+ * of the sent data. */
+#ifndef traceQUEUE_SEND_EXT
+    #define traceQUEUE_SEND_EXT( pxQueue, xCopyPosition )    traceQUEUE_SEND( pxQueue )
+#endif
+
 #ifndef traceQUEUE_SEND_FAILED
     #define traceQUEUE_SEND_FAILED( pxQueue )
 #endif
@@ -797,6 +803,12 @@
     #define traceQUEUE_SEND_FROM_ISR( pxQueue )
 #endif

+/* Extended version of traceQUEUE_SEND_FROM_ISR that also reports the copy
+ * position of the sent data. */
+#ifndef traceQUEUE_SEND_FROM_ISR_EXT
+    #define traceQUEUE_SEND_FROM_ISR_EXT( pxQueue, xCopyPosition )    traceQUEUE_SEND_FROM_ISR( pxQueue )
+#endif
+
 #ifndef traceQUEUE_SEND_FROM_ISR_FAILED
     #define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue )
 #endif
@@ -813,6 +825,10 @@
     #define traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue )
 #endif

+#ifndef traceQUEUE_RESET
+    #define traceQUEUE_RESET( pxQueue, xNewQueue )
+#endif
+
 #ifndef traceQUEUE_DELETE
     #define traceQUEUE_DELETE( pxQueue )
 #endif
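
Note that because each _EXT macro defaults to its non-extended counterpart,
an existing tracer that only defines the original hooks builds and behaves
exactly as before. For example, with a hypothetical legacy hook:

/* Legacy tracer: only the original macro is defined in FreeRTOSConfig.h. */
#define traceQUEUE_SEND( pxQueue )    vTracerOnSend( pxQueue )

/* The new call site in queue.c,
 *     traceQUEUE_SEND_EXT( pxQueue, xCopyPosition );
 * expands via the default above to traceQUEUE_SEND( pxQueue ), i.e. to
 *     vTracerOnSend( pxQueue );
 * so the copy position is simply dropped. */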

--- a/queue.c
+++ b/queue.c

@@ -315,6 +315,8 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
         /* Check for multiplication overflow. */
         ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
     {
+        traceQUEUE_RESET( pxQueue, xNewQueue );
+
         taskENTER_CRITICAL();
         {
             pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
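
A tracer tracking occupancy as sketched above can zero its count here.
xNewQueue is pdTRUE when the queue is first created and pdFALSE for an
explicit xQueueReset() call; vTracerSetQueueCount() is hypothetical, as
before:

/* Creation and reset both leave the queue empty, so the tracked count
 * drops to zero either way; xNewQueue merely distinguishes the two cases. */
#define traceQUEUE_RESET( pxQueue, xNewQueue ) \
    vTracerSetQueueCount( pxQueue, ( UBaseType_t ) 0 )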
@@ -966,7 +968,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
              * queue is full. */
             if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
             {
-                traceQUEUE_SEND( pxQueue );
+                traceQUEUE_SEND_EXT( pxQueue, xCopyPosition );

                 #if ( configUSE_QUEUE_SETS == 1 )
                 {
@@ -1200,7 +1202,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
             const int8_t cTxLock = pxQueue->cTxLock;
             const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

-            traceQUEUE_SEND_FROM_ISR( pxQueue );
+            traceQUEUE_SEND_FROM_ISR_EXT( pxQueue, xCopyPosition );

             /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
              * semaphore or mutex. That means prvCopyDataToQueue() cannot result
@@ -1382,7 +1384,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
         {
             const int8_t cTxLock = pxQueue->cTxLock;

-            traceQUEUE_SEND_FROM_ISR( pxQueue );
+            traceQUEUE_SEND_FROM_ISR_EXT( pxQueue, queueSEND_TO_BACK );

             /* A task can only have an inherited priority if it is a mutex
              * holder - and if there is a mutex holder then the mutex cannot be
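
Note: xQueueGiveFromISR() is only used for semaphores, which carry no data,
and a give always behaves like appending one item; the call site can
therefore hard-code queueSEND_TO_BACK instead of threading a copy position
through.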