Add trace macros.

Richard Barry 2008-03-03 16:32:05 +00:00
parent 2b174e556c
commit b8b70528f4
2 changed files with 113 additions and 3 deletions
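The diff drops a traceXXX() hook at each point of interest in queue.c and tasks.c. The hooks cost nothing unless the application asks for them: below is a minimal sketch of the expected pattern, assuming each macro defaults to an empty definition in a core header so the calls compile away when no definition is supplied.

/* Sketch of the default no-op definitions, one per hook.  If the
application has not provided its own version the macro expands to
nothing and the trace call vanishes at compile time. */
#ifndef traceQUEUE_SEND
    #define traceQUEUE_SEND( pxQueue )
#endif

#ifndef traceQUEUE_SEND_FAILED
    #define traceQUEUE_SEND_FAILED( pxQueue )
#endif

#ifndef traceTASK_SWITCHED_IN
    #define traceTASK_SWITCHED_IN()
#endif

An application enables an event simply by defining the corresponding macro, normally in FreeRTOSConfig.h, before the kernel headers fall back to the empty defaults.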

Source/queue.c

@@ -213,10 +213,13 @@ size_t xQueueSizeInBytes;
vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );
+traceQUEUE_CREATE( pxNewQueue );
return pxNewQueue;
}
else
{
+traceQUEUE_CREATE_FAILED();
vPortFree( pxNewQueue );
}
}
@@ -262,6 +265,12 @@ size_t xQueueSizeInBytes;
/* Start with the semaphore in the expected state. */
xQueueGenericSend( pxNewQueue, NULL, 0, queueSEND_TO_BACK );
+traceCREATE_MUTEX( pxNewQueue );
+}
+else
+{
+traceCREATE_MUTEX_FAILED();
}
return pxNewQueue;
@@ -300,11 +309,15 @@ size_t xQueueSizeInBytes;
}
xReturn = pdPASS;
+traceGIVE_MUTEX_RECURSIVE( pxMutex );
}
else
{
/* We cannot give the mutex because we are not the holder. */
xReturn = pdFAIL;
+traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
}
return xReturn;
@@ -339,6 +352,8 @@ size_t xQueueSizeInBytes;
}
}
+traceTAKE_MUTEX_RECURSIVE( pxMutex );
return xReturn;
}
@@ -356,6 +371,12 @@ size_t xQueueSizeInBytes;
if( pxHandle != NULL )
{
pxHandle->uxMessagesWaiting = uxInitialCount;
+traceCREATE_COUNTING_SEMAPHORE();
+}
+else
+{
+traceCREATE_COUNTING_SEMAPHORE_FAILED();
}
return pxHandle;
@@ -448,6 +469,8 @@ xTimeOutType xTimeOut;
list. */
taskENTER_CRITICAL();
{
+traceBLOCKING_ON_QUEUE_SEND( pxQueue );
/* We can safely unlock the queue and scheduler here as
interrupts are disabled. We must not yield with anything
locked, but we can yield from within a critical section.
@@ -505,6 +528,8 @@ xTimeOutType xTimeOut;
{
if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
{
+traceQUEUE_SEND( pxQueue );
/* There is room in the queue, copy the data into the queue. */
prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
xReturn = pdPASS;
@@ -529,6 +554,14 @@ xTimeOutType xTimeOut;
{
xReturn = queueERRONEOUS_UNBLOCK;
}
+else
+{
+traceQUEUE_SEND_FAILED( pxQueue );
+}
+}
+else
+{
+traceQUEUE_SEND_FAILED( pxQueue );
}
}
}
@@ -571,6 +604,8 @@ xTimeOutType xTimeOut;
posting? */
if( xTicksToWait > ( portTickType ) 0 )
{
+traceBLOCKING_ON_QUEUE_SEND( pxQueue );
/* We are going to place ourselves on the xTasksWaitingToSend
event list, and will get woken should the delay expire, or
space become available on the queue. */
@@ -586,6 +621,8 @@ xTimeOutType xTimeOut;
if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
{
+traceQUEUE_SEND( pxQueue );
/* There is room in the queue, copy the data into the queue. */
prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
xReturn = pdPASS;
@@ -611,6 +648,14 @@ xTimeOutType xTimeOut;
this task unblocking and actually executing. */
xReturn = queueERRONEOUS_UNBLOCK;
}
+else
+{
+traceQUEUE_SEND_FAILED( pxQueue );
+}
+}
+else
+{
+traceQUEUE_SEND_FAILED( pxQueue );
}
}
}
@@ -655,6 +700,8 @@ xTimeOutType xTimeOut;
leave with nothing? */
if( xTicksToWait > ( portTickType ) 0 )
{
+traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
@@ -678,6 +725,8 @@ xTimeOutType xTimeOut;
if( xJustPeeking == pdFALSE )
{
+traceQUEUE_RECEIVE( pxQueue );
/* We are actually removing data. */
--( pxQueue->uxMessagesWaiting );
@@ -703,6 +752,8 @@ xTimeOutType xTimeOut;
}
else
{
+traceQUEUE_PEEK( pxQueue );
/* We are not removing the data, so reset our read
pointer. */
pxQueue->pcReadFrom = pcOriginalReadPosition;
@@ -720,6 +771,14 @@ xTimeOutType xTimeOut;
{
xReturn = queueERRONEOUS_UNBLOCK;
}
+else
+{
+traceQUEUE_RECEIVE_FAILED( pxQueue );
+}
+}
+else
+{
+traceQUEUE_RECEIVE_FAILED( pxQueue );
}
}
@@ -742,6 +801,8 @@ signed portBASE_TYPE xQueueGenericSendFromISR( xQueueHandle pxQueue, const void
by this post). */
if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
{
+traceQUEUE_SEND_FROM_ISR( pxQueue );
prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* If the queue is locked we do not alter the event list. This will
@@ -770,6 +831,10 @@ signed portBASE_TYPE xQueueGenericSendFromISR( xQueueHandle pxQueue, const void
++( pxQueue->xTxLock );
}
}
+else
+{
+traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
+}
return xTaskPreviouslyWoken;
}
@@ -802,6 +867,8 @@ signed portCHAR *pcOriginalReadPosition;
leave with nothing? */
if( xTicksToWait > ( portTickType ) 0 )
{
+traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
@@ -849,6 +916,8 @@ signed portCHAR *pcOriginalReadPosition;
if( xJustPeeking == pdFALSE )
{
+traceQUEUE_RECEIVE( pxQueue );
/* We are actually removing data. */
--( pxQueue->uxMessagesWaiting );
@@ -869,6 +938,8 @@ signed portCHAR *pcOriginalReadPosition;
}
else
{
+traceQUEUE_PEEK( pxQueue );
/* We are not removing the data, so reset our read
pointer. */
pxQueue->pcReadFrom = pcOriginalReadPosition;
@@ -897,6 +968,14 @@ signed portCHAR *pcOriginalReadPosition;
{
xReturn = queueERRONEOUS_UNBLOCK;
}
+else
+{
+traceQUEUE_RECEIVE_FAILED( pxQueue );
+}
+}
+else
+{
+traceQUEUE_RECEIVE_FAILED( pxQueue );
}
}
} while( xReturn == queueERRONEOUS_UNBLOCK );
@@ -916,6 +995,8 @@ signed portBASE_TYPE xReturn;
/* We cannot block from an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
{
+traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
prvCopyDataFromQueue( pxQueue, pvBuffer );
--( pxQueue->uxMessagesWaiting );
@@ -951,6 +1032,7 @@ signed portBASE_TYPE xReturn;
else
{
xReturn = pdFAIL;
+traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
return xReturn;
@@ -971,6 +1053,8 @@ unsigned portBASE_TYPE uxReturn;
void vQueueDelete( xQueueHandle pxQueue )
{
+traceQUEUE_DELETE( pxQueue );
vPortFree( pxQueue->pcHead );
vPortFree( pxQueue );
}
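As an illustration of how the queue hooks above might be consumed, the following sketch (hypothetical names, placed in FreeRTOSConfig.h) keeps counters of successful and failed sends. Several of the hooks fire inside critical sections or from interrupt handlers, so definitions should stay short and must never block.

/* FreeRTOSConfig.h - hypothetical instrumentation counters.  The
variables themselves would be defined once in an application source
file. */
extern volatile unsigned long ulQueueSendCount;
extern volatile unsigned long ulQueueSendFailures;

#define traceQUEUE_SEND( pxQueue )          ulQueueSendCount++
#define traceQUEUE_SEND_FAILED( pxQueue )   ulQueueSendFailures++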

Source/tasks.c

@@ -508,7 +508,7 @@ static tskTCB *prvAllocateTCBAndStack( unsigned portSHORT usStackDepth );
* This function determines the 'high water mark' of the task stack by
* determining how much of the stack remains at the original preset value.
*/
-#if ( configUSE_TRACE_FACILITY == 1 )
+#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxGetStackHighWaterMark == 1 ) )
unsigned portSHORT usTaskCheckFreeStackSpace( const unsigned portCHAR * pucStackByte );
@@ -620,12 +620,14 @@ tskTCB * pxNewTCB;
prvAddTaskToReadyQueue( pxNewTCB );
xReturn = pdPASS;
+traceTASK_CREATE( pxNewTCB );
}
portEXIT_CRITICAL();
}
else
{
xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
+traceTASK_CREATE_FAILED( pxNewTCB );
}
if( xReturn == pdPASS )
@@ -671,6 +673,8 @@ tskTCB * pxNewTCB;
/* If null is passed in here then we are deleting ourselves. */
pxTCB = prvGetTCBFromHandle( pxTaskToDelete );
+traceTASK_DELETE( pxTCB );
/* Remove task from the ready list and place in the termination list.
This will stop the task from be scheduled. The idle task will check
the termination list and free up any memory allocated by the
@@ -753,6 +757,8 @@ tskTCB * pxNewTCB;
if( xShouldDelay )
{
+traceTASK_DELAY_UNTIL();
/* We must remove ourselves from the ready list before adding
ourselves to the blocked list as the same list item is used for
both lists. */
@@ -800,6 +806,8 @@ tskTCB * pxNewTCB;
{
vTaskSuspendAll();
{
+traceTASK_DELAY();
/* A task that is removed from the event list while the
scheduler is suspended will not get placed in the ready
list or removed from the blocked list until the scheduler
@@ -888,6 +896,8 @@ tskTCB * pxNewTCB;
priority of the calling function. */
pxTCB = prvGetTCBFromHandle( pxTask );
+traceTASK_PRIORITY_SET( pxTask, uxNewPriority );
#if ( configUSE_MUTEXES == 1 )
{
uxCurrentPriority = pxTCB->uxBasePriority;
@@ -985,6 +995,8 @@ tskTCB * pxNewTCB;
/* If null is passed in here then we are suspending ourselves. */
pxTCB = prvGetTCBFromHandle( pxTaskToSuspend );
+traceTASK_SUSPEND( pxTaskToSuspend );
/* Remove task from the ready/delayed list and place in the suspended list. */
vListRemove( &( pxTCB->xGenericListItem ) );
@@ -1056,6 +1068,8 @@ tskTCB * pxNewTCB;
{
if( prvIsTaskSuspended( pxTCB ) == pdTRUE )
{
+traceTASK_RESUME( pxTCB );
/* As we are in a critical section we can access the ready
lists even if the scheduler is suspended. */
vListRemove( &( pxTCB->xGenericListItem ) );
@@ -1089,6 +1103,8 @@ tskTCB * pxNewTCB;
if( prvIsTaskSuspended( pxTCB ) == pdTRUE )
{
+traceTASK_RESUME_FROM_ISR( pxTCB );
if( uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE )
{
xYieldRequired = ( pxTCB->uxPriority >= pxCurrentTCB->uxPriority );
@@ -1430,6 +1446,8 @@ inline void vTaskIncrementTick( void )
}
}
#endif
+traceTASK_INCREMENT_TICK( xTickCount );
}
/*-----------------------------------------------------------*/
@@ -1505,6 +1523,8 @@ void vTaskSwitchContext( void )
/* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the tasks of the
same priority get an equal share of the processor time. */
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopReadyPriority ] ) );
+traceTASK_SWITCHED_IN();
vWriteTraceToBuffer();
}
/*-----------------------------------------------------------*/
@@ -1913,7 +1933,7 @@ tskTCB *pxNewTCB;
#endif
/*-----------------------------------------------------------*/
-#if ( configUSE_TRACE_FACILITY == 1 )
+#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxGetStackHighWaterMark == 1 ) )
unsigned portSHORT usTaskCheckFreeStackSpace( const unsigned portCHAR * pucStackByte )
{
register unsigned portSHORT usCount = 0;
@@ -1931,7 +1951,13 @@ tskTCB *pxNewTCB;
#endif
/*-----------------------------------------------------------*/
+#if ( INCLUDE_uxGetStackHighWaterMark == 1 )
+unsigned portBASE_TYPE uxGetStackHighWaterMark( void )
+{
+return usTaskCheckFreeStackSpace( pxCurrentTCB->pxStack );
+}
+#endif
+/*-----------------------------------------------------------*/
#if ( ( INCLUDE_vTaskDelete == 1 ) || ( INCLUDE_vTaskCleanUpResources == 1 ) )
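Beyond the trace hooks, the tasks.c changes expose the existing stack checking helper through a new uxGetStackHighWaterMark() function, gated by INCLUDE_uxGetStackHighWaterMark. A minimal usage sketch, assuming the option is set in FreeRTOSConfig.h; the monitoring task, its threshold, and the extern prototype are illustrative only (no header changes are part of this commit).

/* FreeRTOSConfig.h */
#define INCLUDE_uxGetStackHighWaterMark     1

/* Application code.  The prototype is assumed here because only the
.c files change in this commit. */
#include "FreeRTOS.h"
#include "task.h"

extern unsigned portBASE_TYPE uxGetStackHighWaterMark( void );

void vMonitorTask( void *pvParameters )
{
unsigned portBASE_TYPE uxHighWaterMark;

    ( void ) pvParameters;

    for( ;; )
    {
        /* Ask how close the calling task has come to exhausting its own
        stack.  The figure is the amount of stack that has never been
        used, so a smaller number means less headroom. */
        uxHighWaterMark = uxGetStackHighWaterMark();

        if( uxHighWaterMark < 20 )
        {
            /* Hypothetical response - record it, assert, or alert a
            supervisor task. */
        }

        vTaskDelay( 1000 );
    }
}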