Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git (synced 2025-04-19 21:11:57 -04:00)
Default the definition of portASSERT_IF_IN_ISR() to nothing if it is not defined.

Helper updates to allow a count of the number of mutexes held to be maintained. Updates to the CCS Cortex-R4 implementation necessitated by a change in compiler semantics. Update the PIC32MX and PIC32MZ ports to assert if a non-ISR-safe function is called from an ISR.
This commit is contained in:
parent b4659d8872
commit 583b144bc3
@@ -717,6 +717,10 @@ is included as it is used by the port layer. */
 	#define mtCOVERAGE_TEST_MARKER()
 #endif
 
+#ifndef portASSERT_IF_IN_ISR
+	#define portASSERT_IF_IN_ISR()
+#endif
+
 /* Definitions to allow backward compatibility with FreeRTOS versions prior to
 V8 if desired. */
 #ifndef configENABLE_BACKWARD_COMPATIBILITY
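A note on the pattern: the empty default added above means code that never defines portASSERT_IF_IN_ISR() compiles unchanged, while a port that tracks interrupt nesting can supply a real definition. A minimal sketch, combining the new default with the PIC32 override that appears later in this commit:

/* Supplied by FreeRTOS.h when the port layer does not define it. */
#ifndef portASSERT_IF_IN_ISR
	#define portASSERT_IF_IN_ISR()
#endif

/* A port that counts interrupt nesting can override the default, as the
PIC32 port hunks below do: */
extern volatile UBaseType_t uxInterruptNesting;
#define portASSERT_IF_IN_ISR() configASSERT( uxInterruptNesting == 0 )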
@@ -1507,7 +1507,7 @@ void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION;
  * Set the priority of a task back to its proper priority in the case that it
  * inherited a higher priority while it was holding a semaphore.
  */
-void vTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION;
+BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION;
 
 /*
  * Generic version of the task creation function which is in turn called by the
@@ -1552,6 +1552,15 @@ void vTaskStepTick( const TickType_t xTicksToJump ) PRIVILEGED_FUNCTION;
  */
 eSleepModeStatus eTaskConfirmSleepModeStatus( void ) PRIVILEGED_FUNCTION;
 
+/*
+ * For internal use only. Increment the mutex held count when a mutex is
+ * taken and decrement the mutex held count when the mutex is given back
+ * respectively. The mutex held count is used to know when it is safe to
+ * disinherit a priority.
+ */
+void vTaskIncrementMutexHeldCount( void );
+void vTaskDecrementMutexHeldCount( void );
+
 #ifdef __cplusplus
 }
 #endif
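The tasks.c implementation of these helpers is not among the hunks shown on this page. A minimal sketch of what they could look like, assuming a hypothetical uxMutexesHeld member in the task control block:

/* Illustrative only; pxCurrentTCB->uxMutexesHeld is an assumption, not
the code from this commit. */
void vTaskIncrementMutexHeldCount( void )
{
	/* A mutex was taken by the currently executing task. */
	( pxCurrentTCB->uxMutexesHeld )++;
}

void vTaskDecrementMutexHeldCount( void )
{
	/* A mutex was given back by the currently executing task. */
	configASSERT( pxCurrentTCB->uxMutexesHeld != ( UBaseType_t ) 0 );
	( pxCurrentTCB->uxMutexesHeld )--;
}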
@@ -101,7 +101,7 @@ portSAVE_CONTEXT .macro
 
 	; If the task is not using a floating point context then skip the
 	; saving of the FPU registers.
-	BEQ		PC+3
+	BEQ		$+16
 	FSTMDBD	LR!, {D0-D15}
 	FMRX	R1, FPSCR
 	STMFD	LR!, {R1}
@@ -137,7 +137,7 @@ portRESTORE_CONTEXT .macro
 
 	; If the task is not using a floating point context then skip the
 	; VFP register loads.
-	BEQ		PC+3
+	BEQ		$+16
 
 	; Restore the floating point context.
 	LDMFD	LR!, {R0}
@@ -315,6 +315,16 @@ void vPortEnterCritical( void )
 	directly. Increment ulCriticalNesting to keep a count of how many times
 	portENTER_CRITICAL() has been called. */
 	ulCriticalNesting++;
+
+	/* This is not the interrupt safe version of the enter critical function so
+	assert() if it is being called from an interrupt context. Only API
+	functions that end in "FromISR" can be used in an interrupt. Only assert if
+	the critical nesting count is 1 to protect against recursive calls if the
+	assert function also uses a critical section. */
+	if( ulCriticalNesting == 1 )
+	{
+		configASSERT( ulPortInterruptNesting == 0 );
+	}
 }
 /*-----------------------------------------------------------*/
 
@@ -204,8 +204,8 @@ uint32_t ulCause; \
 	_CP0_SET_CAUSE( ulCause ); \
 }
 
-#define portCURRENT_INTERRUPT_PRIORITY ( ( _CP0_GET_STATUS() & portALL_IPL_BITS ) >> portIPL_SHIFT )
-#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( portCURRENT_INTERRUPT_PRIORITY <= configMAX_SYSCALL_INTERRUPT_PRIORITY )
+extern volatile UBaseType_t uxInterruptNesting;
+#define portASSERT_IF_IN_ISR() configASSERT( uxInterruptNesting == 0 )
 
 #define portNOP() __asm volatile ( "nop" )
 
@@ -206,8 +206,8 @@ uint32_t ulCause; \
 	_CP0_SET_CAUSE( ulCause ); \
 }
 
-#define portCURRENT_INTERRUPT_PRIORITY ( ( _CP0_GET_STATUS() & portALL_IPL_BITS ) >> portIPL_SHIFT )
-#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( portCURRENT_INTERRUPT_PRIORITY <= configMAX_SYSCALL_INTERRUPT_PRIORITY )
+extern volatile UBaseType_t uxInterruptNesting;
+#define portASSERT_IF_IN_ISR() configASSERT( uxInterruptNesting == 0 )
 
 #define portNOP() __asm volatile ( "nop" )
 
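The kernel call sites for portASSERT_IF_IN_ISR() are not visible in this diff, so the following wrapper is hypothetical; it only illustrates the intended failure mode on the PIC32 ports:

#include "FreeRTOS.h"
#include "queue.h"

/* Hypothetical helper: trap ISR misuse before doing any queue work. On
the PIC32 ports this fails configASSERT() whenever uxInterruptNesting
is non-zero, i.e. whenever it is reached from interrupt context. */
BaseType_t xSendFromTaskContextOnly( QueueHandle_t xQueue, const void *pvItem, TickType_t xTicks )
{
	portASSERT_IF_IN_ISR();
	return xQueueGenericSend( xQueue, pvItem, xTicks, queueSEND_TO_BACK );
}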
@@ -216,7 +216,7 @@ static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
  * Copies an item into the queue, either at the front of the queue or the
  * back of the queue.
  */
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
 
 /*
  * Copies an item out of a queue.
@@ -421,7 +421,10 @@ QueueHandle_t xReturn = NULL;
 
 		traceCREATE_MUTEX( pxNewQueue );
 
-		/* Start with the semaphore in the expected state. */
+		/* Start with the semaphore in the expected state. Preload the
+		mutex held count as calling xQueueGenericSend() will decrement the
+		count back to 0. */
+		vTaskIncrementMutexHeldCount();
 		( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
 	}
 	else
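Why the preload balances out, as a walkthrough of the hunks on this page (the count values assume the helpers simply increment and decrement a per-task counter):

vTaskIncrementMutexHeldCount();	/* held count: 0 -> 1 */
( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
	/* the give path reaches prvCopyDataToQueue(), which calls
	vTaskDecrementMutexHeldCount(): 1 -> 0 */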
@@ -508,7 +511,8 @@ QueueHandle_t xReturn = NULL;
 		}
 		else
 		{
-			/* We cannot give the mutex because we are not the holder. */
+			/* The mutex cannot be given because the calling task is not the
+			holder. */
 			xReturn = pdFAIL;
 
 			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
@@ -543,8 +547,9 @@ QueueHandle_t xReturn = NULL;
 	{
 		xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );
 
-		/* pdPASS will only be returned if we successfully obtained the mutex,
-		we may have blocked to reach here. */
+		/* pdPASS will only be returned if the mutex was successfully
+		obtained. The calling task may have entered the Blocked state
+		before reaching here. */
 		if( xReturn == pdPASS )
 		{
 			( pxMutex->u.uxRecursiveCallCount )++;
@@ -592,7 +597,7 @@ QueueHandle_t xReturn = NULL;
 
 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
 {
-BaseType_t xEntryTimeSet = pdFALSE;
+BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
 TimeOut_t xTimeOut;
 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 
@@ -620,7 +625,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
 		{
 			traceQUEUE_SEND( pxQueue );
-			prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
+			xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
 
 			#if ( configUSE_QUEUE_SETS == 1 )
 			{
@@ -657,6 +662,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 					mtCOVERAGE_TEST_MARKER();
 				}
 			}
+			else if( xYieldRequired != pdFALSE )
+			{
+				/* This path is a special case that will only get
+				executed if the task was holding multiple mutexes
+				and the mutexes were given back in an order that is
+				different to that in which they were taken. */
+				queueYIELD_IF_USING_PREEMPTION();
+			}
 			else
 			{
 				mtCOVERAGE_TEST_MARKER();
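An application-level scenario that could exercise the new path (illustrative; the mutex handles are hypothetical and the held counts assume a simple per-task counter):

#include "FreeRTOS.h"
#include "semphr.h"

/* A task holding two mutexes gives them back in a different order to
that in which they were taken; the first give can disinherit a priority,
which is what makes prvCopyDataToQueue() report that a yield is needed. */
void vOutOfOrderGiveExample( void )
{
	SemaphoreHandle_t xMutexA = xSemaphoreCreateMutex();
	SemaphoreHandle_t xMutexB = xSemaphoreCreateMutex();

	xSemaphoreTake( xMutexA, portMAX_DELAY );	/* held count 1 */
	xSemaphoreTake( xMutexB, portMAX_DELAY );	/* held count 2 */
	xSemaphoreGive( xMutexA );	/* out-of-order give */
	xSemaphoreGive( xMutexB );	/* held count back to 0 */
}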
@@ -690,9 +703,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 			#endif /* configUSE_QUEUE_SETS */
 
 			taskEXIT_CRITICAL();
-
-			/* Return to the original privilege level before exiting the
-			function. */
 			return pdPASS;
 		}
 		else
@@ -1059,7 +1069,20 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 		{
 			traceQUEUE_SEND_FROM_ISR( pxQueue );
 
-			prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
+			if( prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ) != pdFALSE )
+			{
+				/* This is a special case that can only be executed if a task
+				holds multiple mutexes and then gives the mutexes back in an
+				order that is different to that in which they were taken. */
+				if( pxHigherPriorityTaskWoken != NULL )
+				{
+					*pxHigherPriorityTaskWoken = pdTRUE;
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
 
 			/* The event list is not altered if the queue is locked. This will
 			be done when the queue is unlocked later. */
@@ -1591,8 +1614,10 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 #endif /* configUSE_TRACE_FACILITY */
 /*-----------------------------------------------------------*/
 
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
 {
+BaseType_t xReturn = pdFALSE;
+
 	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
 	{
 		#if ( configUSE_MUTEXES == 1 )
@@ -1600,7 +1625,8 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
 			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
 			{
 				/* The mutex is no longer being held. */
-				vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
+				vTaskDecrementMutexHeldCount();
+				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
 				pxQueue->pxMutexHolder = NULL;
 			}
 			else
@@ -1658,6 +1684,8 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
 	}
 
 	++( pxQueue->uxMessagesWaiting );
+
+	return xReturn;
 }
 /*-----------------------------------------------------------*/
 
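Taken together, these hunks change prvCopyDataToQueue() from returning void to reporting whether the copy (which, for a mutex, releases the mutex) disinherited a priority and therefore requires a context switch. The caller-side pattern, condensed from the hunks above:

/* Task context (xQueueGenericSend): */
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
...
else if( xYieldRequired != pdFALSE )
{
	queueYIELD_IF_USING_PREEMPTION();
}

/* Interrupt context (the FromISR send path): */
if( prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ) != pdFALSE )
{
	*pxHigherPriorityTaskWoken = pdTRUE;	/* when the pointer is non-NULL */
}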
@@ -1678,7 +1706,8 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
 		}
 		else
 		{
-			mtCOVERAGE_TEST_MARKER();
+			/* A mutex was taken. */
+			vTaskIncrementMutexHeldCount();
 		}
 	}
 /*-----------------------------------------------------------*/
@@ -2367,8 +2396,9 @@ BaseType_t xReturn;
 	if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
 	{
 		traceQUEUE_SEND( pxQueueSetContainer );
-		/* The data copies is the handle of the queue that contains data. */
-		prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
+		/* The data copied is the handle of the queue that contains data. */
+		xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
 
 		if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
 		{
 			if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )