Merge branch 'FreeRTOS:main' into MISRA-13.3

Authored by bradleysmith23 on 2024-02-02 15:28:57 -08:00; committed via GitHub.
commit d302cea139
5 changed files with 103 additions and 100 deletions


@@ -2289,7 +2289,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION;
* \defgroup vTaskList vTaskList
* \ingroup TaskUtils
*/
#define vTaskList( pcWriteBuffer ) vTaskListTasks( pcWriteBuffer, configSTATS_BUFFER_MAX_LENGTH )
#define vTaskList( pcWriteBuffer ) vTaskListTasks( ( pcWriteBuffer ), configSTATS_BUFFER_MAX_LENGTH )
/**
* task. h
@@ -2412,7 +2412,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION;
* \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats
* \ingroup TaskUtils
*/
#define vTaskGetRunTimeStats( pcWriteBuffer ) vTaskGetRunTimeStatistics( pcWriteBuffer, configSTATS_BUFFER_MAX_LENGTH )
#define vTaskGetRunTimeStats( pcWriteBuffer ) vTaskGetRunTimeStatistics( ( pcWriteBuffer ), configSTATS_BUFFER_MAX_LENGTH )
/**
* task. h
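The two hunks above only add parentheses around the forwarded pcWriteBuffer argument. This follows the spirit of MISRA C:2012 Rule 20.7, which asks for every expansion of a macro parameter to be parenthesised so that operator precedence inside the argument expression cannot change the meaning of the expansion. A minimal, hypothetical illustration (not kernel code):

    /* Hypothetical macros, for illustration only. */
    #define DOUBLE_PLAIN( x )    ( x * 2 )         /* parameter used unparenthesised */
    #define DOUBLE_SAFE( x )     ( ( x ) * 2 )     /* parameter wrapped in parentheses */

    int a = DOUBLE_PLAIN( 1 + 3 );   /* expands to ( 1 + 3 * 2 ) == 7 */
    int b = DOUBLE_SAFE( 1 + 3 );    /* expands to ( ( 1 + 3 ) * 2 ) == 8 */

For a parameter that is simply passed on as a function argument, as here, the added parentheses are harmless and keep the macro compliant whatever expression the caller supplies.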
@@ -3574,9 +3574,7 @@ TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION;
/*
* Return the handle of the task running on specified core.
*/
#if ( configNUMBER_OF_CORES > 1 )
TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
#endif
/*
* Shortcut used by the queue implementation to prevent unnecessary call to


@@ -197,46 +197,46 @@
/*
* Wrappers to keep all the casting in one place.
*/
#define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_QUEUE )
#if ( configUSE_QUEUE_SETS == 1 )
#define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_QUEUE )
#define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_QUEUE )
#endif
/*
* Wrappers to keep all the casting in one place for Task APIs.
*/
#define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_TASK )
#define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TASK )
#define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TASK )
#define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_TASK )
#define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_TASK )
#define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_TASK )
/*
* Wrappers to keep all the casting in one place for Event Group APIs.
*/
#define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP )
#define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_EVENT_GROUP )
#define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_EVENT_GROUP )
#define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP )
#define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_EVENT_GROUP )
#define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_EVENT_GROUP )
/*
* Wrappers to keep all the casting in one place for Stream Buffer APIs.
*/
#define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
#define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
#define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
#define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
#define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_STREAM_BUFFER )
#define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_STREAM_BUFFER )
#if ( configUSE_TIMERS == 1 )
/*
* Wrappers to keep all the casting in one place for Timer APIs.
*/
#define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, ( void * ) pxApplicationCallback, KERNEL_OBJECT_TYPE_TIMER )
#define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TIMER )
#define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TIMER )
#define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), ( void * ) ( pxApplicationCallback ), KERNEL_OBJECT_TYPE_TIMER )
#define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_TIMER )
#define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_TIMER )
#endif /* #if ( configUSE_TIMERS == 1 ) */
@@ -245,7 +245,7 @@
/**
* @brief Kernel object pool.
*/
PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { NULL };
PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { 0 };
/*-----------------------------------------------------------*/
static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) /* PRIVILEGED_FUNCTION */
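The pool initialiser changes from { NULL } to { 0 }; both forms zero-initialise the whole array, but { 0 } is the usual MISRA-friendly universal zero initialiser and does not rely on brace elision to match a pointer constant against the first member of the first element. A small sketch with a hypothetical structure type:

    /* Hypothetical illustration: { 0 } zero-initialises every element of the array,
     * regardless of the member types of the structure. */
    typedef struct Entry
    {
        void * pvHandle;
        unsigned long ulData;
    } Entry_t;

    static Entry_t xPool[ 8 ] = { 0 };   /* all pvHandle members NULL, all ulData members 0 */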
@@ -263,13 +263,13 @@
if( xKernelObjectPool[ i ].xInternalObjectHandle == NULL )
{
/* Mark this index as not free. */
xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0 );
xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0U );
lFreeIndex = i;
break;
}
}
}
xTaskResumeAll();
( void ) xTaskResumeAll();
return lFreeIndex;
}
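The same hunk also discards the result of xTaskResumeAll() with an explicit ( void ) cast, the usual way of satisfying MISRA C:2012 Rule 17.7 (the value returned by a non-void function shall be used or explicitly discarded). A tiny, hypothetical illustration:

    /* Hypothetical function, for illustration only. */
    extern long lSendBytes( const char * pcData );

    void vExample( void )
    {
        /* Rule 17.7: either use the return value or discard it explicitly. */
        ( void ) lSendBytes( "hello" );
    }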
@@ -2964,7 +2964,7 @@
QueueHandle_t xInternalQueueHandle = NULL;
BaseType_t xReturn = pdFAIL;
lIndex = ( uint32_t ) xQueue;
lIndex = ( int32_t ) xQueue;
if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
{


@@ -74,7 +74,6 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "list.h"
#include "timers.h"
#include "utils/wait_for_event.h"
/*-----------------------------------------------------------*/
@@ -88,7 +87,6 @@ typedef struct THREAD
void * pvParams;
BaseType_t xDying;
struct event * ev;
ListItem_t xThreadListItem;
} Thread_t;
/*
@@ -113,7 +111,6 @@ static BaseType_t xSchedulerEnd = pdFALSE;
static pthread_t hTimerTickThread;
static bool xTimerTickThreadShouldRun;
static uint64_t prvStartTimeNs;
static List_t xThreadList;
/*-----------------------------------------------------------*/
static void prvSetupSignalsAndSchedulerPolicy( void );
@@ -197,14 +194,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
thread->ev = event_create();
vListInitialiseItem( &thread->xThreadListItem );
listSET_LIST_ITEM_OWNER( &thread->xThreadListItem, thread );
vPortEnterCritical();
/* Add the new thread in xThreadList. */
vListInsertEnd( &xThreadList, &thread->xThreadListItem );
iRet = pthread_create( &thread->pthread, &xThreadAttributes,
prvWaitForStart, thread );
@@ -235,8 +226,6 @@ BaseType_t xPortStartScheduler( void )
{
int iSignal;
sigset_t xSignals;
ListItem_t * pxIterator;
const ListItem_t * pxEndMarker;
hMainThread = pthread_self();
prvPortSetCurrentThreadName("Scheduler");
@@ -263,19 +252,6 @@ BaseType_t xPortStartScheduler( void )
sigwait( &xSignals, &iSignal );
}
/* Cancel all the running thread. */
pxEndMarker = listGET_END_MARKER( &xThreadList );
for( pxIterator = listGET_HEAD_ENTRY( &xThreadList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
{
Thread_t * pxThread = ( Thread_t * ) listGET_LIST_ITEM_OWNER( pxIterator );
pthread_cancel( pxThread->pthread );
event_signal( pxThread->ev );
pthread_join( pxThread->pthread, NULL );
event_delete( pxThread->ev );
}
/*
* clear out the variable that is used to end the scheduler, otherwise
* subsequent scheduler restarts will end immediately.
@@ -300,6 +276,8 @@ BaseType_t xPortStartScheduler( void )
void vPortEndScheduler( void )
{
Thread_t * pxCurrentThread;
/* Stop the timer tick thread. */
xTimerTickThreadShouldRun = false;
pthread_join( hTimerTickThread, NULL );
@@ -308,7 +286,10 @@ void vPortEndScheduler( void )
xSchedulerEnd = pdTRUE;
( void ) pthread_kill( hMainThread, SIG_RESUME );
pthread_exit( NULL );
/* Waiting to be deleted here. */
pxCurrentThread = prvGetThreadFromTask( xTaskGetCurrentTaskHandle() );
event_wait( pxCurrentThread->ev );
pthread_testcancel();
}
/*-----------------------------------------------------------*/
@@ -491,11 +472,6 @@ void vPortCancelThread( void * pxTaskToDelete )
{
Thread_t * pxThreadToCancel = prvGetThreadFromTask( pxTaskToDelete );
/* Remove the thread from xThreadList. */
vPortEnterCritical();
uxListRemove( &pxThreadToCancel->xThreadListItem );
vPortExitCritical();
/*
* The thread has already been suspended so it can be safely cancelled.
*/
@@ -600,9 +576,6 @@ static void prvSetupSignalsAndSchedulerPolicy( void )
hMainThread = pthread_self();
/* Setup thread list to record all the task which are not deleted. */
vListInitialise( &xThreadList );
/* Initialise common signal masks. */
sigfillset( &xAllSignals );


@@ -1190,7 +1190,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
* read, instead return a flag to say whether a context switch is required or
* not (i.e. has a task with a higher priority than us been woken by this
* post). */
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
@@ -1365,7 +1365,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -2055,7 +2055,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -2153,7 +2153,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
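All four queue.c hunks make the same change: the value returned by taskENTER_CRITICAL_FROM_ISR() is now cast to UBaseType_t explicitly before being stored, so the assignment no longer relies on an implicit conversion from whatever type the underlying port macro returns. A simplified, hedged sketch of the pattern (the function below is a stand-in, not the real port layer):

    /* Hypothetical stand-in for a port's interrupt-mask function. */
    extern unsigned long ulSetInterruptMaskFromISR( void );

    void vIsrExample( void )
    {
        unsigned int uxSavedInterruptStatus;

        /* The explicit cast documents the conversion instead of leaving it implicit. */
        uxSavedInterruptStatus = ( unsigned int ) ulSetInterruptMaskFromISR();

        /* ... ISR-safe work inside the critical section ... */
    }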

tasks.c

@@ -180,7 +180,7 @@
UBaseType_t uxTopPriority = uxTopReadyPriority; \
\
/* Find the highest priority queue that contains ready tasks. */ \
while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) != pdFALSE ) \
{ \
configASSERT( uxTopPriority ); \
--uxTopPriority; \
@@ -2229,7 +2229,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
/* If the task is running (or yielding), we must add it to the
* termination list so that an idle task can delete it when it is
* no longer running. */
if( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE )
if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE ) )
{
/* A running task or a task which is scheduled to yield is being
* deleted. This cannot complete when the task is still running
@@ -2657,7 +2657,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
* https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
/* If null is passed in here then it is the priority of the calling
* task that is being queried. */
@@ -2728,7 +2728,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
* https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
/* If null is passed in here then it is the base priority of the calling
* task that is being queried. */
@@ -3740,11 +3740,39 @@ void vTaskEndScheduler( void )
{
traceENTER_vTaskEndScheduler();
#if ( INCLUDE_vTaskDelete == 1 )
{
BaseType_t xCoreID;
#if ( configUSE_TIMERS == 1 )
{
/* Delete the timer task created by the kernel. */
vTaskDelete( xTimerGetTimerDaemonTaskHandle() );
}
#endif /* #if ( configUSE_TIMERS == 1 ) */
/* Delete Idle tasks created by the kernel.*/
for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
{
vTaskDelete( xIdleTaskHandles[ xCoreID ] );
}
/* Idle task is responsible for reclaiming the resources of the tasks in
* xTasksWaitingTermination list. Since the idle task is now deleted and
* no longer going to run, we need to reclaim resources of all the tasks
* in the xTasksWaitingTermination list. */
prvCheckTasksWaitingTermination();
}
#endif /* #if ( INCLUDE_vTaskDelete == 1 ) */
/* Stop the scheduler interrupts and call the portable scheduler end
* routine so the original ISRs can be restored if necessary. The port
* layer must ensure interrupts enable bit is left in the correct state. */
portDISABLE_INTERRUPTS();
xSchedulerRunning = pdFALSE;
/* This function must be called from a task and the application is
* responsible for deleting that task after the scheduler is stopped. */
vPortEndScheduler();
traceRETURN_vTaskEndScheduler();
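Taken together with the earlier tasks.c change that lets a task be reclaimed immediately when the scheduler is no longer running, vTaskEndScheduler() now cleans up the timer and idle tasks itself, and the application only has to delete the task that made the call once vTaskStartScheduler() returns (on ports that support ending the scheduler, such as the POSIX port changed above). A hedged application-level sketch, with a hypothetical task name and priority and assuming dynamic allocation is enabled:

    #include "FreeRTOS.h"
    #include "task.h"

    static TaskHandle_t xShutdownTaskHandle = NULL;

    static void prvShutdownTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* ... application work, then decide to stop the scheduler ... */
        vTaskEndScheduler();
    }

    int main( void )
    {
        ( void ) xTaskCreate( prvShutdownTask, "Shutdown", configMINIMAL_STACK_SIZE,
                              NULL, tskIDLE_PRIORITY + 1, &xShutdownTaskHandle );

        vTaskStartScheduler();

        /* Reached only after vTaskEndScheduler() has stopped the scheduler.  The
         * application is responsible for deleting the task that made the call. */
        vTaskDelete( xShutdownTaskHandle );

        return 0;
    }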
@@ -4629,7 +4657,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
/* This lets the task know it was forcibly removed from the
* blocked state so it should not re-evaluate its block time and
* then block again. */
pxTCB->ucDelayAborted = pdTRUE;
pxTCB->ucDelayAborted = ( uint8_t ) pdTRUE;
}
else
{
@@ -5570,7 +5598,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
{
/* The delay was aborted, which is not the same as a time out,
* but has the same result. */
pxCurrentTCB->ucDelayAborted = pdFALSE;
pxCurrentTCB->ucDelayAborted = ( uint8_t ) pdFALSE;
xReturn = pdTRUE;
}
else
@@ -6531,6 +6559,7 @@ static void prvResetNextTaskUnblockTime( void )
return xReturn;
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID )
{
@@ -6540,14 +6569,17 @@ static void prvResetNextTaskUnblockTime( void )
if( taskVALID_CORE_ID( xCoreID ) != pdFALSE )
{
#if ( configNUMBER_OF_CORES == 1 )
xReturn = pxCurrentTCB;
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
xReturn = pxCurrentTCBs[ xCoreID ];
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
}
traceRETURN_xTaskGetCurrentTaskHandleForCore( xReturn );
return xReturn;
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
/*-----------------------------------------------------------*/
@@ -8036,7 +8068,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify;
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
if( pulPreviousNotificationValue != NULL )
{
@@ -8195,7 +8227,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify;
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
{
ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@@ -8469,7 +8501,7 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
/* About to enter a delayed list, so ensure the ucDelayAborted flag is
* reset to pdFALSE so it can be detected as having been set to pdTRUE
* when the task leaves the Blocked state. */
pxCurrentTCB->ucDelayAborted = pdFALSE;
pxCurrentTCB->ucDelayAborted = ( uint8_t ) pdFALSE;
}
#endif