Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git, synced 2025-12-07 13:45:00 -05:00

Merge branch 'FreeRTOS:main' into MISRA-10.4

Commit e24e4a829e: 5 changed files with 71 additions and 70 deletions
include/task.h

@@ -2289,7 +2289,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION;
  * \defgroup vTaskList vTaskList
  * \ingroup TaskUtils
  */
-#define vTaskList( pcWriteBuffer ) vTaskListTasks( pcWriteBuffer, configSTATS_BUFFER_MAX_LENGTH )
+#define vTaskList( pcWriteBuffer ) vTaskListTasks( ( pcWriteBuffer ), configSTATS_BUFFER_MAX_LENGTH )
 
 /**
  * task. h
@@ -2412,7 +2412,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION;
  * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats
  * \ingroup TaskUtils
  */
-#define vTaskGetRunTimeStats( pcWriteBuffer ) vTaskGetRunTimeStatistics( pcWriteBuffer, configSTATS_BUFFER_MAX_LENGTH )
+#define vTaskGetRunTimeStats( pcWriteBuffer ) vTaskGetRunTimeStatistics( ( pcWriteBuffer ), configSTATS_BUFFER_MAX_LENGTH )
 
 /**
  * task. h
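Both task.h changes apply the same MISRA C:2012 Rule 20.7 pattern: every expansion of a macro parameter gets its own parentheses. In these two macros the parameter lands in a function-argument position, so the parentheses are defensive rather than behaviour-changing; the rule exists for expansions like this minimal sketch (illustrative macros, not code from this commit):

    #define SCALE_BAD( x )     ( x * 10 )
    #define SCALE_GOOD( x )    ( ( x ) * 10 )

    /* SCALE_BAD( 2 + 3 )  expands to ( 2 + 3 * 10 )     == 32, not intended. */
    /* SCALE_GOOD( 2 + 3 ) expands to ( ( 2 + 3 ) * 10 ) == 50, as intended.  */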
portable/common/mpu_wrappers_v2.c

@@ -197,46 +197,46 @@
 /*
  * Wrappers to keep all the casting in one place.
  */
-#define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
-#define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
+#define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_QUEUE )
+#define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_QUEUE )
 
 #if ( configUSE_QUEUE_SETS == 1 )
-    #define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
-    #define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
-    #define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE )
-    #define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE )
-    #define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_QUEUE )
+    #define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_QUEUE )
+    #define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_QUEUE )
+    #define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_QUEUE )
+    #define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_QUEUE )
+    #define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_QUEUE )
 #endif
 
 /*
  * Wrappers to keep all the casting in one place for Task APIs.
  */
-#define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_TASK )
-#define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TASK )
-#define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TASK )
+#define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_TASK )
+#define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_TASK )
+#define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_TASK )
 
 /*
  * Wrappers to keep all the casting in one place for Event Group APIs.
  */
-#define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP )
-#define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_EVENT_GROUP )
-#define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+#define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP )
+#define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_EVENT_GROUP )
+#define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_EVENT_GROUP )
 
 /*
  * Wrappers to keep all the casting in one place for Stream Buffer APIs.
  */
-#define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
-#define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
-#define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+#define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+#define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_STREAM_BUFFER )
+#define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_STREAM_BUFFER )
 
 #if ( configUSE_TIMERS == 1 )
 
 /*
  * Wrappers to keep all the casting in one place for Timer APIs.
  */
-    #define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, ( void * ) pxApplicationCallback, KERNEL_OBJECT_TYPE_TIMER )
-    #define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TIMER )
-    #define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TIMER )
+    #define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( ( lIndex ), ( OpaqueObjectHandle_t ) ( xHandle ), ( void * ) ( pxApplicationCallback ), KERNEL_OBJECT_TYPE_TIMER )
+    #define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( ( lIndex ), KERNEL_OBJECT_TYPE_TIMER )
+    #define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) ( xHandle ), KERNEL_OBJECT_TYPE_TIMER )
 
 #endif /* #if ( configUSE_TIMERS == 1 ) */
 
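Here the parentheses around xHandle do real work, because the parameter sits directly under a cast and a cast binds tighter than any binary operator. A sketch with plain types (illustrative names, not from the file):

    #define TO_U32_BAD( x )     ( uint32_t ) x
    #define TO_U32_GOOD( x )    ( uint32_t ) ( x )

    /* TO_U32_BAD( a + b )  expands to ( uint32_t ) a + b,      casting only a.   */
    /* TO_U32_GOOD( a + b ) expands to ( uint32_t ) ( a + b ),  casting the sum.  */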
@@ -245,7 +245,7 @@
 /**
  * @brief Kernel object pool.
  */
-PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { NULL };
+PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { 0 };
 /*-----------------------------------------------------------*/
 
 static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) /* PRIVILEGED_FUNCTION */
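The initialiser change is worth a note: { 0 } is C's universal zero initialiser and value-initialises every member of an aggregate whatever its type, whereas { NULL } supplies a pointer constant for the first member specifically. A sketch with a made-up type:

    typedef struct EXAMPLE_OBJECT
    {
        void * pvInternalHandle;
        void * pvData;
        uint32_t ulType;
    } ExampleObject_t;

    /* Every member of every element is zeroed, pointers and integers alike. */
    static ExampleObject_t xExamplePool[ 4 ] = { 0 };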
@@ -263,13 +263,13 @@
             if( xKernelObjectPool[ i ].xInternalObjectHandle == NULL )
             {
                 /* Mark this index as not free. */
-                xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0 );
+                xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0U );
                 lFreeIndex = i;
                 break;
             }
         }
     }
-    xTaskResumeAll();
+    ( void ) xTaskResumeAll();
 
     return lFreeIndex;
 }
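Two separate MISRA-driven idioms appear in this hunk. ~0 applies bitwise NOT to a signed int, which the essential-type rules flag; ~0U keeps the operation in unsigned arithmetic before the cast to the handle type. And a deliberately ignored return value is cast to void (Rule 17.7) so the discard is explicit. In isolation:

    uint32_t ulAllBitsSet = ~0U;    /* Unsigned operand, no bitwise operation on a signed value. */

    ( void ) xTaskResumeAll();      /* Return value intentionally unused. */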
@@ -2964,7 +2964,7 @@
     QueueHandle_t xInternalQueueHandle = NULL;
     BaseType_t xReturn = pdFAIL;
 
-    lIndex = ( uint32_t ) xQueue;
+    lIndex = ( int32_t ) xQueue;
 
     if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
     {
portable/ThirdParty/GCC/Posix/port.c (vendored): 39 changed lines

@@ -74,7 +74,6 @@
 /* Scheduler includes. */
 #include "FreeRTOS.h"
 #include "task.h"
-#include "list.h"
 #include "timers.h"
 #include "utils/wait_for_event.h"
 /*-----------------------------------------------------------*/
@@ -88,7 +87,6 @@ typedef struct THREAD
     void * pvParams;
     BaseType_t xDying;
     struct event * ev;
-    ListItem_t xThreadListItem;
 } Thread_t;
 
 /*
@@ -113,7 +111,6 @@ static BaseType_t xSchedulerEnd = pdFALSE;
 static pthread_t hTimerTickThread;
 static bool xTimerTickThreadShouldRun;
 static uint64_t prvStartTimeNs;
-static List_t xThreadList;
 /*-----------------------------------------------------------*/
 
 static void prvSetupSignalsAndSchedulerPolicy( void );
@@ -197,14 +194,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
 
     thread->ev = event_create();
 
-    vListInitialiseItem( &thread->xThreadListItem );
-    listSET_LIST_ITEM_OWNER( &thread->xThreadListItem, thread );
-
     vPortEnterCritical();
 
-    /* Add the new thread in xThreadList. */
-    vListInsertEnd( &xThreadList, &thread->xThreadListItem );
-
     iRet = pthread_create( &thread->pthread, &xThreadAttributes,
                            prvWaitForStart, thread );
 
@@ -235,8 +226,6 @@ BaseType_t xPortStartScheduler( void )
 {
     int iSignal;
     sigset_t xSignals;
-    ListItem_t * pxIterator;
-    const ListItem_t * pxEndMarker;
 
     hMainThread = pthread_self();
     prvPortSetCurrentThreadName("Scheduler");
@@ -263,19 +252,6 @@ BaseType_t xPortStartScheduler( void )
         sigwait( &xSignals, &iSignal );
     }
 
-    /* Cancel all the running thread. */
-    pxEndMarker = listGET_END_MARKER( &xThreadList );
-
-    for( pxIterator = listGET_HEAD_ENTRY( &xThreadList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
-    {
-        Thread_t * pxThread = ( Thread_t * ) listGET_LIST_ITEM_OWNER( pxIterator );
-
-        pthread_cancel( pxThread->pthread );
-        event_signal( pxThread->ev );
-        pthread_join( pxThread->pthread, NULL );
-        event_delete( pxThread->ev );
-    }
-
     /*
      * clear out the variable that is used to end the scheduler, otherwise
      * subsequent scheduler restarts will end immediately.
@@ -300,6 +276,8 @@ BaseType_t xPortStartScheduler( void )
 
 void vPortEndScheduler( void )
 {
+    Thread_t * pxCurrentThread;
+
     /* Stop the timer tick thread. */
     xTimerTickThreadShouldRun = false;
     pthread_join( hTimerTickThread, NULL );
@@ -308,7 +286,10 @@ void vPortEndScheduler( void )
     xSchedulerEnd = pdTRUE;
     ( void ) pthread_kill( hMainThread, SIG_RESUME );
 
-    pthread_exit( NULL );
+    /* Waiting to be deleted here. */
+    pxCurrentThread = prvGetThreadFromTask( xTaskGetCurrentTaskHandle() );
+    event_wait( pxCurrentThread->ev );
+    pthread_testcancel();
 }
 /*-----------------------------------------------------------*/
 
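Taken together, the port.c hunks replace the old teardown model, in which xPortStartScheduler walked a port-private xThreadList and pthread_cancel'ed every thread, with one driven from the kernel side: vTaskEndScheduler (see the tasks.c hunks below) deletes the kernel-created tasks, each deletion ends up in vPortCancelThread, and the task that called vTaskEndScheduler parks in event_wait() until it is cancelled in turn. The pthread_testcancel() call matters because deferred cancellation only fires at a cancellation point; a rough sketch of that pattern (not the port code):

    #include <pthread.h>

    static void * prvParkedThread( void * pvArg )
    {
        ( void ) pvArg;

        for( ;; )
        {
            /* ... block on an event or condition variable here ... */
            pthread_testcancel(); /* A pending pthread_cancel() takes effect here. */
        }

        return NULL; /* Never reached. */
    }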
@@ -491,11 +472,6 @@ void vPortCancelThread( void * pxTaskToDelete )
 {
     Thread_t * pxThreadToCancel = prvGetThreadFromTask( pxTaskToDelete );
 
-    /* Remove the thread from xThreadList. */
-    vPortEnterCritical();
-    uxListRemove( &pxThreadToCancel->xThreadListItem );
-    vPortExitCritical();
-
     /*
      * The thread has already been suspended so it can be safely cancelled.
      */
@@ -600,9 +576,6 @@ static void prvSetupSignalsAndSchedulerPolicy( void )
 
     hMainThread = pthread_self();
 
-    /* Setup thread list to record all the task which are not deleted. */
-    vListInitialise( &xThreadList );
-
     /* Initialise common signal masks. */
     sigfillset( &xAllSignals );
 
queue.c: 8 changed lines

@@ -1190,7 +1190,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
      * read, instead return a flag to say whether a context switch is required or
      * not (i.e. has a task with a higher priority than us been woken by this
      * post). */
-    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
     {
         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
         {
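All four queue.c hunks are the same one-line change. taskENTER_CRITICAL_FROM_ISR() resolves to a port-defined interrupt-mask operation whose return type varies by port, so assigning the result straight to a UBaseType_t can be an implicit essential-type conversion under MISRA Rule 10.3; the cast makes the conversion explicit. The save/restore pairing the cast sits inside, sketched:

    UBaseType_t uxSavedInterruptStatus;

    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
    {
        /* ... ISR-safe work on the protected structure ... */
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );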
@@ -1365,7 +1365,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -2055,7 +2055,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -2153,7 +2153,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
     {
         /* Cannot block in an ISR, so check there is data available. */
         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
tasks.c: 44 changed lines

@@ -2229,7 +2229,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             /* If the task is running (or yielding), we must add it to the
              * termination list so that an idle task can delete it when it is
              * no longer running. */
-            if( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE )
+            if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE ) )
            {
                 /* A running task or a task which is scheduled to yield is being
                  * deleted. This cannot complete when the task is still running
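The new xSchedulerRunning condition covers deletion while the scheduler is not running, either before it has started or after vTaskEndScheduler() (below) has stopped it. In that state no idle task will ever run to drain xTasksWaitingTermination, so a task that still looks "running" must be reclaimed immediately rather than parked on the termination list:

    /* Sketch: with the scheduler already stopped, this frees the TCB and stack
     * at once instead of deferring to an idle task that will never run.
     * xSomeTaskHandle is an illustrative handle, not from this commit. */
    vTaskDelete( xSomeTaskHandle );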
@@ -2657,7 +2657,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
          * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
         {
             /* If null is passed in here then it is the priority of the calling
              * task that is being queried. */
@@ -2728,7 +2728,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
          * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
         {
             /* If null is passed in here then it is the base priority of the calling
              * task that is being queried. */
@@ -3740,11 +3740,39 @@ void vTaskEndScheduler( void )
 {
     traceENTER_vTaskEndScheduler();
 
+    #if ( INCLUDE_vTaskDelete == 1 )
+    {
+        BaseType_t xCoreID;
+
+        #if ( configUSE_TIMERS == 1 )
+        {
+            /* Delete the timer task created by the kernel. */
+            vTaskDelete( xTimerGetTimerDaemonTaskHandle() );
+        }
+        #endif /* #if ( configUSE_TIMERS == 1 ) */
+
+        /* Delete Idle tasks created by the kernel. */
+        for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
+        {
+            vTaskDelete( xIdleTaskHandles[ xCoreID ] );
+        }
+
+        /* Idle task is responsible for reclaiming the resources of the tasks in
+         * xTasksWaitingTermination list. Since the idle task is now deleted and
+         * no longer going to run, we need to reclaim resources of all the tasks
+         * in the xTasksWaitingTermination list. */
+        prvCheckTasksWaitingTermination();
+    }
+    #endif /* #if ( INCLUDE_vTaskDelete == 1 ) */
+
     /* Stop the scheduler interrupts and call the portable scheduler end
      * routine so the original ISRs can be restored if necessary. The port
      * layer must ensure interrupts enable bit is left in the correct state. */
     portDISABLE_INTERRUPTS();
     xSchedulerRunning = pdFALSE;
 
+    /* This function must be called from a task and the application is
+     * responsible for deleting that task after the scheduler is stopped. */
     vPortEndScheduler();
 
     traceRETURN_vTaskEndScheduler();
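The new comment pins down the contract: vTaskEndScheduler() must be called from a task, the kernel-created tasks (timer daemon, idle) are deleted inside the call, and the application deletes the calling task afterwards. A sketch of that shape on a port whose vTaskStartScheduler() returns once the scheduler has ended (illustrative names, not from this commit):

    static void prvShutdownTask( void * pvParameters )
    {
        ( void ) pvParameters;
        vTaskEndScheduler();    /* Deletes kernel tasks, then calls vPortEndScheduler(). */
    }

    int main( void )
    {
        TaskHandle_t xShutdownTask = NULL;

        ( void ) xTaskCreate( prvShutdownTask, "shutdown", configMINIMAL_STACK_SIZE,
                              NULL, tskIDLE_PRIORITY + 1U, &xShutdownTask );
        vTaskStartScheduler();    /* Returns after the scheduler has been ended. */

        /* Per the new comment, the calling task is deleted by the application. */
        vTaskDelete( xShutdownTask );

        return 0;
    }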
@@ -4629,7 +4657,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                 /* This lets the task know it was forcibly removed from the
                  * blocked state so it should not re-evaluate its block time and
                  * then block again. */
-                pxTCB->ucDelayAborted = pdTRUE;
+                pxTCB->ucDelayAborted = ( uint8_t ) pdTRUE;
             }
             else
             {
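This and the two ucDelayAborted hunks below are the same narrowing fix repeated: pdTRUE and pdFALSE expand to BaseType_t-sized constants, while ucDelayAborted is a uint8_t, so the bare assignment converts a signed value to a narrower unsigned type implicitly (MISRA Rule 10.3). The cast spells the conversion out:

    uint8_t ucFlag;

    ucFlag = ( uint8_t ) pdTRUE;    /* Explicit narrowing, no implicit conversion. */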
@@ -5570,7 +5598,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
     {
         /* The delay was aborted, which is not the same as a time out,
          * but has the same result. */
-        pxCurrentTCB->ucDelayAborted = pdFALSE;
+        pxCurrentTCB->ucDelayAborted = ( uint8_t ) pdFALSE;
         xReturn = pdTRUE;
     }
     else
@@ -8036,7 +8064,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
         {
             if( pulPreviousNotificationValue != NULL )
             {
@@ -8195,7 +8223,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
         {
             ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
             pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@@ -8469,7 +8497,7 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
             /* About to enter a delayed list, so ensure the ucDelayAborted flag is
              * reset to pdFALSE so it can be detected as having been set to pdTRUE
              * when the task leaves the Blocked state. */
-            pxCurrentTCB->ucDelayAborted = pdFALSE;
+            pxCurrentTCB->ucDelayAborted = ( uint8_t ) pdFALSE;
         }
     #endif
 