feat(freertos-smp): Added xTaskRemoveFromEventListFromISR()

This commit is contained in:
Sudeep Mohanty 2025-08-21 13:02:53 +02:00
parent 592177f42e
commit fbd37a219e
4 changed files with 200 additions and 119 deletions

View file

@@ -2146,10 +2146,18 @@
#define traceENTER_xTaskRemoveFromEventList( pxEventList ) #define traceENTER_xTaskRemoveFromEventList( pxEventList )
#endif #endif
#ifndef traceENTER_xTaskRemoveFromEventListFromISR
#define traceENTER_xTaskRemoveFromEventListFromISR( pxEventList )
#endif
#ifndef traceRETURN_xTaskRemoveFromEventList #ifndef traceRETURN_xTaskRemoveFromEventList
#define traceRETURN_xTaskRemoveFromEventList( xReturn ) #define traceRETURN_xTaskRemoveFromEventList( xReturn )
#endif #endif
#ifndef traceRETURN_xTaskRemoveFromEventListFromISR
#define traceRETURN_xTaskRemoveFromEventListFromISR( xReturn )
#endif
#ifndef traceENTER_vTaskRemoveFromUnorderedEventList #ifndef traceENTER_vTaskRemoveFromUnorderedEventList
#define traceENTER_vTaskRemoveFromUnorderedEventList( pxEventListItem, xItemValue ) #define traceENTER_vTaskRemoveFromUnorderedEventList( pxEventListItem, xItemValue )
#endif #endif

View file

@@ -3720,6 +3720,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* Removes a task from both the specified event list and the list of blocked * Removes a task from both the specified event list and the list of blocked
* tasks, and places it on a ready queue. * tasks, and places it on a ready queue.
* *
* Do not call this function from an ISR context. Call xTaskRemoveFromEventListFromISR() instead.
*
* xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called * xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called
* if either an event occurs to unblock a task, or the block timeout period * if either an event occurs to unblock a task, or the block timeout period
* expires. * expires.
@@ -3736,6 +3738,23 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* making the call, otherwise pdFALSE. * making the call, otherwise pdFALSE.
*/ */
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
* INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
* AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
*
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
*
* Removes a task from both the specified event list and the list of blocked
* tasks, and places it on a ready queue. This function is the ISR-safe version
* of xTaskRemoveFromEventList().
*
* @return pdTRUE if the task being removed has a higher priority than the task
* making the call, otherwise pdFALSE.
*/
BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
const TickType_t xItemValue ) PRIVILEGED_FUNCTION; const TickType_t xItemValue ) PRIVILEGED_FUNCTION;

55
queue.c
View file

@@ -222,7 +222,20 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
* the queue set that the queue contains data. * the queue set that the queue contains data.
*/ */
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*
* A version of prvNotifyQueueSetContainer() that can be called from an
* interrupt service routine (ISR).
*/
static BaseType_t prvNotifyQueueSetContainerFromISR( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
/*
* This function serves as a generic implementation for prvNotifyQueueSetContainer()
* and prvNotifyQueueSetContainerFromISR().
*/
static BaseType_t prvNotifyQueueSetContainerGeneric( const Queue_t * const pxQueue,
const BaseType_t xIsISR ) PRIVILEGED_FUNCTION;
#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
/* /*
* Called after a Queue_t structure has been allocated either statically or * Called after a Queue_t structure has been allocated either statically or
@@ -1294,7 +1307,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
* in the queue has not changed. */ * in the queue has not changed. */
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) else if( prvNotifyQueueSetContainerFromISR( pxQueue ) != pdFALSE )
{ {
/* The queue is a member of a queue set, and posting /* The queue is a member of a queue set, and posting
* to the queue set caused a higher priority task to * to the queue set caused a higher priority task to
@@ -1317,7 +1330,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
{ {
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority so /* The task waiting has a higher priority so
* record that a context switch is required. */ * record that a context switch is required. */
@@ -1345,7 +1358,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
{ {
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority so record that a /* The task waiting has a higher priority so record that a
* context switch is required. */ * context switch is required. */
@@ -1468,7 +1481,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
{ {
if( pxQueue->pxQueueSetContainer != NULL ) if( pxQueue->pxQueueSetContainer != NULL )
{ {
if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) if( prvNotifyQueueSetContainerFromISR( pxQueue ) != pdFALSE )
{ {
/* The semaphore is a member of a queue set, and /* The semaphore is a member of a queue set, and
* posting to the queue set caused a higher priority * posting to the queue set caused a higher priority
@@ -1491,7 +1504,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
{ {
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority so /* The task waiting has a higher priority so
* record that a context switch is required. */ * record that a context switch is required. */
@@ -1519,7 +1532,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
{ {
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority so record that a /* The task waiting has a higher priority so record that a
* context switch is required. */ * context switch is required. */
@@ -2111,7 +2124,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
{ {
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority than us so /* The task waiting has a higher priority than us so
* force a context switch. */ * force a context switch. */
@@ -3354,6 +3367,19 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
#if ( configUSE_QUEUE_SETS == 1 ) #if ( configUSE_QUEUE_SETS == 1 )
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
{
/* Call the generic version with xIsISR = pdFALSE to indicate task context */
return prvNotifyQueueSetContainerGeneric( pxQueue, pdFALSE );
}
static BaseType_t prvNotifyQueueSetContainerFromISR( const Queue_t * const pxQueue )
{
/* Call the generic version with xIsISR = pdTRUE to indicate ISR context */
return prvNotifyQueueSetContainerGeneric( pxQueue, pdTRUE );
}
static BaseType_t prvNotifyQueueSetContainerGeneric( const Queue_t * const pxQueue,
const BaseType_t xIsISR )
{ {
Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer; Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
BaseType_t xReturn = pdFALSE; BaseType_t xReturn = pdFALSE;
@@ -3379,7 +3405,18 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{ {
if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) BaseType_t xHigherPriorityTaskWoken;
if( xIsISR == pdTRUE )
{
xHigherPriorityTaskWoken = xTaskRemoveFromEventListFromISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) );
}
else
{
xHigherPriorityTaskWoken = xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) );
}
if( xHigherPriorityTaskWoken != pdFALSE )
{ {
/* The task waiting has a higher priority. */ /* The task waiting has a higher priority. */
xReturn = pdTRUE; xReturn = pdTRUE;

237
tasks.c
View file

@@ -704,6 +704,12 @@ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
*/ */
static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION; static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
/*
* Private helper function to remove a task from an event list. This function
* is shared between the task context and ISR context versions.
*/
static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
/* /*
* The currently executing task is entering the Blocked state. Add the task to * The currently executing task is entering the Blocked state. Add the task to
* either the current or the overflow delayed task list. * either the current or the overflow delayed task list.
@@ -5749,137 +5755,148 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{ {
TCB_t * pxUnblockedTCB; traceENTER_xTaskRemoveFromEventList( pxEventList );
BaseType_t xReturn;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
/* Lock the kernel data group as we are about to access its members */
kernelENTER_CRITICAL();
{
xReturn = prvTaskRemoveFromEventList( pxEventList );
}
kernelEXIT_CRITICAL();
#else
xReturn = prvTaskRemoveFromEventList( pxEventList );
#endif
traceRETURN_xTaskRemoveFromEventList( xReturn );
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList )
{
traceENTER_xTaskRemoveFromEventListFromISR( pxEventList );
BaseType_t xReturn; BaseType_t xReturn;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
UBaseType_t uxSavedInterruptStatus; UBaseType_t uxSavedInterruptStatus;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceENTER_xTaskRemoveFromEventList( pxEventList );
#if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
* called from a critical section within an ISR. */
#else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
/* Lock the kernel data group as we are about to access its members */ /* Lock the kernel data group as we are about to access its members */
if( portCHECK_IF_IN_ISR() == pdTRUE ) uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
{ {
uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); xReturn = prvTaskRemoveFromEventList( pxEventList );
}
kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#else
xReturn = prvTaskRemoveFromEventList( pxEventList );
#endif
traceRETURN_xTaskRemoveFromEventListFromISR( xReturn );
return xReturn;
}
/*-----------------------------------------------------------*/
static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList )
{
TCB_t * pxUnblockedTCB;
BaseType_t xReturn;
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
* called from a critical section within an ISR. */
/* Before proceeding, check if the event list is empty */
if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
{
/* The event list is sorted in priority order, so the first in the list can
* be removed as it is known to be the highest priority. Remove the TCB from
* the delayed list, and add it to the ready list.
*
* If an event is for a queue that is locked then this function will never
* get called - the lock count on the queue will get modified instead. This
* means exclusive access to the event list is guaranteed here.
*
* This function assumes that a check has already been made to ensure that
* pxEventList is not empty. */
/* MISRA Ref 11.5.3 [Void pointer assignment] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
/* coverity[misra_c_2012_rule_11_5_violation] */
pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
configASSERT( pxUnblockedTCB );
listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );
if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
prvAddTaskToReadyList( pxUnblockedTCB );
#if ( configUSE_TICKLESS_IDLE != 0 )
{
/* If a task is blocked on a kernel object then xNextTaskUnblockTime
* might be set to the blocked task's time out time. If the task is
* unblocked for a reason other than a timeout xNextTaskUnblockTime is
* normally left unchanged, because it is automatically reset to a new
* value when the tick count equals xNextTaskUnblockTime. However if
* tickless idling is used it might be more important to enter sleep mode
* at the earliest possible time - so reset xNextTaskUnblockTime here to
* ensure it is updated at the earliest possible time. */
prvResetNextTaskUnblockTime();
}
#endif
} }
else else
{ {
uxSavedInterruptStatus = 0; /* The delayed and ready lists cannot be accessed, so hold this task
kernelENTER_CRITICAL(); * pending until the scheduler is resumed. */
listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
} }
/* Before taking the kernel lock, another task/ISR could have already #if ( configNUMBER_OF_CORES == 1 )
* emptied the pxEventList. So we insert a check here to see if
* pxEventList is empty before attempting to remove an item from it. */
if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
{ {
#endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
{
/* Return true if the task removed from the event list has a higher
* priority than the calling task. This allows the calling task to know if
* it should force a context switch now. */
xReturn = pdTRUE;
/* The event list is sorted in priority order, so the first in the list can /* Mark that a yield is pending in case the user is not using the
* be removed as it is known to be the highest priority. Remove the TCB from * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
* the delayed list, and add it to the ready list. xYieldPendings[ 0 ] = pdTRUE;
* }
* If an event is for a queue that is locked then this function will never else
* get called - the lock count on the queue will get modified instead. This {
* means exclusive access to the event list is guaranteed here. xReturn = pdFALSE;
* }
* This function assumes that a check has already been made to ensure that
* pxEventList is not empty. */
/* MISRA Ref 11.5.3 [Void pointer assignment] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
/* coverity[misra_c_2012_rule_11_5_violation] */
pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
configASSERT( pxUnblockedTCB );
listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );
if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
{
listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
prvAddTaskToReadyList( pxUnblockedTCB );
#if ( configUSE_TICKLESS_IDLE != 0 )
{
/* If a task is blocked on a kernel object then xNextTaskUnblockTime
* might be set to the blocked task's time out time. If the task is
* unblocked for a reason other than a timeout xNextTaskUnblockTime is
* normally left unchanged, because it is automatically reset to a new
* value when the tick count equals xNextTaskUnblockTime. However if
* tickless idling is used it might be more important to enter sleep mode
* at the earliest possible time - so reset xNextTaskUnblockTime here to
* ensure it is updated at the earliest possible time. */
prvResetNextTaskUnblockTime();
} }
#endif #else /* #if ( configNUMBER_OF_CORES == 1 ) */
{
xReturn = pdFALSE;
#if ( configUSE_PREEMPTION == 1 )
{
prvYieldForTask( pxUnblockedTCB );
if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
{
xReturn = pdTRUE;
}
}
#endif /* #if ( configUSE_PREEMPTION == 1 ) */
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
} }
else else
{ {
/* The delayed and ready lists cannot be accessed, so hold this task /* The pxEventList was emptied before we entered the critical
* pending until the scheduler is resumed. */ * section, Nothing to do except return pdFALSE. */
listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
}
#if ( configNUMBER_OF_CORES == 1 )
{
if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
{
/* Return true if the task removed from the event list has a higher
* priority than the calling task. This allows the calling task to know if
* it should force a context switch now. */
xReturn = pdTRUE;
/* Mark that a yield is pending in case the user is not using the
* "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
xYieldPendings[ 0 ] = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
}
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
{
xReturn = pdFALSE; xReturn = pdFALSE;
#if ( configUSE_PREEMPTION == 1 )
{
prvYieldForTask( pxUnblockedTCB );
if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
{
xReturn = pdTRUE;
}
}
#endif /* #if ( configUSE_PREEMPTION == 1 ) */
} }
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
}
else
{
/* The pxEventList was emptied before we entered the critical
* section, Nothing to do except return pdFALSE. */
xReturn = pdFALSE;
}
/* We are done accessing the kernel data group. Unlock it. */
if( portCHECK_IF_IN_ISR() == pdTRUE )
{
kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
}
else
{
kernelEXIT_CRITICAL();
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceRETURN_xTaskRemoveFromEventList( xReturn );
return xReturn; return xReturn;
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/