commit 72a314ec59
Sudeep Mohanty 2025-04-03 15:08:03 +05:30 committed by GitHub
8 changed files with 1890 additions and 459 deletions

@ -63,10 +63,77 @@
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif #endif
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xTaskSpinlock;
portSPINLOCK_TYPE xISRSpinlock;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} EventGroup_t; } EventGroup_t;
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
/*
* Macros to mark the start and end of a critical code region.
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define event_groupsENTER_CRITICAL( pxEventBits ) vEventGroupsEnterCritical( pxEventBits )
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) uxEventGroupsEnterCriticalFromISR( pxEventBits )
#define event_groupsEXIT_CRITICAL( pxEventBits ) vEventGroupsExitCritical( pxEventBits )
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) vEventGroupsExitCriticalFromISR( uxSavedInterruptStatus, pxEventBits )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define event_groupsENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL();
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) taskENTER_CRITICAL_FROM_ISR();
#define event_groupsEXIT_CRITICAL( pxEventBits ) taskEXIT_CRITICAL();
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
/*
* Enters a critical section for an event group. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
/*
* Enters a critical section for an event group from an ISR context. Takes the ISR
* spinlock and returns the previous interrupt state.
*/
static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for an event group. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vEventGroupsExitCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for an event group from an ISR context. Releases the ISR
* spinlock and conditionally restores the previous interrupt state.
*/
static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*
* Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
* ISRs to pend access to the event group. Caller cannot be preempted by other tasks
* after locking the event group, thus allowing the caller to execute non-deterministic
* operations.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*
* Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables
* preemption for the caller.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/* /*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met. * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@ -79,6 +146,25 @@
const EventBits_t uxBitsToWaitFor, const EventBits_t uxBitsToWaitFor,
const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION; const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
/*
* Macros used to lock and unlock an event group. When a task locks an
* event group, the task will have thread safe non-deterministic access to
* the event group.
* - Concurrent access from other tasks will be blocked by the xTaskSpinlock
* - Concurrent access from ISRs will be pended
*
* When the task unlocks the event group, all pended access attempts are handled.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits )
#define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits );
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define event_groupsLOCK( pxEventBits ) vTaskSuspendAll()
#define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll()
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
@ -122,6 +208,13 @@
} }
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceEVENT_GROUP_CREATE( pxEventBits ); traceEVENT_GROUP_CREATE( pxEventBits );
} }
else else
@ -167,6 +260,13 @@
} }
#endif /* configSUPPORT_STATIC_ALLOCATION */ #endif /* configSUPPORT_STATIC_ALLOCATION */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceEVENT_GROUP_CREATE( pxEventBits ); traceEVENT_GROUP_CREATE( pxEventBits );
} }
else else
@ -202,7 +302,7 @@
} }
#endif #endif
vTaskSuspendAll(); event_groupsLOCK( pxEventBits );
{ {
uxOriginalBitValue = pxEventBits->uxEventBits; uxOriginalBitValue = pxEventBits->uxEventBits;
@ -245,7 +345,7 @@
} }
} }
} }
xAlreadyYielded = xTaskResumeAll(); xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
if( xTicksToWait != ( TickType_t ) 0 ) if( xTicksToWait != ( TickType_t ) 0 )
{ {
@ -267,7 +367,7 @@
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{ {
/* The task timed out, just return the current event bit value. */ /* The task timed out, just return the current event bit value. */
taskENTER_CRITICAL(); event_groupsENTER_CRITICAL( pxEventBits );
{ {
uxReturn = pxEventBits->uxEventBits; uxReturn = pxEventBits->uxEventBits;
@ -284,7 +384,7 @@
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL(); event_groupsEXIT_CRITICAL( pxEventBits );
xTimeoutOccurred = pdTRUE; xTimeoutOccurred = pdTRUE;
} }
@ -333,7 +433,7 @@
} }
#endif #endif
vTaskSuspendAll(); event_groupsLOCK( pxEventBits );
{ {
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@ -401,7 +501,7 @@
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
} }
} }
xAlreadyYielded = xTaskResumeAll(); xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
if( xTicksToWait != ( TickType_t ) 0 ) if( xTicksToWait != ( TickType_t ) 0 )
{ {
@ -422,7 +522,7 @@
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{ {
taskENTER_CRITICAL(); event_groupsENTER_CRITICAL( pxEventBits );
{ {
/* The task timed out, just return the current event bit value. */ /* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits; uxReturn = pxEventBits->uxEventBits;
@ -447,7 +547,7 @@
xTimeoutOccurred = pdTRUE; xTimeoutOccurred = pdTRUE;
} }
taskEXIT_CRITICAL(); event_groupsEXIT_CRITICAL( pxEventBits );
} }
else else
{ {
@ -482,7 +582,7 @@
configASSERT( xEventGroup ); configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
taskENTER_CRITICAL(); event_groupsENTER_CRITICAL( pxEventBits );
{ {
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ); traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@ -493,7 +593,7 @@
/* Clear the bits. */ /* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear; pxEventBits->uxEventBits &= ~uxBitsToClear;
} }
taskEXIT_CRITICAL(); event_groupsEXIT_CRITICAL( pxEventBits );
traceRETURN_xEventGroupClearBits( uxReturn ); traceRETURN_xEventGroupClearBits( uxReturn );
@ -524,7 +624,7 @@
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{ {
UBaseType_t uxSavedInterruptStatus; UBaseType_t uxSavedInterruptStatus;
EventGroup_t const * const pxEventBits = xEventGroup; EventGroup_t * const pxEventBits = xEventGroup;
EventBits_t uxReturn; EventBits_t uxReturn;
traceENTER_xEventGroupGetBitsFromISR( xEventGroup ); traceENTER_xEventGroupGetBitsFromISR( xEventGroup );
@ -532,11 +632,11 @@
/* MISRA Ref 4.7.1 [Return value shall be checked] */ /* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */ /* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits );
{ {
uxReturn = pxEventBits->uxEventBits; uxReturn = pxEventBits->uxEventBits;
} }
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );
traceRETURN_xEventGroupGetBitsFromISR( uxReturn ); traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
@ -564,10 +664,17 @@
pxList = &( pxEventBits->xTasksWaitingForBits ); pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); pxListEnd = listGET_END_MARKER( pxList );
vTaskSuspendAll(); event_groupsLOCK( pxEventBits );
{ {
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group.*/
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
pxListItem = listGET_HEAD_ENTRY( pxList ); pxListItem = listGET_HEAD_ENTRY( pxList );
/* Set the bits. */ /* Set the bits. */
@ -638,8 +745,12 @@
/* Snapshot resulting bits. */ /* Snapshot resulting bits. */
uxReturnBits = pxEventBits->uxEventBits; uxReturnBits = pxEventBits->uxEventBits;
}
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) event_groupsUNLOCK( pxEventBits );
traceRETURN_xEventGroupSetBits( uxReturnBits ); traceRETURN_xEventGroupSetBits( uxReturnBits );
@ -658,10 +769,17 @@
pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
vTaskSuspendAll(); event_groupsLOCK( pxEventBits );
{ {
traceEVENT_GROUP_DELETE( xEventGroup ); traceEVENT_GROUP_DELETE( xEventGroup );
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group.*/
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 ) while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{ {
/* Unblock the task, returning 0 as the event list is being deleted /* Unblock the task, returning 0 as the event list is being deleted
@ -669,8 +787,12 @@
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) ); configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
} }
}
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) event_groupsUNLOCK( pxEventBits );
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{ {
@ -774,6 +896,129 @@
traceRETURN_vEventGroupClearBitsCallback(); traceRETURN_vEventGroupClearBitsCallback();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits )
{
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Take the ISR spinlock */
portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
return uxSavedInterruptStatus;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vEventGroupsExitCritical( EventGroup_t * pxEventBits )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending stats inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
EventGroup_t * pxEventBits )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Disable preemption so that the current task cannot be preempted by another task */
vTaskPreemptionDisable( NULL );
/* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
* the event group while it is suspended. */
portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Release the previously held task spinlock */
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
/* Re-enable preemption */
vTaskPreemptionEnable( NULL );
/* We assume that the task was preempted when preemption was enabled */
return pdTRUE;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
const EventBits_t uxBitsToWaitFor, const EventBits_t uxBitsToWaitFor,

granular_locks_v4.md (new file)

@ -0,0 +1,411 @@
# Introduction
Currently, the SMP FreeRTOS kernel implements critical sections using a "Global locking" approach, where all data is protected by a single pair of spinlocks (namely the Task Lock and the ISR Lock). This means that every critical section contends for this pair of spinlocks, even if the critical sections access unrelated/orthogonal data.
The goal of this proposal is to use granular or localized spinlocks so that concurrent accesses to different data groups do not contend for the same spinlocks. This reduces lock contention and should improve the performance of the SMP FreeRTOS kernel.
This proposal describes a **"Dual Spinlock With Data Group Locking"** approach to granular locking.
Source code changes are based off release V11.1.0 of the FreeRTOS kernel.
# Data Groups
To make the spinlocks granular, FreeRTOS data will be organized into the following data groups, where each data group is protected by its own set of spinlocks.
- Kernel Data Group
- All data in `tasks.c` and all event lists (e.g., `xTasksWaitingToSend` and `xTasksWaitingToReceive` in the queue objects)
- Queue Data Group
- Each queue object (`Queue_t`) is its own data group (excluding the task lists)
- Event Group Data Group
- Each event group object (`EventGroup_t`) is its own data group (excluding the task lists)
- Stream Buffer Data Group
- Each stream buffer object (`StreamBuffer_t`) is its own data group (excluding the task lists)
- Timers
- All data in `timers.c` and all timer objects belong to the same Timer Data Group
- User/Port Data Groups
- The user and ports are free to organize their own data into data groups of their choosing
# Dual Spinlock With Data Group Locking
The **"Dual Spinlock With Data Group Locking"** uses a pair of spinlocks to protect each data group (namely the `xTaskSpinlock` and `xISRSpinlock` spinlocks).
```c
typedef struct
{
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xTaskSpinlock;
portSPINLOCK_TYPE xISRSpinlock;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} xSomeDataGroup_t;
```
However, each data group must also allow for non-deterministic access with interrupts **enabled** by providing a pair of lock/unlock functions. These functions must block other tasks that try to access members of the data group and must pend accesses from ISRs that try to access members of the data group.
```c
static void prvLockSomeDataGroupForTasks( xSomeDataGroup_t * const pxSomeDataGroup );
static void prvUnlockSomeDataGroupForTasks( xSomeDataGroup_t * const pxSomeDataGroup );
```
In simple terms, the "Dual Spinlock With Data Group Locking" is an extension of the existing dual spinlock (i.e., Task and ISR spinlock) approach used in the SMP FreeRTOS kernel, replicated across the different data groups.
## Data Group Critical Sections (Granular Locks)
A critical section for a data group can be achieved as follows:
- When entering a data group critical section from a task
1. Disable interrupts
2. Take `xTaskSpinlock` of data group
3. Take `xISRSpinlock` of data group
4. Increment nesting count
- When entering a data group critical section from an ISR
1. Disable interrupts
2. Take `xISRSpinlock` of data group
3. Increment nesting count
When exiting a data group critical section, the procedure is reversed. Furthermore, since yield requests are pended when inside a critical section, exiting a task critical section will also need to handle any pended yields.
- When exiting a data group critical section from a task
1. Release `xISRSpinlock` of data group
2. Release `xTaskSpinlock` of data group
3. Decrement nesting count
4. Reenable interrupts if nesting count is 0
5. Trigger yield if there is a yield pending
- When exiting a data group critical section from an ISR
1. Release `xISRSpinlock` of data group
2. Decrement nesting count
3. Reenable interrupts if nesting count is 0
Entering multiple data group critical sections in a nested manner is permitted. This means that if a code path has already entered a critical section in data group A, it can then enter a critical section in data group B. This is analogous to nested critical sections. However, care must be taken to avoid deadlocks. This can be achieved by organizing data groups into a hierarchy, where a higher-layer data group cannot nest into a lower one.
```
+-------------------+
| Kernel Data Group |
+-------------------+
+-------------------+ +---------------------------+ +--------------------------+ +------------------+
| Queue Data Groups | | Stream Buffer Data Groups | | Event Groups Data Groups | | Timer Data Group |
+-------------------+ +---------------------------+ +--------------------------+ +------------------+
+------------------------------------------------------------------------------------------------------+
| User Data Groups |
+------------------------------------------------------------------------------------------------------+
```
If nested locking only occurs from the bottom up (e.g., a User data group can nest into a Queue data group, which in turn can nest into the Kernel data group), then deadlocks will never occur.
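To make the sequences above concrete, the following is a minimal sketch of the task-level enter/exit routines for a hypothetical data group. It reuses the `xSomeDataGroup_t` structure from earlier, the spinlock and per-core critical-nesting helpers used by the kernel-side implementation in this commit (e.g., `portGET_SPINLOCK()`, `portINCREMENT_CRITICAL_NESTING_COUNT()`), and the kernel-provided `xTaskUnlockCanYield()` helper described later in this document. It illustrates the procedure only and is not the exact kernel implementation.
```c
static void prvEnterCriticalFromTask( xSomeDataGroup_t * pxDataGroup )
{
    portDISABLE_INTERRUPTS();                                           /* 1. Disable interrupts. */
    {
        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

        portGET_SPINLOCK( xCoreID, &( pxDataGroup->xTaskSpinlock ) );   /* 2. Take xTaskSpinlock. */
        portGET_SPINLOCK( xCoreID, &( pxDataGroup->xISRSpinlock ) );    /* 3. Take xISRSpinlock. */
        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );                /* 4. Increment nesting count. */
    }
}

static void prvExitCriticalFromTask( xSomeDataGroup_t * pxDataGroup )
{
    const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

    portRELEASE_SPINLOCK( xCoreID, &( pxDataGroup->xISRSpinlock ) );    /* 1. Release xISRSpinlock. */
    portRELEASE_SPINLOCK( xCoreID, &( pxDataGroup->xTaskSpinlock ) );   /* 2. Release xTaskSpinlock. */
    portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );                    /* 3. Decrement nesting count. */

    if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
    {
        portENABLE_INTERRUPTS();                                        /* 4. Re-enable interrupts. */

        if( xTaskUnlockCanYield() != pdFALSE )
        {
            portYIELD();                                                /* 5. Handle a pended yield. */
        }
    }
}
```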
## Data Group Locking
FreeRTOS does not permit walking linked lists while interrupts are disabled, in order to ensure deterministic ISR latency. Therefore, each data group must provide a method of locking so that non-deterministic operations can be executed on the data group. While a data group is locked:
- Preemption is disabled for the current task
- Interrupts remain enabled
- The data group's `xTaskSpinlock` is taken to prevent tasks running on other cores from accessing the data group
- Any ISR that attempts to update the data group will have its access pended. These pended accesses are handled on resumption
The logic of suspending a data group is analogous to the logic of `vTaskSuspendAll()`/`xTaskResumeAll()` and `prvLockQueue()`/`prvUnlockQueue()` in the existing SMP kernel.
The details of how ISR accesses are pended during suspension will be specific to each data group type, thus the implementation of the suspend/resume functions is also specific to each data group type. However, the procedure for data group suspension and resumption will typically be as follows:
- Suspension
1. Disable preemption
2. Lock the data group
3. Set a suspension flag that indicates the data group is suspended
4. Unlock the data group, but keep holding `xTaskSpinlock`
- Resumption
1. Lock the data group
2. Clear the suspension flag
3. Handle all pended accesses from ISRs
4. Unlock the data group, thus releasing `xTaskSpinlock`
Locking multiple data groups in a nested manner is permitted, meaning that if a code path has already locked data group A, it can then lock data group B. This is analogous to nested `vTaskSuspendAll()` calls. As with nested data group critical sections, deadlocks can be avoided by organizing data groups into a hierarchy.
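A minimal sketch of a data group's lock/unlock pair, modelled on the event group lock/unlock functions added in this commit: preemption is disabled and `xTaskSpinlock` is held for the duration of the locked region. How pended ISR accesses are recorded and later handled is data-group specific and is therefore only indicated by comments here.
```c
static void prvLockSomeDataGroupForTasks( xSomeDataGroup_t * const pxSomeDataGroup )
{
    /* Disable preemption so that the current task cannot be switched out
     * while it holds the lock. */
    vTaskPreemptionDisable( NULL );

    /* Keep holding xTaskSpinlock so that tasks on other cores cannot access
     * the data group. Interrupts remain enabled; ISR accesses are pended by
     * data-group specific means (e.g., a lock count or suspension flag). */
    portGET_SPINLOCK( portGET_CORE_ID(), &( pxSomeDataGroup->xTaskSpinlock ) );
}

static void prvUnlockSomeDataGroupForTasks( xSomeDataGroup_t * const pxSomeDataGroup )
{
    /* Handle any accesses that ISRs pended while the data group was locked
     * (data-group specific), then release xTaskSpinlock. */
    portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxSomeDataGroup->xTaskSpinlock ) );

    /* Re-enable preemption; a yield that was pended while the data group was
     * locked may occur at this point. */
    vTaskPreemptionEnable( NULL );
}
```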
## Thread Safety Check
Under SMP, there are four sources of concurrent access for a particular data group:
- Preempting task on the same core
- Preempting ISR on the same core
- Concurrent task on another core
- Concurrent ISR on another core
This section checks that the data group critical section and locking mechanisms described above ensure thread safety against each source of concurrent access.
- Data Group Critical Section from tasks: Interrupts are disabled, `xTaskSpinlock` and `xISRSpinlock` are taken
- Task (same core): Context switch cannot occur because interrupts are disabled
- Task (other cores): The task will spin on `xTaskSpinlock`
- ISR (same core): Interrupts on the current core are disabled
- ISR (other cores): ISR will spin on `xISRSpinlock`
- Data Group Critical Sections from ISRs: Interrupts are disabled, `xISRSpinlock` is taken
- Task (same core): Context switch cannot occur because we are in an ISR
- Task (other cores): The task will spin on `xISRSpinlock`
- ISR (same core): Interrupts on the current core are disabled
- ISR (other cores): ISR will spin on `xISRSpinlock`
- Data Group Locking from tasks: Preemption is disabled, `xTaskSpinlock` is taken
- Task (same core): Context switch cannot occur because preemption is disabled
- Task (other cores): The task will spin on `xTaskSpinlock`
- ISR (same core): Critical section is entered because `xISRSpinlock` is not held, but access is pended
- ISR (other cores): Critical section is entered because `xISRSpinlock` is not held, but access is pended
# Public API Changes
To support **"Dual Spinlock With Data Group Locking"**, the following changes have been made to the public facing API. These changes are non-breaking, meaning that applications that can build against the existing SMP FreeRTOS kernel will still be able to build even with granular locking enabled (albeit less performant).
The following APIs have been added to enter/exit a critical section in a data group. This are called by FreeRTOS source code to mark critical sections in data groups. However, users can also create their own data groups and enter/exit critical sections in the same manner.
If granular locking is disabled, these macros simply revert to being the standard task enter/exit critical macros.
```c
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define data_groupENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock )
#define data_groupENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_DATA_GROUP_FROM_ISR( ( portSPINLOCK_TYPE * ) pxISRSpinlock )
#define data_groupEXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( ( portSPINLOCK_TYPE * ) pxTaskSpinlock, ( portSPINLOCK_TYPE * ) pxISRSpinlock )
#define data_groupEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, ( portSPINLOCK_TYPE * ) pxISRSpinlock )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define data_groupENTER_CRITICAL() taskENTER_CRITICAL()
#define data_groupENTER_CRITICAL_FROM_ISR() taskENTER_CRITICAL_FROM_ISR()
#define data_groupEXIT_CRITICAL() taskEXIT_CRITICAL()
#define data_groupEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
```
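Note that the macros above expand to references named `pxTaskSpinlock` and `pxISRSpinlock`, so a file that owns a data group is expected to have those names in scope wherever the macros are used. The following is a hedged usage sketch, assuming granular locking is enabled and reusing the `xSomeDataGroup_t` structure from earlier (the `ulSomeSharedState` member is illustrative and not part of the earlier definition):
```c
static void vUpdateSomeDataGroup( xSomeDataGroup_t * pxDataGroup,
                                  uint32_t ulNewValue )
{
    /* Alias the data group's spinlocks under the names the macros expect. */
    portSPINLOCK_TYPE * pxTaskSpinlock = &( pxDataGroup->xTaskSpinlock );
    portSPINLOCK_TYPE * pxISRSpinlock = &( pxDataGroup->xISRSpinlock );

    data_groupENTER_CRITICAL();
    {
        /* Members of the data group can be accessed safely here, but only
         * deterministic operations are permitted as interrupts are disabled. */
        pxDataGroup->ulSomeSharedState = ulNewValue;
    }
    data_groupEXIT_CRITICAL();
}
```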
In the case of the kernel data group (`tasks.c`), the granular locking macros make use of the existing `vTaskEnter/ExitCritical<FromISR>()` functions to establish critical sections.
```c
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define kernelENTER_CRITICAL() vTaskEnterCritical()
#define kernelENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR()
#define kernelEXIT_CRITICAL() vTaskExitCritical()
#define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) vTaskExitCriticalFromISR( uxSavedInterruptStatus )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define kernelENTER_CRITICAL() taskENTER_CRITICAL()
#define kernelENTER_CRITICAL_FROM_ISR() taskENTER_CRITICAL_FROM_ISR()
#define kernelEXIT_CRITICAL() taskEXIT_CRITICAL()
#define kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
```
The previous critical section macros, viz., `taskENTER/EXIT_CRITICAL()`, are still provided and can be called by users; however, FreeRTOS source code no longer calls them. If they are called by users, the port should implement a "user" data group for them. As a result, if an application previously relied on `taskENTER/EXIT_CRITICAL()` for thread safe access to some user data, the same code is still thread safe with granular locking enabled.
```c
#define taskENTER_CRITICAL() portENTER_CRITICAL()
#define taskEXIT_CRITICAL() portEXIT_CRITICAL()
#define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR()
#define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x )
```
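A minimal sketch of how a port might back these macros with a dedicated "user" data group when granular locking is enabled. The spinlock names below are illustrative only, and the `_FROM_ISR` variants would be mapped in the same way:
```c
/* A single, port-owned data group that protects all legacy
 * taskENTER/EXIT_CRITICAL() usage by the application. */
static portSPINLOCK_TYPE xUserTaskSpinlock = portINIT_SPINLOCK_STATIC;
static portSPINLOCK_TYPE xUserISRSpinlock = portINIT_SPINLOCK_STATIC;

#define portENTER_CRITICAL()    portENTER_CRITICAL_DATA_GROUP( &xUserTaskSpinlock, &xUserISRSpinlock )
#define portEXIT_CRITICAL()     portEXIT_CRITICAL_DATA_GROUP( &xUserTaskSpinlock, &xUserISRSpinlock )
```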
# Porting Interface
To support **"Dual Spinlock With Data Group Locking"**, ports will need to provide the following macro definitions
## Port Config
Ports will need to provide the following port configuration macros:
```c
#define portUSING_GRANULAR_LOCKS 1 // Enables usage of granular locks
#define portCRITICAL_NESTING_IN_TCB 0 // Disable critical nesting in TCB. Ports will need to track their own critical nesting
```
## Spinlocks
Ports will need to provide the following spinlock related macros:
```c
/*
Data type for the port's implementation of a spinlock
*/
#define portSPINLOCK_TYPE port_spinlock_t
```
Ports must also provide macros for initializing spinlocks, either dynamically or statically. This is reflected in the macro API pattern below:
```c
#define portINIT_SPINLOCK( pxSpinlock ) _port_spinlock_init( pxSpinlock )
#define portINIT_SPINLOCK_STATIC PORT_SPINLOCK_STATIC_INIT
```
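Purely as an illustration, one possible shape for a port's spinlock type and its initializers is sketched below. Real ports will use architecture-specific atomic instructions, and the field and constant names here are assumptions rather than part of the proposal:
```c
typedef struct
{
    volatile uint32_t ulOwnerCoreID;    /* Core currently holding the lock, or spinlockFREE. */
    volatile uint32_t ulNestingCount;   /* Recursion count for same-core re-acquisition. */
} port_spinlock_t;

#define spinlockFREE                 0xFFFFFFFFUL
#define PORT_SPINLOCK_STATIC_INIT    { .ulOwnerCoreID = spinlockFREE, .ulNestingCount = 0UL }

static inline void _port_spinlock_init( port_spinlock_t * pxSpinlock )
{
    pxSpinlock->ulOwnerCoreID = spinlockFREE;
    pxSpinlock->ulNestingCount = 0UL;
}
```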
## Critical Section Macros
The port will need to provide implementations of the macros to enter/exit a data group critical section according to the procedures described above. Typical implementations of each macro are demonstrated below:
```c
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define portENTER_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) vPortEnterCriticalDataGroup( pxTaskSpinlock, pxISRSpinlock )
#define portENTER_CRITICAL_DATA_GROUP_FROM_ISR( pxISRSpinlock ) uxPortEnterCriticalDataGroupFromISR( pxISRSpinlock )
#define portEXIT_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock ) vPortExitCriticalDataGroup( pxTaskSpinlock, pxISRSpinlock )
#define portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( x, pxISRSpinlock ) vPortExitCriticalDataGroupFromISR( x, pxISRSpinlock )
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
```
Example implementation of `portENTER_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock )` and `portEXIT_CRITICAL_DATA_GROUP( pxTaskSpinlock, pxISRSpinlock )`. Note that:
- `pxTaskSpinlock` is made optional in case users want to create their own data groups that are protected by only a single lock.
- The kernel implements the `xTaskUnlockCanYield()` function to indicate whether a yield should occur when a critical section exits. This function takes into account whether there are any pending yields and whether preemption is currently disabled.
```c
void vPortEnterCriticalDataGroup( port_spinlock_t *pxTaskSpinlock, port_spinlock_t *pxISRSpinlock )
{
portDISABLE_INTERRUPTS();
BaseType_t xCoreID = xPortGetCoreID();
/* Task spinlock is optional and is always taken first */
if( pxTaskSpinlock != NULL )
{
vPortSpinlockTake( pxTaskSpinlock, portMUX_NO_TIMEOUT );
uxCriticalNesting[ xCoreID ]++;
}
/* ISR spinlock must always be provided */
vPortSpinlockTake( pxISRSpinlock, portMUX_NO_TIMEOUT );
uxCriticalNesting[ xCoreID ]++;
}
void vPortExitCriticalDataGroup( port_spinlock_t *pxTaskSpinlock, port_spinlock_t *pxISRSpinlock )
{
BaseType_t xCoreID = xPortGetCoreID();
BaseType_t xYieldCurrentTask;
configASSERT( uxCriticalNesting[ xCoreID ] > 0U );
/* Get the xYieldPending stats inside the critical section. */
xYieldCurrentTask = xTaskUnlockCanYield();
/* ISR spinlock must always be provided */
vPortSpinlockRelease( pxISRSpinlock );
uxCriticalNesting[ xCoreID ]--;
/* Task spinlock is optional and is always taken first */
if( pxTaskSpinlock != NULL )
{
vPortSpinlockRelease( pxTaskSpinlock);
uxCriticalNesting[ xCoreID ]--;
}
assert(uxCriticalNesting[ xCoreID ] >= 0);
if( uxCriticalNesting[ xCoreID ] == 0 )
{
portENABLE_INTERRUPTS();
/* When a task yields in a critical section it just sets xYieldPending to
* true. So now that we have exited the critical section check if xYieldPending
* is true, and if so yield. */
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
```
Example implementation of `portENTER_CRITICAL_DATA_GROUP_FROM_ISR( pxISRSpinlock )` and `portEXIT_CRITICAL_DATA_GROUP_FROM_ISR( x, pxISRSpinlock )`. Note that only `pxISRSpinlock` needs to be provided since ISR critical sections take a single lock.
```c
UBaseType_t uxPortEnterCriticalDataGroupFromISR( port_spinlock_t *pxISRSpinlock )
{
UBaseType_t uxSavedInterruptStatus = 0;
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
vPortSpinlockTake( pxISRSpinlock, portMUX_NO_TIMEOUT );
uxCriticalNesting[xPortGetCoreID()]++;
return uxSavedInterruptStatus;
}
```

```c
void vPortExitCriticalDataGroupFromISR( UBaseType_t uxSavedInterruptStatus, port_spinlock_t *pxISRSpinlock )
{
BaseType_t xCoreID = xPortGetCoreID();
vPortSpinlockRelease( pxISRSpinlock );
uxCriticalNesting[ xCoreID ]--;
assert(uxCriticalNesting[ xCoreID ] >= 0);
if( uxCriticalNesting[ xCoreID ] == 0 )
{
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
}
```
# Source Specific Changes
- Added `xTaskSpinlock` and `xISRSpinlock` spinlocks to the data structures of each data group
- All calls to `taskENTER/EXIT_CRITICAL[_FROM_ISR]()` have been replaced with `data_groupENTER/EXIT_CRITICAL[_FROM_ISR]()`.
- Added `xTaskUnlockCanYield()`, which indicates whether a yield should occur when exiting a critical section (i.e., when unlocking a data group). Yields should not occur if preemption is disabled (such as when exiting a critical section inside a suspension block).
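A hedged sketch of the check `xTaskUnlockCanYield()` performs, based on the description above; the exact variable names (`xYieldPendings`, `pxCurrentTCBs`, `xPreemptionDisable`) are assumptions rather than verbatim kernel code:
```c
BaseType_t xTaskUnlockCanYield( void )
{
    BaseType_t xReturn;
    const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

    if( ( xYieldPendings[ xCoreID ] == pdTRUE ) &&
        ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) )
    {
        /* A yield was pended while the critical section/lock was held and
         * nothing is blocking preemption, so the caller should yield. */
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
}
```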
## Tasks (Kernel Data Group)
- Some functions are called from nested critical sections of other data groups, thus an extra critical section call needs to be added to lock/unlock the kernel data group:
- `vTaskInternalSetTimeOutState()`
- `xTaskIncrementTick()`
- `vTaskSwitchContext()`
- `xTaskRemoveFromEventList()`
- `eTaskConfirmSleepModeStatus()`
- `xTaskPriorityDisinherit()`
- `pvTaskIncrementMutexHeldCount()`
- Some functions are called from nested suspension blocks of other data groups, thus an extra suspend/resume call needs to be added:
- `vTaskPlaceOnEventList()`
- `vTaskPlaceOnUnorderedEventList()`
- `vTaskPlaceOnEventListRestricted()`
- `prvCheckForRunStateChange()` has been removed
- Updated `vTaskSuspendAll()` and `xTaskResumeAll()`
- Now holds the `xTaskSpinlock` during kernel suspension
- Also increments/decrements `xPreemptionDisable` to prevent yields from occurring when exiting a critical section from inside a kernel suspension block.
## Queue
- Added `queueLOCK()` and `queueUNLOCK()`
- If granular locks are disabled, reverts to the previous `prvLockQueue()` and `prvUnlockQueue()`
- If granular locks are enabled, will lock/unlock the queue data group for tasks
## Event Groups
- Added `eventLOCK()` and `eventUNLOCK()`
- If granular locks are disabled, reverts to the previous `vTaskSuspendAll()` and `xTaskResumeAll()` calls
- If granular locks are enabled, will lock/unlock the event groups data group for tasks
- `xEventGroupSetBits()` and `vEventGroupDelete()` will manually walk the task lists (which belong to the kernel data group). Thus, an extra `vTaskSuspendAll()`/`xTaskResumeAll()` is added to ensure that the kernel data group is suspended while walking those task lists.
## Stream Buffer
- Added `sbLOCK()` and `sbUNLOCK()`
- If granular locks are disabled, reverts to the previous `vTaskSuspendAll()` and `xTaskResumeAll()` calls
- If granular locks are enabled, will lock/unlock the stream buffer data group for tasks
## Timers
- Timers don't have a lock/unlock function. The existing `vTaskSuspendAll()`/`xTaskResumeAll()` calls are valid as they rely on freezing the tick count which is part of the kernel data group.
# Prerequisite Refactoring
A number of refactoring commits have been added to make the addition of granular locking changes simpler:
1. Move critical sections inside `xTaskPriorityInherit()`
Currently, `xTaskPriorityInherit()` is called with wrapping critical sections. The critical sections have now been moved inside the function so that they have access to the kernel data group's spinlocks.
2. Move critical section into `vTaskPriorityDisinheritAfterTimeout()`
Currently, `vTaskPriorityDisinheritAfterTimeout()` is called with wrapping critical sections, where the highest priority waiting task is separately obtained via `prvGetDisinheritPriorityAfterTimeout()`. The critical section and the check of the highest priority have all been moved into `vTaskPriorityDisinheritAfterTimeout()` as all of these operations access the kernel data group.
3. Allow `vTaskPreemptionEnable()` to be nested
Currently, nested calls of `vTaskPreemptionEnable()` are not supported. However, nested calls are required for granular locking due to the occurrence of nested suspension across multiple data groups.
Thus, `vTaskPreemptionEnable()` has been updated to support nested calls. This is done by changing `xPreemptionDisable` to a count, where a non-zero count means that the current task cannot be preempted.
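A hedged sketch of the count-based behaviour described above, assuming the member name `xPreemptionDisable` and that passing `NULL` refers to the calling task; the handling of a pended yield when the count returns to zero is only indicated by a comment:
```c
void vTaskPreemptionDisable( const TaskHandle_t xTask )
{
    TCB_t * pxTCB = prvGetTCBFromHandle( xTask );

    kernelENTER_CRITICAL();
    {
        /* A non-zero count means the task must not be preempted. */
        pxTCB->xPreemptionDisable++;
    }
    kernelEXIT_CRITICAL();
}

void vTaskPreemptionEnable( const TaskHandle_t xTask )
{
    TCB_t * pxTCB = prvGetTCBFromHandle( xTask );

    kernelENTER_CRITICAL();
    {
        configASSERT( pxTCB->xPreemptionDisable > 0U );
        pxTCB->xPreemptionDisable--;

        if( pxTCB->xPreemptionDisable == 0U )
        {
            /* Preemption is allowed again; act on any yield that was pended
             * while the count was non-zero. */
        }
    }
    kernelEXIT_CRITICAL();
}
```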
# Performance Metrics
Todo


@ -359,6 +359,10 @@
#define portCRITICAL_NESTING_IN_TCB 0 #define portCRITICAL_NESTING_IN_TCB 0
#endif #endif
#ifndef portUSING_GRANULAR_LOCKS
#define portUSING_GRANULAR_LOCKS 0
#endif
#ifndef configMAX_TASK_NAME_LEN #ifndef configMAX_TASK_NAME_LEN
#define configMAX_TASK_NAME_LEN 16 #define configMAX_TASK_NAME_LEN 16
#endif #endif
@ -444,44 +448,68 @@
#ifndef portRELEASE_TASK_LOCK #ifndef portRELEASE_TASK_LOCK
#if ( configNUMBER_OF_CORES == 1 ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) )
#define portRELEASE_TASK_LOCK( xCoreID ) #define portRELEASE_TASK_LOCK( xCoreID )
#else #else
#error portRELEASE_TASK_LOCK is required in SMP #error portRELEASE_TASK_LOCK is required in SMP without granular locking feature enabled
#endif #endif
#endif /* portRELEASE_TASK_LOCK */ #endif /* portRELEASE_TASK_LOCK */
#ifndef portGET_TASK_LOCK #ifndef portGET_TASK_LOCK
#if ( configNUMBER_OF_CORES == 1 ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) )
#define portGET_TASK_LOCK( xCoreID ) #define portGET_TASK_LOCK( xCoreID )
#else #else
#error portGET_TASK_LOCK is required in SMP #error portGET_TASK_LOCK is required in SMP without granular locking feature enabled
#endif #endif
#endif /* portGET_TASK_LOCK */ #endif /* portGET_TASK_LOCK */
#ifndef portRELEASE_ISR_LOCK #ifndef portRELEASE_ISR_LOCK
#if ( configNUMBER_OF_CORES == 1 ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) )
#define portRELEASE_ISR_LOCK( xCoreID ) #define portRELEASE_ISR_LOCK( xCoreID )
#else #else
#error portRELEASE_ISR_LOCK is required in SMP #error portRELEASE_ISR_LOCK is required in SMP without granular locking feature enabled
#endif #endif
#endif /* portRELEASE_ISR_LOCK */ #endif /* portRELEASE_ISR_LOCK */
#ifndef portGET_ISR_LOCK #ifndef portGET_ISR_LOCK
#if ( configNUMBER_OF_CORES == 1 ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) || ( configNUMBER_OF_CORES == 1 ) )
#define portGET_ISR_LOCK( xCoreID ) #define portGET_ISR_LOCK( xCoreID )
#else #else
#error portGET_ISR_LOCK is required in SMP #error portGET_ISR_LOCK is required in SMP without granular locking feature enabled
#endif #endif
#endif /* portGET_ISR_LOCK */ #endif /* portGET_ISR_LOCK */
#ifndef portRELEASE_SPINLOCK
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#error portRELEASE_SPINLOCK is required for SMP with granular locking feature enabled
#endif
#endif
#ifndef portGET_SPINLOCK
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#error portGET_SPINLOCK is required for SMP with granular locking feature enabled
#endif
#endif
#ifndef portCHECK_IF_IN_ISR
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#error portCHECK_IF_IN_ISR is required for granular locking
#endif
#endif
#ifndef portENTER_CRITICAL_FROM_ISR #ifndef portENTER_CRITICAL_FROM_ISR
#if ( configNUMBER_OF_CORES > 1 ) #if ( configNUMBER_OF_CORES > 1 )
@ -498,6 +526,30 @@
#endif #endif
#ifndef portSPINLOCK_TYPE
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#error portSPINLOCK_TYPE is required for SMP with granular locking feature enabled
#endif
#endif
#ifndef portINIT_SPINLOCK
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#error portINIT_SPINLOCK is required for SMP with granular locking feature enabled
#endif
#endif
#ifndef portINIT_SPINLOCK_STATIC
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#error portINIT_SPINLOCK_STATIC is required for SMP with granular locking feature enabled
#endif
#endif
#ifndef configUSE_CORE_AFFINITY #ifndef configUSE_CORE_AFFINITY
#define configUSE_CORE_AFFINITY 0 #define configUSE_CORE_AFFINITY 0
#endif /* configUSE_CORE_AFFINITY */ #endif /* configUSE_CORE_AFFINITY */
@ -2905,11 +2957,16 @@
/* Either variables of tick type cannot be read atomically, or /* Either variables of tick type cannot be read atomically, or
* portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when * portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when
* the tick count is returned to the standard critical section macros. */ * the tick count is returned to the standard critical section macros. */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock )
#define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL()
#define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL()
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR()
#define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( ( x ) ) #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( ( x ) )
#else #else /* if ( portTICK_TYPE_IS_ATOMIC == 0 ) */
/* The tick type can be read atomically, so critical sections used when the /* The tick type can be read atomically, so critical sections used when the
* tick count is returned can be defined away. */ * tick count is returned can be defined away. */
@ -3179,7 +3236,7 @@ typedef struct xSTATIC_TCB
#endif #endif
uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ];
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDummy25; UBaseType_t xDummy25;
#endif #endif
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
void * pxDummy8; void * pxDummy8;
@ -3261,6 +3318,10 @@ typedef struct xSTATIC_QUEUE
UBaseType_t uxDummy8; UBaseType_t uxDummy8;
uint8_t ucDummy9; uint8_t ucDummy9;
#endif #endif
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xDummySpinlock[ 2 ];
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} StaticQueue_t; } StaticQueue_t;
typedef StaticQueue_t StaticSemaphore_t; typedef StaticQueue_t StaticSemaphore_t;
@ -3290,6 +3351,10 @@ typedef struct xSTATIC_EVENT_GROUP
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucDummy4; uint8_t ucDummy4;
#endif #endif
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xDummySpinlock[ 2 ];
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} StaticEventGroup_t; } StaticEventGroup_t;
/* /*
@ -3345,6 +3410,9 @@ typedef struct xSTATIC_STREAM_BUFFER
void * pvDummy5[ 2 ]; void * pvDummy5[ 2 ];
#endif #endif
UBaseType_t uxDummy6; UBaseType_t uxDummy6;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xDummySpinlock[ 2 ];
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} StaticStreamBuffer_t; } StaticStreamBuffer_t;
/* Message buffers are built on stream buffers. */ /* Message buffers are built on stream buffers. */


@ -3755,6 +3755,14 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC
void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );
#endif #endif
/*
* Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns.
* To be called while data group is locked.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
BaseType_t xTaskUnlockCanYield( void );
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#if ( portUSING_MPU_WRAPPERS == 1 ) #if ( portUSING_MPU_WRAPPERS == 1 )
/* /*

queue.c

@ -133,6 +133,11 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
UBaseType_t uxQueueNumber; UBaseType_t uxQueueNumber;
uint8_t ucQueueType; uint8_t ucQueueType;
#endif #endif
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xTaskSpinlock;
portSPINLOCK_TYPE xISRSpinlock;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} xQUEUE; } xQUEUE;
/* The old xQUEUE name is maintained above then typedefed to the new Queue_t /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
@ -251,12 +256,55 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
#endif #endif
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
/*
* Macros to mark the start and end of a critical code region.
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define queueENTER_CRITICAL( pxQueue ) vQueueEnterCritical( pxQueue )
#define queueENTER_CRITICAL_FROM_ISR( pxQueue ) uxQueueEnterCriticalFromISR( pxQueue )
#define queueEXIT_CRITICAL( pxQueue ) vQueueExitCritical( pxQueue )
#define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) vQueueExitCriticalFromISR( uxSavedInterruptStatus, pxQueue )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define queueENTER_CRITICAL( pxQueue ) taskENTER_CRITICAL();
#define queueENTER_CRITICAL_FROM_ISR( pxQueue ) taskENTER_CRITICAL_FROM_ISR();
#define queueEXIT_CRITICAL( pxQueue ) taskEXIT_CRITICAL();
#define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#if ( portUSING_GRANULAR_LOCKS == 1 )
/*
* Enters a critical section for a queue. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vQueueEnterCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/*
* Enters a critical section for a queue from an ISR context. Takes the ISR
* spinlock and returns the previous interrupt state.
*/
static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a queue. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vQueueExitCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a queue from an ISR context. Releases the ISR
* spinlock and conditionally restores the previous interrupt state.
*/
static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/* /*
* Macro to mark a queue as locked. Locking a queue prevents an ISR from * Macro to mark a queue as locked. Locking a queue prevents an ISR from
* accessing the queue event lists. * accessing the queue event lists.
*/ */
#define prvLockQueue( pxQueue ) \ #define prvLockQueue( pxQueue ) \
taskENTER_CRITICAL(); \ queueENTER_CRITICAL( pxQueue ); \
{ \ { \
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
{ \ { \
@ -267,7 +315,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
} \ } \
} \ } \
taskEXIT_CRITICAL() queueEXIT_CRITICAL( pxQueue )
/* /*
* Macro to increment cTxLock member of the queue data structure. It is * Macro to increment cTxLock member of the queue data structure. It is
@ -298,6 +346,49 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
} \ } \
} while( 0 ) } while( 0 )
/*
* Macros used to lock and unlock a queue. When a task locks a queue, the
* task will have thread safe non-deterministic access to the queue.
* - Concurrent access from other tasks will be blocked by the xTaskSpinlock
* - Concurrent access from ISRs will be pended
*
* When the task unlocks the queue, all pended access attempts are handled.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define queueLOCK( pxQueue ) \
do { \
vTaskPreemptionDisable( NULL ); \
prvLockQueue( ( pxQueue ) ); \
portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \
} while( 0 )
#define queueUNLOCK( pxQueue, xYieldAPI ) \
do { \
prvUnlockQueue( ( pxQueue ) ); \
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \
vTaskPreemptionEnable( NULL ); \
} while( 0 )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define queueLOCK( pxQueue ) \
do { \
vTaskSuspendAll(); \
prvLockQueue( ( pxQueue ) ); \
} while( 0 )
#define queueUNLOCK( pxQueue, xYieldAPI ) \
do { \
BaseType_t xAlreadyYielded; \
prvUnlockQueue( ( pxQueue ) ); \
xAlreadyYielded = xTaskResumeAll(); \
if( ( xAlreadyYielded == pdFALSE ) && ( ( xYieldAPI ) == pdTRUE ) ) \
{ \
taskYIELD_WITHIN_API(); \
} \
else \
{ \
mtCOVERAGE_TEST_MARKER(); \
} \
} while( 0 )
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@ -310,12 +401,22 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
configASSERT( pxQueue ); configASSERT( pxQueue );
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
if( xNewQueue == pdTRUE )
{
portINIT_SPINLOCK( &( pxQueue->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxQueue->xISRSpinlock ) );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
if( ( pxQueue != NULL ) && if( ( pxQueue != NULL ) &&
( pxQueue->uxLength >= 1U ) && ( pxQueue->uxLength >= 1U ) &&
/* Check for multiplication overflow. */ /* Check for multiplication overflow. */
( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) ) ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
{ {
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
@ -354,7 +455,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
} }
else else
{ {
@ -703,7 +804,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* calling task is the mutex holder, but not a good way of determining the * calling task is the mutex holder, but not a good way of determining the
* identity of the mutex holder, as the holder may change between the * identity of the mutex holder, as the holder may change between the
* following critical section exiting and the function returning. */ * following critical section exiting and the function returning. */
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxSemaphore );
{ {
if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
{ {
@ -714,7 +815,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
pxReturn = NULL; pxReturn = NULL;
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxSemaphore );
traceRETURN_xQueueGetMutexHolder( pxReturn ); traceRETURN_xQueueGetMutexHolder( pxReturn );
@ -968,7 +1069,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
for( ; ; ) for( ; ; )
{ {
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
/* Is there room on the queue now? The running task must be the /* Is there room on the queue now? The running task must be the
* highest priority task wanting to access the queue. If the head item * highest priority task wanting to access the queue. If the head item
@ -1074,7 +1175,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
} }
#endif /* configUSE_QUEUE_SETS */ #endif /* configUSE_QUEUE_SETS */
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceRETURN_xQueueGenericSend( pdPASS ); traceRETURN_xQueueGenericSend( pdPASS );
@ -1086,7 +1187,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
{ {
/* The queue was full and no block time is specified (or /* The queue was full and no block time is specified (or
* the block time has expired) so leave now. */ * the block time has expired) so leave now. */
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
/* Return to the original privilege level before exiting /* Return to the original privilege level before exiting
* the function. */ * the function. */
@ -1109,13 +1210,12 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
} }
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
/* Interrupts and other tasks can send to and receive from the queue /* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */ * now the critical section has been exited. */
vTaskSuspendAll(); queueLOCK( pxQueue );
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@ -1125,35 +1225,18 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
traceBLOCKING_ON_QUEUE_SEND( pxQueue ); traceBLOCKING_ON_QUEUE_SEND( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
/* Unlocking the queue means queue events can effect the queueUNLOCK( pxQueue, pdTRUE );
* event list. It is possible that interrupts occurring now
* remove this task from the event list again - but as the
* scheduler is suspended the task will go onto the pending
* ready list instead of the actual ready list. */
prvUnlockQueue( pxQueue );
/* Resuming the scheduler will move tasks from the pending
* ready list into the ready list - so it is feasible that this
* task is already in the ready list before it yields - in which
* case the yield will not cause a context switch unless there
* is also a higher priority task in the pending ready list. */
if( xTaskResumeAll() == pdFALSE )
{
taskYIELD_WITHIN_API();
}
} }
else else
{ {
/* Try again. */ /* Try again. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
} }
} }
else else
{ {
/* The timeout has expired. */ /* The timeout has expired. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
traceQUEUE_SEND_FAILED( pxQueue ); traceQUEUE_SEND_FAILED( pxQueue );
traceRETURN_xQueueGenericSend( errQUEUE_FULL ); traceRETURN_xQueueGenericSend( errQUEUE_FULL );
@ -1203,7 +1286,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
/* MISRA Ref 4.7.1 [Return value shall be checked] */ /* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */ /* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
{ {
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{ {
@ -1328,7 +1411,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL; xReturn = errQUEUE_FULL;
} }
} }
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
traceRETURN_xQueueGenericSendFromISR( xReturn ); traceRETURN_xQueueGenericSendFromISR( xReturn );
@ -1381,7 +1464,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
/* MISRA Ref 4.7.1 [Return value shall be checked] */ /* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */ /* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
{ {
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@ -1501,7 +1584,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL; xReturn = errQUEUE_FULL;
} }
} }
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
traceRETURN_xQueueGiveFromISR( xReturn ); traceRETURN_xQueueGiveFromISR( xReturn );
@ -1535,7 +1618,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
for( ; ; ) for( ; ; )
{ {
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@ -1567,7 +1650,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceRETURN_xQueueReceive( pdPASS ); traceRETURN_xQueueReceive( pdPASS );
@ -1579,7 +1662,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
{ {
/* The queue was empty and no block time is specified (or /* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */ * the block time has expired) so leave now. */
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceQUEUE_RECEIVE_FAILED( pxQueue ); traceQUEUE_RECEIVE_FAILED( pxQueue );
traceRETURN_xQueueReceive( errQUEUE_EMPTY ); traceRETURN_xQueueReceive( errQUEUE_EMPTY );
@ -1600,13 +1683,12 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
} }
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
/* Interrupts and other tasks can send to and receive from the queue /* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */ * now the critical section has been exited. */
vTaskSuspendAll(); queueLOCK( pxQueue );
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@ -1617,31 +1699,20 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
{ {
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdTRUE );
if( xTaskResumeAll() == pdFALSE )
{
taskYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
} }
else else
{ {
/* The queue contains data again. Loop back to try and read the /* The queue contains data again. Loop back to try and read the
* data. */ * data. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
} }
} }
else else
{ {
/* Timed out. If there is no data in the queue exit, otherwise loop /* Timed out. If there is no data in the queue exit, otherwise loop
* back and attempt to read the data. */ * back and attempt to read the data. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{ {
@ -1688,7 +1759,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
for( ; ; ) for( ; ; )
{ {
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
/* Semaphores are queues with an item size of 0, and where the /* Semaphores are queues with an item size of 0, and where the
* number of messages in the queue is the semaphore's count value. */ * number of messages in the queue is the semaphore's count value. */
@ -1737,7 +1808,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceRETURN_xQueueSemaphoreTake( pdPASS ); traceRETURN_xQueueSemaphoreTake( pdPASS );
@ -1749,7 +1820,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
{ {
/* The semaphore count was 0 and no block time is specified /* The semaphore count was 0 and no block time is specified
* (or the block time has expired) so exit now. */ * (or the block time has expired) so exit now. */
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceQUEUE_RECEIVE_FAILED( pxQueue ); traceQUEUE_RECEIVE_FAILED( pxQueue );
traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY ); traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
@ -1770,13 +1841,12 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
} }
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
/* Interrupts and other tasks can give to and take from the semaphore /* Interrupts and other tasks can give to and take from the semaphore
* now the critical section has been exited. */ * now the critical section has been exited. */
vTaskSuspendAll(); queueLOCK( pxQueue );
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@ -1792,13 +1862,9 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
#if ( configUSE_MUTEXES == 1 ) #if ( configUSE_MUTEXES == 1 )
{ {
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
taskENTER_CRITICAL();
{ {
xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
} }
taskEXIT_CRITICAL();
}
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
@ -1807,30 +1873,19 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
#endif /* if ( configUSE_MUTEXES == 1 ) */ #endif /* if ( configUSE_MUTEXES == 1 ) */
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdTRUE );
if( xTaskResumeAll() == pdFALSE )
{
taskYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
} }
else else
{ {
/* There was no timeout and the semaphore count was not 0, so /* There was no timeout and the semaphore count was not 0, so
* attempt to take the semaphore again. */ * attempt to take the semaphore again. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
} }
} }
else else
{ {
/* Timed out. */ /* Timed out. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
/* If the semaphore count is 0 exit now as the timeout has /* If the semaphore count is 0 exit now as the timeout has
* expired. Otherwise return to attempt to take the semaphore that is * expired. Otherwise return to attempt to take the semaphore that is
@ -1845,7 +1900,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* test the mutex type again to check it is actually a mutex. */ * test the mutex type again to check it is actually a mutex. */
if( xInheritanceOccurred != pdFALSE ) if( xInheritanceOccurred != pdFALSE )
{ {
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
UBaseType_t uxHighestWaitingPriority; UBaseType_t uxHighestWaitingPriority;
@ -1865,7 +1920,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
/* coverity[overrun] */ /* coverity[overrun] */
vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
} }
} }
#endif /* configUSE_MUTEXES */ #endif /* configUSE_MUTEXES */
@ -1911,7 +1966,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
for( ; ; ) for( ; ; )
{ {
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@ -1949,7 +2004,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceRETURN_xQueuePeek( pdPASS ); traceRETURN_xQueuePeek( pdPASS );
@ -1961,7 +2016,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
{ {
/* The queue was empty and no block time is specified (or /* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */ * the block time has expired) so leave now. */
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceQUEUE_PEEK_FAILED( pxQueue ); traceQUEUE_PEEK_FAILED( pxQueue );
traceRETURN_xQueuePeek( errQUEUE_EMPTY ); traceRETURN_xQueuePeek( errQUEUE_EMPTY );
@ -1983,13 +2038,12 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
} }
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
/* Interrupts and other tasks can send to and receive from the queue /* Interrupts and other tasks can send to and receive from the queue
* now that the critical section has been exited. */ * now that the critical section has been exited. */
vTaskSuspendAll(); queueLOCK( pxQueue );
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@ -2000,31 +2054,20 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
{ {
traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdTRUE );
if( xTaskResumeAll() == pdFALSE )
{
taskYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
} }
else else
{ {
/* There is data in the queue now, so don't enter the blocked /* There is data in the queue now, so don't enter the blocked
* state, instead return to try and obtain the data. */ * state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
} }
} }
else else
{ {
/* The timeout has expired. If there is still no data in the queue /* The timeout has expired. If there is still no data in the queue
* exit, otherwise go back and try to read the data again. */ * exit, otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
( void ) xTaskResumeAll();
if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{ {
@ -2074,7 +2117,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
/* MISRA Ref 4.7.1 [Return value shall be checked] */ /* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */ /* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
{ {
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@ -2134,7 +2177,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
} }
} }
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
traceRETURN_xQueueReceiveFromISR( xReturn ); traceRETURN_xQueueReceiveFromISR( xReturn );
@ -2175,7 +2218,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
/* MISRA Ref 4.7.1 [Return value shall be checked] */ /* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */ /* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = ( UBaseType_t ) queueENTER_CRITICAL_FROM_ISR( pxQueue );
{ {
/* Cannot block in an ISR, so check there is data available. */ /* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@ -2196,7 +2239,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
} }
} }
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue );
traceRETURN_xQueuePeekFromISR( xReturn ); traceRETURN_xQueuePeekFromISR( xReturn );
@ -2212,11 +2255,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
configASSERT( xQueue ); configASSERT( xQueue );
portBASE_TYPE_ENTER_CRITICAL(); queueENTER_CRITICAL( xQueue );
{ {
uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
} }
portBASE_TYPE_EXIT_CRITICAL(); queueEXIT_CRITICAL( xQueue );
traceRETURN_uxQueueMessagesWaiting( uxReturn ); traceRETURN_uxQueueMessagesWaiting( uxReturn );
@ -2233,11 +2276,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
configASSERT( pxQueue ); configASSERT( pxQueue );
portBASE_TYPE_ENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting ); uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
} }
portBASE_TYPE_EXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceRETURN_uxQueueSpacesAvailable( uxReturn ); traceRETURN_uxQueueSpacesAvailable( uxReturn );
@ -2503,13 +2546,14 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
static void prvUnlockQueue( Queue_t * const pxQueue ) static void prvUnlockQueue( Queue_t * const pxQueue )
{ {
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED WHEN portUSING_GRANULAR_LOCKS IS 0.
* IT MUST BE CALLED WITH TASK PREEMPTION DISABLED WHEN portUSING_GRANULAR_LOCKS IS 1. */
/* The lock counts contain the number of extra data items placed or /* The lock counts contain the number of extra data items placed or
* removed from the queue while the queue was locked. When a queue is * removed from the queue while the queue was locked. When a queue is
* locked items can be added or removed, but the event lists cannot be * locked items can be added or removed, but the event lists cannot be
* updated. */ * updated. */
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
int8_t cTxLock = pxQueue->cTxLock; int8_t cTxLock = pxQueue->cTxLock;
@ -2587,10 +2631,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cTxLock = queueUNLOCKED; pxQueue->cTxLock = queueUNLOCKED;
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
/* Do the same for the Rx lock. */ /* Do the same for the Rx lock. */
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
int8_t cRxLock = pxQueue->cRxLock; int8_t cRxLock = pxQueue->cRxLock;
@ -2617,15 +2661,115 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cRxLock = queueUNLOCKED; pxQueue->cRxLock = queueUNLOCKED;
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
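The hunks above replace each vTaskSuspendAll()/prvLockQueue() ... prvUnlockQueue()/xTaskResumeAll() sequence with a single queueLOCK()/queueUNLOCK() pair. The sketch below only illustrates that calling pattern as reconstructed from the call sites in this file: queueLOCK()/queueUNLOCK() are new macros whose definitions are not visible in this excerpt, the helper name prvExampleBlockOnQueue() is hypothetical, and the second queueUNLOCK() argument is treated as a "yield if required" flag based on the pdTRUE/pdFALSE values used above.

    /* Hypothetical helper, for illustration only: the shape of the blocking
     * paths above after the queueLOCK()/queueUNLOCK() conversion. */
    static BaseType_t prvExampleBlockOnQueue( Queue_t * pxQueue,
                                              TimeOut_t * pxTimeOut,
                                              TickType_t * pxTicksToWait )
    {
        BaseType_t xStillWaiting;

        queueLOCK( pxQueue );

        if( xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait ) == pdFALSE )
        {
            /* Timeout not yet expired - block on the queue's event list. */
            vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), *pxTicksToWait );

            /* Unlock the queue, yielding if the unlock made a context switch
             * necessary (pdTRUE at the call sites above that block). */
            queueUNLOCK( pxQueue, pdTRUE );
            xStillWaiting = pdTRUE;
        }
        else
        {
            /* Timed out - unlock without yielding (pdFALSE above). */
            queueUNLOCK( pxQueue, pdFALSE );
            xStillWaiting = pdFALSE;
        }

        return xStillWaiting;
    }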
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vQueueEnterCritical( const Queue_t * pxQueue )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue )
{
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Take the ISR spinlock */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
return uxSavedInterruptStatus;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vQueueExitCritical( const Queue_t * pxQueue )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending status inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
const Queue_t * pxQueue )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{ {
BaseType_t xReturn; BaseType_t xReturn;
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{ {
@ -2636,7 +2780,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
xReturn = pdFALSE; xReturn = pdFALSE;
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
return xReturn; return xReturn;
} }
@ -2670,7 +2814,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
{ {
BaseType_t xReturn; BaseType_t xReturn;
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
{ {
@ -2681,7 +2825,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
xReturn = pdFALSE; xReturn = pdFALSE;
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
return xReturn; return xReturn;
} }
@ -3161,7 +3305,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
* time a yield will be performed. If an item is added to the queue while * time a yield will be performed. If an item is added to the queue while
* the queue is locked, and the calling task blocks on the queue, then the * the queue is locked, and the calling task blocks on the queue, then the
* calling task will be immediately unblocked when the queue is unlocked. */ * calling task will be immediately unblocked when the queue is unlocked. */
prvLockQueue( pxQueue ); queueLOCK( pxQueue );
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
{ {
@ -3173,7 +3317,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
prvUnlockQueue( pxQueue ); queueUNLOCK( pxQueue, pdFALSE );
traceRETURN_vQueueWaitForMessageRestricted(); traceRETURN_vQueueWaitForMessageRestricted();
} }
@ -3225,17 +3369,18 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
QueueSetHandle_t xQueueSet ) QueueSetHandle_t xQueueSet )
{ {
BaseType_t xReturn; BaseType_t xReturn;
Queue_t * const pxQueue = xQueueOrSemaphore;
traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet ); traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueue );
{ {
if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) if( pxQueue->pxQueueSetContainer != NULL )
{ {
/* Cannot add a queue/semaphore to more than one queue set. */ /* Cannot add a queue/semaphore to more than one queue set. */
xReturn = pdFAIL; xReturn = pdFAIL;
} }
else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) else if( pxQueue->uxMessagesWaiting != ( UBaseType_t ) 0 )
{ {
/* Cannot add a queue/semaphore to a queue set if there are already /* Cannot add a queue/semaphore to a queue set if there are already
* items in the queue/semaphore. */ * items in the queue/semaphore. */
@ -3243,11 +3388,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
} }
else else
{ {
( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; pxQueue->pxQueueSetContainer = xQueueSet;
xReturn = pdPASS; xReturn = pdPASS;
} }
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueue );
traceRETURN_xQueueAddToSet( xReturn ); traceRETURN_xQueueAddToSet( xReturn );
@ -3281,12 +3426,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
} }
else else
{ {
taskENTER_CRITICAL(); queueENTER_CRITICAL( pxQueueOrSemaphore );
{ {
/* The queue is no longer contained in the set. */ /* The queue is no longer contained in the set. */
pxQueueOrSemaphore->pxQueueSetContainer = NULL; pxQueueOrSemaphore->pxQueueSetContainer = NULL;
} }
taskEXIT_CRITICAL(); queueEXIT_CRITICAL( pxQueueOrSemaphore );
xReturn = pdPASS; xReturn = pdPASS;
} }

stream_buffer.c

@ -58,6 +58,39 @@
#error INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 to build stream_buffer.c #error INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 to build stream_buffer.c
#endif #endif
/*
* Macros to mark the start and end of a critical code region.
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define sbENTER_CRITICAL( pxStreamBuffer ) vStreamBufferEnterCritical( pxStreamBuffer )
#define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) uxStreamBufferEnterCriticalFromISR( pxStreamBuffer )
#define sbEXIT_CRITICAL( pxStreamBuffer ) vStreamBufferExitCritical( pxStreamBuffer )
#define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) vStreamBufferExitCriticalFromISR( uxSavedInterruptStatus, pxStreamBuffer )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define sbENTER_CRITICAL( pxStreamBuffer )                                  taskENTER_CRITICAL();
#define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer )                         taskENTER_CRITICAL_FROM_ISR();
#define sbEXIT_CRITICAL( pxStreamBuffer )                                   taskEXIT_CRITICAL();
#define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
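When portUSING_GRANULAR_LOCKS is 1 these macros resolve to the per-instance spinlock helpers declared further down in this file, so accesses to different stream buffers no longer contend for one global critical section; in all other configurations they fall back to the existing task critical section macros. A minimal usage sketch, mirroring the call sites later in this diff (pxStreamBuffer and xBytesAvailable are assumed to be in scope):

    /* Keep the critical section short and deterministic. */
    sbENTER_CRITICAL( pxStreamBuffer );
    {
        xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
    }
    sbEXIT_CRITICAL( pxStreamBuffer );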
/*
* Macro used to lock and unlock a stream buffer. When a task locks a stream
* buffer, the task will have thread safe non-deterministic access to the stream
* buffer.
* - Concurrent access from other tasks will be blocked by the xTaskSpinlock
* - Concurrent access from ISRs will be pended
*
* When the task unlocks the stream buffer, all pended access attempts are handled.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define sbLOCK( pxStreamBuffer ) prvLockStreamBufferForTasks( pxStreamBuffer )
#define sbUNLOCK( pxStreamBuffer ) prvUnlockStreamBufferForTasks( pxStreamBuffer )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define sbLOCK( pxStreamBuffer ) vTaskSuspendAll()
#define sbUNLOCK( pxStreamBuffer ) ( void ) xTaskResumeAll()
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
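Unlike the critical-section macros above, sbLOCK()/sbUNLOCK() are intended for longer, non-deterministic work: under granular locks they disable preemption and hold only the buffer's task spinlock, pending ISR access until the unlock, while in other configurations they fall back to vTaskSuspendAll()/xTaskResumeAll(). A minimal sketch of the pattern, modelled on sbRECEIVE_COMPLETED() below:

    sbLOCK( pxStreamBuffer );
    {
        if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )
        {
            /* Non-deterministic work is safe here, e.g. notifying the task
             * that is blocked waiting to send. */
        }
    }
    ( void ) sbUNLOCK( pxStreamBuffer );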
/* If the user has not provided application specific Rx notification macros, /* If the user has not provided application specific Rx notification macros,
* or #defined the notification macros away, then provide default implementations * or #defined the notification macros away, then provide default implementations
* that uses task notifications. */ * that uses task notifications. */
@ -65,7 +98,7 @@
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
do \ do \
{ \ { \
vTaskSuspendAll(); \ sbLOCK( pxStreamBuffer ); \
{ \ { \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \ { \
@ -76,7 +109,7 @@
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \ } \
} \ } \
( void ) xTaskResumeAll(); \ ( void ) sbUNLOCK( pxStreamBuffer ); \
} while( 0 ) } while( 0 )
#endif /* sbRECEIVE_COMPLETED */ #endif /* sbRECEIVE_COMPLETED */
@ -105,7 +138,7 @@
do { \ do { \
UBaseType_t uxSavedInterruptStatus; \ UBaseType_t uxSavedInterruptStatus; \
\ \
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); \
{ \ { \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \ { \
@ -117,7 +150,7 @@
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \ } \
} \ } \
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); \
} while( 0 ) } while( 0 )
#endif /* sbRECEIVE_COMPLETED_FROM_ISR */ #endif /* sbRECEIVE_COMPLETED_FROM_ISR */
@ -145,7 +178,7 @@
*/ */
#ifndef sbSEND_COMPLETED #ifndef sbSEND_COMPLETED
#define sbSEND_COMPLETED( pxStreamBuffer ) \ #define sbSEND_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \ sbLOCK( pxStreamBuffer ); \
{ \ { \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \ { \
@ -156,7 +189,7 @@
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \ } \
} \ } \
( void ) xTaskResumeAll() ( void ) sbUNLOCK( pxStreamBuffer )
#endif /* sbSEND_COMPLETED */ #endif /* sbSEND_COMPLETED */
/* If user has provided a per-instance send completed callback, then /* If user has provided a per-instance send completed callback, then
@ -184,7 +217,7 @@
do { \ do { \
UBaseType_t uxSavedInterruptStatus; \ UBaseType_t uxSavedInterruptStatus; \
\ \
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); \
{ \ { \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \ { \
@ -196,7 +229,7 @@
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \ } \
} \ } \
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); \
} while( 0 ) } while( 0 )
#endif /* sbSEND_COMPLETE_FROM_ISR */ #endif /* sbSEND_COMPLETE_FROM_ISR */
@ -249,8 +282,59 @@ typedef struct StreamBufferDef_t
StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */ StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */
#endif #endif
UBaseType_t uxNotificationIndex; /* The index we are using for notification, by default tskDEFAULT_INDEX_TO_NOTIFY. */ UBaseType_t uxNotificationIndex; /* The index we are using for notification, by default tskDEFAULT_INDEX_TO_NOTIFY. */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portSPINLOCK_TYPE xTaskSpinlock;
portSPINLOCK_TYPE xISRSpinlock;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} StreamBuffer_t; } StreamBuffer_t;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
/*
* Enters a critical section for a stream buffer. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
/*
* Enters a critical section for a stream buffer from an ISR context. Takes the ISR
* spinlock and returns the previous interrupt state.
*/
static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a stream buffer. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a stream buffer from an ISR context. Releases the ISR
* spinlock and conditionally restores the previous interrupt state.
*/
static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*
* Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer
* but allows ISRs to pend access to the stream buffer. Caller cannot be preempted
* by other tasks after locking the stream buffer, thus allowing the caller to
* execute non-deterministic operations.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*
* Unlocks a stream buffer for tasks. Handles all pended access from ISRs, then reenables preemption
* for the caller.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/* /*
* The number of bytes available to be read from the buffer. * The number of bytes available to be read from the buffer.
*/ */
@ -327,6 +411,131 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer )
{
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Take the ISR spinlock */
portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
return uxSavedInterruptStatus;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending status inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
StreamBuffer_t * const pxStreamBuffer )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
{
/* Disable preemption so that the current task cannot be preempted by another task */
vTaskPreemptionDisable( NULL );
/* Take and hold xTaskSpinlock so that tasks running on other cores cannot
* access the stream buffer while it is locked. */
portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
{
/* Release the previously held task spinlock */
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
/* Re-enable preemption */
vTaskPreemptionEnable( NULL );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes, size_t xTriggerLevelBytes,
@ -405,6 +614,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
pxSendCompletedCallback, pxSendCompletedCallback,
pxReceiveCompletedCallback ); pxReceiveCompletedCallback );
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( ( ( StreamBuffer_t * ) pvAllocatedMemory )->xTaskSpinlock ) );
portINIT_SPINLOCK( &( ( ( StreamBuffer_t * ) pvAllocatedMemory )->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pvAllocatedMemory ), xStreamBufferType ); traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pvAllocatedMemory ), xStreamBufferType );
} }
else else
@ -499,6 +715,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
* again. */ * again. */
pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
portINIT_SPINLOCK( &( pxStreamBuffer->xTaskSpinlock ) );
portINIT_SPINLOCK( &( pxStreamBuffer->xISRSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xStreamBufferType ); traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xStreamBufferType );
/* MISRA Ref 11.3.1 [Misaligned access] */ /* MISRA Ref 11.3.1 [Misaligned access] */
@ -614,7 +837,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
#endif #endif
/* Can only reset a message buffer if there are no tasks blocked on it. */ /* Can only reset a message buffer if there are no tasks blocked on it. */
taskENTER_CRITICAL(); sbENTER_CRITICAL( pxStreamBuffer );
{ {
if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) ) if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) )
{ {
@ -644,7 +867,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
xReturn = pdPASS; xReturn = pdPASS;
} }
} }
taskEXIT_CRITICAL(); sbEXIT_CRITICAL( pxStreamBuffer );
traceRETURN_xStreamBufferReset( xReturn ); traceRETURN_xStreamBufferReset( xReturn );
@ -872,7 +1095,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
{ {
/* Wait until the required number of bytes are free in the message /* Wait until the required number of bytes are free in the message
* buffer. */ * buffer. */
taskENTER_CRITICAL(); sbENTER_CRITICAL( pxStreamBuffer );
{ {
xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
@ -887,11 +1110,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
} }
else else
{ {
taskEXIT_CRITICAL(); sbEXIT_CRITICAL( pxStreamBuffer );
break; break;
} }
} }
taskEXIT_CRITICAL(); sbEXIT_CRITICAL( pxStreamBuffer );
traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
( void ) xTaskNotifyWaitIndexed( pxStreamBuffer->uxNotificationIndex, ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); ( void ) xTaskNotifyWaitIndexed( pxStreamBuffer->uxNotificationIndex, ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
@ -1087,7 +1310,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
{ {
/* Checking if there is data and clearing the notification state must be /* Checking if there is data and clearing the notification state must be
* performed atomically. */ * performed atomically. */
taskENTER_CRITICAL(); sbENTER_CRITICAL( pxStreamBuffer );
{ {
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
@ -1112,7 +1335,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL(); sbEXIT_CRITICAL( pxStreamBuffer );
if( xBytesAvailable <= xBytesToStoreMessageLength ) if( xBytesAvailable <= xBytesToStoreMessageLength )
{ {
@ -1409,7 +1632,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
/* MISRA Ref 4.7.1 [Return value shall be checked] */ /* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */ /* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer );
{ {
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )
{ {
@ -1426,7 +1649,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
xReturn = pdFALSE; xReturn = pdFALSE;
} }
} }
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer );
traceRETURN_xStreamBufferSendCompletedFromISR( xReturn ); traceRETURN_xStreamBufferSendCompletedFromISR( xReturn );
@ -1448,7 +1671,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf
/* MISRA Ref 4.7.1 [Return value shall be checked] */ /* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */ /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */ /* coverity[misra_c_2012_directive_4_7_violation] */
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer );
{ {
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )
{ {
@ -1465,7 +1688,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf
xReturn = pdFALSE; xReturn = pdFALSE;
} }
} }
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer );
traceRETURN_xStreamBufferReceiveCompletedFromISR( xReturn ); traceRETURN_xStreamBufferReceiveCompletedFromISR( xReturn );

480
tasks.c

File diff suppressed because it is too large

115
timers.c

@ -79,6 +79,17 @@
#define tmrSTATUS_IS_STATICALLY_ALLOCATED ( 0x02U ) #define tmrSTATUS_IS_STATICALLY_ALLOCATED ( 0x02U )
#define tmrSTATUS_IS_AUTORELOAD ( 0x04U ) #define tmrSTATUS_IS_AUTORELOAD ( 0x04U )
/*
* Macros to mark the start and end of a critical code region.
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define tmrENTER_CRITICAL() vTimerEnterCritical()
#define tmrEXIT_CRITICAL() vTimerExitCritical()
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define tmrENTER_CRITICAL() taskENTER_CRITICAL()
#define tmrEXIT_CRITICAL() taskEXIT_CRITICAL()
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
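When portUSING_GRANULAR_LOCKS is 1 the timer module uses the single module-level pair of spinlocks declared further down in this file, so tmrENTER_CRITICAL()/tmrEXIT_CRITICAL() serialise timer API calls against each other and against the timer service task without touching any other module's lock; otherwise they are plain aliases for taskENTER_CRITICAL()/taskEXIT_CRITICAL(). A minimal usage sketch, mirroring pvTimerGetTimerID() below (xTimer is assumed to be a valid timer handle):

    Timer_t * const pxTimer = xTimer;
    void * pvReturn;

    tmrENTER_CRITICAL();
    {
        /* Read shared timer state atomically with respect to other timer
         * API calls and the timer service task. */
        pvReturn = pxTimer->pvTimerID;
    }
    tmrEXIT_CRITICAL();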
/* The definition of the timers themselves. */ /* The definition of the timers themselves. */
typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{ {
@ -149,6 +160,25 @@
PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL;
PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC;
PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#if ( portUSING_GRANULAR_LOCKS == 1 )
/*
* Enters a critical section for timers. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vTimerEnterCritical( void ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for timers. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vTimerExitCritical( void ) PRIVILEGED_FUNCTION;
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
/* /*
@ -572,7 +602,7 @@
traceENTER_vTimerSetReloadMode( xTimer, xAutoReload ); traceENTER_vTimerSetReloadMode( xTimer, xAutoReload );
configASSERT( xTimer ); configASSERT( xTimer );
taskENTER_CRITICAL(); tmrENTER_CRITICAL();
{ {
if( xAutoReload != pdFALSE ) if( xAutoReload != pdFALSE )
{ {
@ -583,7 +613,7 @@
pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD ); pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD );
} }
} }
taskEXIT_CRITICAL(); tmrEXIT_CRITICAL();
traceRETURN_vTimerSetReloadMode(); traceRETURN_vTimerSetReloadMode();
} }
@ -597,7 +627,7 @@
traceENTER_xTimerGetReloadMode( xTimer ); traceENTER_xTimerGetReloadMode( xTimer );
configASSERT( xTimer ); configASSERT( xTimer );
portBASE_TYPE_ENTER_CRITICAL(); tmrENTER_CRITICAL();
{ {
if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0U ) if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0U )
{ {
@ -610,7 +640,7 @@
xReturn = pdTRUE; xReturn = pdTRUE;
} }
} }
portBASE_TYPE_EXIT_CRITICAL(); tmrEXIT_CRITICAL();
traceRETURN_xTimerGetReloadMode( xReturn ); traceRETURN_xTimerGetReloadMode( xReturn );
@ -1116,7 +1146,7 @@
/* Check that the list from which active timers are referenced, and the /* Check that the list from which active timers are referenced, and the
* queue used to communicate with the timer service, have been * queue used to communicate with the timer service, have been
* initialised. */ * initialised. */
taskENTER_CRITICAL(); tmrENTER_CRITICAL();
{ {
if( xTimerQueue == NULL ) if( xTimerQueue == NULL )
{ {
@ -1158,7 +1188,7 @@
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL(); tmrEXIT_CRITICAL();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -1172,7 +1202,7 @@
configASSERT( xTimer ); configASSERT( xTimer );
/* Is the timer in the list of active timers? */ /* Is the timer in the list of active timers? */
portBASE_TYPE_ENTER_CRITICAL(); tmrENTER_CRITICAL();
{ {
if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0U ) if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0U )
{ {
@ -1183,7 +1213,7 @@
xReturn = pdTRUE; xReturn = pdTRUE;
} }
} }
portBASE_TYPE_EXIT_CRITICAL(); tmrEXIT_CRITICAL();
traceRETURN_xTimerIsTimerActive( xReturn ); traceRETURN_xTimerIsTimerActive( xReturn );
@ -1200,11 +1230,11 @@
configASSERT( xTimer ); configASSERT( xTimer );
taskENTER_CRITICAL(); tmrENTER_CRITICAL();
{ {
pvReturn = pxTimer->pvTimerID; pvReturn = pxTimer->pvTimerID;
} }
taskEXIT_CRITICAL(); tmrEXIT_CRITICAL();
traceRETURN_pvTimerGetTimerID( pvReturn ); traceRETURN_pvTimerGetTimerID( pvReturn );
@ -1221,11 +1251,11 @@
configASSERT( xTimer ); configASSERT( xTimer );
taskENTER_CRITICAL(); tmrENTER_CRITICAL();
{ {
pxTimer->pvTimerID = pvNewID; pxTimer->pvTimerID = pvNewID;
} }
taskEXIT_CRITICAL(); tmrEXIT_CRITICAL();
traceRETURN_vTimerSetTimerID(); traceRETURN_vTimerSetTimerID();
} }
@ -1337,6 +1367,67 @@
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
static void vTimerEnterCritical( void )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, &xTaskSpinlock );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, &xISRSpinlock );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*-----------------------------------------------------------*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
static void vTimerExitCritical( void )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending status inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*-----------------------------------------------------------*/
/* This entire source file will be skipped if the application is not configured /* This entire source file will be skipped if the application is not configured
* to include software timer functionality. If you want to include software timer * to include software timer functionality. If you want to include software timer
* functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */ * functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */