Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git
change(freertos/smp): Update event_groups.c locking
Updated event_groups.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock to EventGroup_t.
- Replaced critical section macros with data group critical section macros, e.g. taskENTER/EXIT_CRITICAL[_FROM_ISR]() became event_groupsENTER/EXIT_CRITICAL[_FROM_ISR]().
- Added vEventGroupsEnterCritical(), uxEventGroupsEnterCriticalFromISR(), vEventGroupsExitCritical() and vEventGroupsExitCriticalFromISR() functions that map to the data group critical section macros.
- Added prvLockEventGroupForTasks() and prvUnlockEventGroupForTasks() to suspend the event group when executing non-deterministic code.
- xEventGroupSetBits() and vEventGroupDelete() access the kernel data group directly, so vTaskSuspendAll()/xTaskResumeAll() calls were added to these functions.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
This commit is contained in:
parent f7d7a117f1
commit e76ae93c8a

event_groups.c | 279 lines changed
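
For context, nothing in the application-facing API changes here; the new spinlocks are internal to the kernel. A minimal usage sketch against the unchanged public event group API (the task and bit names below are illustrative, not part of the commit):

    #include "FreeRTOS.h"
    #include "task.h"
    #include "event_groups.h"

    #define SENSOR_READY_BIT    ( ( EventBits_t ) ( 1U << 0 ) )

    static EventGroupHandle_t xEvents;

    static void vProducerTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            /* On an SMP build with granular locking, this call now serialises
             * on the event group's own spinlocks rather than on the global
             * kernel critical section. */
            ( void ) xEventGroupSetBits( xEvents, SENSOR_READY_BIT );
            vTaskDelay( pdMS_TO_TICKS( 100 ) );
        }
    }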
@@ -63,10 +63,77 @@
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
     #endif
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        portSPINLOCK_TYPE xTaskSpinlock;
+        portSPINLOCK_TYPE xISRSpinlock;
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } EventGroup_t;
 
 /*-----------------------------------------------------------*/
 
+/*
+ * Macros to mark the start and end of a critical code region.
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define event_groupsENTER_CRITICAL( pxEventBits )                                 vEventGroupsEnterCritical( pxEventBits )
+    #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )                        uxEventGroupsEnterCriticalFromISR( pxEventBits )
+    #define event_groupsEXIT_CRITICAL( pxEventBits )                                  vEventGroupsExitCritical( pxEventBits )
+    #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) vEventGroupsExitCriticalFromISR( uxSavedInterruptStatus, pxEventBits )
+#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+    #define event_groupsENTER_CRITICAL( pxEventBits )                                 taskENTER_CRITICAL();
+    #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )                        taskENTER_CRITICAL_FROM_ISR();
+    #define event_groupsEXIT_CRITICAL( pxEventBits )                                  taskEXIT_CRITICAL();
+    #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+
+/*
+ * Enters a critical section for an event group. Disables interrupts and takes
+ * both task and ISR spinlocks to ensure thread safety.
+ */
+    static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
+
+/*
+ * Enters a critical section for an event group from an ISR context. Takes the ISR
+ * spinlock and returns the previous interrupt state.
+ */
+    static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
+
+/*
+ * Exits a critical section for an event group. Releases spinlocks in reverse order
+ * and conditionally re-enables interrupts and yields if required.
+ */
+    static void vEventGroupsExitCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
+
+/*
+ * Exits a critical section for an event group from an ISR context. Releases the ISR
+ * spinlock and conditionally restores the previous interrupt state.
+ */
+    static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                                 EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
+
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+
+/*
+ * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
+ * ISRs to pend access to the event group. Caller cannot be preempted by other tasks
+ * after locking the event group, thus allowing the caller to execute non-deterministic
+ * operations.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
+/*
+ * Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables
+ * preemption for the caller.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
 /*
  * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
  * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
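
A detail worth flagging in the prototypes above: vEventGroupsEnterCritical() documents that the task spinlock is always taken before the ISR spinlock, and the exit path releases them in reverse. Holding to one global acquisition order is what keeps two cores from deadlocking on the pair. A self-contained C11 model of that discipline (hypothetical helper names, not the FreeRTOS port layer):

    #include <stdatomic.h>

    typedef struct { atomic_flag xFlag; } Spinlock_t;

    static Spinlock_t xTaskLock = { ATOMIC_FLAG_INIT };
    static Spinlock_t xIsrLock = { ATOMIC_FLAG_INIT };

    static void prvSpinLock( Spinlock_t * pxLock )
    {
        while( atomic_flag_test_and_set_explicit( &pxLock->xFlag, memory_order_acquire ) )
        {
            /* Busy-wait until the holder clears the flag. */
        }
    }

    static void prvSpinUnlock( Spinlock_t * pxLock )
    {
        atomic_flag_clear_explicit( &pxLock->xFlag, memory_order_release );
    }

    static void prvEnterModel( void )
    {
        prvSpinLock( &xTaskLock ); /* always first */
        prvSpinLock( &xIsrLock );  /* always second */
    }

    static void prvExitModel( void )
    {
        prvSpinUnlock( &xIsrLock );  /* released in reverse order */
        prvSpinUnlock( &xTaskLock );
    }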
@@ -79,6 +146,25 @@
                                          const EventBits_t uxBitsToWaitFor,
                                          const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
 
+/*-----------------------------------------------------------*/
+
+/*
+ * Macros used to lock and unlock an event group. When a task locks an
+ * event group, the task will have thread safe non-deterministic access to
+ * the event group.
+ * - Concurrent access from other tasks will be blocked by the xTaskSpinlock
+ * - Concurrent access from ISRs will be pended
+ *
+ * When the task unlocks the event group, all pended access attempts are handled.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    #define event_groupsLOCK( pxEventBits )      prvLockEventGroupForTasks( pxEventBits )
+    #define event_groupsUNLOCK( pxEventBits )    prvUnlockEventGroupForTasks( pxEventBits );
+#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+    #define event_groupsLOCK( pxEventBits )      vTaskSuspendAll()
+    #define event_groupsUNLOCK( pxEventBits )    xTaskResumeAll()
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
 /*-----------------------------------------------------------*/
 
 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
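
These macros are effectively a per-object analogue of suspending the scheduler. A hedged sketch of the calling pattern the kernel code follows with them (names from this diff; the real functions contain the full traversal logic):

    /* Calling pattern used by the non-deterministic paths in this file. */
    void vExampleWalkWaitList( EventGroup_t * pxEventBits )
    {
        event_groupsLOCK( pxEventBits ); /* other tasks blocked, ISR writes pended */
        {
            /* ... traverse xTasksWaitingForBits, unbounded amount of work ... */
        }
        ( void ) event_groupsUNLOCK( pxEventBits ); /* replay pended ISR accesses */
    }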
@@ -122,6 +208,13 @@
         }
         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
 
+        #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        {
+            portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
+            portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
+        }
+        #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
@@ -167,6 +260,13 @@
         }
         #endif /* configSUPPORT_STATIC_ALLOCATION */
 
+        #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        {
+            portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
+            portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
+        }
+        #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
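
Both constructors gain the same initialisation block, since a group created either way can later take the granular-lock path. Usage is unchanged; a minimal sketch of the two public entry points:

    #include "FreeRTOS.h"
    #include "event_groups.h"

    static StaticEventGroup_t xGroupStorage;

    void vCreateBothWays( void )
    {
        /* Both constructors end up initialising the two spinlocks shown above. */
        EventGroupHandle_t xDyn = xEventGroupCreate();                        /* heap-backed */
        EventGroupHandle_t xStat = xEventGroupCreateStatic( &xGroupStorage ); /* caller-backed */

        configASSERT( xDyn != NULL );
        configASSERT( xStat != NULL );
    }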
@@ -202,7 +302,7 @@
         }
         #endif
 
-        vTaskSuspendAll();
+        event_groupsLOCK( pxEventBits );
         {
             uxOriginalBitValue = pxEventBits->uxEventBits;
 
@@ -245,7 +345,7 @@
                 }
             }
         }
-        xAlreadyYielded = xTaskResumeAll();
+        xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
 
         if( xTicksToWait != ( TickType_t ) 0 )
         {
@@ -267,7 +367,7 @@
             if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
             {
                 /* The task timed out, just return the current event bit value. */
-                taskENTER_CRITICAL();
+                event_groupsENTER_CRITICAL( pxEventBits );
                 {
                     uxReturn = pxEventBits->uxEventBits;
 
@@ -284,7 +384,7 @@
                         mtCOVERAGE_TEST_MARKER();
                     }
                 }
-                taskEXIT_CRITICAL();
+                event_groupsEXIT_CRITICAL( pxEventBits );
 
                 xTimeoutOccurred = pdTRUE;
             }
@@ -333,7 +433,7 @@
         }
         #endif
 
-        vTaskSuspendAll();
+        event_groupsLOCK( pxEventBits );
         {
             const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
 
@@ -401,7 +501,7 @@
                     traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
                 }
             }
-        xAlreadyYielded = xTaskResumeAll();
+        xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
 
         if( xTicksToWait != ( TickType_t ) 0 )
         {
@@ -422,7 +522,7 @@
 
             if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
             {
-                taskENTER_CRITICAL();
+                event_groupsENTER_CRITICAL( pxEventBits );
                 {
                     /* The task timed out, just return the current event bit value. */
                     uxReturn = pxEventBits->uxEventBits;
@@ -447,7 +547,7 @@
 
                     xTimeoutOccurred = pdTRUE;
                 }
-                taskEXIT_CRITICAL();
+                event_groupsEXIT_CRITICAL( pxEventBits );
             }
             else
             {
@@ -482,7 +582,7 @@
     configASSERT( xEventGroup );
     configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
 
-    taskENTER_CRITICAL();
+    event_groupsENTER_CRITICAL( pxEventBits );
     {
         traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
 
@@ -493,7 +593,7 @@
         /* Clear the bits. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    taskEXIT_CRITICAL();
+    event_groupsEXIT_CRITICAL( pxEventBits );
 
     traceRETURN_xEventGroupClearBits( uxReturn );
 
@@ -524,7 +624,7 @@
 EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
 {
     UBaseType_t uxSavedInterruptStatus;
-    EventGroup_t const * const pxEventBits = xEventGroup;
+    EventGroup_t * const pxEventBits = xEventGroup;
     EventBits_t uxReturn;
 
     traceENTER_xEventGroupGetBitsFromISR( xEventGroup );
@@ -532,11 +632,11 @@
     /* MISRA Ref 4.7.1 [Return value shall be checked] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
     /* coverity[misra_c_2012_directive_4_7_violation] */
-    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+    uxSavedInterruptStatus = event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits );
     {
         uxReturn = pxEventBits->uxEventBits;
     }
-    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+    event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );
 
     traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
 
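
One small but necessary edit above: xEventGroupGetBitsFromISR() drops the const qualifier on pxEventBits, because the granular FROM_ISR macros must take the group's own spinlock, which writes to members of the structure. A standalone illustration of the constraint (hypothetical types, not kernel code):

    typedef struct { volatile int lock; int data; } obj_t;

    static void lock_obj( obj_t * p )   { p->lock = 1; } /* mutates the object */
    static void unlock_obj( obj_t * p ) { p->lock = 0; }

    static int read_data( obj_t * p ) /* was: const obj_t *; const had to go */
    {
        int v;
        lock_obj( p );   /* writes p->lock, so p cannot point to const */
        v = p->data;
        unlock_obj( p );
        return v;
    }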
@@ -564,10 +664,17 @@
 
     pxList = &( pxEventBits->xTasksWaitingForBits );
     pxListEnd = listGET_END_MARKER( pxList );
-    vTaskSuspendAll();
+    event_groupsLOCK( pxEventBits );
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
 
+        #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+
+            /* We are about to access the kernel data group non-deterministically,
+             * thus we suspend the kernel data group.*/
+            vTaskSuspendAll();
+        #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
         pxListItem = listGET_HEAD_ENTRY( pxList );
 
         /* Set the bits. */
@@ -638,8 +745,12 @@
 
         /* Snapshot resulting bits. */
         uxReturnBits = pxEventBits->uxEventBits;
-    }
 
+        #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
             ( void ) xTaskResumeAll();
+        #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+    }
+    ( void ) event_groupsUNLOCK( pxEventBits );
 
     traceRETURN_xEventGroupSetBits( uxReturnBits );
 
|
||||||
|
|
||||||
pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
|
pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
|
||||||
|
|
||||||
vTaskSuspendAll();
|
event_groupsLOCK( pxEventBits );
|
||||||
{
|
{
|
||||||
traceEVENT_GROUP_DELETE( xEventGroup );
|
traceEVENT_GROUP_DELETE( xEventGroup );
|
||||||
|
|
||||||
|
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
|
||||||
|
|
||||||
|
/* We are about to access the kernel data group non-deterministically,
|
||||||
|
* thus we suspend the kernel data group.*/
|
||||||
|
vTaskSuspendAll();
|
||||||
|
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
|
||||||
|
|
||||||
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
|
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
|
||||||
{
|
{
|
||||||
/* Unblock the task, returning 0 as the event list is being deleted
|
/* Unblock the task, returning 0 as the event list is being deleted
|
||||||
|
@ -669,8 +787,12 @@
|
||||||
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
|
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
|
||||||
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
|
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
|
||||||
( void ) xTaskResumeAll();
|
( void ) xTaskResumeAll();
|
||||||
|
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
|
||||||
|
}
|
||||||
|
( void ) event_groupsUNLOCK( pxEventBits );
|
||||||
|
|
||||||
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
|
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
|
||||||
{
|
{
|
||||||
|
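
A side effect visible in this hunk: vEventGroupDelete() unblocks every waiter, and the in-code comment notes they are woken with a result of 0. Callers of xEventGroupWaitBits() should therefore treat a zero result as "condition not met". An illustrative fragment, reusing xEvents and SENSOR_READY_BIT from the sketch near the top:

    EventBits_t uxBits;

    uxBits = xEventGroupWaitBits( xEvents,
                                  SENSOR_READY_BIT,
                                  pdTRUE,               /* clear the bit on exit */
                                  pdFALSE,              /* wait for any bit */
                                  pdMS_TO_TICKS( 500 ) );

    if( ( uxBits & SENSOR_READY_BIT ) == ( EventBits_t ) 0 )
    {
        /* Timed out, or the group was deleted while this task was blocked. */
    }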
@@ -774,6 +896,129 @@
     traceRETURN_vEventGroupClearBitsCallback();
 }
 /*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits )
+    {
+        portDISABLE_INTERRUPTS();
+        {
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+            /* Task spinlock is always taken first */
+            portGET_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
+
+            /* Take the ISR spinlock next */
+            portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
+
+            /* Increment the critical nesting count */
+            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits )
+    {
+        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        /* Take the ISR spinlock */
+        portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
+
+        /* Increment the critical nesting count */
+        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        return uxSavedInterruptStatus;
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vEventGroupsExitCritical( EventGroup_t * pxEventBits )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+
+        /* Get the xYieldPending stats inside the critical section. */
+        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
+
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
+
+        /* Release the task spinlock */
+        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portENABLE_INTERRUPTS();
+
+            if( xYieldCurrentTask != pdFALSE )
+            {
+                portYIELD();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                                 EventGroup_t * pxEventBits )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
+    {
+        /* Disable preemption so that the current task cannot be preempted by another task */
+        vTaskPreemptionDisable( NULL );
+
+        /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
+         * the event group while it is suspended. */
+        portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
+    {
+        /* Release the previously held task spinlock */
+        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
+
+        /* Re-enable preemption */
+        vTaskPreemptionEnable( NULL );
+
+        /* We assume that the task was preempted when preemption was enabled */
+        return pdTRUE;
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+/*-----------------------------------------------------------*/
+
 static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
                                         const EventBits_t uxBitsToWaitFor,
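
The critical nesting count is what makes these sections re-entrant per core: only the outermost exit re-enables interrupts and, on the task path, possibly yields. A compact standalone model of that discipline (hypothetical single-core state for illustration, not the port layer):

    static unsigned uxNesting = 0U;

    static void vModelEnter( void )
    {
        /* disable interrupts, take the spinlocks ... */
        uxNesting++;
    }

    static void vModelExit( void )
    {
        /* release the spinlocks ... */
        if( --uxNesting == 0U )
        {
            /* outermost exit only: re-enable interrupts, optionally yield */
        }
    }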
@@ -3351,6 +3351,10 @@ typedef struct xSTATIC_EVENT_GROUP
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucDummy4;
     #endif
+
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        portSPINLOCK_TYPE xDummySpinlock[ 2 ];
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } StaticEventGroup_t;
 
 /*
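
This last hunk touches the opaque StaticEventGroup_t (defined outside event_groups.c, in the kernel's public headers): it mirrors the two new spinlocks so that statically allocated storage keeps the same size as the real EventGroup_t. The kernel guards that pairing at create time with a configASSERT comparing the two sizeof values; a portable C11 rendering of the same idea, with stand-in types:

    #include <assert.h>

    /* Stand-ins for the real pair; the kernel compares
     * sizeof( StaticEventGroup_t ) with sizeof( EventGroup_t ). */
    typedef struct { long x; } Real_t;
    typedef struct { long d; } Opaque_t;

    static_assert( sizeof( Opaque_t ) == sizeof( Real_t ),
                   "opaque static type must match the real type's size" );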