Compare commits


7 commits

Author SHA1 Message Date
Darian Leung 8eb906d08d feat(freertos/smp): Add Granular Locking V4 proposal documents
Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-05-22 15:27:06 +02:00
Darian Leung 6faa6e2463 change(freertos/smp): Update timers.c locking
Updated timers.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL() with tmrENTER/EXIT_CRITICAL() (see the sketch after this list).
- Added vTimerEnterCritical() and vTimerExitCritical() to map to the
  data group critical section macros.
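
For orientation, a condensed sketch of the pattern this and the following commits
introduce, pieced together from the timers.c hunks at the end of this compare
(not a verbatim excerpt):

    /* A data group is the pair of spinlocks that the taskDATA_GROUP_*()
     * critical section macros operate on. */
    PRIVILEGED_DATA static struct
    {
        portSPINLOCK_TYPE xTaskSpinlock;
        portSPINLOCK_TYPE xISRSpinlock;
    }
    xTimerDataGroupLocks =
    {
        .xTaskSpinlock = portINIT_SPINLOCK_STATIC,
        .xISRSpinlock  = portINIT_SPINLOCK_STATIC
    };

    /* Module code then enters/exits its own data group instead of the single
     * kernel critical section. */
    #define tmrENTER_CRITICAL()    taskDATA_GROUP_ENTER_CRITICAL( &xTimerDataGroupLocks )
    #define tmrEXIT_CRITICAL()     taskDATA_GROUP_EXIT_CRITICAL( &xTimerDataGroupLocks )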

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-05-22 15:26:56 +02:00
Darian Leung 2f2b7e500a change(freertos/smp): Update stream_buffer.c locking
Updated stream_buffer.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL/_FROM_ISR() with sbENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vStreamBufferEnterCritical/FromISR() and
  vStreamBufferExitCritical/FromISR() to map to the data group critical
  section macros.
- Added prvLockStreamBufferForTasks() and prvUnlockStreamBufferForTasks() to suspend the stream
  buffer when executing non-deterministic code (a sketch of this pattern follows below).
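
The prvLock/UnlockStreamBufferForTasks() bodies are not shown in the hunks below,
so the following is only a guess at the intended shape, based on the in-file
comment ("prevents other tasks from accessing the stream buffer but allows ISRs
to pend access; caller cannot be preempted"):

    /* Hypothetical sketch - keeps other tasks out of the stream buffer while
     * non-deterministic work (e.g. copying an arbitrary-length payload) runs,
     * without masking interrupts, so ISRs can still pend their accesses. */
    static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
    {
        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

        vTaskPreemptionDisable( NULL );                                      /* caller must not be preempted */
        portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );     /* block other tasks only */
    }

    static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
    {
        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
        vTaskPreemptionEnable( NULL );
    }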

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-05-22 15:26:48 +02:00
Darian Leung 2f58dd59c3 change(freertos/smp): Update event_groups.c locking
Updated event_groups.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL/_FROM_ISR() with event_groupsENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vEventGroupsEnterCritical/FromISR() and
  vEventGroupsExitCritical/FromISR() functions that map to the data group
  critical section macros.
- Added prvLockEventGroupForTasks() and prvUnlockEventGroupForTasks() to suspend the event
group when executing non-deterministic code.
- xEventGroupSetBits() and vEventGroupDelete() access the kernel data group
  directly. Thus, added vTaskSuspendAll()/xTaskResumeAll() to these functions (see the sketch below).
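
A simplified sketch of that shape (not the actual implementation; the real
functions also take the event group's own locks and handle return values and
trace macros):

    void vEventGroupDelete( EventGroupHandle_t xEventGroup )
    {
        EventGroup_t * pxEventBits = xEventGroup;

        /* Unblocking the waiting tasks touches kernel-owned lists, so the
         * kernel data group is protected by suspending the scheduler rather
         * than by this event group's spinlocks alone. */
        vTaskSuspendAll();
        {
            /* ... remove and unblock every task waiting on
             * pxEventBits->xTasksWaitingForBits, then free the event group ... */
        }
        ( void ) xTaskResumeAll();
    }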

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-05-22 15:26:42 +02:00
Darian Leung 3176808c81 change(freertos/smp): Update queue.c locking
Updated queue.c to use granular locking

- Added xTaskSpinlock and xISRSpinlock
- Replaced critical section macros with data group critical section macros,
  e.g., taskENTER/EXIT_CRITICAL/_FROM_ISR() with queueENTER/EXIT_CRITICAL/_FROM_ISR().
- Added vQueueEnterCritical/FromISR() and vQueueExitCritical/FromISR()
  which map to the data group critical section macros.
- Added prvLockQueueForTasks() and prvUnlockQueueForTasks() as the granular locking equivalents
  to prvLockQueue() and prvUnlockQueue() respectively.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-05-22 15:26:34 +02:00
Darian Leung fd5037e7cc change(freertos/smp): Update tasks.c locking
Updated critical section macros with granular locks.

Some tasks.c API relied on their callers to enter critical sections. This
assumption no longer works under granular locking. Critical sections added to
the following functions:

- `vTaskInternalSetTimeOutState()`
- `xTaskIncrementTick()`
- `vTaskSwitchContext()`
- `xTaskRemoveFromEventList()`
- `eTaskConfirmSleepModeStatus()`
- `xTaskPriorityDisinherit()`
- `pvTaskIncrementMutexHeldCount()`

Added missing suspensions to the following functions:

- `vTaskPlaceOnEventList()`
- `vTaskPlaceOnUnorderedEventList()`
- `vTaskPlaceOnEventListRestricted()`

Fixed the locking in vTaskSwitchContext()

vTaskSwitchContext() must acquire both kernel locks, viz., the task lock and
the ISR lock. This is because vTaskSwitchContext() can be called from
either task context or ISR context. Also, vTaskSwitchContext() must not
alter the interrupt state prematurely.
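
A sketch of the resulting shape (the acquire-side macro names are assumed here
for illustration; only the release side, kernelRELEASE_ISR_LOCK() /
kernelRELEASE_TASK_LOCK(), is visible in the tasks.c hunks below):

    void vTaskSwitchContext( BaseType_t xCoreID )
    {
        /* Take both kernel locks directly instead of entering a critical
         * section: this function may be reached from a task or from an ISR,
         * and it must not modify the interrupt mask itself. */
        kernelGET_TASK_LOCK( xCoreID );
        kernelGET_ISR_LOCK( xCoreID );
        {
            /* ... select the next task to run on this core ... */
        }
        kernelRELEASE_ISR_LOCK( xCoreID );
        kernelRELEASE_TASK_LOCK( xCoreID );
    }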

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-05-22 15:26:26 +02:00
Darian Leung 7502d940d9 feat(granular_locks): Add granular locking functions
- Updated prvCheckForRunStateChange() for granular locks
- Updated vTaskSuspendAll() and xTaskResumeAll()
    - Now holds the xTaskSpinlock during kernel suspension
    - Increments/decrements xPreemptionDisable. Only yields when the count returns to 0, thus allowing
      for nested suspensions across different data groups (see the sketch below).
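
A simplified sketch of the suspension counting described above (it omits the
uxSchedulerSuspended bookkeeping, pended ticks, and ready-list moves that the
real xTaskResumeAll() performs; the kernelGET_TASK_LOCK() name is assumed,
matching the kernelRELEASE_TASK_LOCK() shown in the tasks.c hunks):

    void vTaskSuspendAll( void )
    {
        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

        /* Hold the kernel's task spinlock for the whole suspension and count
         * the nesting on the current task. */
        kernelGET_TASK_LOCK( xCoreID );
        pxCurrentTCBs[ xCoreID ]->xPreemptionDisable++;
    }

    BaseType_t xTaskResumeAll( void )
    {
        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
        BaseType_t xYielded = pdFALSE;

        pxCurrentTCBs[ xCoreID ]->xPreemptionDisable--;
        kernelRELEASE_TASK_LOCK( xCoreID );

        /* Only yield once every nested suspension - possibly taken by
         * different data groups - has been undone. */
        if( ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) &&
            ( xYieldPendings[ xCoreID ] != pdFALSE ) )
        {
            portYIELD();
            xYielded = pdTRUE;
        }

        return xYielded;
    }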

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
2025-05-22 15:26:17 +02:00
7 changed files with 278 additions and 493 deletions

event_groups.c

@@ -76,10 +76,10 @@
 * Macros to mark the start and end of a critical code region.
 */
#if ( portUSING_GRANULAR_LOCKS == 1 )
- #define event_groupsENTER_CRITICAL( pxEventBits ) vEventGroupsEnterCritical( pxEventBits )
- #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) uxEventGroupsEnterCriticalFromISR( pxEventBits )
- #define event_groupsEXIT_CRITICAL( pxEventBits ) vEventGroupsExitCritical( pxEventBits )
- #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) vEventGroupsExitCriticalFromISR( uxSavedInterruptStatus, pxEventBits )
+ #define event_groupsENTER_CRITICAL( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL( pxEventBits )
+ #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxEventBits )
+ #define event_groupsEXIT_CRITICAL( pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL( pxEventBits )
+ #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define event_groupsENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL();
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) taskENTER_CRITICAL_FROM_ISR();
@@ -87,35 +87,6 @@
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
/*
* Enters a critical section for an event group. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
/*
* Enters a critical section for an event group from an ISR context. Takes the ISR
* spinlock and returns the previous interrupt state.
*/
static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for an event group. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vEventGroupsExitCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for an event group from an ISR context. Releases the ISR
* spinlock and conditionally restores the previous interrupt state.
*/
static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*
* Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
* ISRs to pend access to the event group. Caller cannot be preempted by other tasks
@@ -895,102 +866,6 @@
traceRETURN_vEventGroupClearBitsCallback();
}
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits )
{
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Take the ISR spinlock */
portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
return uxSavedInterruptStatus;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vEventGroupsExitCritical( EventGroup_t * pxEventBits )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending stats inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
EventGroup_t * pxEventBits )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )

FreeRTOS.h

@@ -3238,6 +3238,9 @@ typedef struct xSTATIC_TCB
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
UBaseType_t xDummy25;
#endif
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDummy26;
#endif
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
void * pxDummy8;
#endif

task.h

@@ -283,6 +283,114 @@ typedef enum
/* Checks if core ID is valid. */
#define taskVALID_CORE_ID( xCoreID ) ( ( ( ( ( BaseType_t ) 0 <= ( xCoreID ) ) && ( ( xCoreID ) < ( BaseType_t ) configNUMBER_OF_CORES ) ) ) ? ( pdTRUE ) : ( pdFALSE ) )
/**
* task. h
*
* Macro to enter a data group critical section.
*
* \defgroup taskDATA_GROUP_ENTER_CRITICAL taskDATA_GROUP_ENTER_CRITICAL
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_ENTER_CRITICAL( pxDataGroup ) \
do { \
/* Disable preemption to avoid task state changes during the critical section. */ \
vTaskPreemptionDisable( NULL ); \
{ \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { \
/* Task spinlock is always taken first */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \
/* Disable interrupts */ \
portDISABLE_INTERRUPTS(); \
/* Take the ISR spinlock next */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
} \
else \
{ \
mtCOVERAGE_TEST_MARKER(); \
} \
/* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
} \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/**
* task. h
*
* Macro to enter a data group critical section from an interrupt.
*
* \defgroup taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxDataGroup ) \
( { \
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
/* Take the ISR spinlock */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
/* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
uxSavedInterruptStatus; \
} )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/**
* task. h
*
* Macro to exit a data group critical section.
*
* \defgroup taskDATA_GROUP_EXIT_CRITICAL taskDATA_GROUP_EXIT_CRITICAL
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_EXIT_CRITICAL( pxDataGroup ) \
do { \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
{ \
/* Release the ISR spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
/* Enable interrupts */ \
portENABLE_INTERRUPTS(); \
/* Release the task spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \
} \
else \
{ \
mtCOVERAGE_TEST_MARKER(); \
} \
/* Re-enable preemption */ \
vTaskPreemptionEnable( NULL ); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/**
* task. h
*
* Macro to exit a data group critical section from an interrupt.
*
* \defgroup taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus, pxDataGroup ) \
do { \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxDataGroup->xISRSpinlock ) ); \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
{ \
portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \
} \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*-----------------------------------------------------------
* TASK CREATION API
*----------------------------------------------------------*/

queue.c

@@ -260,10 +260,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
 * Macros to mark the start and end of a critical code region.
 */
#if ( portUSING_GRANULAR_LOCKS == 1 )
- #define queueENTER_CRITICAL( pxQueue ) vQueueEnterCritical( pxQueue )
- #define queueENTER_CRITICAL_FROM_ISR( pxQueue ) uxQueueEnterCriticalFromISR( pxQueue )
- #define queueEXIT_CRITICAL( pxQueue ) vQueueExitCritical( pxQueue )
- #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) vQueueExitCriticalFromISR( uxSavedInterruptStatus, pxQueue )
+ #define queueENTER_CRITICAL( pxQueue ) taskDATA_GROUP_ENTER_CRITICAL( pxQueue )
+ #define queueENTER_CRITICAL_FROM_ISR( pxQueue ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxQueue )
+ #define queueEXIT_CRITICAL( pxQueue ) taskDATA_GROUP_EXIT_CRITICAL( pxQueue )
+ #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define queueENTER_CRITICAL( pxQueue ) taskENTER_CRITICAL();
#define queueENTER_CRITICAL_FROM_ISR( pxQueue ) taskENTER_CRITICAL_FROM_ISR();
@@ -271,34 +271,6 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
#define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#if ( portUSING_GRANULAR_LOCKS == 1 )
/*
* Enters a critical section for a queue. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vQueueEnterCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/*
* Enters a critical section for a queue from an ISR context. Takes the ISR
* spinlock and returns the previous interrupt state.
*/
static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a queue. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vQueueExitCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a queue from an ISR context. Releases the ISR
* spinlock and conditionally restores the previous interrupt state.
*/
static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*
* Macro to mark a queue as locked. Locking a queue prevents an ISR from
* accessing the queue event lists.
@@ -2665,106 +2637,6 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
}
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vQueueEnterCritical( const Queue_t * pxQueue )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue )
{
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Take the ISR spinlock */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
return uxSavedInterruptStatus;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vQueueExitCritical( const Queue_t * pxQueue )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending status inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
const Queue_t * pxQueue )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
BaseType_t xReturn;

stream_buffer.c

@@ -63,10 +63,10 @@
 * Macros to mark the start and end of a critical code region.
 */
#if ( portUSING_GRANULAR_LOCKS == 1 )
- #define sbENTER_CRITICAL( pxStreamBuffer ) vStreamBufferEnterCritical( pxStreamBuffer )
- #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) uxStreamBufferEnterCriticalFromISR( pxStreamBuffer )
- #define sbEXIT_CRITICAL( pxStreamBuffer ) vStreamBufferExitCritical( pxStreamBuffer )
- #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) vStreamBufferExitCriticalFromISR( uxSavedInterruptStatus, pxStreamBuffer )
+ #define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( pxStreamBuffer )
+ #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxStreamBuffer )
+ #define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( pxStreamBuffer )
+ #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define sbENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL();
#define sbENTER_CRITICAL_FROM_ISR( pxEventBits ) taskENTER_CRITICAL_FROM_ISR();
@@ -288,35 +288,6 @@ typedef struct StreamBufferDef_t
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} StreamBuffer_t;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
/*
* Enters a critical section for a stream buffer. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
/*
* Enters a critical section for a stream buffer from an ISR context. Takes the ISR
* spinlock and returns the previous interrupt state.
*/
static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a stream buffer. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for a stream buffer from an ISR context. Releases the ISR
* spinlock and conditionally restores the previous interrupt state.
*/
static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*
* Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer
* but allows ISRs to pend access to the stream buffer. Caller cannot be preempted
@@ -410,105 +381,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
StreamBufferCallbackFunction_t pxSendCompletedCallback,
StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer )
{
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Take the ISR spinlock */
portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
return uxSavedInterruptStatus;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending status inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
StreamBuffer_t * const pxStreamBuffer )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

tasks.c

@@ -136,11 +136,15 @@
/*
 * Macros used by vListTask to indicate which state a task is in.
 */
#define tskRUNNING_CHAR ( 'X' )
#define tskBLOCKED_CHAR ( 'B' )
#define tskREADY_CHAR ( 'R' )
#define tskDELETED_CHAR ( 'D' )
#define tskSUSPENDED_CHAR ( 'S' )
/* Bits used to record a deferred state change of a task. */
#define tskDEFERRED_DELETION ( UBaseType_t ) ( 1U << 0U )
#define tskDEFERRED_SUSPENSION ( UBaseType_t ) ( 1U << 1U )
/*
 * Some kernel aware debuggers require the data the debugger needs access to be
@@ -346,7 +350,33 @@
/* Yields the given core. This must be called from a critical section and xCoreID
 * must be valid. This macro is not required in single core since there is only
 * one core to yield. */
- #define prvYieldCore( xCoreID ) \
+ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
#define prvYieldCore( xCoreID ) \
do { \
if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
{ \
/* Pending a yield for this core since it is in the critical section. */ \
xYieldPendings[ ( xCoreID ) ] = pdTRUE; \
} \
else \
{ \
if( pxCurrentTCBs[ ( xCoreID ) ]->xPreemptionDisable == 0U ) \
{ \
/* Request other core to yield if it is not requested before. */ \
if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
{ \
portYIELD_CORE( xCoreID ); \
pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD; \
} \
} \
else \
{ \
xYieldPendings[ ( xCoreID ) ] = pdTRUE; \
} \
} \
} while( 0 )
#else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
#define prvYieldCore( xCoreID ) \
do { \
if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
{ \
@@ -363,6 +393,7 @@
} \
} \
} while( 0 )
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/
@@ -429,6 +460,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
#endif
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDeferredStateChange; /**< Used to indicate if the task's state change is deferred. */
#endif
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
#endif
@@ -2264,6 +2299,23 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
pxTCB = prvGetTCBFromHandle( xTaskToDelete );
configASSERT( pxTCB != NULL );
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
/* If the task has disabled preemption, we need to defer the deletion until the
* task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */
if( pxTCB->xPreemptionDisable > 0U )
{
pxTCB->xDeferredStateChange |= tskDEFERRED_DELETION;
kernelEXIT_CRITICAL();
traceRETURN_vTaskDelete();
return;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif /* configUSE_TASK_PREEMPTION_DISABLE */
/* Remove task from the ready/delayed list. */
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
{
@@ -3137,10 +3189,17 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
kernelENTER_CRITICAL();
{
- pxTCB = prvGetTCBFromHandle( xTask );
- configASSERT( pxTCB != NULL );
- pxTCB->xPreemptionDisable++;
+ if( xSchedulerRunning != pdFALSE )
+ {
+     pxTCB = prvGetTCBFromHandle( xTask );
+     configASSERT( pxTCB != NULL );
+     pxTCB->xPreemptionDisable++;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
kernelEXIT_CRITICAL();
@@ -3155,25 +3214,63 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
void vTaskPreemptionEnable( const TaskHandle_t xTask )
{
TCB_t * pxTCB;
BaseType_t xCoreID;
traceENTER_vTaskPreemptionEnable( xTask );
kernelENTER_CRITICAL();
{
pxTCB = prvGetTCBFromHandle( xTask );
configASSERT( pxTCB != NULL );
configASSERT( pxTCB->xPreemptionDisable > 0U );
pxTCB->xPreemptionDisable--;
if( xSchedulerRunning != pdFALSE )
{
- if( ( pxTCB->xPreemptionDisable == 0U ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
- {
-     xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
-     prvYieldCore( xCoreID );
+ pxTCB = prvGetTCBFromHandle( xTask );
+ configASSERT( pxTCB != NULL );
+ configASSERT( pxTCB->xPreemptionDisable > 0U );
+ pxTCB->xPreemptionDisable--;
+ if( pxTCB->xPreemptionDisable == 0U )
+ {
+     /* Process deferred state changes which were inflicted while
+      * preemption was disabled. */
if( pxTCB->xDeferredStateChange != 0U )
{
if( pxTCB->xDeferredStateChange & tskDEFERRED_DELETION )
{
vTaskDelete( xTask );
}
else if( pxTCB->xDeferredStateChange & tskDEFERRED_SUSPENSION )
{
vTaskSuspend( xTask );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxTCB->xDeferredStateChange = 0U;
kernelEXIT_CRITICAL();
traceRETURN_vTaskPreemptionEnable();
return;
}
else
{
if( ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
{
prvYieldCore( pxTCB->xTaskRunState );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
kernelEXIT_CRITICAL();
@@ -3199,6 +3296,23 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
configASSERT( pxTCB != NULL );
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
/* If the task has disabled preemption, we need to defer the suspension until the
* task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */
if( pxTCB->xPreemptionDisable > 0U )
{
pxTCB->xDeferredStateChange |= tskDEFERRED_SUSPENSION;
kernelEXIT_CRITICAL();
traceRETURN_vTaskSuspend();
return;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif /* configUSE_TASK_PREEMPTION_DISABLE */
traceTASK_SUSPEND( pxTCB );
/* Remove task from the ready/delayed list and place in the
@@ -7384,7 +7498,11 @@ static void prvResetNextTaskUnblockTime( void )
BaseType_t xYieldCurrentTask;
/* Get the xYieldPending stats inside the critical section. */
- xYieldCurrentTask = xYieldPendings[ xCoreID ];
+ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
xYieldCurrentTask = xTaskUnlockCanYield();
#else
xYieldCurrentTask = xYieldPendings[ xCoreID ];
#endif /* configUSE_TASK_PREEMPTION_DISABLE */
kernelRELEASE_ISR_LOCK( xCoreID );
kernelRELEASE_TASK_LOCK( xCoreID );
@@ -7473,7 +7591,11 @@ static void prvResetNextTaskUnblockTime( void )
BaseType_t xReturn;
BaseType_t xCoreID = portGET_CORE_ID();
- if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) )
+ if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
&& ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
#endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
)
{
xReturn = pdTRUE;
}

timers.c

@@ -83,8 +83,8 @@
 * Macros to mark the start and end of a critical code region.
 */
#if ( portUSING_GRANULAR_LOCKS == 1 )
- #define tmrENTER_CRITICAL() vTimerEnterCritical()
- #define tmrEXIT_CRITICAL() vTimerExitCritical()
+ #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTimerDataGroupLocks )
+ #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTimerDataGroupLocks )
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
#define tmrENTER_CRITICAL() taskENTER_CRITICAL()
#define tmrEXIT_CRITICAL() taskEXIT_CRITICAL()
@@ -161,24 +161,18 @@
PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
- PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC;
- PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC;
+ PRIVILEGED_DATA static struct
+ {
portSPINLOCK_TYPE xTaskSpinlock;
portSPINLOCK_TYPE xISRSpinlock;
}
xTimerDataGroupLocks =
{
.xTaskSpinlock = portINIT_SPINLOCK_STATIC,
.xISRSpinlock = portINIT_SPINLOCK_STATIC
};
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#if ( portUSING_GRANULAR_LOCKS == 1 )
/*
* Enters a critical section for timers. Disables interrupts and takes
* both task and ISR spinlocks to ensure thread safety.
*/
static void vTimerEnterCritical( void ) PRIVILEGED_FUNCTION;
/*
* Exits a critical section for timers. Releases spinlocks in reverse order
* and conditionally re-enables interrupts and yields if required.
*/
static void vTimerExitCritical( void ) PRIVILEGED_FUNCTION;
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*-----------------------------------------------------------*/
/*
@@ -1367,67 +1361,6 @@
}
/*-----------------------------------------------------------*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
static void vTimerEnterCritical( void )
{
portDISABLE_INTERRUPTS();
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
/* Task spinlock is always taken first */
portGET_SPINLOCK( xCoreID, &xTaskSpinlock );
/* Take the ISR spinlock next */
portGET_SPINLOCK( xCoreID, &xISRSpinlock );
/* Increment the critical nesting count */
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
}
}
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*-----------------------------------------------------------*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
static void vTimerExitCritical( void )
{
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
/* Get the xYieldPending status inside the critical section. */
BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
/* Decrement the critical nesting count */
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
/* Release the ISR spinlock */
portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock );
/* Release the task spinlock */
portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
{
portENABLE_INTERRUPTS();
if( xYieldCurrentTask != pdFALSE )
{
portYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/*-----------------------------------------------------------*/
/* This entire source file will be skipped if the application is not configured
 * to include software timer functionality. If you want to include software timer
 * functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */