Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git
Synced 2025-07-05 03:47:15 -04:00

Comparing 8 commits: b437a7cc18 ... d00fdd8313
Commits in this range:

d00fdd8313
8eb906d08d
6faa6e2463
2f2b7e500a
2f58dd59c3
3176808c81
fd5037e7cc
7502d940d9
event_groups.c (133 changed lines)
@@ -76,10 +76,10 @@
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define event_groupsENTER_CRITICAL( pxEventBits )             vEventGroupsEnterCritical( pxEventBits )
-    #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )    uxEventGroupsEnterCriticalFromISR( pxEventBits )
-    #define event_groupsEXIT_CRITICAL( pxEventBits )              vEventGroupsExitCritical( pxEventBits )
-    #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    vEventGroupsExitCriticalFromISR( uxSavedInterruptStatus, pxEventBits )
+    #define event_groupsENTER_CRITICAL( pxEventBits )             taskDATA_GROUP_ENTER_CRITICAL( pxEventBits )
+    #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxEventBits )
+    #define event_groupsEXIT_CRITICAL( pxEventBits )              taskDATA_GROUP_EXIT_CRITICAL( pxEventBits )
+    #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define event_groupsENTER_CRITICAL( pxEventBits )             taskENTER_CRITICAL();
     #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )    taskENTER_CRITICAL_FROM_ISR();

@@ -87,35 +87,6 @@
     #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-
-/*
- * Enters a critical section for an event group. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-
-/*
- * Enters a critical section for an event group from an ISR context. Takes the ISR
- * spinlock and returns the previous interrupt state.
- */
-    static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for an event group. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vEventGroupsExitCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for an event group from an ISR context. Releases the ISR
- * spinlock and conditionally restores the previous interrupt state.
- */
-    static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                 EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-
 /*
  * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
  * ISRs to pend access to the event group. Caller cannot be preempted by other tasks

@@ -895,102 +866,6 @@

     traceRETURN_vEventGroupClearBitsCallback();
 }
 /*-----------------------------------------------------------*/
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits )
-    {
-        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        /* Take the ISR spinlock */
-        portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-        /* Increment the critical nesting count */
-        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        return uxSavedInterruptStatus;
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vEventGroupsExitCritical( EventGroup_t * pxEventBits )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending stats inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                 EventGroup_t * pxEventBits )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
 /*-----------------------------------------------------------*/
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
     static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
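The four removed functions all follow one discipline: interrupts are masked, the task spinlock is taken before the ISR spinlock, a per-core nesting count tracks depth, and only the outermost exit unmasks interrupts. Below is a minimal C11 model of that discipline; data_group_t, spin_lock(), irq_disable() and the thread-local counter are hypothetical stand-ins for the real port macros, not kernel code, and (unlike the port spinlocks) atomic_flag is not core-recursive, so the model only illustrates depth-1 nesting.

#include <stdatomic.h>
#include <stdbool.h>

typedef struct
{
    atomic_flag xTaskSpinlock; /* stands in for portSPINLOCK_TYPE */
    atomic_flag xISRSpinlock;
} data_group_t;

#define DATA_GROUP_STATIC_INIT    { ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT }

/* Approximates the per-core critical nesting count with a per-thread one. */
static _Thread_local unsigned uxNesting = 0;

static void spin_lock( atomic_flag * pxLock )
{
    while( atomic_flag_test_and_set_explicit( pxLock, memory_order_acquire ) )
    {
        /* Busy-wait until the owner releases the lock. */
    }
}

static void spin_unlock( atomic_flag * pxLock )
{
    atomic_flag_clear_explicit( pxLock, memory_order_release );
}

static void irq_disable( void ) { /* platform specific; no-op in this model */ }
static void irq_enable( void ) { /* platform specific; no-op in this model */ }

void data_group_enter( data_group_t * pxGroup )
{
    irq_disable();                            /* portDISABLE_INTERRUPTS() */
    spin_lock( &( pxGroup->xTaskSpinlock ) ); /* task spinlock is always taken first */
    spin_lock( &( pxGroup->xISRSpinlock ) );  /* ISR spinlock next */
    uxNesting++;                              /* portINCREMENT_CRITICAL_NESTING_COUNT() */
}

void data_group_exit( data_group_t * pxGroup )
{
    uxNesting--;                                /* portDECREMENT_CRITICAL_NESTING_COUNT() */
    spin_unlock( &( pxGroup->xISRSpinlock ) );  /* release in reverse order */
    spin_unlock( &( pxGroup->xTaskSpinlock ) );

    if( uxNesting == 0 )
    {
        irq_enable(); /* only the outermost exit unmasks interrupts */
    }
}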
include/FreeRTOS.h

@@ -3238,6 +3238,9 @@ typedef struct xSTATIC_TCB
     #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
         UBaseType_t xDummy25;
     #endif
+    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        BaseType_t xDummy26;
+    #endif
     #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
         void * pxDummy8;
     #endif
include/task.h (108 changed lines)
@@ -283,6 +283,114 @@ typedef enum
 /* Checks if core ID is valid. */
 #define taskVALID_CORE_ID( xCoreID )    ( ( ( ( ( BaseType_t ) 0 <= ( xCoreID ) ) && ( ( xCoreID ) < ( BaseType_t ) configNUMBER_OF_CORES ) ) ) ? ( pdTRUE ) : ( pdFALSE ) )

+/**
+ * task. h
+ *
+ * Macro to enter a data group critical section.
+ *
+ * \defgroup taskDATA_GROUP_ENTER_CRITICAL taskDATA_GROUP_ENTER_CRITICAL
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_ENTER_CRITICAL( pxDataGroup ) \
+    do { \
+        /* Disable preemption to avoid task state changes during the critical section. */ \
+        vTaskPreemptionDisable( NULL ); \
+        { \
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { \
+                /* Task spinlock is always taken first */ \
+                portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \
+                /* Disable interrupts */ \
+                portDISABLE_INTERRUPTS(); \
+                /* Take the ISR spinlock next */ \
+                portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
+            } \
+            else \
+            { \
+                mtCOVERAGE_TEST_MARKER(); \
+            } \
+            /* Increment the critical nesting count */ \
+            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        } \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/**
+ * task. h
+ *
+ * Macro to enter a data group critical section from an interrupt.
+ *
+ * \defgroup taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxDataGroup ) \
+    ( { \
+        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+        /* Take the ISR spinlock */ \
+        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
+        /* Increment the critical nesting count */ \
+        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        uxSavedInterruptStatus; \
+    } )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/**
+ * task. h
+ *
+ * Macro to exit a data group critical section.
+ *
+ * \defgroup taskDATA_GROUP_EXIT_CRITICAL taskDATA_GROUP_EXIT_CRITICAL
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_EXIT_CRITICAL( pxDataGroup ) \
+    do { \
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
+        { \
+            /* Release the ISR spinlock */ \
+            portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
+            /* Enable interrupts */ \
+            portENABLE_INTERRUPTS(); \
+            /* Release the task spinlock */ \
+            portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \
+        } \
+        else \
+        { \
+            mtCOVERAGE_TEST_MARKER(); \
+        } \
+        /* Re-enable preemption */ \
+        vTaskPreemptionEnable( NULL ); \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/**
+ * task. h
+ *
+ * Macro to exit a data group critical section from an interrupt.
+ *
+ * \defgroup taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus, pxDataGroup ) \
+    do { \
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxDataGroup->xISRSpinlock ) ); \
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
+        { \
+            portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \
+        } \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
 /*-----------------------------------------------------------
  * TASK CREATION API
  *----------------------------------------------------------*/
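As a usage sketch (not part of the diff): any structure that exposes xTaskSpinlock and xISRSpinlock members can serve as the pxDataGroup argument of the macros above. MyObject_t and vMyObjectIncrement() below are hypothetical, and assume portUSING_GRANULAR_LOCKS == 1 on a multicore port.

#include "FreeRTOS.h"
#include "task.h"

typedef struct xMyObject
{
    UBaseType_t uxSharedCounter;

    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
        portSPINLOCK_TYPE xTaskSpinlock; /* member names expected by the macros */
        portSPINLOCK_TYPE xISRSpinlock;
    #endif
} MyObject_t;

void vMyObjectIncrement( MyObject_t * pxObject )
{
    /* Disables preemption, takes both spinlocks, bumps the nesting count. */
    taskDATA_GROUP_ENTER_CRITICAL( pxObject );
    {
        pxObject->uxSharedCounter++;
    }
    /* Releases in reverse order and re-enables preemption. */
    taskDATA_GROUP_EXIT_CRITICAL( pxObject );
}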
queue.c (136 changed lines)
@@ -260,10 +260,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define queueENTER_CRITICAL( pxQueue )             vQueueEnterCritical( pxQueue )
-    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )    uxQueueEnterCriticalFromISR( pxQueue )
-    #define queueEXIT_CRITICAL( pxQueue )              vQueueExitCritical( pxQueue )
-    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    vQueueExitCriticalFromISR( uxSavedInterruptStatus, pxQueue )
+    #define queueENTER_CRITICAL( pxQueue )             taskDATA_GROUP_ENTER_CRITICAL( pxQueue )
+    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxQueue )
+    #define queueEXIT_CRITICAL( pxQueue )              taskDATA_GROUP_EXIT_CRITICAL( pxQueue )
+    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define queueENTER_CRITICAL( pxQueue )             taskENTER_CRITICAL();
     #define queueENTER_CRITICAL_FROM_ISR( pxQueue )    taskENTER_CRITICAL_FROM_ISR();

@@ -271,34 +271,6 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
     #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

-#if ( portUSING_GRANULAR_LOCKS == 1 )
-
-/*
- * Enters a critical section for a queue. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vQueueEnterCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-
-/*
- * Enters a critical section for a queue from an ISR context. Takes the ISR
- * spinlock and returns the previous interrupt state.
- */
-    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a queue. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vQueueExitCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a queue from an ISR context. Releases the ISR
- * spinlock and conditionally restores the previous interrupt state.
- */
-    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                           const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

 /*
  * Macro to mark a queue as locked. Locking a queue prevents an ISR from
  * accessing the queue event lists.

@@ -2665,106 +2637,6 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
 }
 /*-----------------------------------------------------------*/

-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vQueueEnterCritical( const Queue_t * pxQueue )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue )
-    {
-        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        /* Take the ISR spinlock */
-        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-        /* Increment the critical nesting count */
-        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        return uxSavedInterruptStatus;
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vQueueExitCritical( const Queue_t * pxQueue )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending status inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                           const Queue_t * pxQueue )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
 /*-----------------------------------------------------------*/

 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;
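The queue code pairs the FROM_ISR macros the same way the removed vQueueEnterCriticalFromISR/vQueueExitCriticalFromISR pair worked: the enter macro returns the saved interrupt mask, which must be handed back to the matching exit macro. A hedged sketch, continuing the hypothetical MyObject_t example from the task.h section:

/* Hypothetical interrupt handler; assumes portUSING_GRANULAR_LOCKS == 1. */
void vMyObjectIncrementFromISR( MyObject_t * pxObject )
{
    UBaseType_t uxSavedInterruptStatus;

    /* Masks interrupts, takes the ISR spinlock, bumps the nesting count and
     * hands back the previous interrupt mask. */
    uxSavedInterruptStatus = taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxObject );
    {
        pxObject->uxSharedCounter++;
    }
    /* Releases the ISR spinlock; restores the mask at the outermost level. */
    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxObject );
}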
stream_buffer.c (136 changed lines)
@@ -63,10 +63,10 @@
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define sbENTER_CRITICAL( pxStreamBuffer )             vStreamBufferEnterCritical( pxStreamBuffer )
-    #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer )    uxStreamBufferEnterCriticalFromISR( pxStreamBuffer )
-    #define sbEXIT_CRITICAL( pxStreamBuffer )              vStreamBufferExitCritical( pxStreamBuffer )
-    #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )    vStreamBufferExitCriticalFromISR( uxSavedInterruptStatus, pxStreamBuffer )
+    #define sbENTER_CRITICAL( pxStreamBuffer )             taskDATA_GROUP_ENTER_CRITICAL( pxStreamBuffer )
+    #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer )    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxStreamBuffer )
+    #define sbEXIT_CRITICAL( pxStreamBuffer )              taskDATA_GROUP_EXIT_CRITICAL( pxStreamBuffer )
+    #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define sbENTER_CRITICAL( pxEventBits )             taskENTER_CRITICAL();
     #define sbENTER_CRITICAL_FROM_ISR( pxEventBits )    taskENTER_CRITICAL_FROM_ISR();

@@ -288,35 +288,6 @@ typedef struct StreamBufferDef_t
     #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } StreamBuffer_t;

-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-
-/*
- * Enters a critical section for a stream buffer. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-
-/*
- * Enters a critical section for a stream buffer from an ISR context. Takes the ISR
- * spinlock and returns the previous interrupt state.
- */
-    static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a stream buffer. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a stream buffer from an ISR context. Releases the ISR
- * spinlock and conditionally restores the previous interrupt state.
- */
-    static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                  StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) ) */

 /*
  * Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer
  * but allows ISRs to pend access to the stream buffer. Caller cannot be preempted

@@ -410,105 +381,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
     StreamBufferCallbackFunction_t pxSendCompletedCallback,
     StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;

 /*-----------------------------------------------------------*/
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer )
-    {
-        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        /* Take the ISR spinlock */
-        portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-        /* Increment the critical nesting count */
-        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        return uxSavedInterruptStatus;
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending status inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                  StreamBuffer_t * const pxStreamBuffer )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
 /*-----------------------------------------------------------*/

 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
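One detail shared by the removed vStreamBufferExitCritical, vQueueExitCritical and vEventGroupsExitCritical is worth calling out: the yield decision (xTaskUnlockCanYield()) is sampled while the locks are still held, and acted on only after the outermost unlock. Extending the hypothetical C11 model from the event_groups.c note above:

extern bool yield_pending_for_this_core( void ); /* models xTaskUnlockCanYield() */
extern void yield_now( void );                   /* models portYIELD() */

void data_group_exit_with_yield( data_group_t * pxGroup )
{
    /* Sample the flag before the locks are dropped; afterwards another core
     * could change it under our feet. */
    bool xYield = yield_pending_for_this_core();

    uxNesting--;
    spin_unlock( &( pxGroup->xISRSpinlock ) );
    spin_unlock( &( pxGroup->xTaskSpinlock ) );

    if( uxNesting == 0 )
    {
        irq_enable();

        if( xYield )
        {
            yield_now(); /* the pended context switch happens here */
        }
    }
}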
tasks.c (164 changed lines)
@@ -136,11 +136,15 @@
 /*
  * Macros used by vListTask to indicate which state a task is in.
  */
-#define tskRUNNING_CHAR      ( 'X' )
-#define tskBLOCKED_CHAR      ( 'B' )
-#define tskREADY_CHAR        ( 'R' )
-#define tskDELETED_CHAR      ( 'D' )
-#define tskSUSPENDED_CHAR    ( 'S' )
+#define tskRUNNING_CHAR           ( 'X' )
+#define tskBLOCKED_CHAR           ( 'B' )
+#define tskREADY_CHAR             ( 'R' )
+#define tskDELETED_CHAR           ( 'D' )
+#define tskSUSPENDED_CHAR         ( 'S' )
+
+/* Bits used to record a deferred state change of a task. */
+#define tskDEFERRED_DELETION      ( UBaseType_t ) ( 1U << 0U )
+#define tskDEFERRED_SUSPENSION    ( UBaseType_t ) ( 1U << 1U )

 /*
  * Some kernel aware debuggers require the data the debugger needs access to be

@@ -346,7 +350,33 @@
 /* Yields the given core. This must be called from a critical section and xCoreID
  * must be valid. This macro is not required in single core since there is only
  * one core to yield. */
-    #define prvYieldCore( xCoreID ) \
+    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        #define prvYieldCore( xCoreID ) \
+    do { \
+        if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
+        { \
+            /* Pending a yield for this core since it is in the critical section. */ \
+            xYieldPendings[ ( xCoreID ) ] = pdTRUE; \
+        } \
+        else \
+        { \
+            if( pxCurrentTCBs[ ( xCoreID ) ]->xPreemptionDisable == 0U ) \
+            { \
+                /* Request other core to yield if it is not requested before. */ \
+                if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
+                { \
+                    portYIELD_CORE( xCoreID ); \
+                    pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD; \
+                } \
+            } \
+            else \
+            { \
+                xYieldPendings[ ( xCoreID ) ] = pdTRUE; \
+            } \
+        } \
+    } while( 0 )
+    #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+        #define prvYieldCore( xCoreID ) \
     do { \
         if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
         { \

@@ -363,6 +393,7 @@
             } \
         } \
     } while( 0 )
+    #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
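The new prvYieldCore variant routes a yield in one of three ways: pend it locally, interrupt the other core, or latch the request when the target core's task has preemption disabled. A compact model with hypothetical types and globals (tcb_model_t, this_core_id(), ipi_yield()), not the kernel's own code:

#include <stdbool.h>

#define NUM_CORES 2

typedef struct
{
    unsigned preemption_disable; /* models pxCurrentTCBs[ x ]->xPreemptionDisable */
    bool scheduled_to_yield;     /* models xTaskRunState == taskTASK_SCHEDULED_TO_YIELD */
} tcb_model_t;

static tcb_model_t * current_tcb[ NUM_CORES ];
static bool yield_pending[ NUM_CORES ];         /* models xYieldPendings[] */

extern int this_core_id( void );                /* models portGET_CORE_ID() */
extern void ipi_yield( int core_id );           /* models portYIELD_CORE() */

void yield_core_model( int core_id )
{
    if( core_id == this_core_id() )
    {
        /* The caller is inside a critical section on this core, so just pend. */
        yield_pending[ core_id ] = true;
    }
    else if( current_tcb[ core_id ]->preemption_disable == 0U )
    {
        /* Interrupt the other core, but only once per scheduling decision. */
        if( !current_tcb[ core_id ]->scheduled_to_yield )
        {
            ipi_yield( core_id );
            current_tcb[ core_id ]->scheduled_to_yield = true;
        }
    }
    else
    {
        /* Target task has preemption disabled; latch the request for later. */
        yield_pending[ core_id ] = true;
    }
}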
@@ -429,6 +460,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
     #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
         UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
     #endif
+
+    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        BaseType_t xDeferredStateChange; /**< Used to indicate if the task's state change is deferred. */
+    #endif

     #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
         StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
     #endif

@@ -2264,6 +2299,23 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         pxTCB = prvGetTCBFromHandle( xTaskToDelete );
         configASSERT( pxTCB != NULL );

+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+
+            /* If the task has disabled preemption, we need to defer the deletion until the
+             * task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */
+            if( pxTCB->xPreemptionDisable > 0U )
+            {
+                pxTCB->xDeferredStateChange |= tskDEFERRED_DELETION;
+                kernelEXIT_CRITICAL();
+                traceRETURN_vTaskDelete();
+                return;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        #endif /* configUSE_TASK_PREEMPTION_DISABLE */
+
         /* Remove task from the ready/delayed list. */
         if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
         {

@@ -3137,10 +3189,17 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     kernelENTER_CRITICAL();
     {
-        pxTCB = prvGetTCBFromHandle( xTask );
-        configASSERT( pxTCB != NULL );
+        if( xSchedulerRunning != pdFALSE )
+        {
+            pxTCB = prvGetTCBFromHandle( xTask );
+            configASSERT( pxTCB != NULL );

-        pxTCB->xPreemptionDisable++;
+            pxTCB->xPreemptionDisable++;
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
     }
     kernelEXIT_CRITICAL();

@@ -3155,25 +3214,63 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 void vTaskPreemptionEnable( const TaskHandle_t xTask )
 {
     TCB_t * pxTCB;
-    BaseType_t xCoreID;

     traceENTER_vTaskPreemptionEnable( xTask );

     kernelENTER_CRITICAL();
     {
-        pxTCB = prvGetTCBFromHandle( xTask );
-        configASSERT( pxTCB != NULL );
-        configASSERT( pxTCB->xPreemptionDisable > 0U );
-
-        pxTCB->xPreemptionDisable--;
-
         if( xSchedulerRunning != pdFALSE )
         {
-            if( ( pxTCB->xPreemptionDisable == 0U ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
+            pxTCB = prvGetTCBFromHandle( xTask );
+            configASSERT( pxTCB != NULL );
+            configASSERT( pxTCB->xPreemptionDisable > 0U );
+
+            pxTCB->xPreemptionDisable--;
+
+            if( pxTCB->xPreemptionDisable == 0U )
             {
-                xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
-                prvYieldCore( xCoreID );
+                /* Process deferred state changes which were inflicted while
+                 * preemption was disabled. */
+                if( pxTCB->xDeferredStateChange != 0U )
+                {
+                    if( pxTCB->xDeferredStateChange & tskDEFERRED_DELETION )
+                    {
+                        vTaskDelete( xTask );
+                    }
+                    else if( pxTCB->xDeferredStateChange & tskDEFERRED_SUSPENSION )
+                    {
+                        vTaskSuspend( xTask );
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+
+                    pxTCB->xDeferredStateChange = 0U;
+                    kernelEXIT_CRITICAL();
+                    traceRETURN_vTaskPreemptionEnable();
+                    return;
+                }
+                else
+                {
+                    if( ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
+                    {
+                        prvYieldCore( pxTCB->xTaskRunState );
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
         }
     }
     kernelEXIT_CRITICAL();

@@ -3199,6 +3296,23 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
         configASSERT( pxTCB != NULL );

+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+
+            /* If the task has disabled preemption, we need to defer the suspension until the
+             * task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */
+            if( pxTCB->xPreemptionDisable > 0U )
+            {
+                pxTCB->xDeferredStateChange |= tskDEFERRED_SUSPENSION;
+                kernelEXIT_CRITICAL();
+                traceRETURN_vTaskSuspend();
+                return;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        #endif /* configUSE_TASK_PREEMPTION_DISABLE */
+
         traceTASK_SUSPEND( pxTCB );

         /* Remove task from the ready/delayed list and place in the

@@ -7384,7 +7498,11 @@ static void prvResetNextTaskUnblockTime( void )
         BaseType_t xYieldCurrentTask;

         /* Get the xYieldPending stats inside the critical section. */
-        xYieldCurrentTask = xYieldPendings[ xCoreID ];
+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+            xYieldCurrentTask = xTaskUnlockCanYield();
+        #else
+            xYieldCurrentTask = xYieldPendings[ xCoreID ];
+        #endif /* configUSE_TASK_PREEMPTION_DISABLE */

         kernelRELEASE_ISR_LOCK( xCoreID );
         kernelRELEASE_TASK_LOCK( xCoreID );

@@ -7473,7 +7591,11 @@ static void prvResetNextTaskUnblockTime( void )
         BaseType_t xReturn;
         BaseType_t xCoreID = portGET_CORE_ID();

-        if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) )
+        if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
+            #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
+            #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+            )
         {
             xReturn = pdTRUE;
         }
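Taken together, the tasks.c changes make vTaskDelete() and vTaskSuspend() record rather than perform a state change when the target task has preemption disabled, and vTaskPreemptionEnable() replays the recorded change. A sketch of the caller-visible effect (hypothetical task code, assuming configUSE_TASK_PREEMPTION_DISABLE == 1):

#include "FreeRTOS.h"
#include "task.h"

void vWorkerTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        vTaskPreemptionDisable( NULL );
        {
            /* Work that must not be interleaved with a state change of this
             * task. If another task calls vTaskDelete() or vTaskSuspend() on
             * us here, the request is only recorded in xDeferredStateChange. */
        }
        vTaskPreemptionEnable( NULL );
        /* Any deletion or suspension deferred above takes effect inside the
         * vTaskPreemptionEnable() call. */
    }
}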
timers.c (91 changed lines)
@@ -83,8 +83,8 @@
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define tmrENTER_CRITICAL()    vTimerEnterCritical()
-    #define tmrEXIT_CRITICAL()     vTimerExitCritical()
+    #define tmrENTER_CRITICAL()    taskDATA_GROUP_ENTER_CRITICAL( &xTimerDataGroupLocks )
+    #define tmrEXIT_CRITICAL()     taskDATA_GROUP_EXIT_CRITICAL( &xTimerDataGroupLocks )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define tmrENTER_CRITICAL()    taskENTER_CRITICAL()
    #define tmrEXIT_CRITICAL()     taskEXIT_CRITICAL()

@@ -161,24 +161,18 @@
 PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;

 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC;
-    PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC;
+    PRIVILEGED_DATA static struct
+    {
+        portSPINLOCK_TYPE xTaskSpinlock;
+        portSPINLOCK_TYPE xISRSpinlock;
+    }
+    xTimerDataGroupLocks =
+    {
+        .xTaskSpinlock = portINIT_SPINLOCK_STATIC,
+        .xISRSpinlock = portINIT_SPINLOCK_STATIC
+    };
 #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

-#if ( portUSING_GRANULAR_LOCKS == 1 )
-
-/*
- * Enters a critical section for timers. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vTimerEnterCritical( void ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for timers. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vTimerExitCritical( void ) PRIVILEGED_FUNCTION;
-#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 /*-----------------------------------------------------------*/

 /*

@@ -1367,67 +1361,6 @@
 }
 /*-----------------------------------------------------------*/

-#if ( portUSING_GRANULAR_LOCKS == 1 )
-    static void vTimerEnterCritical( void )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, &xTaskSpinlock );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, &xISRSpinlock );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
-
-/*-----------------------------------------------------------*/
-
-#if ( portUSING_GRANULAR_LOCKS == 1 )
-    static void vTimerExitCritical( void )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending status inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 /*-----------------------------------------------------------*/

 /* This entire source file will be skipped if the application is not configured
  * to include software timer functionality. If you want to include software timer
  * functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */
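timers.c shows the module-scoped flavour of a data group: instead of per-object spinlocks, one static, statically initialised lock pair covers the whole module, and the module's critical-section macros pass its address to the task.h macros. A hypothetical module could adopt the same shape:

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    /* One lock pair shared by every critical section in this module. */
    PRIVILEGED_DATA static struct
    {
        portSPINLOCK_TYPE xTaskSpinlock;
        portSPINLOCK_TYPE xISRSpinlock;
    }
    xMyModuleDataGroupLocks =
    {
        .xTaskSpinlock = portINIT_SPINLOCK_STATIC,
        .xISRSpinlock = portINIT_SPINLOCK_STATIC
    };

    #define myModuleENTER_CRITICAL()    taskDATA_GROUP_ENTER_CRITICAL( &xMyModuleDataGroupLocks )
    #define myModuleEXIT_CRITICAL()     taskDATA_GROUP_EXIT_CRITICAL( &xMyModuleDataGroupLocks )
#else
    /* Without granular locks, fall back to the global critical section. */
    #define myModuleENTER_CRITICAL()    taskENTER_CRITICAL()
    #define myModuleEXIT_CRITICAL()     taskEXIT_CRITICAL()
#endif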