diff --git a/event_groups.c b/event_groups.c index 7c1e954f6..cfcbd6aa7 100644 --- a/event_groups.c +++ b/event_groups.c @@ -76,10 +76,10 @@ * Macros to mark the start and end of a critical code region. */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define event_groupsENTER_CRITICAL( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL( pxEventBits ) - #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxEventBits ) - #define event_groupsEXIT_CRITICAL( pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL( pxEventBits ) - #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) + #define event_groupsENTER_CRITICAL( pxEventBits ) vEventGroupsEnterCritical( pxEventBits ) + #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) uxEventGroupsEnterCriticalFromISR( pxEventBits ) + #define event_groupsEXIT_CRITICAL( pxEventBits ) vEventGroupsExitCritical( pxEventBits ) + #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) vEventGroupsExitCriticalFromISR( uxSavedInterruptStatus, pxEventBits ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define event_groupsENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL(); #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) taskENTER_CRITICAL_FROM_ISR(); @@ -87,6 +87,35 @@ #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + +/* + * Enters a critical section for an event group. Disables interrupts and takes + * both task and ISR spinlocks to ensure thread safety. + */ + static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; + +/* + * Enters a critical section for an event group from an ISR context. Takes the ISR + * spinlock and returns the previous interrupt state. + */ + static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; + +/* + * Exits a critical section for an event group. Releases spinlocks in reverse order + * and conditionally re-enables interrupts and yields if required. + */ + static void vEventGroupsExitCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; + +/* + * Exits a critical section for an event group from an ISR context. Releases the ISR + * spinlock and conditionally restores the previous interrupt state. + */ + static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus, + EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ + /* * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows * ISRs to pend access to the event group. 
Caller cannot be preempted by other tasks @@ -866,6 +895,102 @@ traceRETURN_vEventGroupClearBitsCallback(); } +/*-----------------------------------------------------------*/ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits ) + { + portDISABLE_INTERRUPTS(); + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + /* Task spinlock is always taken first */ + portGET_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) ); + + /* Take the ISR spinlock next */ + portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) ); + + /* Increment the critical nesting count */ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + } + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits ) + { + UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + /* Take the ISR spinlock */ + portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) ); + + /* Increment the critical nesting count */ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + return uxSavedInterruptStatus; + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vEventGroupsExitCritical( EventGroup_t * pxEventBits ) + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); + + /* Get the xYieldPending stats inside the critical section. */ + BaseType_t xYieldCurrentTask = xTaskUnlockCanYield(); + + /* Decrement the critical nesting count */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + /* Release the ISR spinlock */ + portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) ); + + /* Release the task spinlock */ + portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) ); + + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) + { + portENABLE_INTERRUPTS(); + + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus, + EventGroup_t * pxEventBits ) + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); + + /* Decrement the critical nesting count */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + /* Release the ISR spinlock */ + portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) ); + + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) + { + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ /*-----------------------------------------------------------*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 23110dd93..9a83aec2c 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -3238,9 +3238,6 @@ typedef 
struct xSTATIC_TCB #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) UBaseType_t xDummy25; #endif - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - BaseType_t xDummy26; - #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; #endif diff --git a/include/task.h b/include/task.h index 3c6f9dde5..0d843f0c2 100644 --- a/include/task.h +++ b/include/task.h @@ -283,114 +283,6 @@ typedef enum /* Checks if core ID is valid. */ #define taskVALID_CORE_ID( xCoreID ) ( ( ( ( ( BaseType_t ) 0 <= ( xCoreID ) ) && ( ( xCoreID ) < ( BaseType_t ) configNUMBER_OF_CORES ) ) ) ? ( pdTRUE ) : ( pdFALSE ) ) -/** - * task. h - * - * Macro to enter a data group critical section. - * - * \defgroup taskDATA_GROUP_ENTER_CRITICAL taskDATA_GROUP_ENTER_CRITICAL - * \ingroup GranularLocks - */ -#if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_ENTER_CRITICAL( pxDataGroup ) \ - do { \ - /* Disable preemption to avoid task state changes during the critical section. */ \ - vTaskPreemptionDisable( NULL ); \ - { \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { \ - /* Task spinlock is always taken first */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \ - /* Disable interrupts */ \ - portDISABLE_INTERRUPTS(); \ - /* Take the ISR spinlock next */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \ - } \ - else \ - { \ - mtCOVERAGE_TEST_MARKER(); \ - } \ - /* Increment the critical nesting count */ \ - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ - } \ - } while( 0 ) -#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ - -/** - * task. h - * - * Macro to enter a data group critical section from an interrupt. - * - * \defgroup taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR - * \ingroup GranularLocks - */ -#if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxDataGroup ) \ - ( { \ - UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - /* Take the ISR spinlock */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \ - /* Increment the critical nesting count */ \ - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ - uxSavedInterruptStatus; \ - } ) -#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ - -/** - * task. h - * - * Macro to exit a data group critical section. 
- * - * \defgroup taskDATA_GROUP_EXIT_CRITICAL taskDATA_GROUP_EXIT_CRITICAL - * \ingroup GranularLocks - */ -#if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_EXIT_CRITICAL( pxDataGroup ) \ - do { \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \ - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ - { \ - /* Release the ISR spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \ - /* Enable interrupts */ \ - portENABLE_INTERRUPTS(); \ - /* Release the task spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \ - } \ - else \ - { \ - mtCOVERAGE_TEST_MARKER(); \ - } \ - /* Re-enable preemption */ \ - vTaskPreemptionEnable( NULL ); \ - } while( 0 ) -#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ - -/** - * task. h - * - * Macro to exit a data group critical section from an interrupt. - * - * \defgroup taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR - * \ingroup GranularLocks - */ -#if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus, pxDataGroup ) \ - do { \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \ - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxDataGroup->xISRSpinlock ) ); \ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ - { \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ - } \ - } while( 0 ) -#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ - /*----------------------------------------------------------- * TASK CREATION API *----------------------------------------------------------*/ diff --git a/queue.c b/queue.c index 1e5e3e591..0f5990cea 100644 --- a/queue.c +++ b/queue.c @@ -260,10 +260,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * Macros to mark the start and end of a critical code region. */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define queueENTER_CRITICAL( pxQueue ) taskDATA_GROUP_ENTER_CRITICAL( pxQueue ) - #define queueENTER_CRITICAL_FROM_ISR( pxQueue ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxQueue ) - #define queueEXIT_CRITICAL( pxQueue ) taskDATA_GROUP_EXIT_CRITICAL( pxQueue ) - #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) + #define queueENTER_CRITICAL( pxQueue ) vQueueEnterCritical( pxQueue ) + #define queueENTER_CRITICAL_FROM_ISR( pxQueue ) uxQueueEnterCriticalFromISR( pxQueue ) + #define queueEXIT_CRITICAL( pxQueue ) vQueueExitCritical( pxQueue ) + #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) vQueueExitCriticalFromISR( uxSavedInterruptStatus, pxQueue ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define queueENTER_CRITICAL( pxQueue ) taskENTER_CRITICAL(); #define queueENTER_CRITICAL_FROM_ISR( pxQueue ) taskENTER_CRITICAL_FROM_ISR(); @@ -271,6 +271,34 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + +/* + * Enters a critical section for a queue. 
Disables interrupts and takes + * both task and ISR spinlocks to ensure thread safety. + */ + static void vQueueEnterCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Enters a critical section for a queue from an ISR context. Takes the ISR + * spinlock and returns the previous interrupt state. + */ + static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Exits a critical section for a queue. Releases spinlocks in reverse order + * and conditionally re-enables interrupts and yields if required. + */ + static void vQueueExitCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Exits a critical section for a queue from an ISR context. Releases the ISR + * spinlock and conditionally restores the previous interrupt state. + */ + static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus, + const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + /* * Macro to mark a queue as locked. Locking a queue prevents an ISR from * accessing the queue event lists. @@ -2637,6 +2665,106 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) } /*-----------------------------------------------------------*/ +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vQueueEnterCritical( const Queue_t * pxQueue ) + { + portDISABLE_INTERRUPTS(); + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + /* Task spinlock is always taken first */ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) ); + + /* Take the ISR spinlock next */ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ); + + /* Increment the critical nesting count */ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + } + } +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) + { + UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + /* Take the ISR spinlock */ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ); + + /* Increment the critical nesting count */ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + return uxSavedInterruptStatus; + } +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vQueueExitCritical( const Queue_t * pxQueue ) + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); + + /* Get the xYieldPending status inside the critical section. 
*/ + BaseType_t xYieldCurrentTask = xTaskUnlockCanYield(); + + /* Decrement the critical nesting count */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + /* Release the ISR spinlock */ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ); + + /* Release the task spinlock */ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) ); + + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) + { + portENABLE_INTERRUPTS(); + + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus, + const Queue_t * pxQueue ) + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); + + /* Decrement the critical nesting count */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + /* Release the ISR spinlock */ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) ); + + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) + { + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + } +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) { BaseType_t xReturn; diff --git a/stream_buffer.c b/stream_buffer.c index cc3b7c35f..bcff1eea7 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -63,10 +63,10 @@ * Macros to mark the start and end of a critical code region. */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( pxStreamBuffer ) - #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) - #define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( pxStreamBuffer ) - #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) + #define sbENTER_CRITICAL( pxStreamBuffer ) vStreamBufferEnterCritical( pxStreamBuffer ) + #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ) uxStreamBufferEnterCriticalFromISR( pxStreamBuffer ) + #define sbEXIT_CRITICAL( pxStreamBuffer ) vStreamBufferExitCritical( pxStreamBuffer ) + #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) vStreamBufferExitCriticalFromISR( uxSavedInterruptStatus, pxStreamBuffer ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define sbENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL(); #define sbENTER_CRITICAL_FROM_ISR( pxEventBits ) taskENTER_CRITICAL_FROM_ISR(); @@ -288,6 +288,35 @@ typedef struct StreamBufferDef_t #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } StreamBuffer_t; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + +/* + * Enters a critical section for a stream buffer. Disables interrupts and takes + * both task and ISR spinlocks to ensure thread safety. + */ + static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + +/* + * Enters a critical section for a stream buffer from an ISR context. Takes the ISR + * spinlock and returns the previous interrupt state. 
+ */ + static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + +/* + * Exits a critical section for a stream buffer. Releases spinlocks in reverse order + * and conditionally re-enables interrupts and yields if required. + */ + static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + +/* + * Exits a critical section for a stream buffer from an ISR context. Releases the ISR + * spinlock and conditionally restores the previous interrupt state. + */ + static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus, + StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) ) */ + + /* * Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer * but allows ISRs to pend access to the stream buffer. Caller cannot be preempted @@ -381,6 +410,105 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer ) + { + portDISABLE_INTERRUPTS(); + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + /* Task spinlock is always taken first */ + portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) ); + + /* Take the ISR spinlock next */ + portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) ); + + /* Increment the critical nesting count */ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + } + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer ) + { + UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + /* Take the ISR spinlock */ + portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) ); + + /* Increment the critical nesting count */ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + return uxSavedInterruptStatus; + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer ) + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); + + /* Get the xYieldPending status inside the critical section. 
*/ + BaseType_t xYieldCurrentTask = xTaskUnlockCanYield(); + + /* Decrement the critical nesting count */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + /* Release the ISR spinlock */ + portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) ); + + /* Release the task spinlock */ + portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) ); + + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) + { + portENABLE_INTERRUPTS(); + + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) + static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus, + StreamBuffer_t * const pxStreamBuffer ) + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); + + /* Decrement the critical nesting count */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + /* Release the ISR spinlock */ + portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) ); + + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) + { + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + } + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */ /*-----------------------------------------------------------*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) diff --git a/tasks.c b/tasks.c index e1299c945..99c0b9079 100644 --- a/tasks.c +++ b/tasks.c @@ -136,15 +136,11 @@ /* * Macros used by vListTask to indicate which state a task is in. */ -#define tskRUNNING_CHAR ( 'X' ) -#define tskBLOCKED_CHAR ( 'B' ) -#define tskREADY_CHAR ( 'R' ) -#define tskDELETED_CHAR ( 'D' ) -#define tskSUSPENDED_CHAR ( 'S' ) - -/* Bits used to record a deferred state change of a task. */ -#define tskDEFERRED_DELETION ( UBaseType_t ) ( 1U << 0U ) -#define tskDEFERRED_SUSPENSION ( UBaseType_t ) ( 1U << 1U ) +#define tskRUNNING_CHAR ( 'X' ) +#define tskBLOCKED_CHAR ( 'B' ) +#define tskREADY_CHAR ( 'R' ) +#define tskDELETED_CHAR ( 'D' ) +#define tskSUSPENDED_CHAR ( 'S' ) /* * Some kernel aware debuggers require the data the debugger needs access to be @@ -350,33 +346,7 @@ /* Yields the given core. This must be called from a critical section and xCoreID * must be valid. This macro is not required in single core since there is only * one core to yield. */ - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - #define prvYieldCore( xCoreID ) \ - do { \ - if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \ - { \ - /* Pending a yield for this core since it is in the critical section. */ \ - xYieldPendings[ ( xCoreID ) ] = pdTRUE; \ - } \ - else \ - { \ - if( pxCurrentTCBs[ ( xCoreID ) ]->xPreemptionDisable == 0U ) \ - { \ - /* Request other core to yield if it is not requested before. 
*/ \ - if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \ - { \ - portYIELD_CORE( xCoreID ); \ - pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD; \ - } \ - } \ - else \ - { \ - xYieldPendings[ ( xCoreID ) ] = pdTRUE; \ - } \ - } \ - } while( 0 ) - #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - #define prvYieldCore( xCoreID ) \ + #define prvYieldCore( xCoreID ) \ do { \ if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \ { \ @@ -393,7 +363,6 @@ } \ } \ } while( 0 ) - #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -460,10 +429,6 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */ #endif - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - BaseType_t xDeferredStateChange; /**< Used to indicate if the task's state change is deferred. */ - #endif - #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */ #endif @@ -2299,23 +2264,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxTCB = prvGetTCBFromHandle( xTaskToDelete ); configASSERT( pxTCB != NULL ); - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - - /* If the task has disabled preemption, we need to defer the deletion until the - * task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */ - if( pxTCB->xPreemptionDisable > 0U ) - { - pxTCB->xDeferredStateChange |= tskDEFERRED_DELETION; - kernelEXIT_CRITICAL(); - traceRETURN_vTaskDelete(); - return; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - #endif /* configUSE_TASK_PREEMPTION_DISABLE */ - /* Remove task from the ready/delayed list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { @@ -3189,17 +3137,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, kernelENTER_CRITICAL(); { - if( xSchedulerRunning != pdFALSE ) - { - pxTCB = prvGetTCBFromHandle( xTask ); - configASSERT( pxTCB != NULL ); + pxTCB = prvGetTCBFromHandle( xTask ); + configASSERT( pxTCB != NULL ); - pxTCB->xPreemptionDisable++; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + pxTCB->xPreemptionDisable++; } kernelEXIT_CRITICAL(); @@ -3214,63 +3155,25 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, void vTaskPreemptionEnable( const TaskHandle_t xTask ) { TCB_t * pxTCB; + BaseType_t xCoreID; traceENTER_vTaskPreemptionEnable( xTask ); kernelENTER_CRITICAL(); { + pxTCB = prvGetTCBFromHandle( xTask ); + configASSERT( pxTCB != NULL ); + configASSERT( pxTCB->xPreemptionDisable > 0U ); + + pxTCB->xPreemptionDisable--; + if( xSchedulerRunning != pdFALSE ) { - pxTCB = prvGetTCBFromHandle( xTask ); - configASSERT( pxTCB != NULL ); - configASSERT( pxTCB->xPreemptionDisable > 0U ); - - pxTCB->xPreemptionDisable--; - - if( pxTCB->xPreemptionDisable == 0U ) + if( ( pxTCB->xPreemptionDisable == 0U ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) ) { - /* Process deferred state changes which were inflicted while - * preemption was disabled. 
*/ - if( pxTCB->xDeferredStateChange != 0U ) - { - if( pxTCB->xDeferredStateChange & tskDEFERRED_DELETION ) - { - vTaskDelete( xTask ); - } - else if( pxTCB->xDeferredStateChange & tskDEFERRED_SUSPENSION ) - { - vTaskSuspend( xTask ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - pxTCB->xDeferredStateChange = 0U; - kernelEXIT_CRITICAL(); - traceRETURN_vTaskPreemptionEnable(); - return; - } - else - { - if( ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) ) - { - prvYieldCore( pxTCB->xTaskRunState ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + prvYieldCore( xCoreID ); } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); } } kernelEXIT_CRITICAL(); @@ -3296,23 +3199,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, pxTCB = prvGetTCBFromHandle( xTaskToSuspend ); configASSERT( pxTCB != NULL ); - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - - /* If the task has disabled preemption, we need to defer the suspension until the - * task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */ - if( pxTCB->xPreemptionDisable > 0U ) - { - pxTCB->xDeferredStateChange |= tskDEFERRED_SUSPENSION; - kernelEXIT_CRITICAL(); - traceRETURN_vTaskSuspend(); - return; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - #endif /* configUSE_TASK_PREEMPTION_DISABLE */ - traceTASK_SUSPEND( pxTCB ); /* Remove task from the ready/delayed list and place in the @@ -7498,11 +7384,7 @@ static void prvResetNextTaskUnblockTime( void ) BaseType_t xYieldCurrentTask; /* Get the xYieldPending stats inside the critical section. */ - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - xYieldCurrentTask = xTaskUnlockCanYield(); - #else - xYieldCurrentTask = xYieldPendings[ xCoreID ]; - #endif /* configUSE_TASK_PREEMPTION_DISABLE */ + xYieldCurrentTask = xYieldPendings[ xCoreID ]; kernelRELEASE_ISR_LOCK( xCoreID ); kernelRELEASE_TASK_LOCK( xCoreID ); @@ -7591,11 +7473,7 @@ static void prvResetNextTaskUnblockTime( void ) BaseType_t xReturn; BaseType_t xCoreID = portGET_CORE_ID(); - if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) - #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - ) + if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) ) { xReturn = pdTRUE; } diff --git a/timers.c b/timers.c index d63667e8d..7be48935d 100644 --- a/timers.c +++ b/timers.c @@ -83,8 +83,8 @@ * Macros to mark the start and end of a critical code region. 
*/ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTimerDataGroupLocks ) - #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTimerDataGroupLocks ) + #define tmrENTER_CRITICAL() vTimerEnterCritical() + #define tmrEXIT_CRITICAL() vTimerExitCritical() #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define tmrENTER_CRITICAL() taskENTER_CRITICAL() #define tmrEXIT_CRITICAL() taskEXIT_CRITICAL() @@ -161,18 +161,24 @@ PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - PRIVILEGED_DATA static struct - { - portSPINLOCK_TYPE xTaskSpinlock; - portSPINLOCK_TYPE xISRSpinlock; - } - xTimerDataGroupLocks = - { - .xTaskSpinlock = portINIT_SPINLOCK_STATIC, - .xISRSpinlock = portINIT_SPINLOCK_STATIC - }; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + +/* + * Enters a critical section for timers. Disables interrupts and takes + * both task and ISR spinlocks to ensure thread safety. + */ + static void vTimerEnterCritical( void ) PRIVILEGED_FUNCTION; + +/* + * Exits a critical section for timers. Releases spinlocks in reverse order + * and conditionally re-enables interrupts and yields if required. + */ + static void vTimerExitCritical( void ) PRIVILEGED_FUNCTION; + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ /*-----------------------------------------------------------*/ /* @@ -1361,6 +1367,67 @@ } /*-----------------------------------------------------------*/ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + static void vTimerEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + /* Task spinlock is always taken first */ + portGET_SPINLOCK( xCoreID, &xTaskSpinlock ); + + /* Take the ISR spinlock next */ + portGET_SPINLOCK( xCoreID, &xISRSpinlock ); + + /* Increment the critical nesting count */ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + } + } + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/*-----------------------------------------------------------*/ + + #if ( portUSING_GRANULAR_LOCKS == 1 ) + static void vTimerExitCritical( void ) + { + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); + + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); + + /* Get the xYieldPending status inside the critical section. */ + BaseType_t xYieldCurrentTask = xTaskUnlockCanYield(); + + /* Decrement the critical nesting count */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + + /* Release the ISR spinlock */ + portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock ); + + /* Release the task spinlock */ + portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock ); + + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) + { + portENABLE_INTERRUPTS(); + + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ +/*-----------------------------------------------------------*/ + /* This entire source file will be skipped if the application is not configured * to include software timer functionality. 
If you want to include software timer * functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */
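
Usage note (illustrative, not part of the patch): the calling pattern inside the kernel is unchanged by this change. Code that touches a queue, stream buffer, event group or timer still brackets the access with the corresponding ENTER/EXIT critical macros; only the expansion of those macros moves from the shared taskDATA_GROUP_* macros in task.h to the per-file static helpers added above. A minimal sketch of that pattern for a queue follows; prvExampleMessagesWaiting() is a hypothetical helper used purely for illustration and does not appear in the patch.

    /* Illustrative sketch only. With portUSING_GRANULAR_LOCKS == 1 the
     * queueENTER_CRITICAL()/queueEXIT_CRITICAL() pair now expands to
     * vQueueEnterCritical()/vQueueExitCritical(): entry disables interrupts,
     * takes the queue's task spinlock, then its ISR spinlock, and increments
     * the per-core critical nesting count; exit releases the locks in reverse
     * order and may yield if a context switch became pending while the locks
     * were held. */
    static UBaseType_t prvExampleMessagesWaiting( Queue_t * const pxQueue )
    {
        UBaseType_t uxReturn;

        queueENTER_CRITICAL( pxQueue );
        {
            /* Queue state may only be read or written while the queue's
             * data group locks are held. */
            uxReturn = pxQueue->uxMessagesWaiting;
        }
        queueEXIT_CRITICAL( pxQueue );

        return uxReturn;
    }

The same pairing applies to sbENTER_CRITICAL()/sbEXIT_CRITICAL(), event_groupsENTER_CRITICAL()/event_groupsEXIT_CRITICAL() and tmrENTER_CRITICAL()/tmrEXIT_CRITICAL(). The *_FROM_ISR enter variants additionally return the saved interrupt mask, which must be passed back to the matching *_EXIT_CRITICAL_FROM_ISR() macro so the previous interrupt state can be restored once the nesting count drops back to zero.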