diff --git a/event_groups.c b/event_groups.c
index cfcbd6aa7..7c1e954f6 100644
--- a/event_groups.c
+++ b/event_groups.c
@@ -76,10 +76,10 @@
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define event_groupsENTER_CRITICAL( pxEventBits )             vEventGroupsEnterCritical( pxEventBits )
-    #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )    uxEventGroupsEnterCriticalFromISR( pxEventBits )
-    #define event_groupsEXIT_CRITICAL( pxEventBits )              vEventGroupsExitCritical( pxEventBits )
-    #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    vEventGroupsExitCriticalFromISR( uxSavedInterruptStatus, pxEventBits )
+    #define event_groupsENTER_CRITICAL( pxEventBits )             taskDATA_GROUP_ENTER_CRITICAL( pxEventBits )
+    #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxEventBits )
+    #define event_groupsEXIT_CRITICAL( pxEventBits )              taskDATA_GROUP_EXIT_CRITICAL( pxEventBits )
+    #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define event_groupsENTER_CRITICAL( pxEventBits )    taskENTER_CRITICAL();
     #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )    taskENTER_CRITICAL_FROM_ISR();
@@ -87,35 +87,6 @@
     #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 
-
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-
-/*
- * Enters a critical section for an event group. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-
-/*
- * Enters a critical section for an event group from an ISR context. Takes the ISR
- * spinlock and returns the previous interrupt state.
- */
-    static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for an event group. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vEventGroupsExitCritical( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for an event group from an ISR context. Releases the ISR
- * spinlock and conditionally restores the previous interrupt state.
- */
-    static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                 EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-
 /*
  * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
  * ISRs to pend access to the event group. Caller cannot be preempted by other tasks
@@ -895,102 +866,6 @@
     traceRETURN_vEventGroupClearBitsCallback();
 }
-/*-----------------------------------------------------------*/
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vEventGroupsEnterCritical( EventGroup_t * pxEventBits )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static UBaseType_t uxEventGroupsEnterCriticalFromISR( EventGroup_t * pxEventBits )
-    {
-        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        /* Take the ISR spinlock */
-        portGET_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-        /* Increment the critical nesting count */
-        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        return uxSavedInterruptStatus;
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vEventGroupsExitCritical( EventGroup_t * pxEventBits )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending stats inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xTaskSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vEventGroupsExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                 EventGroup_t * pxEventBits )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxEventBits->xISRSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-        }
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
 /*-----------------------------------------------------------*/
     #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
     static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h
index 9a83aec2c..23110dd93 100644
--- a/include/FreeRTOS.h
+++ b/include/FreeRTOS.h
@@ -3238,6 +3238,9 @@ typedef struct xSTATIC_TCB
     #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
         UBaseType_t xDummy25;
     #endif
+    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        BaseType_t xDummy26;
+    #endif
     #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
         void * pxDummy8;
     #endif
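The task.h diff below is the heart of the change: the per-module critical section helpers deleted from event_groups.c, queue.c, stream_buffer.c and timers.c are replaced by one shared family of taskDATA_GROUP_* macros. The macros only require that the object passed as pxDataGroup expose xTaskSpinlock and xISRSpinlock members, since both are accessed by name. A minimal sketch of a conforming data group (hypothetical names, assuming the portSPINLOCK_TYPE and portINIT_SPINLOCK_STATIC port macros used throughout this patch):

    /* Hypothetical application-defined data group. The member names must
     * match what the taskDATA_GROUP_* macros dereference; their position
     * within the struct does not matter. */
    typedef struct AppDataGroup
    {
        portSPINLOCK_TYPE xTaskSpinlock; /* Taken first, by tasks only. */
        portSPINLOCK_TYPE xISRSpinlock;  /* Taken by both tasks and ISRs. */
        uint32_t ulSharedState;          /* Data protected by the locks. */
    } AppDataGroup_t;

    static AppDataGroup_t xAppDataGroup =
    {
        .xTaskSpinlock = portINIT_SPINLOCK_STATIC,
        .xISRSpinlock  = portINIT_SPINLOCK_STATIC,
        .ulSharedState = 0UL
    };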
diff --git a/include/task.h b/include/task.h
index 0d843f0c2..3c6f9dde5 100644
--- a/include/task.h
+++ b/include/task.h
@@ -283,6 +283,114 @@ typedef enum
 /* Checks if core ID is valid. */
 #define taskVALID_CORE_ID( xCoreID )    ( ( ( ( ( BaseType_t ) 0 <= ( xCoreID ) ) && ( ( xCoreID ) < ( BaseType_t ) configNUMBER_OF_CORES ) ) ) ? ( pdTRUE ) : ( pdFALSE ) )
 
+/**
+ * task. h
+ *
+ * Macro to enter a data group critical section.
+ *
+ * \defgroup taskDATA_GROUP_ENTER_CRITICAL taskDATA_GROUP_ENTER_CRITICAL
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_ENTER_CRITICAL( pxDataGroup ) \
+    do { \
+        /* Disable preemption to avoid task state changes during the critical section. */ \
+        vTaskPreemptionDisable( NULL ); \
+        { \
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) \
+            { \
+                /* Task spinlock is always taken first */ \
+                portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \
+                /* Disable interrupts */ \
+                portDISABLE_INTERRUPTS(); \
+                /* Take the ISR spinlock next */ \
+                portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
+            } \
+            else \
+            { \
+                mtCOVERAGE_TEST_MARKER(); \
+            } \
+            /* Increment the critical nesting count */ \
+            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        } \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/**
+ * task. h
+ *
+ * Macro to enter a data group critical section from an interrupt.
+ *
+ * \defgroup taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxDataGroup ) \
+    ( { \
+        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+        /* Take the ISR spinlock */ \
+        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
+        /* Increment the critical nesting count */ \
+        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        uxSavedInterruptStatus; \
+    } )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/**
+ * task. h
+ *
+ * Macro to exit a data group critical section.
+ *
+ * \defgroup taskDATA_GROUP_EXIT_CRITICAL taskDATA_GROUP_EXIT_CRITICAL
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_EXIT_CRITICAL( pxDataGroup ) \
+    do { \
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
+        { \
+            /* Release the ISR spinlock */ \
+            portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
+            /* Enable interrupts */ \
+            portENABLE_INTERRUPTS(); \
+            /* Release the task spinlock */ \
+            portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xTaskSpinlock ) ); \
+        } \
+        else \
+        { \
+            mtCOVERAGE_TEST_MARKER(); \
+        } \
+        /* Re-enable preemption */ \
+        vTaskPreemptionEnable( NULL ); \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/**
+ * task. h
+ *
+ * Macro to exit a data group critical section from an interrupt.
+ *
+ * \defgroup taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( xSavedInterruptStatus, pxDataGroup ) \
+    do { \
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
+        { \
+            portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \
+        } \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
 /*-----------------------------------------------------------
  * TASK CREATION API
  *----------------------------------------------------------*/
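A task-side usage sketch for the macros above (illustrative, not part of the patch), reusing the hypothetical xAppDataGroup from the previous note. Enter disables preemption for the calling task, takes the task spinlock, masks interrupts, then takes the ISR spinlock; exit reverses the order and re-enables preemption, which is also where any deferred delete or suspend (see the tasks.c hunks later in this patch) takes effect:

    static void prvIncrementSharedState( void )
    {
        taskDATA_GROUP_ENTER_CRITICAL( &xAppDataGroup );
        {
            /* Tasks on other cores and ISRs are both locked out here. */
            xAppDataGroup.ulSharedState++;
        }
        taskDATA_GROUP_EXIT_CRITICAL( &xAppDataGroup );
    }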
diff --git a/queue.c b/queue.c
index 0f5990cea..1e5e3e591 100644
--- a/queue.c
+++ b/queue.c
@@ -260,10 +260,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define queueENTER_CRITICAL( pxQueue )             vQueueEnterCritical( pxQueue )
-    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )    uxQueueEnterCriticalFromISR( pxQueue )
-    #define queueEXIT_CRITICAL( pxQueue )              vQueueExitCritical( pxQueue )
-    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    vQueueExitCriticalFromISR( uxSavedInterruptStatus, pxQueue )
+    #define queueENTER_CRITICAL( pxQueue )             taskDATA_GROUP_ENTER_CRITICAL( pxQueue )
+    #define queueENTER_CRITICAL_FROM_ISR( pxQueue )    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxQueue )
+    #define queueEXIT_CRITICAL( pxQueue )              taskDATA_GROUP_EXIT_CRITICAL( pxQueue )
+    #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define queueENTER_CRITICAL( pxQueue )    taskENTER_CRITICAL();
     #define queueENTER_CRITICAL_FROM_ISR( pxQueue )    taskENTER_CRITICAL_FROM_ISR();
@@ -271,34 +271,6 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
     #define queueEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxQueue )    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 
-#if ( portUSING_GRANULAR_LOCKS == 1 )
-
-/*
- * Enters a critical section for a queue. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vQueueEnterCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-
-/*
- * Enters a critical section for a queue from an ISR context. Takes the ISR
- * spinlock and returns the previous interrupt state.
- */
-    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a queue. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vQueueExitCritical( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a queue from an ISR context. Releases the ISR
- * spinlock and conditionally restores the previous interrupt state.
- */
-    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                           const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
-#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
-
 /*
  * Macro to mark a queue as locked. Locking a queue prevents an ISR from
  * accessing the queue event lists.
@@ -2665,106 +2637,6 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
 }
 /*-----------------------------------------------------------*/
 
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vQueueEnterCritical( const Queue_t * pxQueue )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static UBaseType_t uxQueueEnterCriticalFromISR( const Queue_t * pxQueue )
-    {
-        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        /* Take the ISR spinlock */
-        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-        /* Increment the critical nesting count */
-        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        return uxSavedInterruptStatus;
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vQueueExitCritical( const Queue_t * pxQueue )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending status inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xTaskSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vQueueExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                           const Queue_t * pxQueue )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-        }
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;
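The ISR-side pair works the same way but takes only the ISR spinlock and never touches preemption. Note that taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR evaluates to the saved interrupt mask through a ( { ... } ) statement expression, a GCC/Clang extension, so compilers lacking it would presumably need an alternative formulation. An illustrative handler, again using the hypothetical data group sketched earlier:

    static void prvExampleISR( void )
    {
        UBaseType_t uxSavedInterruptStatus;

        /* Only the ISR spinlock is taken; the task spinlock is never
         * acquired from interrupt context. */
        uxSavedInterruptStatus = taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &xAppDataGroup );
        {
            xAppDataGroup.ulSharedState++;
        }
        taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &xAppDataGroup );
    }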
diff --git a/stream_buffer.c b/stream_buffer.c
index bcff1eea7..cc3b7c35f 100644
--- a/stream_buffer.c
+++ b/stream_buffer.c
@@ -63,10 +63,10 @@
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define sbENTER_CRITICAL( pxStreamBuffer )             vStreamBufferEnterCritical( pxStreamBuffer )
-    #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer )    uxStreamBufferEnterCriticalFromISR( pxStreamBuffer )
-    #define sbEXIT_CRITICAL( pxStreamBuffer )              vStreamBufferExitCritical( pxStreamBuffer )
-    #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )    vStreamBufferExitCriticalFromISR( uxSavedInterruptStatus, pxStreamBuffer )
+    #define sbENTER_CRITICAL( pxStreamBuffer )             taskDATA_GROUP_ENTER_CRITICAL( pxStreamBuffer )
+    #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer )    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxStreamBuffer )
+    #define sbEXIT_CRITICAL( pxStreamBuffer )              taskDATA_GROUP_EXIT_CRITICAL( pxStreamBuffer )
+    #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define sbENTER_CRITICAL( pxEventBits )    taskENTER_CRITICAL();
     #define sbENTER_CRITICAL_FROM_ISR( pxEventBits )    taskENTER_CRITICAL_FROM_ISR();
@@ -288,35 +288,6 @@ typedef struct StreamBufferDef_t
     #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } StreamBuffer_t;
 
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-
-/*
- * Enters a critical section for a stream buffer. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-
-/*
- * Enters a critical section for a stream buffer from an ISR context. Takes the ISR
- * spinlock and returns the previous interrupt state.
- */
-    static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a stream buffer. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for a stream buffer from an ISR context. Releases the ISR
- * spinlock and conditionally restores the previous interrupt state.
- */
-    static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                  StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
-    #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-
-
 /*
  * Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer
  * but allows ISRs to pend access to the stream buffer. Caller cannot be preempted
@@ -410,105 +381,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
                                           StreamBufferCallbackFunction_t pxSendCompletedCallback,
                                           StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;
 
-/*-----------------------------------------------------------*/
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer )
-    {
-        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        /* Take the ISR spinlock */
-        portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-        /* Increment the critical nesting count */
-        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        return uxSavedInterruptStatus;
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending status inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
-/*-----------------------------------------------------------*/
-
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
-    static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
-                                                  StreamBuffer_t * const pxStreamBuffer )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-        }
-    }
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
 /*-----------------------------------------------------------*/
 
     #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
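All of the per-object helpers deleted above followed the same acquisition order the shared macros now enforce: task spinlock, then interrupt mask, then ISR spinlock, released in reverse. Keeping one global order is what prevents two cores contending for the same data group from deadlocking. Nesting is also cheap: because of the per-core critical nesting count, only the outermost enter/exit actually touch the spinlocks, as this sketch with the hypothetical data group shows:

    static void prvInner( void )
    {
        /* Nesting count 1 -> 2: no spinlocks are taken or released here,
         * only the count (and the preemption-disable count) is adjusted. */
        taskDATA_GROUP_ENTER_CRITICAL( &xAppDataGroup );
        {
            xAppDataGroup.ulSharedState += 2UL;
        }
        taskDATA_GROUP_EXIT_CRITICAL( &xAppDataGroup );
    }

    static void prvOuter( void )
    {
        /* Nesting count 0 -> 1: the locks are actually acquired. */
        taskDATA_GROUP_ENTER_CRITICAL( &xAppDataGroup );
        {
            prvInner();
        }
        /* Nesting count 1 -> 0: locks released, preemption restored. */
        taskDATA_GROUP_EXIT_CRITICAL( &xAppDataGroup );
    }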
diff --git a/tasks.c b/tasks.c
index 99c0b9079..e1299c945 100644
--- a/tasks.c
+++ b/tasks.c
@@ -136,11 +136,15 @@
 /*
  * Macros used by vListTask to indicate which state a task is in.
  */
-#define tskRUNNING_CHAR      ( 'X' )
-#define tskBLOCKED_CHAR      ( 'B' )
-#define tskREADY_CHAR        ( 'R' )
-#define tskDELETED_CHAR      ( 'D' )
-#define tskSUSPENDED_CHAR    ( 'S' )
+#define tskRUNNING_CHAR       ( 'X' )
+#define tskBLOCKED_CHAR       ( 'B' )
+#define tskREADY_CHAR         ( 'R' )
+#define tskDELETED_CHAR       ( 'D' )
+#define tskSUSPENDED_CHAR     ( 'S' )
+
+/* Bits used to record a deferred state change of a task. */
+#define tskDEFERRED_DELETION      ( UBaseType_t ) ( 1U << 0U )
+#define tskDEFERRED_SUSPENSION    ( UBaseType_t ) ( 1U << 1U )
 
 /*
  * Some kernel aware debuggers require the data the debugger needs access to be
@@ -346,7 +350,33 @@
 /* Yields the given core. This must be called from a critical section and xCoreID
  * must be valid. This macro is not required in single core since there is only
  * one core to yield. */
-    #define prvYieldCore( xCoreID ) \
+    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        #define prvYieldCore( xCoreID ) \
+        do { \
+            if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
+            { \
+                /* Pending a yield for this core since it is in the critical section. */ \
+                xYieldPendings[ ( xCoreID ) ] = pdTRUE; \
+            } \
+            else \
+            { \
+                if( pxCurrentTCBs[ ( xCoreID ) ]->xPreemptionDisable == 0U ) \
+                { \
+                    /* Request the other core to yield if it has not been requested already. */ \
+                    if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
+                    { \
+                        portYIELD_CORE( xCoreID ); \
+                        pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD; \
+                    } \
+                } \
+                else \
+                { \
+                    xYieldPendings[ ( xCoreID ) ] = pdTRUE; \
+                } \
+            } \
+        } while( 0 )
+    #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+        #define prvYieldCore( xCoreID ) \
         do { \
             if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
             { \
@@ -363,6 +393,7 @@
             } \
         } \
         } while( 0 )
+    #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
@@ -429,6 +460,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
         UBaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
     #endif
 
+    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        BaseType_t xDeferredStateChange; /**< Used to indicate if the task's state change is deferred. */
+    #endif
+
     #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
         StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
     #endif
@@ -2264,6 +2299,23 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         pxTCB = prvGetTCBFromHandle( xTaskToDelete );
         configASSERT( pxTCB != NULL );
 
+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+
+            /* If the task has disabled preemption, we need to defer the deletion until the
+             * task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */
+            if( pxTCB->xPreemptionDisable > 0U )
+            {
+                pxTCB->xDeferredStateChange |= tskDEFERRED_DELETION;
+                kernelEXIT_CRITICAL();
+                traceRETURN_vTaskDelete();
+                return;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        #endif /* configUSE_TASK_PREEMPTION_DISABLE */
+
         /* Remove task from the ready/delayed list. */
         if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
         {
@@ -3137,10 +3189,17 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         kernelENTER_CRITICAL();
         {
-            pxTCB = prvGetTCBFromHandle( xTask );
-            configASSERT( pxTCB != NULL );
+            if( xSchedulerRunning != pdFALSE )
+            {
+                pxTCB = prvGetTCBFromHandle( xTask );
+                configASSERT( pxTCB != NULL );
 
-            pxTCB->xPreemptionDisable++;
+                pxTCB->xPreemptionDisable++;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
         }
         kernelEXIT_CRITICAL();
@@ -3155,25 +3214,63 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     void vTaskPreemptionEnable( const TaskHandle_t xTask )
     {
         TCB_t * pxTCB;
-        BaseType_t xCoreID;
 
         traceENTER_vTaskPreemptionEnable( xTask );
 
         kernelENTER_CRITICAL();
         {
-            pxTCB = prvGetTCBFromHandle( xTask );
-            configASSERT( pxTCB != NULL );
-            configASSERT( pxTCB->xPreemptionDisable > 0U );
-
-            pxTCB->xPreemptionDisable--;
-
             if( xSchedulerRunning != pdFALSE )
             {
-                if( ( pxTCB->xPreemptionDisable == 0U ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
+                pxTCB = prvGetTCBFromHandle( xTask );
+                configASSERT( pxTCB != NULL );
+                configASSERT( pxTCB->xPreemptionDisable > 0U );
+
+                pxTCB->xPreemptionDisable--;
+
+                if( pxTCB->xPreemptionDisable == 0U )
                 {
-                    xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
-                    prvYieldCore( xCoreID );
+                    /* Process deferred state changes which were made while
+                     * preemption was disabled. */
+                    if( pxTCB->xDeferredStateChange != 0U )
+                    {
+                        if( pxTCB->xDeferredStateChange & tskDEFERRED_DELETION )
+                        {
+                            vTaskDelete( xTask );
+                        }
+                        else if( pxTCB->xDeferredStateChange & tskDEFERRED_SUSPENSION )
+                        {
+                            vTaskSuspend( xTask );
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+
+                        pxTCB->xDeferredStateChange = 0U;
+                        kernelEXIT_CRITICAL();
+                        traceRETURN_vTaskPreemptionEnable();
+                        return;
+                    }
+                    else
+                    {
+                        if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
+                        {
+                            prvYieldCore( pxTCB->xTaskRunState );
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+                    }
                 }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
             }
         }
         kernelEXIT_CRITICAL();
@@ -3199,6 +3296,23 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
         configASSERT( pxTCB != NULL );
 
+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+
+            /* If the task has disabled preemption, we need to defer the suspension until the
+             * task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */
+            if( pxTCB->xPreemptionDisable > 0U )
+            {
+                pxTCB->xDeferredStateChange |= tskDEFERRED_SUSPENSION;
+                kernelEXIT_CRITICAL();
+                traceRETURN_vTaskSuspend();
+                return;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        #endif /* configUSE_TASK_PREEMPTION_DISABLE */
+
         traceTASK_SUSPEND( pxTCB );
 
         /* Remove task from the ready/delayed list and place in the
@@ -7384,7 +7498,11 @@ static void prvResetNextTaskUnblockTime( void )
             BaseType_t xYieldCurrentTask;
 
             /* Get the xYieldPending stats inside the critical section. */
-            xYieldCurrentTask = xYieldPendings[ xCoreID ];
+            #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                xYieldCurrentTask = xTaskUnlockCanYield();
+            #else
+                xYieldCurrentTask = xYieldPendings[ xCoreID ];
+            #endif /* configUSE_TASK_PREEMPTION_DISABLE */
 
             kernelRELEASE_ISR_LOCK( xCoreID );
             kernelRELEASE_TASK_LOCK( xCoreID );
@@ -7473,7 +7591,11 @@ static void prvResetNextTaskUnblockTime( void )
         BaseType_t xReturn;
         BaseType_t xCoreID = portGET_CORE_ID();
 
-        if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) )
+        if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
+            #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U )
+            #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+            )
         {
             xReturn = pdTRUE;
         }
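The deferred state change machinery above can be read as follows: while a task has preemption disabled, vTaskDelete() and vTaskSuspend() targeting it only record tskDEFERRED_DELETION or tskDEFERRED_SUSPENSION and return; the recorded operation is then carried out inside vTaskPreemptionEnable() once the disable count reaches zero. An illustrative task exercising this (prvDoAtomicWork is a hypothetical helper):

    static void prvWorkerTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ; ; )
        {
            vTaskPreemptionDisable( NULL );
            {
                /* A vTaskSuspend() or vTaskDelete() issued against this task
                 * here is only recorded, not acted upon. */
                prvDoAtomicWork();
            }

            /* Any deferred suspension or deletion recorded above is applied
             * inside this call, so execution may not continue past it. */
            vTaskPreemptionEnable( NULL );
        }
    }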
diff --git a/timers.c b/timers.c
index 7be48935d..d63667e8d 100644
--- a/timers.c
+++ b/timers.c
@@ -83,8 +83,8 @@
  * Macros to mark the start and end of a critical code region.
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define tmrENTER_CRITICAL()    vTimerEnterCritical()
-    #define tmrEXIT_CRITICAL()     vTimerExitCritical()
+    #define tmrENTER_CRITICAL()    taskDATA_GROUP_ENTER_CRITICAL( &xTimerDataGroupLocks )
+    #define tmrEXIT_CRITICAL()     taskDATA_GROUP_EXIT_CRITICAL( &xTimerDataGroupLocks )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define tmrENTER_CRITICAL()    taskENTER_CRITICAL()
     #define tmrEXIT_CRITICAL()     taskEXIT_CRITICAL()
@@ -161,24 +161,18 @@ PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
 
     #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-        PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC;
-        PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC;
+        PRIVILEGED_DATA static struct
+        {
+            portSPINLOCK_TYPE xTaskSpinlock;
+            portSPINLOCK_TYPE xISRSpinlock;
+        }
+        xTimerDataGroupLocks =
+        {
+            .xTaskSpinlock = portINIT_SPINLOCK_STATIC,
+            .xISRSpinlock  = portINIT_SPINLOCK_STATIC
+        };
     #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
-    #if ( portUSING_GRANULAR_LOCKS == 1 )
-
-/*
- * Enters a critical section for timers. Disables interrupts and takes
- * both task and ISR spinlocks to ensure thread safety.
- */
-    static void vTimerEnterCritical( void ) PRIVILEGED_FUNCTION;
-
-/*
- * Exits a critical section for timers. Releases spinlocks in reverse order
- * and conditionally re-enables interrupts and yields if required.
- */
-    static void vTimerExitCritical( void ) PRIVILEGED_FUNCTION;
-    #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 /*-----------------------------------------------------------*/
 
 /*
@@ -1367,67 +1361,6 @@
 }
 /*-----------------------------------------------------------*/
 
-    #if ( portUSING_GRANULAR_LOCKS == 1 )
-    static void vTimerEnterCritical( void )
-    {
-        portDISABLE_INTERRUPTS();
-        {
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-            /* Task spinlock is always taken first */
-            portGET_SPINLOCK( xCoreID, &xTaskSpinlock );
-
-            /* Take the ISR spinlock next */
-            portGET_SPINLOCK( xCoreID, &xISRSpinlock );
-
-            /* Increment the critical nesting count */
-            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-        }
-    }
-    #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
-
-/*-----------------------------------------------------------*/
-
-    #if ( portUSING_GRANULAR_LOCKS == 1 )
-    static void vTimerExitCritical( void )
-    {
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
-
-        /* Get the xYieldPending status inside the critical section. */
-        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
-
-        /* Decrement the critical nesting count */
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
-
-        /* Release the ISR spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &xISRSpinlock );
-
-        /* Release the task spinlock */
-        portRELEASE_SPINLOCK( xCoreID, &xTaskSpinlock );
-
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
-        {
-            portENABLE_INTERRUPTS();
-
-            if( xYieldCurrentTask != pdFALSE )
-            {
-                portYIELD();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
-        }
-        else
-        {
-            mtCOVERAGE_TEST_MARKER();
-        }
-    }
-    #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
-/*-----------------------------------------------------------*/
-
 /* This entire source file will be skipped if the application is not configured
  * to include software timer functionality. If you want to include software timer
  * functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */
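Finally, the timers.c change shows the module-scoped (rather than per-object) form of the pattern: the two previously separate spinlocks are folded into one struct so its address can be handed to the shared macros. An application module could mirror the same shape, sketched here under the same port-macro assumptions as the earlier examples:

    /* Module-scoped data group guarding all state owned by this file. */
    static struct
    {
        portSPINLOCK_TYPE xTaskSpinlock;
        portSPINLOCK_TYPE xISRSpinlock;
    }
    xModuleLocks =
    {
        .xTaskSpinlock = portINIT_SPINLOCK_STATIC,
        .xISRSpinlock  = portINIT_SPINLOCK_STATIC
    };

    #define moduleENTER_CRITICAL()    taskDATA_GROUP_ENTER_CRITICAL( &xModuleLocks )
    #define moduleEXIT_CRITICAL()     taskDATA_GROUP_EXIT_CRITICAL( &xModuleLocks )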