mirror of
https://github.com/FreeRTOS/FreeRTOS-Kernel.git
synced 2026-05-12 11:42:57 -04:00
change(freertos-smp): Reconcile granular lock changes from upstream development
Integrate latest granular lock reconciliation changes from the development branch (chinglee-iot/FreeRTOS-Kernel, branch dev/update_for_granular_lock_demo_TCB_lock) to establish a common baseline for continued granular lock development. Key changes: - Eliminate configUSE_TCB_DATA_GROUP_LOCK; TCB locks now integral to portUSING_GRANULAR_LOCKS - Convert taskDATA_GROUP_ENTER_CRITICAL to function for run-state checking - Rename prvTaskPreemptionEnable to xTaskPreemptionEnableWithYieldStatus - Add prvTaskDataGroupCheckForRunStateChange, prvKernelEnterISROnlyCritical, xTaskUnlockCanYield - Restructure prvYieldCore and prvYieldForTask with TCB spinlock protection - Add scheduler suspension in event group list walking - Add queue set container spinlock protection - Enable single-priority mode with preemption disable - Expose timer spinlocks for testing Bug fixes applied during reconciliation: - Fix typo: vTaskTCBExtiCritical -> vTaskTCBExitCritical - Fix unused variable in xTaskPreemptionEnableWithYieldStatus - Fix broken tick hook preprocessor guard - Fix unused variable in prvNotifyQueueSetContainerFromISR
This commit is contained in:
parent
d33a460b99
commit
3346ee9078
6 changed files with 591 additions and 289 deletions
|
|
@@ -292,22 +292,12 @@ typedef enum
|
|||
* \ingroup GranularLocks
|
||||
*/
|
||||
#if ( portUSING_GRANULAR_LOCKS == 1 )
|
||||
#define taskDATA_GROUP_ENTER_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
|
||||
do { \
|
||||
/* Disable preemption to avoid task state changes during the critical section. */ \
|
||||
vTaskPreemptionDisable( NULL ); \
|
||||
{ \
|
||||
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
|
||||
/* Task spinlock is always taken first */ \
|
||||
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
|
||||
/* Disable interrupts */ \
|
||||
portDISABLE_INTERRUPTS(); \
|
||||
/* Take the ISR spinlock next */ \
|
||||
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
|
||||
/* Increment the critical nesting count */ \
|
||||
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
|
||||
} \
|
||||
} while( 0 )
|
||||
|
||||
/* Using a function implementation now since the data group entering critical
|
||||
* section needs to check for run state change. */
|
||||
void taskDataGroupEnterCritical( portSPINLOCK_TYPE * pxTaskSpinlock,
|
||||
portSPINLOCK_TYPE * pxISRSpinlock );
|
||||
#define taskDATA_GROUP_ENTER_CRITICAL taskDataGroupEnterCritical
|
||||
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
|
||||
|
||||
/**
|
||||
|
|
@@ -361,7 +351,7 @@ typedef enum
|
|||
mtCOVERAGE_TEST_MARKER(); \
|
||||
} \
|
||||
/* Re-enable preemption */ \
|
||||
prvTaskPreemptionEnable( NULL ); \
|
||||
( void ) xTaskPreemptionEnableWithYieldStatus( NULL ); \
|
||||
} while( 0 )
|
||||
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
|
||||
|
||||
|
|
@@ -421,7 +411,7 @@ typedef enum
|
|||
( { \
|
||||
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
|
||||
/* Re-enable preemption after releasing the task spinlock. */ \
|
||||
prvTaskPreemptionEnable( NULL ); \
|
||||
xTaskPreemptionEnableWithYieldStatus( NULL ); \
|
||||
} )
|
||||
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
|
||||
|
||||
|
|
@@ -1639,7 +1629,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
|
|||
* switch, otherwise pdFALSE. This is used by the scheduler to determine if a
|
||||
* context switch may be required following the enable.
|
||||
*/
|
||||
BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask );
|
||||
BaseType_t xTaskPreemptionEnableWithYieldStatus( const TaskHandle_t xTask );
|
||||
#endif
|
||||
|
||||
/*-----------------------------------------------------------
|
||||
|
|
@@ -4011,6 +4001,14 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC
|
|||
void vKernelLightWeightExitCritical( void );
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns.
|
||||
* To be called while data group is locked.
|
||||
*/
|
||||
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
|
||||
BaseType_t xTaskUnlockCanYield( void );
|
||||
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
|
||||
|
||||
#if ( portUSING_MPU_WRAPPERS == 1 )
|
||||
|
||||
/*
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue