mirror of
https://github.com/FreeRTOS/FreeRTOS-Kernel.git
synced 2026-05-12 11:42:57 -04:00
change(freertos-smp): Reconcile granular lock changes from upstream development
Integrate latest granular lock reconciliation changes from the development branch (chinglee-iot/FreeRTOS-Kernel, branch dev/update_for_granular_lock_demo_TCB_lock) to establish a common baseline for continued granular lock development. Key changes: - Eliminate configUSE_TCB_DATA_GROUP_LOCK; TCB locks now integral to portUSING_GRANULAR_LOCKS - Convert taskDATA_GROUP_ENTER_CRITICAL to function for run-state checking - Rename prvTaskPreemptionEnable to xTaskPreemptionEnableWithYieldStatus - Add prvTaskDataGroupCheckForRunStateChange, prvKernelEnterISROnlyCritical, xTaskUnlockCanYield - Restructure prvYieldCore and prvYieldForTask with TCB spinlock protection - Add scheduler suspension in event group list walking - Add queue set container spinlock protection - Enable single-priority mode with preemption disable - Expose timer spinlocks for testing. Bug fixes applied during reconciliation: - Fix typo: vTaskTCBExtiCritical -> vTaskTCBExitCritical - Fix unused variable in xTaskPreemptionEnableWithYieldStatus - Fix broken tick hook preprocessor guard - Fix unused variable in prvNotifyQueueSetContainerFromISR
This commit is contained in:
parent
d33a460b99
commit
3346ee9078
6 changed files with 591 additions and 289 deletions
|
|
@ -373,25 +373,6 @@
|
|||
#define portUSING_GRANULAR_LOCKS 0
|
||||
#endif
|
||||
|
||||
/* configUSE_TCB_DATA_GROUP_LOCK enables per-TCB spinlocks to protect TCB-specific
|
||||
* data such as uxPreemptionDisable. This reduces lock contention compared to using
|
||||
* the global kernel lock. When enabled:
|
||||
* - Each TCB has its own spinlock (xTCBSpinlock)
|
||||
* - vTaskPreemptionDisable/Enable use the TCB lock instead of kernel lock
|
||||
* - prvYieldCore acquires the target TCB's lock before checking uxPreemptionDisable
|
||||
* This feature requires portUSING_GRANULAR_LOCKS and multi-core. */
|
||||
#ifndef configUSE_TCB_DATA_GROUP_LOCK
|
||||
#define configUSE_TCB_DATA_GROUP_LOCK 0
|
||||
#endif
|
||||
|
||||
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
|
||||
#error configUSE_TCB_DATA_GROUP_LOCK requires portUSING_GRANULAR_LOCKS to be enabled
|
||||
#endif
|
||||
|
||||
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES == 1 ) )
|
||||
#error configUSE_TCB_DATA_GROUP_LOCK is not supported in single core FreeRTOS
|
||||
#endif
|
||||
|
||||
#ifndef configMAX_TASK_NAME_LEN
|
||||
#define configMAX_TASK_NAME_LEN 16
|
||||
#endif
|
||||
|
|
@ -2965,10 +2946,6 @@
|
|||
#error configUSE_MUTEXES must be set to 1 to use recursive mutexes
|
||||
#endif
|
||||
|
||||
#if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) )
|
||||
#error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use task preemption disable
|
||||
#endif
|
||||
|
||||
#if ( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) )
|
||||
#error configUSE_PREEMPTION must be set to 1 to use task preemption disable
|
||||
#endif
|
||||
|
|
@ -3272,6 +3249,9 @@ typedef struct xSTATIC_TCB
|
|||
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
|
||||
BaseType_t xDummy26;
|
||||
#endif
|
||||
#if ( portUSING_GRANULAR_LOCKS == 1 )
|
||||
portSPINLOCK_TYPE xDummy27;
|
||||
#endif
|
||||
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
|
||||
void * pxDummy8;
|
||||
#endif
|
||||
|
|
@ -3315,9 +3295,6 @@ typedef struct xSTATIC_TCB
|
|||
void * pvDummyDirectTransferBuffer;
|
||||
BaseType_t xDummyDirectTransferPosition;
|
||||
#endif
|
||||
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
|
||||
portSPINLOCK_TYPE xTCBDummySpinlock; /**< Spinlock protecting TCB-specific data (uxPreemptionDisable, uxDeferredStateChange). */
|
||||
#endif
|
||||
} StaticTask_t;
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -292,22 +292,12 @@ typedef enum
|
|||
* \ingroup GranularLocks
|
||||
*/
|
||||
#if ( portUSING_GRANULAR_LOCKS == 1 )
|
||||
#define taskDATA_GROUP_ENTER_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
|
||||
do { \
|
||||
/* Disable preemption to avoid task state changes during the critical section. */ \
|
||||
vTaskPreemptionDisable( NULL ); \
|
||||
{ \
|
||||
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
|
||||
/* Task spinlock is always taken first */ \
|
||||
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
|
||||
/* Disable interrupts */ \
|
||||
portDISABLE_INTERRUPTS(); \
|
||||
/* Take the ISR spinlock next */ \
|
||||
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
|
||||
/* Increment the critical nesting count */ \
|
||||
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
|
||||
} \
|
||||
} while( 0 )
|
||||
|
||||
/* Using a function implementation now since the data group entering critical
|
||||
* section needs to check for run state change. */
|
||||
void taskDataGroupEnterCritical( portSPINLOCK_TYPE * pxTaskSpinlock,
|
||||
portSPINLOCK_TYPE * pxISRSpinlock );
|
||||
#define taskDATA_GROUP_ENTER_CRITICAL taskDataGroupEnterCritical
|
||||
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
|
||||
|
||||
/**
|
||||
|
|
@ -361,7 +351,7 @@ typedef enum
|
|||
mtCOVERAGE_TEST_MARKER(); \
|
||||
} \
|
||||
/* Re-enable preemption */ \
|
||||
prvTaskPreemptionEnable( NULL ); \
|
||||
( void ) xTaskPreemptionEnableWithYieldStatus( NULL ); \
|
||||
} while( 0 )
|
||||
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
|
||||
|
||||
|
|
@ -421,7 +411,7 @@ typedef enum
|
|||
( { \
|
||||
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
|
||||
/* Re-enable preemption after releasing the task spinlock. */ \
|
||||
prvTaskPreemptionEnable( NULL ); \
|
||||
xTaskPreemptionEnableWithYieldStatus( NULL ); \
|
||||
} )
|
||||
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
|
||||
|
||||
|
|
@ -1639,7 +1629,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
|
|||
* switch, otherwise pdFALSE. This is used by the scheduler to determine if a
|
||||
* context switch may be required following the enable.
|
||||
*/
|
||||
BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask );
|
||||
BaseType_t xTaskPreemptionEnableWithYieldStatus( const TaskHandle_t xTask );
|
||||
#endif
|
||||
|
||||
/*-----------------------------------------------------------
|
||||
|
|
@ -4011,6 +4001,14 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC
|
|||
void vKernelLightWeightExitCritical( void );
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns.
|
||||
* To be called while data group is locked.
|
||||
*/
|
||||
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
|
||||
BaseType_t xTaskUnlockCanYield( void );
|
||||
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
|
||||
|
||||
#if ( portUSING_MPU_WRAPPERS == 1 )
|
||||
|
||||
/*
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue