change(freertos-smp): Reconcile granular lock changes from upstream development

Integrate latest granular lock reconciliation changes from the development
branch (chinglee-iot/FreeRTOS-Kernel, branch
dev/update_for_granular_lock_demo_TCB_lock) to establish a common baseline
for continued granular lock development.

Key changes:
- Eliminate configUSE_TCB_DATA_GROUP_LOCK; TCB locks now integral to
  portUSING_GRANULAR_LOCKS
- Convert taskDATA_GROUP_ENTER_CRITICAL to function for run-state checking
- Rename prvTaskPreemptionEnable to xTaskPreemptionEnableWithYieldStatus
- Add prvTaskDataGroupCheckForRunStateChange, prvKernelEnterISROnlyCritical,
  xTaskUnlockCanYield
- Restructure prvYieldCore and prvYieldForTask with TCB spinlock protection
- Add scheduler suspension in event group list walking
- Add queue set container spinlock protection
- Enable single-priority mode with preemption disable
- Expose timer spinlocks for testing

Bug fixes applied during reconciliation:
- Fix typo: vTaskTCBExtiCritical -> vTaskTCBExitCritical
- Fix unused variable in xTaskPreemptionEnableWithYieldStatus
- Fix broken tick hook preprocessor guard
- Fix unused variable in prvNotifyQueueSetContainerFromISR
This commit is contained in:
Sudeep Mohanty 2026-04-23 15:36:59 +02:00
parent d33a460b99
commit 3346ee9078
6 changed files with 591 additions and 289 deletions

View file

@ -113,9 +113,17 @@
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define event_groupsLOCK( pxEventBits ) taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) )
#define event_groupsUNLOCK( pxEventBits ) taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) )
#define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxAlreadyYielded ) \
do { \
*( pxAlreadyYielded ) = taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) ); \
} while( 0 )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define event_groupsLOCK( pxEventBits ) vTaskSuspendAll()
#define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll()
#define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxAlreadyYielded ) \
do { \
*( pxAlreadyYielded ) = xTaskResumeAll(); \
} while( 0 )
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/
@ -298,7 +306,7 @@
}
}
}
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded );
if( xTicksToWait != ( TickType_t ) 0 )
{
@ -454,7 +462,7 @@
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded );
if( xTicksToWait != ( TickType_t ) 0 )
{
@ -621,6 +629,13 @@
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
pxListItem = listGET_HEAD_ENTRY( pxList );
/* Set the bits. */
@ -691,8 +706,12 @@
/* Snapshot resulting bits. */
uxReturnBits = pxEventBits->uxEventBits;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK( pxEventBits );
traceRETURN_xEventGroupSetBits( uxReturnBits );
@ -715,6 +734,13 @@
{
traceEVENT_GROUP_DELETE( xEventGroup );
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{
/* Unblock the task, returning 0 as the event list is being deleted
@ -722,8 +748,12 @@
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK( pxEventBits );
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{

View file

@ -373,25 +373,6 @@
#define portUSING_GRANULAR_LOCKS 0
#endif
/* configUSE_TCB_DATA_GROUP_LOCK enables per-TCB spinlocks to protect TCB-specific
* data such as uxPreemptionDisable. This reduces lock contention compared to using
* the global kernel lock. When enabled:
* - Each TCB has its own spinlock (xTCBSpinlock)
* - vTaskPreemptionDisable/Enable use the TCB lock instead of kernel lock
* - prvYieldCore acquires the target TCB's lock before checking uxPreemptionDisable
* This feature requires portUSING_GRANULAR_LOCKS and multi-core. */
#ifndef configUSE_TCB_DATA_GROUP_LOCK
#define configUSE_TCB_DATA_GROUP_LOCK 0
#endif
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
#error configUSE_TCB_DATA_GROUP_LOCK requires portUSING_GRANULAR_LOCKS to be enabled
#endif
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES == 1 ) )
#error configUSE_TCB_DATA_GROUP_LOCK is not supported in single core FreeRTOS
#endif
#ifndef configMAX_TASK_NAME_LEN
#define configMAX_TASK_NAME_LEN 16
#endif
@ -2965,10 +2946,6 @@
#error configUSE_MUTEXES must be set to 1 to use recursive mutexes
#endif
#if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) )
#error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use task preemption disable
#endif
#if ( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) )
#error configUSE_PREEMPTION must be set to 1 to use task preemption disable
#endif
@ -3272,6 +3249,9 @@ typedef struct xSTATIC_TCB
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
BaseType_t xDummy26;
#endif
#if ( portUSING_GRANULAR_LOCKS == 1 )
portSPINLOCK_TYPE xDummy27;
#endif
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
void * pxDummy8;
#endif
@ -3315,9 +3295,6 @@ typedef struct xSTATIC_TCB
void * pvDummyDirectTransferBuffer;
BaseType_t xDummyDirectTransferPosition;
#endif
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
portSPINLOCK_TYPE xTCBDummySpinlock; /**< Spinlock protecting TCB-specific data (uxPreemptionDisable, uxDeferredStateChange). */
#endif
} StaticTask_t;
/*

View file

@ -292,22 +292,12 @@ typedef enum
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_ENTER_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
do { \
/* Disable preemption to avoid task state changes during the critical section. */ \
vTaskPreemptionDisable( NULL ); \
{ \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
/* Task spinlock is always taken first */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Disable interrupts */ \
portDISABLE_INTERRUPTS(); \
/* Take the ISR spinlock next */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
/* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
} \
} while( 0 )
/* Using a function implementation now since the data group entering critical
* section needs to check for run state change. */
void taskDataGroupEnterCritical( portSPINLOCK_TYPE * pxTaskSpinlock,
portSPINLOCK_TYPE * pxISRSpinlock );
#define taskDATA_GROUP_ENTER_CRITICAL taskDataGroupEnterCritical
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
/**
@ -361,7 +351,7 @@ typedef enum
mtCOVERAGE_TEST_MARKER(); \
} \
/* Re-enable preemption */ \
prvTaskPreemptionEnable( NULL ); \
( void ) xTaskPreemptionEnableWithYieldStatus( NULL ); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
@ -421,7 +411,7 @@ typedef enum
( { \
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Re-enable preemption after releasing the task spinlock. */ \
prvTaskPreemptionEnable( NULL ); \
xTaskPreemptionEnableWithYieldStatus( NULL ); \
} )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
@ -1639,7 +1629,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
* switch, otherwise pdFALSE. This is used by the scheduler to determine if a
* context switch may be required following the enable.
*/
BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask );
BaseType_t xTaskPreemptionEnableWithYieldStatus( const TaskHandle_t xTask );
#endif
/*-----------------------------------------------------------
@ -4011,6 +4001,14 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC
void vKernelLightWeightExitCritical( void );
#endif
/*
* Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns.
* To be called while data group is locked.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
BaseType_t xTaskUnlockCanYield( void );
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#if ( portUSING_MPU_WRAPPERS == 1 )
/*

57
queue.c
View file

@ -2806,8 +2806,9 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
* locked items can be added or removed, but the event lists cannot be
* updated. */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
const BaseType_t xCoreID = portGET_CORE_ID();
portDISABLE_INTERRUPTS();
BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
#else
@ -2891,10 +2892,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cTxLock = queueUNLOCKED;
}
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{
portENABLE_INTERRUPTS();
}
@ -2905,7 +2906,6 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
/* Do the same for the Rx lock. */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
portDISABLE_INTERRUPTS();
xCoreID = ( BaseType_t ) portGET_CORE_ID();
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->xISRSpinlock ) );
#else
@ -3667,17 +3667,60 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
{
BaseType_t xReturn;
/* Call the generic version with xIsISR = pdFALSE to indicate task context */
return prvNotifyQueueSetContainerGeneric( pxQueue, pdFALSE );
#if ( portUSING_GRANULAR_LOCKS == 0 )
{
xReturn = prvNotifyQueueSetContainerGeneric( pxQueue, pdFALSE );
}
#else
{
const BaseType_t xCoreID = portGET_CORE_ID();
/* This API must be called in a critical section which already has preemption
* and interrupts disabled. */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->pxQueueSetContainer->xTaskSpinlock ) );
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->pxQueueSetContainer->xISRSpinlock ) );
{
xReturn = prvNotifyQueueSetContainerGeneric( pxQueue, pdFALSE );
}
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->pxQueueSetContainer->xISRSpinlock ) );
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->pxQueueSetContainer->xTaskSpinlock ) );
}
#endif /* if ( portUSING_GRANULAR_LOCKS == 0 ) */
return xReturn;
}
static BaseType_t prvNotifyQueueSetContainerFromISR( const Queue_t * const pxQueue )
{
BaseType_t xReturn;
/* Call the generic version with xIsISR = pdTRUE to indicate ISR context */
return prvNotifyQueueSetContainerGeneric( pxQueue, pdTRUE );
#if ( portUSING_GRANULAR_LOCKS == 0 )
{
xReturn = prvNotifyQueueSetContainerGeneric( pxQueue, pdTRUE );
}
#else
{
UBaseType_t uxSavedInterruptStatus;
const BaseType_t xCoreID = portGET_CORE_ID();
/* This API must be called in a critical section which already has interrupts disabled. */
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->pxQueueSetContainer->xISRSpinlock ) );
{
xReturn = prvNotifyQueueSetContainerGeneric( pxQueue, pdTRUE );
}
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxQueue->pxQueueSetContainer->xISRSpinlock ) );
}
#endif /* if ( portUSING_GRANULAR_LOCKS == 0 ) */
return xReturn;
}
static BaseType_t prvNotifyQueueSetContainerGeneric( const Queue_t * const pxQueue,

711
tasks.c

File diff suppressed because it is too large Load diff

View file

@ -161,8 +161,13 @@
PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC;
PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC;
#ifdef portREMOVE_STATIC_QUALIFIER
PRIVILEGED_DATA portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC;
PRIVILEGED_DATA portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC;
#else
PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC;
PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC;
#endif
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/