Merge branch 'main' into update-pxcurrentTCB

chinglee-iot 2024-08-26 13:49:41 +08:00 committed by GitHub
commit cd0d09cc96
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 22 additions and 42 deletions


@@ -798,8 +798,8 @@ target_include_directories(freertos_kernel_port_headers INTERFACE
     $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CA9>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CA9>
     # ARMv8-A ports for GCC
-    $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64>:${CMAKE_CURRENT_LIST_DIR}/GCC/Arm_AARCH64>
-    $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64_SRE>:${CMAKE_CURRENT_LIST_DIR}/GCC/Arm_AARCH64_SRE>
+    $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_AARCH64>
+    $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64_SRE>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_AARCH64_SRE>
     # ARMv6-M port for GCC
     $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CM0>:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM0>


@@ -3838,27 +3838,16 @@
             BaseType_t xReturn = pdFALSE;
             TimerHandle_t xInternalTimerHandle = NULL;
             int32_t lIndex;
-            BaseType_t xIsHigherPriorityTaskWokenWriteable = pdFALSE;
-
-            if( pxHigherPriorityTaskWoken != NULL )
-            {
-                xIsHigherPriorityTaskWokenWriteable = xPortIsAuthorizedToAccessBuffer( pxHigherPriorityTaskWoken,
-                                                                                       sizeof( BaseType_t ),
-                                                                                       tskMPU_WRITE_PERMISSION );
-            }
+            lIndex = ( int32_t ) xTimer;
 
-            if( ( pxHigherPriorityTaskWoken == NULL ) || ( xIsHigherPriorityTaskWokenWriteable == pdTRUE ) )
+            if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
             {
-                lIndex = ( int32_t ) xTimer;
+                xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
 
-                if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
+                if( xInternalTimerHandle != NULL )
                 {
-                    xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
-
-                    if( xInternalTimerHandle != NULL )
-                    {
-                        xReturn = xTimerGenericCommandFromISR( xInternalTimerHandle, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait );
-                    }
-                }
+                    xReturn = xTimerGenericCommandFromISR( xInternalTimerHandle, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait );
                 }
             }
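
For context, a minimal usage sketch (not part of this commit) of how an application typically reaches this wrapper from an interrupt, assuming the v2 MPU wrappers are enabled so that xTimerChangePeriodFromISR() resolves to the MPU_xTimerGenericCommandFromISR path shown above; vExampleInterruptHandler and xExampleTimer are hypothetical names:

#include "FreeRTOS.h"
#include "timers.h"

/* xExampleTimer is assumed to have been created elsewhere with xTimerCreate(). */
extern TimerHandle_t xExampleTimer;

/* Hypothetical interrupt handler - the name and the trigger are illustrative only. */
void vExampleInterruptHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Send a change-period command to the timer service task; the
     * pxHigherPriorityTaskWoken argument is forwarded unchanged to
     * xTimerGenericCommandFromISR() by the wrapper changed above. */
    if( xTimerChangePeriodFromISR( xExampleTimer,
                                   pdMS_TO_TICKS( 500 ),
                                   &xHigherPriorityTaskWoken ) != pdPASS )
    {
        /* The timer command queue was full; handle the failure as appropriate. */
    }

    /* Request a context switch if posting the command woke a higher priority task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}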


@@ -308,7 +308,7 @@ void * pvPortMalloc( size_t xWantedSize )
                 /* The block is being returned - it is allocated and owned
                  * by the application and has no "next" block. */
                 heapALLOCATE_BLOCK( pxBlock );
-                pxBlock->pxNextFreeBlock = NULL;
+                pxBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                 xNumberOfSuccessfulAllocations++;
             }
             else
@@ -367,11 +367,11 @@ void vPortFree( void * pv )
         heapVALIDATE_BLOCK_POINTER( pxLink );
 
         configASSERT( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 );
-        configASSERT( pxLink->pxNextFreeBlock == NULL );
+        configASSERT( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) );
 
         if( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 )
         {
-            if( pxLink->pxNextFreeBlock == NULL )
+            if( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) )
             {
                 /* The block is being returned to the heap - it is no longer
                  * allocated. */


@@ -336,7 +336,7 @@ void * pvPortMalloc( size_t xWantedSize )
                 /* The block is being returned - it is allocated and owned
                  * by the application and has no "next" block. */
                 heapALLOCATE_BLOCK( pxBlock );
-                pxBlock->pxNextFreeBlock = NULL;
+                pxBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                 xNumberOfSuccessfulAllocations++;
             }
             else
@@ -395,11 +395,11 @@ void vPortFree( void * pv )
         heapVALIDATE_BLOCK_POINTER( pxLink );
 
         configASSERT( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 );
-        configASSERT( pxLink->pxNextFreeBlock == NULL );
+        configASSERT( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) );
 
         if( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 )
        {
-            if( pxLink->pxNextFreeBlock == NULL )
+            if( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) )
             {
                 /* The block is being returned to the heap - it is no longer
                  * allocated. */
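
Both heap hunks appear to be the same fix applied to the two heap implementations that support the heap protector: once pvPortMalloc() stores the list terminator in protected form, vPortFree() must assert and compare against the protected form as well. A minimal sketch of the idea, assuming heapPROTECT_BLOCK_POINTER() XORs the pointer with a canary when configENABLE_HEAP_PROTECTOR is 1 (the canary source and types here are simplified, not copied from the kernel):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct A_BLOCK_LINK
{
    struct A_BLOCK_LINK * pxNextFreeBlock; /* next free block in the free list */
    size_t xBlockSize;                     /* size of this block, including the header */
} BlockLink_t;

/* In the kernel the canary comes from a random source at initialisation; a
 * fixed value is used here only so the sketch is self-contained. */
static uintptr_t uxHeapCanary = 0xA5A5A5A5u;

#define heapPROTECT_BLOCK_POINTER( pxBlock ) \
    ( ( BlockLink_t * ) ( ( ( uintptr_t ) ( pxBlock ) ) ^ uxHeapCanary ) )

int main( void )
{
    BlockLink_t xBlock;

    /* What pvPortMalloc() now stores in an allocated block... */
    xBlock.pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );

    /* ...is not the literal NULL pointer, so vPortFree() must compare against
     * the protected form too - the point of the two heap hunks above. */
    printf( "stored == NULL? %d\n", xBlock.pxNextFreeBlock == NULL );
    printf( "stored == protected NULL? %d\n",
            xBlock.pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) );

    return 0;
}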

tasks.c (25 changes)

@@ -3031,11 +3031,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     {
         TCB_t * pxTCB;
         BaseType_t xCoreID;
-        UBaseType_t uxPrevCoreAffinityMask;
-
-        #if ( configUSE_PREEMPTION == 1 )
-            UBaseType_t uxPrevNotAllowedCores;
-        #endif
 
         traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask );
@@ -3043,7 +3038,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         {
             pxTCB = prvGetTCBFromHandle( xTask );
 
-            uxPrevCoreAffinityMask = pxTCB->uxCoreAffinityMask;
             pxTCB->uxCoreAffinityMask = uxCoreAffinityMask;
 
             if( xSchedulerRunning != pdFALSE )
@@ -3063,17 +3057,14 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 {
                     #if ( configUSE_PREEMPTION == 1 )
                     {
-                        /* Calculate the cores on which this task was not allowed to
-                         * run previously. */
-                        uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U );
-
-                        /* Does the new core mask enables this task to run on any of the
-                         * previously not allowed cores? If yes, check if this task can be
-                         * scheduled on any of those cores. */
-                        if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U )
-                        {
-                            prvYieldForTask( pxTCB );
-                        }
+                        /* The SMP scheduler requests a core to yield when a ready
+                         * task is able to run. It is possible that the core affinity
+                         * of the ready task is changed before the requested core
+                         * can select it to run. In that case, the task may not be
+                         * selected by the previously requested core due to core affinity
+                         * constraint and the SMP scheduler must select a new core to
+                         * yield for the task. */
+                        prvYieldForTask( xTask );
                     }
                     #else /* #if( configUSE_PREEMPTION == 1 ) */
                     {
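
The removed logic only called prvYieldForTask() when the new affinity mask enabled a core that was previously disallowed, which misses the case where the affinity is narrowed after the scheduler has already asked a now-disallowed core to yield for the task. A small worked example of the removed mask arithmetic with hypothetical values (4 cores, affinity narrowed from all cores to core 0 only):

#include <stdio.h>

/* Hypothetical values illustrating the removed check (not kernel code). */
#define configNUMBER_OF_CORES    4U

int main( void )
{
    unsigned int uxPrevCoreAffinityMask = 0x0FU; /* task could previously run on cores 0-3 */
    unsigned int uxCoreAffinityMask     = 0x01U; /* new affinity: core 0 only */

    /* Old logic: cores the task was NOT allowed to use before. */
    unsigned int uxPrevNotAllowedCores =
        ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U );

    /* Prints 0x0 and 0x0: the intersection is empty, so the old code skipped
     * prvYieldForTask() even though the task may already have been selected to
     * run on core 1-3 and now needs a different core to yield for it. The new
     * code calls prvYieldForTask( xTask ) unconditionally under
     * configUSE_PREEMPTION to cover exactly this case. */
    printf( "prev not allowed = 0x%x, intersection with new mask = 0x%x\n",
            uxPrevNotAllowedCores,
            uxPrevNotAllowedCores & uxCoreAffinityMask );

    return 0;
}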