Merge branch 'main' into backwards-compatibility-config-num-cores

commit d4bba920be
Author: Soren Ptak
Date: 2024-02-13 07:33:04 -05:00, committed via GitHub
23 changed files with 232 additions and 250 deletions


@@ -2803,9 +2803,9 @@
 #ifndef configSTACK_DEPTH_TYPE
-    /* Defaults to uint16_t for backward compatibility, but can be overridden
-     * in FreeRTOSConfig.h if uint16_t is too restrictive. */
-    #define configSTACK_DEPTH_TYPE    uint16_t
+    /* Defaults to StackType_t for backward compatibility, but can be overridden
+     * in FreeRTOSConfig.h if StackType_t is too restrictive. */
+    #define configSTACK_DEPTH_TYPE    StackType_t
 #endif
 #ifndef configRUN_TIME_COUNTER_TYPE
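The `#ifndef` guard above is what makes the new default overridable: a definition supplied in FreeRTOSConfig.h takes precedence over the kernel's default. A minimal sketch, assuming a hypothetical project that wants to keep the old 16-bit depth type:

    /* FreeRTOSConfig.h (hypothetical project configuration). Because the
     * kernel only defines configSTACK_DEPTH_TYPE when the application has
     * not, this definition wins over the new StackType_t default. */
    #define configSTACK_DEPTH_TYPE    uint16_t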


@@ -53,10 +53,10 @@
 * The tskKERNEL_VERSION_MAJOR, tskKERNEL_VERSION_MINOR, tskKERNEL_VERSION_BUILD
 * values will reflect the last released version number.
 */
-#define tskKERNEL_VERSION_NUMBER    "V10.4.4+"
-#define tskKERNEL_VERSION_MAJOR     10
-#define tskKERNEL_VERSION_MINOR     4
-#define tskKERNEL_VERSION_BUILD     4
+#define tskKERNEL_VERSION_NUMBER    "V11.0.1+"
+#define tskKERNEL_VERSION_MAJOR     11
+#define tskKERNEL_VERSION_MINOR     0
+#define tskKERNEL_VERSION_BUILD     1
 /* MPU region parameters passed in ulParameters
 * of MemoryRegion_t struct. */


@@ -1,4 +1,4 @@
 name : "FreeRTOS-Kernel"
-version: "v10.5.1"
+version: "v11.0.1+"
 description: "FreeRTOS Kernel."
 license: "MIT"


@@ -68,12 +68,12 @@ add_library(freertos_kernel_port OBJECT
 # ARMv8-A ports for GCC
 $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64>:
-    GCC/Arm_AARCH64/port.c
-    GCC/Arm_AARCH64/portASM.S>
+    GCC/ARM_AARCH64/port.c
+    GCC/ARM_AARCH64/portASM.S>
 $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64_SRE>:
-    GCC/Arm_AARCH64_SRE/port.c
-    GCC/Arm_AARCH64_SRE/portASM.S>
+    GCC/ARM_AARCH64_SRE/port.c
+    GCC/ARM_AARCH64_SRE/portASM.S>
 # ARMv6-M port for GCC
 $<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CM0>:


@@ -92,16 +92,16 @@
 /**
 * @brief Checks whether an external index is valid or not.
 */
-#define IS_EXTERNAL_INDEX_VALID( lIndex ) \
-    ( ( ( lIndex ) >= INDEX_OFFSET ) && \
-      ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE + INDEX_OFFSET ) ) )
+#define IS_EXTERNAL_INDEX_VALID( lIndex ) \
+    ( ( ( ( lIndex ) >= INDEX_OFFSET ) && \
+        ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE + INDEX_OFFSET ) ) ) ? pdTRUE : pdFALSE )
 /**
 * @brief Checks whether an internal index is valid or not.
 */
-#define IS_INTERNAL_INDEX_VALID( lIndex ) \
-    ( ( ( lIndex ) >= 0 ) && \
-      ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE ) ) )
+#define IS_INTERNAL_INDEX_VALID( lIndex ) \
+    ( ( ( ( lIndex ) >= 0 ) && \
+        ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE ) ) ) ? pdTRUE : pdFALSE )
 /**
 * @brief Converts an internal index into external.
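The `? pdTRUE : pdFALSE` form makes each macro evaluate to a definite BaseType_t boolean rather than the bare int result of a logical expression, so callers can compare the result against pdTRUE directly; reading this as a MISRA-style essential-type cleanup is my inference, not stated in the commit. A self-contained sketch of the pattern, with hypothetical pool values:

    typedef long BaseType_t;
    #define pdFALSE                                    ( ( BaseType_t ) 0 )
    #define pdTRUE                                     ( ( BaseType_t ) 1 )
    #define INDEX_OFFSET                               1
    #define configPROTECTED_KERNEL_OBJECT_POOL_SIZE    10

    #define IS_INTERNAL_INDEX_VALID( lIndex ) \
        ( ( ( ( lIndex ) >= 0 ) && \
            ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE ) ) ) ? pdTRUE : pdFALSE )

    BaseType_t xCheck( long lIndex )
    {
        /* The result now has a definite BaseType_t value (pdTRUE/pdFALSE)
         * rather than the plain int 0/1 of the bare logical expression. */
        return IS_INTERNAL_INDEX_VALID( lIndex );
    }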
@@ -2197,7 +2197,7 @@
     if( ( !( ( pvItemToQueue == NULL ) && ( uxQueueItemSize != ( UBaseType_t ) 0U ) ) ) &&
         ( !( ( xCopyPosition == queueOVERWRITE ) && ( uxQueueLength != ( UBaseType_t ) 1U ) ) )
         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
-            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0U ) ) )
         #endif
         )
     {
@@ -2312,7 +2312,7 @@
     if( ( !( ( ( pvBuffer ) == NULL ) && ( uxQueueItemSize != ( UBaseType_t ) 0U ) ) )
         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
-            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0U ) ) )
         #endif
         )
     {
@@ -2364,7 +2364,7 @@
     if( ( !( ( ( pvBuffer ) == NULL ) && ( uxQueueItemSize != ( UBaseType_t ) 0U ) ) )
         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
-            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0U ) ) )
         #endif
         )
     {
@@ -2411,9 +2411,9 @@
 {
     uxQueueItemSize = uxQueueGetQueueItemSize( xInternalQueueHandle );
-    if( ( uxQueueItemSize == 0 )
+    if( ( uxQueueItemSize == 0U )
         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
-            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0U ) ) )
         #endif
         )
     {
@@ -3906,10 +3906,10 @@
 if( xAreParamsReadable == pdTRUE )
 {
-    if( ( ( pxParams->uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ) &&
-        ( pxParams->uxBitsToWaitFor != 0 )
+    if( ( ( pxParams->uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0U ) &&
+        ( pxParams->uxBitsToWaitFor != 0U )
         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
-            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( pxParams->xTicksToWait != 0 ) ) )
+            && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( pxParams->xTicksToWait != 0U ) ) )
         #endif
         )
     {
@@ -3951,7 +3951,7 @@
 int32_t lIndex;
 BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
-if( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 )
+if( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0U )
 {
     lIndex = ( int32_t ) xEventGroup;
@@ -3986,7 +3986,7 @@
 int32_t lIndex;
 BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
-if( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 )
+if( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0U )
 {
     lIndex = ( int32_t ) xEventGroup;
@@ -4025,10 +4025,10 @@
 int32_t lIndex;
 BaseType_t xCallingTaskIsAuthorizedToAccessEventGroup = pdFALSE;
-if( ( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ) &&
-    ( uxBitsToWaitFor != 0 )
+if( ( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0U ) &&
+    ( uxBitsToWaitFor != 0U )
     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
-        && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
+        && ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0U ) ) )
     #endif
     )
 {
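The repeated `0` → `0U` edits in this file compare values of unsigned kernel types (UBaseType_t, EventBits_t, TickType_t) against an unsigned literal; reading this as a MISRA C 2012 essential-type cleanup (Rule 10.4) is my inference. A minimal sketch of the rule:

    typedef unsigned long UBaseType_t;

    /* Hypothetical illustration: uxValue is unsigned, so the literal in the
     * comparison carries the U suffix, keeping both operands of == in the
     * same essential type category. */
    long lIsZero( UBaseType_t uxValue )
    {
        return ( uxValue == 0U ) ? 1 : 0;
    }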


@@ -13,11 +13,11 @@ the T32 and A32 instruction sets. Follow the
 [link](https://developer.arm.com/Architectures/A-Profile%20Architecture)
 for more information.
-## Arm_AARCH64 port
+## ARM_AARCH64 port
 This port adds support for Armv8-A architecture AArch64 execution state.
 This port is generic and can be used as a starting point for Armv8-A
 application processors.
-* Arm_AARCH64
+* ARM_AARCH64
 * Memory mapped interface to access Arm GIC registers


@@ -13,11 +13,11 @@ the T32 and A32 instruction sets. Follow the
 [link](https://developer.arm.com/Architectures/A-Profile%20Architecture)
 for more information.
-## Arm_AARCH64_SRE port
+## ARM_AARCH64_SRE port
 This port adds support for Armv8-A architecture AArch64 execution state.
 This port is generic and can be used as a starting point for Armv8-A
 application processors.
-* Arm_AARCH64_SRE
+* ARM_AARCH64_SRE
 * System Register interface to access Arm GIC registers


@@ -8,8 +8,8 @@ Arm Cortex-A53 processor.
 This port is generic and can be used as a starting point for other Armv8-A
 application processors. Therefore, the port `ARM_CA53_64_BIT` is renamed as
-`Arm_AARCH64`. The existing projects that use old port `ARM_CA53_64_BIT`,
-should migrate to renamed port `Arm_AARCH64`.
+`ARM_AARCH64`. The existing projects that use old port `ARM_CA53_64_BIT`,
+should migrate to renamed port `ARM_AARCH64`.
 **NOTE**


@@ -7,9 +7,9 @@ Arm Cortex-A53 processor.
 * System Register interface to access Arm GIC registers
 This port is generic and can be used as a starting point for other Armv8-A
-application processors. Therefore, the port `Arm_AARCH64_SRE` is renamed as
-`Arm_AARCH64_SRE`. The existing projects that use old port `Arm_AARCH64_SRE`,
-should migrate to renamed port `Arm_AARCH64_SRE`.
+application processors. Therefore, the port `ARM_AARCH64_SRE` is renamed as
+`ARM_AARCH64_SRE`. The existing projects that use old port `ARM_AARCH64_SRE`,
+should migrate to renamed port `ARM_AARCH64_SRE`.
 **NOTE**


@@ -1,5 +1,5 @@
 /*
- * FreeRTOS Kernel V10.4.3
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
 *


@@ -1,5 +1,5 @@
 /*
- * FreeRTOS Kernel V10.4.3
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
 *


@@ -1,5 +1,5 @@
 /*
- * FreeRTOS Kernel V10.4.3
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
 *


@@ -1,5 +1,5 @@
 /*
- * FreeRTOS Kernel V10.4.3
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
 *


@@ -7,7 +7,7 @@
 */
 /*
- * FreeRTOS Kernel V10.4.3
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of


@@ -8,7 +8,7 @@
 */
 /*
- * FreeRTOS Kernel V10.4.3
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of


@@ -8,7 +8,7 @@
 */
 /*
- * FreeRTOS Kernel V10.4.3
+ * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of

tasks.c

@@ -2191,6 +2191,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 {
     TCB_t * pxTCB;
     BaseType_t xDeleteTCBInIdleTask = pdFALSE;
+    BaseType_t xTaskIsRunningOrYielding;
     traceENTER_vTaskDelete( xTaskToDelete );
@@ -2226,10 +2227,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
      * not return. */
     uxTaskNumber++;
+    /* Use temp variable as distinct sequence points for reading volatile
+     * variables prior to a logical operator to ensure compliance with
+     * MISRA C 2012 Rule 13.5. */
+    xTaskIsRunningOrYielding = taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB );
     /* If the task is running (or yielding), we must add it to the
      * termination list so that an idle task can delete it when it is
      * no longer running. */
-    if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE ) )
+    if( ( xSchedulerRunning != pdFALSE ) && ( xTaskIsRunningOrYielding != pdFALSE ) )
     {
         /* A running task or a task which is scheduled to yield is being
          * deleted. This cannot complete when the task is still running
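For readers unfamiliar with Rule 13.5: the right-hand operand of && or || must be free of persistent side effects, and a read of a volatile object counts as one. Pulling the volatile read into a local gives it its own sequence point. A minimal sketch (hypothetical variables, not kernel code):

    volatile long xSchedulerRunning = 1;
    volatile long xTaskRunState = 1;

    void vExample( void )
    {
        long xIsRunning;

        /* The volatile read happens here, at its own sequence point... */
        xIsRunning = xTaskRunState;

        /* ...so the right-hand operand of && below has no side effects. */
        if( ( xSchedulerRunning != 0 ) && ( xIsRunning != 0 ) )
        {
            /* Safe to act on the sampled state. */
        }
    }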
@@ -2261,6 +2267,30 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     #else
         portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
     #endif
+
+    /* In the case of SMP, it is possible that the task being deleted
+     * is running on another core. We must evict the task before
+     * exiting the critical section to ensure that the task cannot
+     * take an action which puts it back on ready/state/event list,
+     * thereby nullifying the delete operation. Once evicted, the
+     * task won't be scheduled ever as it will no longer be on the
+     * ready list. */
+    #if ( configNUMBER_OF_CORES > 1 )
+    {
+        if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
+        {
+            if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
+            {
+                configASSERT( uxSchedulerSuspended == 0 );
+                taskYIELD_WITHIN_API();
+            }
+            else
+            {
+                prvYieldCore( pxTCB->xTaskRunState );
+            }
+        }
+    }
+    #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 }
 else
 {
@@ -2284,9 +2314,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     /* Force a reschedule if it is the currently running task that has just
      * been deleted. */
-    if( xSchedulerRunning != pdFALSE )
+    #if ( configNUMBER_OF_CORES == 1 )
     {
-        #if ( configNUMBER_OF_CORES == 1 )
+        if( xSchedulerRunning != pdFALSE )
         {
             if( pxTCB == pxCurrentTCB )
             {
@@ -2298,30 +2328,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
-        {
-            /* It is important to use critical section here because
-             * checking run state of a task must be done inside a
-             * critical section. */
-            taskENTER_CRITICAL();
-            {
-                if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
-                {
-                    if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
-                    {
-                        configASSERT( uxSchedulerSuspended == 0 );
-                        taskYIELD_WITHIN_API();
-                    }
-                    else
-                    {
-                        prvYieldCore( pxTCB->xTaskRunState );
-                    }
-                }
-            }
-            taskEXIT_CRITICAL();
-        }
-        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
     }
+    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
     traceRETURN_vTaskDelete();
 }
@@ -3155,26 +3163,68 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             }
         }
         #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+
+        /* In the case of SMP, it is possible that the task being suspended
+         * is running on another core. We must evict the task before
+         * exiting the critical section to ensure that the task cannot
+         * take an action which puts it back on ready/state/event list,
+         * thereby nullifying the suspend operation. Once evicted, the
+         * task won't be scheduled before it is resumed as it will no longer
+         * be on the ready list. */
+        #if ( configNUMBER_OF_CORES > 1 )
+        {
+            if( xSchedulerRunning != pdFALSE )
+            {
+                /* Reset the next expected unblock time in case it referred to the
+                 * task that is now in the Suspended state. */
+                prvResetNextTaskUnblockTime();
+
+                if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
+                {
+                    if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
+                    {
+                        /* The current task has just been suspended. */
+                        configASSERT( uxSchedulerSuspended == 0 );
+                        vTaskYieldWithinAPI();
+                    }
+                    else
+                    {
+                        prvYieldCore( pxTCB->xTaskRunState );
+                    }
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
     }
     taskEXIT_CRITICAL();
-
-    if( xSchedulerRunning != pdFALSE )
-    {
-        /* Reset the next expected unblock time in case it referred to the
-         * task that is now in the Suspended state. */
-        taskENTER_CRITICAL();
-        {
-            prvResetNextTaskUnblockTime();
-        }
-        taskEXIT_CRITICAL();
-    }
-    else
-    {
-        mtCOVERAGE_TEST_MARKER();
-    }
+
     #if ( configNUMBER_OF_CORES == 1 )
     {
+        UBaseType_t uxCurrentListLength;
+
+        if( xSchedulerRunning != pdFALSE )
+        {
+            /* Reset the next expected unblock time in case it referred to the
+             * task that is now in the Suspended state. */
+            taskENTER_CRITICAL();
+            {
+                prvResetNextTaskUnblockTime();
+            }
+            taskEXIT_CRITICAL();
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
         if( pxTCB == pxCurrentTCB )
         {
             if( xSchedulerRunning != pdFALSE )
@@ -3188,7 +3238,13 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 /* The scheduler is not running, but the task that was pointed
                  * to by pxCurrentTCB has just been suspended and pxCurrentTCB
                  * must be adjusted to point to a different task. */
-                if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
+
+                /* Use a temp variable as a distinct sequence point for reading
+                 * volatile variables prior to a comparison to ensure compliance
+                 * with MISRA C 2012 Rule 13.2. */
+                uxCurrentListLength = listCURRENT_LIST_LENGTH( &xSuspendedTaskList );
+
+                if( uxCurrentListLength == uxCurrentNumberOfTasks )
                 {
                     /* No other tasks are ready, so set pxCurrentTCB back to
                      * NULL so when the next task is created pxCurrentTCB will
@@ -3207,43 +3263,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            #else /* #if ( configNUMBER_OF_CORES == 1 ) */
-            {
-                /* Enter critical section here to check run state of a task. */
-                taskENTER_CRITICAL();
-                {
-                    if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
-                    {
-                        if( xSchedulerRunning != pdFALSE )
-                        {
-                            if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
-                            {
-                                /* The current task has just been suspended. */
-                                configASSERT( uxSchedulerSuspended == 0 );
-                                vTaskYieldWithinAPI();
-                            }
-                            else
-                            {
-                                prvYieldCore( pxTCB->xTaskRunState );
-                            }
-                        }
-                        else
-                        {
-                            /* This code path is not possible because only Idle tasks are
-                             * assigned a core before the scheduler is started ( i.e.
-                             * taskTASK_IS_RUNNING is only true for idle tasks before
-                             * the scheduler is started ) and idle tasks cannot be
-                             * suspended. */
-                            mtCOVERAGE_TEST_MARKER();
-                        }
-                    }
-                    else
-                    {
-                        mtCOVERAGE_TEST_MARKER();
-                    }
-                }
-                taskEXIT_CRITICAL();
-            }
     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
     traceRETURN_vTaskSuspend();
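Rule 13.2 (the order of volatile accesses between sequence points must not matter) is the cited motivation for the uxCurrentListLength temporary above. A minimal sketch of the same pattern, with hypothetical stand-ins for the kernel's list macro and volatile counter:

    typedef unsigned long UBaseType_t;

    volatile UBaseType_t uxCurrentNumberOfTasks = 0;
    volatile UBaseType_t uxSuspendedListLength = 0; /* stand-in for listCURRENT_LIST_LENGTH() */

    void vExample( void )
    {
        UBaseType_t uxCurrentListLength;

        /* Read one volatile object into a local first, so the == below does
         * not contain two volatile reads whose relative order is unspecified. */
        uxCurrentListLength = uxSuspendedListLength;

        if( uxCurrentListLength == uxCurrentNumberOfTasks )
        {
            /* Every remaining task is in the Suspended state. */
        }
    }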
@@ -3811,6 +3830,9 @@ void vTaskSuspendAll( void )
     if( xSchedulerRunning != pdFALSE )
     {
+        /* This must never be called from inside a critical section. */
+        configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 );
+
         /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
          * We must disable interrupts before we grab the locks in the event that this task is
          * interrupted and switches context before incrementing uxSchedulerSuspended.
@@ -3829,14 +3851,7 @@ void vTaskSuspendAll( void )
          * it. */
         if( uxSchedulerSuspended == 0U )
         {
-            if( portGET_CRITICAL_NESTING_COUNT() == 0U )
-            {
-                prvCheckForRunStateChange();
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
+            prvCheckForRunStateChange();
         }
         else
         {
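Taken together, the two vTaskSuspendAll() hunks move the critical-nesting check from a silent runtime branch to an explicit configASSERT at entry. A sketch of the misuse the assert now catches (hypothetical application code, SMP build):

    /* With this commit, calling vTaskSuspendAll() while inside a critical
     * section trips the new assert in a debug build instead of being
     * silently tolerated. */
    void vBadExample( void )
    {
        taskENTER_CRITICAL();
        {
            vTaskSuspendAll(); /* configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 ) fires here. */
            ( void ) xTaskResumeAll();
        }
        taskEXIT_CRITICAL();
    }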
@@ -7665,83 +7680,67 @@ TickType_t uxTaskResetEventItemValue( void )
                                   TickType_t xTicksToWait )
 {
     uint32_t ulReturn;
-    BaseType_t xAlreadyYielded;
+    BaseType_t xAlreadyYielded, xShouldBlock = pdFALSE;
     traceENTER_ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );
     configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
-    taskENTER_CRITICAL();
-
-    /* Only block if the notification count is not already non-zero. */
-    if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
+    /* We suspend the scheduler here as prvAddCurrentTaskToDelayedList is a
+     * non-deterministic operation. */
+    vTaskSuspendAll();
     {
-        /* Mark this task as waiting for a notification. */
-        pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
-
-        if( xTicksToWait > ( TickType_t ) 0 )
+        /* We MUST enter a critical section to atomically check if a notification
+         * has occurred and set the flag to indicate that we are waiting for
+         * a notification. If we do not do so, a notification sent from an ISR
+         * will get lost. */
+        taskENTER_CRITICAL();
         {
-            traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );
-
-            /* We MUST suspend the scheduler before exiting the critical
-             * section (i.e. before enabling interrupts).
-             *
-             * If we do not do so, a notification sent from an ISR, which
-             * happens after exiting the critical section and before
-             * suspending the scheduler, will get lost. The sequence of
-             * events will be:
-             * 1. Exit critical section.
-             * 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
-             *    task to the Ready list.
-             * 3. Suspend scheduler.
-             * 4. prvAddCurrentTaskToDelayedList moves the task to the
-             *    delayed or suspended list.
-             * 5. Resume scheduler does not touch the task (because it is
-             *    not on the pendingReady list), effectively losing the
-             *    notification from the ISR.
-             *
-             * The same does not happen when we suspend the scheduler before
-             * exiting the critical section. The sequence of events in this
-             * case will be:
-             * 1. Suspend scheduler.
-             * 2. Exit critical section.
-             * 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
-             *    task to the pendingReady list as the scheduler is
-             *    suspended.
-             * 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
-             *    suspended list. Note that this operation does not nullify
-             *    the add to pendingReady list done in the above step because
-             *    a different list item, namely xEventListItem, is used for
-             *    adding the task to the pendingReady list. In other words,
-             *    the task still remains on the pendingReady list.
-             * 5. Resume scheduler moves the task from pendingReady list to
-             *    the Ready list.
-             */
-            vTaskSuspendAll();
+            /* Only block if the notification count is not already non-zero. */
+            if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
             {
-                taskEXIT_CRITICAL();
+                /* Mark this task as waiting for a notification. */
+                pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
-                prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
-            }
-            xAlreadyYielded = xTaskResumeAll();
-
-            if( xAlreadyYielded == pdFALSE )
-            {
-                taskYIELD_WITHIN_API();
+                if( xTicksToWait > ( TickType_t ) 0 )
+                {
+                    xShouldBlock = pdTRUE;
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
             }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
             }
         }
+        taskEXIT_CRITICAL();
+
+        /* We are now out of the critical section but the scheduler is still
+         * suspended, so we are safe to do non-deterministic operations such
+         * as prvAddCurrentTaskToDelayedList. */
+        if( xShouldBlock == pdTRUE )
+        {
+            traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );
+            prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+        }
         else
         {
-            taskEXIT_CRITICAL();
             mtCOVERAGE_TEST_MARKER();
         }
     }
-    else
+    xAlreadyYielded = xTaskResumeAll();
+
+    /* Force a reschedule if xTaskResumeAll has not already done so. */
+    if( ( xShouldBlock == pdTRUE ) && ( xAlreadyYielded == pdFALSE ) )
+    {
+        taskYIELD_WITHIN_API();
+    }
+    else
     {
-        taskEXIT_CRITICAL();
         mtCOVERAGE_TEST_MARKER();
     }
     taskENTER_CRITICAL();
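For orientation, the application-facing API is untouched by this restructuring; only the internal ordering changed, so a notification arriving from an ISR between the block decision and the move to the delayed list can no longer be lost. A usage sketch (hypothetical task, standard API):

    #include "FreeRTOS.h"
    #include "task.h"

    /* Hypothetical waiter task: blocks on its notification value, which an
     * ISR "gives" with vTaskNotifyGiveFromISR(). The race this commit closes
     * sits entirely inside ulTaskNotifyTake(); callers need no changes. */
    void vWaiterTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            /* Clear the count on exit; block indefinitely until notified. */
            if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0U )
            {
                /* Handle the event signalled from the ISR. */
            }
        }
    }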
@@ -7785,88 +7784,71 @@ TickType_t uxTaskResetEventItemValue( void )
                                  uint32_t * pulNotificationValue,
                                  TickType_t xTicksToWait )
 {
-    BaseType_t xReturn, xAlreadyYielded;
+    BaseType_t xReturn, xAlreadyYielded, xShouldBlock = pdFALSE;
     traceENTER_xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );
     configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
-    taskENTER_CRITICAL();
-
-    /* Only block if a notification is not already pending. */
-    if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
+    /* We suspend the scheduler here as prvAddCurrentTaskToDelayedList is a
+     * non-deterministic operation. */
+    vTaskSuspendAll();
     {
-        /* Clear bits in the task's notification value as bits may get
-         * set by the notifying task or interrupt. This can be used to
-         * clear the value to zero. */
-        pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;
-
-        /* Mark this task as waiting for a notification. */
-        pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
-
-        if( xTicksToWait > ( TickType_t ) 0 )
+        /* We MUST enter a critical section to atomically check and update the
+         * task notification value. If we do not do so, a notification from
+         * an ISR will get lost. */
+        taskENTER_CRITICAL();
        {
-            traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );
-
-            /* We MUST suspend the scheduler before exiting the critical
-             * section (i.e. before enabling interrupts).
-             *
-             * If we do not do so, a notification sent from an ISR, which
-             * happens after exiting the critical section and before
-             * suspending the scheduler, will get lost. The sequence of
-             * events will be:
-             * 1. Exit critical section.
-             * 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
-             *    task to the Ready list.
-             * 3. Suspend scheduler.
-             * 4. prvAddCurrentTaskToDelayedList moves the task to the
-             *    delayed or suspended list.
-             * 5. Resume scheduler does not touch the task (because it is
-             *    not on the pendingReady list), effectively losing the
-             *    notification from the ISR.
-             *
-             * The same does not happen when we suspend the scheduler before
-             * exiting the critical section. The sequence of events in this
-             * case will be:
-             * 1. Suspend scheduler.
-             * 2. Exit critical section.
-             * 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
-             *    task to the pendingReady list as the scheduler is
-             *    suspended.
-             * 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
-             *    suspended list. Note that this operation does not nullify
-             *    the add to pendingReady list done in the above step because
-             *    a different list item, namely xEventListItem, is used for
-             *    adding the task to the pendingReady list. In other words,
-             *    the task still remains on the pendingReady list.
-             * 5. Resume scheduler moves the task from pendingReady list to
-             *    the Ready list.
-             */
-            vTaskSuspendAll();
+            /* Only block if a notification is not already pending. */
+            if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
             {
-                taskEXIT_CRITICAL();
+                /* Clear bits in the task's notification value as bits may get
+                 * set by the notifying task or interrupt. This can be used
+                 * to clear the value to zero. */
+                pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;
-                prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
-            }
-            xAlreadyYielded = xTaskResumeAll();
+                /* Mark this task as waiting for a notification. */
+                pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
-            if( xAlreadyYielded == pdFALSE )
-            {
-                taskYIELD_WITHIN_API();
+                if( xTicksToWait > ( TickType_t ) 0 )
+                {
+                    xShouldBlock = pdTRUE;
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
             }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
             }
         }
+        taskEXIT_CRITICAL();
+
+        /* We are now out of the critical section but the scheduler is still
+         * suspended, so we are safe to do non-deterministic operations such
+         * as prvAddCurrentTaskToDelayedList. */
+        if( xShouldBlock == pdTRUE )
+        {
+            traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );
+            prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+        }
         else
         {
-            taskEXIT_CRITICAL();
             mtCOVERAGE_TEST_MARKER();
         }
     }
-    else
+    xAlreadyYielded = xTaskResumeAll();
+
+    /* Force a reschedule if xTaskResumeAll has not already done so. */
+    if( ( xShouldBlock == pdTRUE ) && ( xAlreadyYielded == pdFALSE ) )
+    {
+        taskYIELD_WITHIN_API();
+    }
+    else
     {
-        taskEXIT_CRITICAL();
         mtCOVERAGE_TEST_MARKER();
    }
     taskENTER_CRITICAL();