Merge branch 'main' into update-cmm_posix-reinit

Gaurav-Aggarwal-AWS authored on 2024-02-16 11:14:03 +05:30; committed by GitHub
commit f4c7106de2
26 changed files with 254 additions and 258 deletions

@@ -496,6 +496,15 @@
* run any task on any available core. */
#define configUSE_CORE_AFFINITY 0
/* When using SMP with core affinity feature enabled, set
* configTASK_DEFAULT_CORE_AFFINITY to change the default core affinity mask for
* tasks created without an affinity mask specified. Setting the define to 1 would
* make such tasks run on core 0 and setting it to (1 << portGET_CORE_ID()) would
* make such tasks run on the current core. This config value is useful if
* swapping tasks between cores is not supported (e.g. Tricore), or if legacy
* code written with single-core assumptions must be confined to a known core.
* Defaults to tskNO_AFFINITY if left undefined. */
#define configTASK_DEFAULT_CORE_AFFINITY tskNO_AFFINITY
/* When using SMP (i.e. configNUMBER_OF_CORES is greater than one), if
* configUSE_TASK_PREEMPTION_DISABLE is set to 1, individual tasks can be set to
* either pre-emptive or co-operative mode using the vTaskPreemptionDisable and
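An illustrative FreeRTOSConfig.h fragment (not part of this commit; the core count and mask are assumptions for the sketch) showing the new option pinning unpinned tasks to core 0 in an SMP build:

#define configNUMBER_OF_CORES                2
#define configUSE_CORE_AFFINITY              1
/* Bit 0 set: tasks created without an explicit affinity
 * mask may only run on core 0. */
#define configTASK_DEFAULT_CORE_AFFINITY     ( 1 << 0 )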

@@ -484,6 +484,12 @@
#define configUSE_CORE_AFFINITY 0
#endif /* configUSE_CORE_AFFINITY */
#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
#ifndef configTASK_DEFAULT_CORE_AFFINITY
#define configTASK_DEFAULT_CORE_AFFINITY tskNO_AFFINITY
#endif
#endif
#ifndef configUSE_PASSIVE_IDLE_HOOK
#define configUSE_PASSIVE_IDLE_HOOK 0
#endif /* configUSE_PASSIVE_IDLE_HOOK */
@@ -2799,9 +2805,9 @@
#ifndef configSTACK_DEPTH_TYPE
/* Defaults to uint16_t for backward compatibility, but can be overridden
* in FreeRTOSConfig.h if uint16_t is too restrictive. */
#define configSTACK_DEPTH_TYPE uint16_t
/* Defaults to StackType_t for backward compatibility, but can be overridden
* in FreeRTOSConfig.h if StackType_t is too restrictive. */
#define configSTACK_DEPTH_TYPE StackType_t
#endif
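An illustrative FreeRTOSConfig.h override (not part of this commit) for an application whose task stacks no longer fit the old uint16_t default:

/* Allow stack depths larger than 65535 StackType_t words. */
#define configSTACK_DEPTH_TYPE    uint32_t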
#ifndef configRUN_TIME_COUNTER_TYPE

@@ -326,7 +326,7 @@ typedef struct xLIST
} \
\
( pxItemToRemove )->pxContainer = NULL; \
( pxList->uxNumberOfItems )--; \
( ( pxList )->uxNumberOfItems ) -= ( UBaseType_t ) 1U; \
} while( 0 )
/*
@@ -363,17 +363,17 @@ typedef struct xLIST
\
/* Insert a new list item into ( pxList ), but rather than sort the list, \
* makes the new list item the last item to be removed by a call to \
* listGET_OWNER_OF_NEXT_ENTRY(). */ \
( pxNewListItem )->pxNext = pxIndex; \
( pxNewListItem )->pxPrevious = pxIndex->pxPrevious; \
\
pxIndex->pxPrevious->pxNext = ( pxNewListItem ); \
pxIndex->pxPrevious = ( pxNewListItem ); \
\
/* Remember which list the item is in. */ \
( pxNewListItem )->pxContainer = ( pxList ); \
\
( ( pxList )->uxNumberOfItems )++; \
* listGET_OWNER_OF_NEXT_ENTRY(). */ \
( pxNewListItem )->pxNext = pxIndex; \
( pxNewListItem )->pxPrevious = pxIndex->pxPrevious; \
\
pxIndex->pxPrevious->pxNext = ( pxNewListItem ); \
pxIndex->pxPrevious = ( pxNewListItem ); \
\
/* Remember which list the item is in. */ \
( pxNewListItem )->pxContainer = ( pxList ); \
\
( ( pxList )->uxNumberOfItems ) += ( UBaseType_t ) 1U; \
} while( 0 )
/*
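The switch from ++/-- to explicit compound assignment, here and in list.c below, keeps the counter update free of embedded side effects and makes the operand's unsigned type explicit; this appears aimed at MISRA C 2012 compliance. The pattern in isolation:

UBaseType_t uxItems = 0U;

uxItems += ( UBaseType_t ) 1U;   /* instead of uxItems++ */
uxItems -= ( UBaseType_t ) 1U;   /* instead of uxItems-- */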

@@ -795,7 +795,7 @@ typedef StreamBufferHandle_t MessageBufferHandle_t;
* \ingroup MessageBufferManagement
*/
#define xMessageBufferNextLengthBytes( xMessageBuffer ) \
xStreamBufferNextMessageLengthBytes( xMessageBuffer ) PRIVILEGED_FUNCTION;
xStreamBufferNextMessageLengthBytes( xMessageBuffer )
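The trailing PRIVILEGED_FUNCTION; in the old macro body was pasted into every expansion, so the macro could not be used as an expression. A sketch with a hypothetical handle xMsgBuf:

/* With the old macro (PRIVILEGED_FUNCTION defined empty) this expanded to
 *   if( xStreamBufferNextMessageLengthBytes( xMsgBuf ) ; > 0 )
 * which is a syntax error; with the fix it is a plain expression. */
if( xMessageBufferNextLengthBytes( xMsgBuf ) > 0 )
{
    /* A complete message is waiting to be read. */
}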
/**
* message_buffer.h

@@ -53,10 +53,10 @@
* The tskKERNEL_VERSION_MAJOR, tskKERNEL_VERSION_MINOR, tskKERNEL_VERSION_BUILD
* values will reflect the last released version number.
*/
#define tskKERNEL_VERSION_NUMBER "V10.4.4+"
#define tskKERNEL_VERSION_MAJOR 10
#define tskKERNEL_VERSION_MINOR 4
#define tskKERNEL_VERSION_BUILD 4
#define tskKERNEL_VERSION_NUMBER "V11.0.1+"
#define tskKERNEL_VERSION_MAJOR 11
#define tskKERNEL_VERSION_MINOR 0
#define tskKERNEL_VERSION_BUILD 1
/* MPU region parameters passed in ulParameters
* of MemoryRegion_t struct. */
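The split major/minor/build macros allow compile-time gating on the kernel version; an illustrative guard (the threshold is an assumption for the example):

#if ( tskKERNEL_VERSION_MAJOR < 11 )
    #error This application expects FreeRTOS kernel V11 or later.
#endif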

list.c (9 changed lines)

@@ -130,7 +130,7 @@ void vListInsertEnd( List_t * const pxList,
/* Remember which list the item is in. */
pxNewListItem->pxContainer = pxList;
( pxList->uxNumberOfItems )++;
( pxList->uxNumberOfItems ) += ( UBaseType_t ) 1U;
traceRETURN_vListInsertEnd();
}
@@ -205,12 +205,13 @@ void vListInsert( List_t * const pxList,
* item later. */
pxNewListItem->pxContainer = pxList;
( pxList->uxNumberOfItems )++;
( pxList->uxNumberOfItems ) += ( UBaseType_t ) 1U;
traceRETURN_vListInsert();
}
/*-----------------------------------------------------------*/
UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
{
/* The list item knows which list it is in. Obtain the list from the list
@@ -219,8 +220,6 @@ UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
traceENTER_uxListRemove( pxItemToRemove );
pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious;
pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext;
@@ -238,7 +237,7 @@ UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
}
pxItemToRemove->pxContainer = NULL;
( pxList->uxNumberOfItems )--;
( pxList->uxNumberOfItems ) -= ( UBaseType_t ) 1U;
traceRETURN_uxListRemove( pxList->uxNumberOfItems );

@@ -1,4 +1,4 @@
name : "FreeRTOS-Kernel"
version: "v10.5.1"
version: "v11.0.1+"
description: "FreeRTOS Kernel."
license: "MIT"

@@ -68,12 +68,12 @@ add_library(freertos_kernel_port OBJECT
# ARMv8-A ports for GCC
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64>:
GCC/Arm_AARCH64/port.c
GCC/Arm_AARCH64/portASM.S>
GCC/ARM_AARCH64/port.c
GCC/ARM_AARCH64/portASM.S>
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_AARCH64_SRE>:
GCC/Arm_AARCH64_SRE/port.c
GCC/Arm_AARCH64_SRE/portASM.S>
GCC/ARM_AARCH64_SRE/port.c
GCC/ARM_AARCH64_SRE/portASM.S>
# ARMv6-M port for GCC
$<$<STREQUAL:${FREERTOS_PORT},GCC_ARM_CM0>:

@@ -13,11 +13,11 @@ the T32 and A32 instruction sets. Follow the
[link](https://developer.arm.com/Architectures/A-Profile%20Architecture)
for more information.
## Arm_AARCH64 port
## ARM_AARCH64 port
This port adds support for Armv8-A architecture AArch64 execution state.
This port is generic and can be used as a starting point for Armv8-A
application processors.
* Arm_AARCH64
* ARM_AARCH64
* Memory mapped interface to access Arm GIC registers

@@ -13,11 +13,11 @@ the T32 and A32 instruction sets. Follow the
[link](https://developer.arm.com/Architectures/A-Profile%20Architecture)
for more information.
## Arm_AARCH64_SRE port
## ARM_AARCH64_SRE port
This port adds support for Armv8-A architecture AArch64 execution state.
This port is generic and can be used as a starting point for Armv8-A
application processors.
* Arm_AARCH64_SRE
* ARM_AARCH64_SRE
* System Register interface to access Arm GIC registers

@@ -8,8 +8,8 @@ Arm Cortex-A53 processor.
This port is generic and can be used as a starting point for other Armv8-A
application processors. Therefore, the port `ARM_CA53_64_BIT` is renamed as
`Arm_AARCH64`. The existing projects that use old port `ARM_CA53_64_BIT`,
should migrate to renamed port `Arm_AARCH64`.
`ARM_AARCH64`. Existing projects that use the old port `ARM_CA53_64_BIT`
should migrate to the renamed port `ARM_AARCH64`.
**NOTE**

@@ -7,9 +7,9 @@ Arm Cortex-A53 processor.
* System Register interface to access Arm GIC registers
This port is generic and can be used as a starting point for other Armv8-A
application processors. Therefore, the port `ARM_CA53_64_BIT_SRE` is renamed as
`Arm_AARCH64_SRE`. The existing projects that use old port `ARM_CA53_64_BIT_SRE`,
should migrate to renamed port `Arm_AARCH64_SRE`.
application processors. Therefore, the port `ARM_CA53_64_BIT_SRE` is renamed as
`ARM_AARCH64_SRE`. Existing projects that use the old port `ARM_CA53_64_BIT_SRE`
should migrate to the renamed port `ARM_AARCH64_SRE`.
**NOTE**

@@ -1,5 +1,5 @@
/*
* FreeRTOS Kernel V10.4.3
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
*

@@ -1,5 +1,5 @@
/*
* FreeRTOS Kernel V10.4.3
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
*

@@ -1,5 +1,5 @@
/*
* FreeRTOS Kernel V10.4.3
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
*

@@ -1,5 +1,5 @@
/*
* FreeRTOS Kernel V10.4.3
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
*

@@ -7,7 +7,7 @@
*/
/*
* FreeRTOS Kernel V10.4.3
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of

@@ -8,7 +8,7 @@
*/
/*
* FreeRTOS Kernel V10.4.3
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of

@@ -8,7 +8,7 @@
*/
/*
* FreeRTOS Kernel V10.4.3
* FreeRTOS Kernel <DEVELOPMENT BRANCH>
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of

tasks.c (406 changed lines)

@@ -255,7 +255,7 @@
pxTemp = pxDelayedTaskList; \
pxDelayedTaskList = pxOverflowDelayedTaskList; \
pxOverflowDelayedTaskList = pxTemp; \
xNumOfOverflows++; \
xNumOfOverflows += ( BaseType_t ) 1; \
prvResetNextTaskUnblockTime(); \
} while( 0 )
@@ -1325,7 +1325,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
{
/* Set the task's affinity before scheduling it. */
pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
}
#endif
@@ -1442,7 +1442,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
{
/* Set the task's affinity before scheduling it. */
pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
}
#endif
@@ -1560,7 +1560,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
{
/* Set the task's affinity before scheduling it. */
pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
}
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
@@ -1733,7 +1733,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
{
/* Set the task's affinity before scheduling it. */
pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
pxNewTCB->uxCoreAffinityMask = configTASK_DEFAULT_CORE_AFFINITY;
}
#endif
@@ -2021,7 +2021,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
* updated. */
taskENTER_CRITICAL();
{
uxCurrentNumberOfTasks++;
uxCurrentNumberOfTasks += ( UBaseType_t ) 1U;
if( pxCurrentTCB == NULL )
{
@@ -2191,6 +2191,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
{
TCB_t * pxTCB;
BaseType_t xDeleteTCBInIdleTask = pdFALSE;
BaseType_t xTaskIsRunningOrYielding;
traceENTER_vTaskDelete( xTaskToDelete );
@@ -2226,10 +2227,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
* not return. */
uxTaskNumber++;
/* Use temp variable as distinct sequence points for reading volatile
* variables prior to a logical operator to ensure compliance with
* MISRA C 2012 Rule 13.5. */
xTaskIsRunningOrYielding = taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB );
/* If the task is running (or yielding), we must add it to the
* termination list so that an idle task can delete it when it is
* no longer running. */
if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE ) )
if( ( xSchedulerRunning != pdFALSE ) && ( xTaskIsRunningOrYielding != pdFALSE ) )
{
/* A running task or a task which is scheduled to yield is being
* deleted. This cannot complete when the task is still running
@@ -2261,6 +2267,30 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
#else
portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
#endif
/* In the case of SMP, it is possible that the task being deleted
* is running on another core. We must evict the task before
* exiting the critical section to ensure that the task cannot
* take an action which puts it back on ready/state/event list,
* thereby nullifying the delete operation. Once evicted, the
* task won't be scheduled ever as it will no longer be on the
* ready list. */
#if ( configNUMBER_OF_CORES > 1 )
{
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
{
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
{
configASSERT( uxSchedulerSuspended == 0 );
taskYIELD_WITHIN_API();
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
}
}
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
}
else
{
@@ -2284,9 +2314,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
/* Force a reschedule if it is the currently running task that has just
* been deleted. */
if( xSchedulerRunning != pdFALSE )
#if ( configNUMBER_OF_CORES == 1 )
{
#if ( configNUMBER_OF_CORES == 1 )
if( xSchedulerRunning != pdFALSE )
{
if( pxTCB == pxCurrentTCB )
{
@@ -2298,30 +2328,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
mtCOVERAGE_TEST_MARKER();
}
}
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
{
/* It is important to use critical section here because
* checking run state of a task must be done inside a
* critical section. */
taskENTER_CRITICAL();
{
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
{
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
{
configASSERT( uxSchedulerSuspended == 0 );
taskYIELD_WITHIN_API();
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
}
}
}
taskEXIT_CRITICAL();
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
traceRETURN_vTaskDelete();
}
@@ -3155,26 +3163,68 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
}
}
#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
/* In the case of SMP, it is possible that the task being suspended
* is running on another core. We must evict the task before
* exiting the critical section to ensure that the task cannot
* take an action which puts it back on ready/state/event list,
* thereby nullifying the suspend operation. Once evicted, the
* task won't be scheduled before it is resumed as it will no longer
* be on the ready list. */
#if ( configNUMBER_OF_CORES > 1 )
{
if( xSchedulerRunning != pdFALSE )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
prvResetNextTaskUnblockTime();
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
{
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
{
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended == 0 );
vTaskYieldWithinAPI();
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
}
taskEXIT_CRITICAL();
if( xSchedulerRunning != pdFALSE )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
taskENTER_CRITICAL();
{
prvResetNextTaskUnblockTime();
}
taskEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#if ( configNUMBER_OF_CORES == 1 )
{
UBaseType_t uxCurrentListLength;
if( xSchedulerRunning != pdFALSE )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
taskENTER_CRITICAL();
{
prvResetNextTaskUnblockTime();
}
taskEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( pxTCB == pxCurrentTCB )
{
if( xSchedulerRunning != pdFALSE )
@@ -3188,7 +3238,13 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
/* The scheduler is not running, but the task that was pointed
* to by pxCurrentTCB has just been suspended and pxCurrentTCB
* must be adjusted to point to a different task. */
if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
/* Use a temp variable as a distinct sequence point for reading
* volatile variables prior to a comparison to ensure compliance
* with MISRA C 2012 Rule 13.2. */
uxCurrentListLength = listCURRENT_LIST_LENGTH( &xSuspendedTaskList );
if( uxCurrentListLength == uxCurrentNumberOfTasks )
{
/* No other tasks are ready, so set pxCurrentTCB back to
* NULL so when the next task is created pxCurrentTCB will
@@ -3207,43 +3263,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
mtCOVERAGE_TEST_MARKER();
}
}
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
{
/* Enter critical section here to check run state of a task. */
taskENTER_CRITICAL();
{
if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
{
if( xSchedulerRunning != pdFALSE )
{
if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
{
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended == 0 );
vTaskYieldWithinAPI();
}
else
{
prvYieldCore( pxTCB->xTaskRunState );
}
}
else
{
/* This code path is not possible because only Idle tasks are
* assigned a core before the scheduler is started ( i.e.
* taskTASK_IS_RUNNING is only true for idle tasks before
* the scheduler is started ) and idle tasks cannot be
* suspended. */
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
}
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
traceRETURN_vTaskSuspend();
@@ -3796,7 +3815,7 @@ void vTaskSuspendAll( void )
/* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
* is used to allow calls to vTaskSuspendAll() to nest. */
++uxSchedulerSuspended;
uxSchedulerSuspended += ( UBaseType_t ) 1U;
/* Enforces ordering for ports and optimised compilers that may otherwise place
* the above increment elsewhere. */
@@ -3811,6 +3830,9 @@ void vTaskSuspendAll( void )
if( xSchedulerRunning != pdFALSE )
{
/* This must never be called from inside a critical section. */
configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 );
/* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
* We must disable interrupts before we grab the locks in the event that this task is
* interrupted and switches context before incrementing uxSchedulerSuspended.
@@ -3829,14 +3851,7 @@ void vTaskSuspendAll( void )
* it. */
if( uxSchedulerSuspended == 0U )
{
if( portGET_CRITICAL_NESTING_COUNT() == 0U )
{
prvCheckForRunStateChange();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
prvCheckForRunStateChange();
}
else
{
@@ -3953,7 +3968,7 @@ BaseType_t xTaskResumeAll( void )
* previous call to vTaskSuspendAll(). */
configASSERT( uxSchedulerSuspended != 0U );
--uxSchedulerSuspended;
uxSchedulerSuspended -= ( UBaseType_t ) 1U;
portRELEASE_TASK_LOCK();
if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
@@ -4953,7 +4968,7 @@ BaseType_t xTaskIncrementTick( void )
}
else
{
++xPendedTicks;
xPendedTicks += 1U;
/* The tick hook gets called at regular intervals, even if the
* scheduler is locked. */
@@ -7665,83 +7680,67 @@ TickType_t uxTaskResetEventItemValue( void )
TickType_t xTicksToWait )
{
uint32_t ulReturn;
BaseType_t xAlreadyYielded;
BaseType_t xAlreadyYielded, xShouldBlock = pdFALSE;
traceENTER_ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );
configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
taskENTER_CRITICAL();
/* Only block if the notification count is not already non-zero. */
if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
/* We suspend the scheduler here as prvAddCurrentTaskToDelayedList is a
* non-deterministic operation. */
vTaskSuspendAll();
{
/* Mark this task as waiting for a notification. */
pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
if( xTicksToWait > ( TickType_t ) 0 )
/* We MUST enter a critical section to atomically check if a notification
* has occurred and set the flag to indicate that we are waiting for
* a notification. If we do not do so, a notification sent from an ISR
* will get lost. */
taskENTER_CRITICAL();
{
traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );
/* We MUST suspend the scheduler before exiting the critical
* section (i.e. before enabling interrupts).
*
* If we do not do so, a notification sent from an ISR, which
* happens after exiting the critical section and before
* suspending the scheduler, will get lost. The sequence of
* events will be:
* 1. Exit critical section.
* 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
* task to the Ready list.
* 3. Suspend scheduler.
* 4. prvAddCurrentTaskToDelayedList moves the task to the
* delayed or suspended list.
* 5. Resume scheduler does not touch the task (because it is
* not on the pendingReady list), effectively losing the
* notification from the ISR.
*
* The same does not happen when we suspend the scheduler before
* exiting the critical section. The sequence of events in this
* case will be:
* 1. Suspend scheduler.
* 2. Exit critical section.
* 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
* task to the pendingReady list as the scheduler is
* suspended.
* 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
* suspended list. Note that this operation does not nullify
* the add to pendingReady list done in the above step because
* a different list item, namely xEventListItem, is used for
* adding the task to the pendingReady list. In other words,
* the task still remains on the pendingReady list.
* 5. Resume scheduler moves the task from pendingReady list to
* the Ready list.
*/
vTaskSuspendAll();
/* Only block if the notification count is not already non-zero. */
if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
{
taskEXIT_CRITICAL();
/* Mark this task as waiting for a notification. */
pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
xAlreadyYielded = xTaskResumeAll();
if( xAlreadyYielded == pdFALSE )
{
taskYIELD_WITHIN_API();
if( xTicksToWait > ( TickType_t ) 0 )
{
xShouldBlock = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
/* We are now out of the critical section but the scheduler is still
* suspended, so we are safe to do non-deterministic operations such
* as prvAddCurrentTaskToDelayedList. */
if( xShouldBlock == pdTRUE )
{
traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
else
{
taskEXIT_CRITICAL();
mtCOVERAGE_TEST_MARKER();
}
}
xAlreadyYielded = xTaskResumeAll();
/* Force a reschedule if xTaskResumeAll has not already done so. */
if( ( xShouldBlock == pdTRUE ) && ( xAlreadyYielded == pdFALSE ) )
{
taskYIELD_WITHIN_API();
}
else
{
taskEXIT_CRITICAL();
mtCOVERAGE_TEST_MARKER();
}
taskENTER_CRITICAL();
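The take/wait rework above changes only the internal locking order (critical section first, scheduler suspension around the non-deterministic prvAddCurrentTaskToDelayedList call); the public notification API is unchanged. An illustrative task/ISR pair using the standard calls (the handle, task, and ISR names are assumptions for the sketch):

#include "FreeRTOS.h"
#include "task.h"

static TaskHandle_t xConsumerHandle = NULL;   /* set by xTaskCreate() elsewhere */

/* Task: block until notified, then process the accumulated count. */
static void vConsumerTask( void * pvParameters )
{
    uint32_t ulEvents;

    ( void ) pvParameters;

    for( ;; )
    {
        /* pdTRUE clears the notification count on exit; portMAX_DELAY
         * blocks indefinitely until the first give arrives. */
        ulEvents = ulTaskNotifyTake( pdTRUE, portMAX_DELAY );

        ( void ) ulEvents;   /* ... handle ulEvents occurrences ... */
    }
}

/* ISR: give the notification; request a context switch if that
 * unblocked a higher-priority task. */
void vExampleISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    vTaskNotifyGiveFromISR( xConsumerHandle, &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}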
@@ -7785,88 +7784,71 @@ TickType_t uxTaskResetEventItemValue( void )
uint32_t * pulNotificationValue,
TickType_t xTicksToWait )
{
BaseType_t xReturn, xAlreadyYielded;
BaseType_t xReturn, xAlreadyYielded, xShouldBlock = pdFALSE;
traceENTER_xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );
configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
taskENTER_CRITICAL();
/* Only block if a notification is not already pending. */
if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
/* We suspend the scheduler here as prvAddCurrentTaskToDelayedList is a
* non-deterministic operation. */
vTaskSuspendAll();
{
/* Clear bits in the task's notification value as bits may get
* set by the notifying task or interrupt. This can be used to
* clear the value to zero. */
pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;
/* Mark this task as waiting for a notification. */
pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
if( xTicksToWait > ( TickType_t ) 0 )
/* We MUST enter a critical section to atomically check and update the
* task notification value. If we do not do so, a notification from
* an ISR will get lost. */
taskENTER_CRITICAL();
{
traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );
/* We MUST suspend the scheduler before exiting the critical
* section (i.e. before enabling interrupts).
*
* If we do not do so, a notification sent from an ISR, which
* happens after exiting the critical section and before
* suspending the scheduler, will get lost. The sequence of
* events will be:
* 1. Exit critical section.
* 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
* task to the Ready list.
* 3. Suspend scheduler.
* 4. prvAddCurrentTaskToDelayedList moves the task to the
* delayed or suspended list.
* 5. Resume scheduler does not touch the task (because it is
* not on the pendingReady list), effectively losing the
* notification from the ISR.
*
* The same does not happen when we suspend the scheduler before
* exiting the critical section. The sequence of events in this
* case will be:
* 1. Suspend scheduler.
* 2. Exit critical section.
* 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
* task to the pendingReady list as the scheduler is
* suspended.
* 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
* suspended list. Note that this operation does not nullify
* the add to pendingReady list done in the above step because
* a different list item, namely xEventListItem, is used for
* adding the task to the pendingReady list. In other words,
* the task still remains on the pendingReady list.
* 5. Resume scheduler moves the task from pendingReady list to
* the Ready list.
*/
vTaskSuspendAll();
/* Only block if a notification is not already pending. */
if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
{
taskEXIT_CRITICAL();
/* Clear bits in the task's notification value as bits may get
* set by the notifying task or interrupt. This can be used
* to clear the value to zero. */
pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
xAlreadyYielded = xTaskResumeAll();
/* Mark this task as waiting for a notification. */
pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
if( xAlreadyYielded == pdFALSE )
{
taskYIELD_WITHIN_API();
if( xTicksToWait > ( TickType_t ) 0 )
{
xShouldBlock = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
/* We are now out of the critical section but the scheduler is still
* suspended, so we are safe to do non-deterministic operations such
* as prvAddCurrentTaskToDelayedList. */
if( xShouldBlock == pdTRUE )
{
traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
else
{
taskEXIT_CRITICAL();
mtCOVERAGE_TEST_MARKER();
}
}
xAlreadyYielded = xTaskResumeAll();
/* Force a reschedule if xTaskResumeAll has not already done so. */
if( ( xShouldBlock == pdTRUE ) && ( xAlreadyYielded == pdFALSE ) )
{
taskYIELD_WITHIN_API();
}
else
{
taskEXIT_CRITICAL();
mtCOVERAGE_TEST_MARKER();
}
taskENTER_CRITICAL();