diff --git a/include/task.h b/include/task.h
index 3c6f9dde5..883f4edd0 100644
--- a/include/task.h
+++ b/include/task.h
@@ -333,6 +333,7 @@ typedef enum
         portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( ( pxDataGroup )->xISRSpinlock ) ); \
         /* Increment the critical nesting count */                                               \
         portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );                                         \
+        /* Return the previous interrupt status */                                               \
         uxSavedInterruptStatus;                                                                  \
     } )
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
@@ -350,6 +351,7 @@ typedef enum
     do {                                                                                         \
         const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();                             \
         configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );                          \
+        /* Decrement the critical nesting count */                                               \
         portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );                                         \
         if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )                                     \
         {                                                                                        \
@@ -382,7 +384,9 @@ typedef enum
     do {                                                                                         \
         const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();                             \
         configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );                          \
+        /* Decrement the critical nesting count */                                               \
         portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );                                         \
+        /* Release the ISR spinlock */                                                           \
         portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxDataGroup->xISRSpinlock ) ); \
         if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )                                     \
         {                                                                                        \
diff --git a/queue.c b/queue.c
index 1e5e3e591..0aac2317f 100644
--- a/queue.c
+++ b/queue.c
@@ -339,6 +339,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
         prvUnlockQueue( ( pxQueue ) );                                          \
         portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \
         vTaskPreemptionEnable( NULL );                                          \
+        if( ( xYieldAPI ) == pdTRUE )                                           \
+        {                                                                       \
+            taskYIELD_WITHIN_API();                                             \
+        }                                                                       \
+        else                                                                    \
+        {                                                                       \
+            mtCOVERAGE_TEST_MARKER();                                           \
+        }                                                                       \
     } while( 0 )
 #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
     #define queueLOCK( pxQueue )                                                \
diff --git a/tasks.c b/tasks.c
index e1299c945..68620d854 100644
--- a/tasks.c
+++ b/tasks.c
@@ -5406,11 +5406,13 @@ BaseType_t xTaskIncrementTick( void )
          * SMP port. */
         configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 );
 
-        if( uxSchedulerSuspended != ( UBaseType_t ) 0U
-            #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-                || ( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable > 0U ) )
-            #endif
-            )
+        /* vTaskSwitchContext() must not be called with a task that has
+         * preemption disabled. */
+        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+            configASSERT( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U );
+        #endif
+
+        if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
         {
             /* The scheduler is currently suspended or the task
              * has requested to not be preempted - do not allow
@@ -7498,11 +7500,11 @@ static void prvResetNextTaskUnblockTime( void )
             BaseType_t xYieldCurrentTask;
 
             /* Get the xYieldPending stats inside the critical section. */
-            #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+            #if ( portUSING_GRANULAR_LOCKS == 1 )
                 xYieldCurrentTask = xTaskUnlockCanYield();
            #else
                 xYieldCurrentTask = xYieldPendings[ xCoreID ];
-            #endif /* configUSE_TASK_PREEMPTION_DISABLE */
+            #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 
             kernelRELEASE_ISR_LOCK( xCoreID );
             kernelRELEASE_TASK_LOCK( xCoreID );
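
For reviewers, a minimal sketch of the enter/exit sequence that the newly commented task.h macros perform when portUSING_GRANULAR_LOCKS is enabled. It paraphrases the macro bodies from the hunks above in open-coded form; the xExampleDataGroup_t type, the prvAccessDataGroupFromISR() function, and the use of portSET_INTERRUPT_MASK_FROM_ISR() / portCLEAR_INTERRUPT_MASK_FROM_ISR() for the steps not visible in the hunks are assumptions for illustration, not symbols taken from this patch.

#include "FreeRTOS.h"
#include "task.h"

#if ( portUSING_GRANULAR_LOCKS == 1 )

/* Hypothetical data group guarded by its own spinlocks, mirroring the
 * pxDataGroup parameter used by the task.h macros in the hunks above. */
    typedef struct xEXAMPLE_DATA_GROUP
    {
        portSPINLOCK_TYPE xTaskSpinlock;
        portSPINLOCK_TYPE xISRSpinlock;
    } xExampleDataGroup_t;

    static void prvAccessDataGroupFromISR( xExampleDataGroup_t * pxDataGroup )
    {
        UBaseType_t uxSavedInterruptStatus;
        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

        /* Enter (mirrors the first task.h hunk): mask interrupts, take the
         * group's ISR spinlock and bump the per-core critical nesting count.
         * How uxSavedInterruptStatus is obtained is not shown in the hunk;
         * portSET_INTERRUPT_MASK_FROM_ISR() is assumed here. */
        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxDataGroup->xISRSpinlock ) );
        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );

        /* ... access data protected by this data group ... */

        /* Exit (mirrors the third task.h hunk): drop the nesting count and
         * release the ISR spinlock. */
        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) &( pxDataGroup->xISRSpinlock ) );

        /* Restoring the saved interrupt status once the nesting count reaches
         * zero is assumed; the body of this if block is not part of the hunk. */
        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
        {
            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
        }
    }

#endif /* portUSING_GRANULAR_LOCKS */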