Assert if prvCheckForRunStateChange is called in ISR (#779)

* Assert if prvCheckForRunStateChange is called in ISR
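
The change replaces the runtime if( portCHECK_IF_IN_ISR() == pdFALSE ) guard with portASSERT_IF_IN_ISR(), so calling the function from an interrupt is treated as a programming error instead of being silently skipped. The macro is port defined and, when a port does not supply it, FreeRTOS falls back to an empty definition. A minimal sketch of one possible port-level mapping, assuming the port already exposes the portCHECK_IF_IN_ISR() query that the old guard used (not part of this commit):

    /* Sketch only: one possible port-level mapping, assuming the port already
     * provides portCHECK_IF_IN_ISR().  FreeRTOS uses an empty definition when
     * a port does not supply this macro. */
    #ifndef portASSERT_IF_IN_ISR
        #define portASSERT_IF_IN_ISR()    configASSERT( portCHECK_IF_IN_ISR() == pdFALSE )
    #endif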
chinglee-iot 2023-09-06 17:47:41 +08:00 committed by GitHub
parent 288d143357
commit 53229b1537

tasks.c (88 lines changed)

@@ -693,62 +693,58 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
         UBaseType_t uxPrevCriticalNesting;
         const TCB_t * pxThisTCB;
 
-        /* This should be skipped if called from an ISR. If the task on the current
-         * core is no longer running, then vTaskSwitchContext() probably should
-         * be run before returning, but we don't have a way to force that to happen
-         * from here. */
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
+        /* This must only be called from within a task. */
+        portASSERT_IF_IN_ISR();
+
         /* This function is always called with interrupts disabled
          * so this is safe. */
         pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ];
 
         while( pxThisTCB->xTaskRunState == taskTASK_YIELDING )
         {
             /* We are only here if we just entered a critical section
              * or if we just suspended the scheduler, and another task
              * has requested that we yield.
              *
              * This is slightly complicated since we need to save and restore
             * the suspension and critical nesting counts, as well as release
              * and reacquire the correct locks. And then, do it all over again
              * if our state changed again during the reacquisition. */
             uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT();
 
             if( uxPrevCriticalNesting > 0U )
             {
                 portSET_CRITICAL_NESTING_COUNT( 0U );
                 portRELEASE_ISR_LOCK();
             }
             else
             {
                 /* The scheduler is suspended. uxSchedulerSuspended is updated
                  * only when the task is not requested to yield. */
                 mtCOVERAGE_TEST_MARKER();
             }
 
             portRELEASE_TASK_LOCK();
             portMEMORY_BARRIER();
             configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING );
 
             portENABLE_INTERRUPTS();
 
             /* Enabling interrupts should cause this core to immediately
              * service the pending interrupt and yield. If the run state is still
              * yielding here then that is a problem. */
             configASSERT( pxThisTCB->xTaskRunState != taskTASK_YIELDING );
 
             portDISABLE_INTERRUPTS();
             portGET_TASK_LOCK();
             portGET_ISR_LOCK();
 
             portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting );
 
             if( uxPrevCriticalNesting == 0U )
             {
                 portRELEASE_ISR_LOCK();
             }
         }
-        }
     }
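
For context, the function's own comments note that it is only reached after a task has just entered a critical section or suspended the scheduler, so the new assert flags the case where a non-FromISR API is used inside an interrupt handler. A usage sketch of the misuse it is meant to catch and the interrupt-safe alternative; vExampleIRQHandler is a hypothetical handler name and is not part of this commit:

    #include "FreeRTOS.h"
    #include "task.h"

    /* Hypothetical interrupt handler, for illustration only.  It shows the kind
     * of misuse the new assert catches: task-level critical sections must not be
     * used from an ISR. */
    void vExampleIRQHandler( void )
    {
        UBaseType_t uxSavedInterruptStatus;

        /* Wrong in an ISR: taskENTER_CRITICAL() is the task-level API and, in an
         * SMP build, can lead into prvCheckForRunStateChange(), which now asserts
         * via portASSERT_IF_IN_ISR(). */
        /* taskENTER_CRITICAL(); */

        /* Correct in an ISR: use the interrupt-safe critical section instead. */
        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            /* Access data shared with tasks here. */
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
    }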