Proved that decrementing uxTopReadyPriority does not lead to underflow.

Tobias Reinhard 2022-12-04 13:46:32 -05:00
parent 00bb9d4a17
commit e71756e4cb
3 changed files with 57 additions and 10 deletions

tasks.c (17 changed lines)

@@ -946,7 +946,7 @@ static void prvYieldForTask( TCB_t * pxTCB,
    #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) )
        BaseType_t xPriorityDropped = pdFALSE;
    #endif
-   //@ close taskISRLockInv_p();
+   //@ close _taskISRLockInv_p(gTopReadyPriority);
    while( xTaskScheduled == pdFALSE )
    /*@ invariant
@@ -958,7 +958,7 @@ static void prvYieldForTask( TCB_t * pxTCB,
        interruptsDisabled_f(state) == true &*&
        taskLockInv_p() &*&
        isrLockInv_p() &*&
-       taskISRLockInv_p()
+       _taskISRLockInv_p(gTopReadyPriority)
        &*&
        // opened predicate `coreLocalInterruptInv_p()`
        [0.5]pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB) &*&
@@ -971,8 +971,8 @@ static void prvYieldForTask( TCB_t * pxTCB,
        &*&
        // additional knowledge
        0 <= uxCurrentPriority &*& uxCurrentPriority <= gTopReadyPriority &*&
-       gTopReadyPriority < configMAX_PRIORITIES
-       ;
+       gTopReadyPriority < configMAX_PRIORITIES;
+       // 0 <= uxCurrentPriority &*& uxCurrentPriority < configMAX_PRIORITIES;
    @*/
    {
        #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) )
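The loop invariant now carries the parameterised predicate _taskISRLockInv_p(gTopReadyPriority) (defined in the proof header further down) together with the bounds 0 <= uxCurrentPriority &*& uxCurrentPriority <= gTopReadyPriority &*& gTopReadyPriority < configMAX_PRIORITIES. As a plain-C illustration of one thing these bounds buy, the sketch below is a minimal stand-in, not FreeRTOS code (readReadyList, the array element type and the constant value are assumptions made only for the example): together with unsignedness, the two facts keep every pxReadyTasksLists[ uxCurrentPriority ] access in range.

#include <assert.h>

#define configMAX_PRIORITIES 5              /* stand-in value */
typedef unsigned long UBaseType_t;          /* typical definition of the FreeRTOS typedef */

static int pxReadyTasksLists[ configMAX_PRIORITIES ];   /* stand-in element type */

static int readReadyList( UBaseType_t uxCurrentPriority,
                          UBaseType_t uxTopReadyPriority )
{
    /* The two facts carried by the loop invariant, modelled as asserts. */
    assert( uxCurrentPriority <= uxTopReadyPriority );
    assert( uxTopReadyPriority < configMAX_PRIORITIES );

    /* UBaseType_t is unsigned, so 0 <= uxCurrentPriority holds trivially;
     * with the facts above the index is strictly below the array size. */
    return pxReadyTasksLists[ uxCurrentPriority ];
}

int main( void )
{
    return readReadyList( 0, configMAX_PRIORITIES - 1 );
}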
@@ -987,8 +987,10 @@ static void prvYieldForTask( TCB_t * pxTCB,
            }
        #endif
-       //@ open taskISRLockInv_p();
+       //@ open _taskISRLockInv_p(gTopReadyPriority);
        //@ assert( exists_in_taskISRLockInv_p(?gTasks, ?gStates0) );
+       //@ assert( integer_((void*) &uxTopReadyPriority, sizeof(UBaseType_t), false, gTopReadyPriority) );
+       //@ assert( gTopReadyPriority == uxTopReadyPriority);
        //@ open readyLists_p(?gCellLists, ?gOwnerLists);
        //@ List_array_p_index_within_limits(&pxReadyTasksLists, uxCurrentPriority);
@@ -999,6 +1001,7 @@ static void prvYieldForTask( TCB_t * pxTCB,
        //@ assert( mem(gOwners, gOwnerLists) == true );
        //@ open xLIST(gReadyList, _, _, _, _, _, _);
+       //@ assert( length(gCells) == gReadyList->uxNumberOfItems + 1 );
        if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE )
        {
            List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] );
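The assertion added here records that the ghost cell list gCells of an xLIST contains the end marker in addition to the uxNumberOfItems real items, so its length is uxNumberOfItems + 1. The following is a small standalone model of that counting relation only; it is not the FreeRTOS list implementation, and the struct layout and names are reused purely for readability:

#include <assert.h>
#include <stddef.h>

struct xLIST_ITEM { struct xLIST_ITEM * pxNext; };

struct xLIST
{
    size_t            uxNumberOfItems;   /* real items, end marker excluded */
    struct xLIST_ITEM xListEnd;          /* end marker, always present */
};

/* Count every cell on the ring, end marker included. */
static size_t countCells( struct xLIST * pxList )
{
    size_t uxCells = 1;                   /* the end marker itself */
    struct xLIST_ITEM * pxIterator = pxList->xListEnd.pxNext;

    while( pxIterator != &( pxList->xListEnd ) )
    {
        uxCells++;
        pxIterator = pxIterator->pxNext;
    }

    return uxCells;
}

int main( void )
{
    struct xLIST_ITEM xItemA, xItemB;
    struct xLIST xList = { .uxNumberOfItems = 2 };

    /* ring: xListEnd -> A -> B -> xListEnd */
    xList.xListEnd.pxNext = &xItemA;
    xItemA.pxNext = &xItemB;
    xItemB.pxNext = &( xList.xListEnd );

    /* the relation asserted in the proof: cells == items + end marker */
    assert( countCells( &xList ) == xList.uxNumberOfItems + 1 );
    return 0;
}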
@@ -1058,11 +1061,9 @@ static void prvYieldForTask( TCB_t * pxTCB,
            &*&
            // Write permission for task scheduled on this core
            [1/2]sharedSeg_TCB_p(gCurrentTCB, ?gCurrentTCB_state) &*&
-           // gCurrentTCB_state != taskTASK_NOT_RUNNING &*&
            (gCurrentTCB_state == coreID_f() || gCurrentTCB_state == taskTASK_YIELDING) &*&
            nth(index_of(gCurrentTCB, gTasks), gStates) == gCurrentTCB_state
            &*&
-           // TODO:
            // Write permissions for unscheduled tasks
            foreach(gTasks, readOnly_sharedSeg_TCB_IF_not_running_p(gTasks, gStates))
            &*&
@@ -1281,8 +1282,6 @@ static void prvYieldForTask( TCB_t * pxTCB,
        {
            if( xDecrementTopPriority != pdFALSE )
            {
-               // TODO: Remove tmp assumptions
-               //@ assume( uxTopReadyPriority > 0);
                uxTopReadyPriority--;
                #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) )
                {
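The removed assume( uxTopReadyPriority > 0 ) was the temporary assumption this commit discharges: uxTopReadyPriority has the unsigned type UBaseType_t, so the decrement above must never execute with the value 0. The sketch below is a deliberately simplified, standalone model of that argument and not the real prvYieldForTask (in particular, the handling of xDecrementTopPriority is reduced to the case relevant here): because the priority-0 ready list always holds the idle task, the empty-list branch that decrements the cached top priority can only run while the scan is still above priority 0, so the unsigned counter never underflows.

#include <assert.h>
#include <stdbool.h>

#define configMAX_PRIORITIES 5
#define pdFALSE 0
#define pdTRUE  1
typedef unsigned long UBaseType_t;
typedef long BaseType_t;

static UBaseType_t uxTopReadyPriority = configMAX_PRIORITIES - 1;

/* xListNonEmpty[ p ] models
 * listLIST_IS_EMPTY( &( pxReadyTasksLists[ p ] ) ) == pdFALSE. */
static void selectHighestPriorityTask( bool xListNonEmpty[ configMAX_PRIORITIES ] )
{
    BaseType_t xTaskScheduled = pdFALSE;
    BaseType_t xDecrementTopPriority = pdTRUE;
    UBaseType_t uxCurrentPriority = uxTopReadyPriority;

    /* modelled consequence of the strengthened readyLists_p:
     * the priority-0 list always contains the idle task */
    assert( xListNonEmpty[ 0 ] );

    while( xTaskScheduled == pdFALSE )
    {
        if( xListNonEmpty[ uxCurrentPriority ] )
        {
            xTaskScheduled = pdTRUE;      /* a ready task (at worst idle) is picked */
        }
        else
        {
            if( xDecrementTopPriority != pdFALSE )
            {
                /* Unreachable at priority 0, so the decrement cannot underflow. */
                assert( uxTopReadyPriority > 0 );
                uxTopReadyPriority--;
            }

            --uxCurrentPriority;          /* safe for the same reason */
        }
    }
}

int main( void )
{
    bool xListNonEmpty[ configMAX_PRIORITIES ] = { true, false, false, false, false };

    selectHighestPriorityTask( xListNonEmpty );
    assert( uxTopReadyPriority == 0 );
    return 0;
}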


@@ -11,7 +11,10 @@ predicate readyLists_p(list<list<struct xLIST_ITEM*> > gCellLists,
                       list<list<void*> > gOwnerLists) =
    configMAX_PRIORITIES == length(gCellLists) &*&
    List_array_p(&pxReadyTasksLists, configMAX_PRIORITIES,
-                gCellLists, gOwnerLists);
+                gCellLists, gOwnerLists) &*&
+   // List of priority 0 always contains the idle task and the end marker
+   // nothing else
+   length( nth(0, gCellLists) ) == 2;

predicate List_array_p(List_t* array, int size,
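The new conjunct pins the priority-0 cell list down to exactly two cells: the idle task's list item plus the end marker. Combined with the cells-equals-items-plus-one relation modelled earlier for the assertion in tasks.c, the priority-0 ready list therefore always holds exactly one real item and can never test as empty, which is presumably what lets the assume in prvYieldForTask be dropped. A minimal standalone check of just that step (plain C, not the FreeRTOS list API; listIsEmpty is a stand-in for listLIST_IS_EMPTY):

#include <assert.h>
#include <stddef.h>

/* In the proof, a list's ghost cell list contains all items plus the end
 * marker: items == cells - 1, and the list is empty iff cells == 1. */
static int listIsEmpty( size_t uxCells )
{
    return uxCells == 1;
}

int main( void )
{
    size_t uxPriorityZeroCells = 2;              /* idle task item + end marker */

    assert( !listIsEmpty( uxPriorityZeroCells ) );
    return 0;
}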


@@ -127,6 +127,51 @@ predicate taskISRLockInv_p() =
    forall(gOwnerLists, (superset)(gTasks)) == true;

+// Auxiliary predicate. Equal to the lock invariant above but exposes
+// some details.
+predicate _taskISRLockInv_p(UBaseType_t gTopReadyPriority) =
+    // Access to global variables
+    [0.5]pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*&
+    integer_((void*) &uxSchedulerSuspended, sizeof(UBaseType_t), false, _) &*&
+    integer_(&xSchedulerRunning, sizeof(BaseType_t), true, _)
+    &*&
+    // top ready priority must be in range
+    integer_((void*) &uxTopReadyPriority, sizeof(UBaseType_t), false, gTopReadyPriority) &*&
+    0 <= gTopReadyPriority &*& gTopReadyPriority < configMAX_PRIORITIES
+    &*&
+    // tasks / TCBs
+    exists_in_taskISRLockInv_p(?gTasks, ?gStates)
+    &*&
+    // (RP-All) Read permissions for every task
+    // and recording of task states in state list
+    // (∀t ∈ gTasks.
+    //      [1/2]sharedSeg_TCB_p(t, _))
+    // ∧
+    // ∀i. ∀t. gTasks[i] == t -> gStates[i] == t->xTaskRunState
+    foreach(gTasks, readOnly_sharedSeg_TCB_p(gTasks, gStates))
+    &*&
+    // (RP-Current) Read permission for task currently scheduled on this core
+    // (RP-All) + (RP-Current) => Write permission for scheduled task
+    [1/2]sharedSeg_TCB_p(gCurrentTCB, ?gCurrentTCB_state) &*&
+    // gCurrentTCB_state != taskTASK_NOT_RUNNING &*&
+    (gCurrentTCB_state == coreID_f() || gCurrentTCB_state == taskTASK_YIELDING) &*&
+    nth(index_of(gCurrentTCB, gTasks), gStates) == gCurrentTCB_state
+    &*&
+    // (RP-Unsched) Read permissions for unscheduled tasks
+    // (RP-All) + (RP-Unsched) => Write permissions for unscheduled tasks
+    // ∀t ∈ tasks. t->xTaskState == taskTASK_NOT_RUNNING
+    //      -> [1/2]shared_TCB_p(t, taskTASK_NOT_RUNNING)
+    foreach(gTasks, readOnly_sharedSeg_TCB_IF_not_running_p(gTasks, gStates))
+    &*&
+    readyLists_p(?gCellLists, ?gOwnerLists)
+    &*&
+    // gTasks contains all relevant tasks
+    mem(gCurrentTCB, gTasks) == true
+    &*&
+    // ∀l ∈ gOwnerLists. l ⊆ gTasks
+    forall(gOwnerLists, (superset)(gTasks)) == true;

lemma void produce_taskISRLockInv();
    requires locked_p(?heldLocks) &*&
             heldLocks == cons(?i, cons(?t, nil)) &*&
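For context, _taskISRLockInv_p is described as equal to taskISRLockInv_p but exposing some details: the ghost parameter names the value stored in uxTopReadyPriority, and the body pins that value into [0, configMAX_PRIORITIES). The ghost lemma below is a hypothetical usage sketch, not part of this commit and not verified here, showing the intended pattern of extracting those range facts and then restoring the predicate; in prvYieldForTask the same effect is obtained directly by closing the predicate before the loop and opening it inside the loop body.

/*@
// Hypothetical sketch (not in this commit): expose the range facts that
// _taskISRLockInv_p carries about uxTopReadyPriority, then restore it.
lemma void get_top_ready_priority_bounds()
    requires _taskISRLockInv_p(?gTopReadyPriority);
    ensures  _taskISRLockInv_p(gTopReadyPriority) &*&
             0 <= gTopReadyPriority &*&
             gTopReadyPriority < configMAX_PRIORITIES;
{
    open _taskISRLockInv_p(gTopReadyPriority);
    // The bounds are pure conjuncts of the predicate body and remain
    // available after re-closing it.
    close _taskISRLockInv_p(gTopReadyPriority);
}
@*/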