Refactored lock predicates to improve readability.

Tobias Reinhard 2022-11-29 09:37:23 -05:00
parent b310efa029
commit 014acb9a00
6 changed files with 56 additions and 68 deletions
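In short, the refactoring renames the TCB predicates after the data segment they describe, adds a predicate for the shared segment, and gives the ready-lists predicate an explicit parameter. A rough mapping, taken from the hunks below (parameter lists abbreviated, not additional proof content):

/*@
// prvTCB_p(tcb, ulFreeBytesOnStack)  ->  prvSeg_TCB_p(tcb, ulFreeBytesOnStack)
// pubTCB_p(tcb, uxCriticalNesting)   ->  coreLocalSeg_TCB_p(tcb, uxCriticalNesting)
// readyLists_p()                     ->  readyLists_p(?gCellLists)
// new: sharedSeg_TCB_p(tcb)          // shared-segment fields, currently xTaskRunState
@*/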

View file

@@ -96,22 +96,22 @@
#define taskCHECK_FOR_STACK_OVERFLOW() VF__taskCHECK_FOR_STACK_OVERFLOW()
void VF__taskCHECK_FOR_STACK_OVERFLOW()
/*@ requires prvTCB_p(?gCurrentTCB, ?ulFreeBytesOnStack) &*&
pubTCB_p(gCurrentTCB, ?uxCriticalNesting) &*&
/*@ requires prvSeg_TCB_p(?gCurrentTCB, ?ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(gCurrentTCB, ?uxCriticalNesting) &*&
// chunks required by `pxCurrentTCB` aka `xTaskGetCurrentTaskHandle()`
interruptState_p(coreID_f(), ?state) &*&
interruptsDisabled_f(state) == true &*&
pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB);
@*/
/*@ ensures prvTCB_p(gCurrentTCB, ulFreeBytesOnStack) &*&
pubTCB_p(gCurrentTCB, uxCriticalNesting) &*&
/*@ ensures prvSeg_TCB_p(gCurrentTCB, ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(gCurrentTCB, uxCriticalNesting) &*&
// chunks required by `pxCurrentTCB` aka `xTaskGetCurrentTaskHandle()`
interruptState_p(coreID_f(), state) &*&
interruptsDisabled_f(state) == true &*&
pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB); \
@*/ \
{ \
/*@ open prvTCB_p(gCurrentTCB, ulFreeBytesOnStack); @*/ \
/*@ open prvSeg_TCB_p(gCurrentTCB, ulFreeBytesOnStack); @*/ \
/*@ assert( stack_p_2(?pxStack, ?ulStackDepth, ?pxTopOfStack, \
?ulFreeBytes, ?ulUsedCells, ?ulUnalignedBytes) ); \
@*/ \
@@ -139,7 +139,7 @@
/*@ close stack_p_2(pxStack, ulStackDepth, pxTopOfStack, \
ulFreeBytes, ulUsedCells, ulUnalignedBytes); \
@*/ \
/*@ close prvTCB_p(gCurrentTCB, ulFreeBytesOnStack); @*/ \
/*@ close prvSeg_TCB_p(gCurrentTCB, ulFreeBytesOnStack); @*/ \
TCB_t* tcb1 = pxCurrentTCB; \
TCB_t* tcb2 = pxCurrentTCB; \
vApplicationStackOverflowHook( ( TaskHandle_t ) tcb1, tcb2->pcTaskName ); \
@@ -151,7 +151,7 @@
chars_split((char*) pxStack, ulFreeBytesOnStack); \
close stack_p_2(pxStack, ulStackDepth, pxTopOfStack, \
ulFreeBytes, ulUsedCells, ulUnalignedBytes); \
close prvTCB_p(gCurrentTCB, ulFreeBytesOnStack); \
close prvSeg_TCB_p(gCurrentTCB, ulFreeBytesOnStack); \
} \
@*/ \
}

View file

@@ -1863,11 +1863,11 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL
*/
void vApplicationStackOverflowHook( TaskHandle_t xTask,
char * pcTaskName );
/*@ requires prvTCB_p(xTask, ?ulFreeBytesOnStack) &*&
pubTCB_p(xTask, ?uxCriticalNesting);
/*@ requires prvSeg_TCB_p(xTask, ?ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(xTask, ?uxCriticalNesting);
@*/
/*@ ensures prvTCB_p(xTask, ulFreeBytesOnStack) &*&
pubTCB_p(xTask, uxCriticalNesting);
/*@ ensures prvSeg_TCB_p(xTask, ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(xTask, uxCriticalNesting);
@*/
#endif
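For orientation, a minimal application-side hook along the following lines would discharge this contract. The sketch is not part of the commit; it only illustrates that the hook has to preserve the two TCB segment predicates, which it does by leaving the TCB untouched.

void vApplicationStackOverflowHook( TaskHandle_t xTask,
                                    char * pcTaskName )
/*@ requires prvSeg_TCB_p(xTask, ?ulFreeBytesOnStack) &*&
             coreLocalSeg_TCB_p(xTask, ?uxCriticalNesting);
@*/
/*@ ensures prvSeg_TCB_p(xTask, ulFreeBytesOnStack) &*&
            coreLocalSeg_TCB_p(xTask, uxCriticalNesting);
@*/
{
    /* The body neither opens nor consumes the TCB predicates, so the
     * postcondition follows directly from the precondition. */
    ( void ) xTask;
    ( void ) pcTaskName;
}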

tasks.c (70 changes)
View file

@@ -908,11 +908,11 @@ static void prvYieldForTask( TCB_t * pxTCB,
&*&
// opened predicate `coreLocalInterruptInv_p()`
pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*&
pubTCB_p(gCurrentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
prvTCB_p(gCurrentTCB, ?ulFreeBytesOnStack);
prvSeg_TCB_p(gCurrentTCB, ?ulFreeBytesOnStack);
@*/
/*@ ensures 0 <= xCoreID &*& xCoreID < configNUM_CORES &*&
xCoreID == coreID_f() &*&
@@ -925,11 +925,11 @@ static void prvYieldForTask( TCB_t * pxTCB,
&*&
// opened predicate `coreLocalInterruptInv_p()`
pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB) &*&
pubTCB_p(gCurrentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
prvTCB_p(gCurrentTCB, ulFreeBytesOnStack);
prvSeg_TCB_p(gCurrentTCB, ulFreeBytesOnStack);
@*/
{
//@ open taskISRLockInv();
@@ -961,11 +961,12 @@ static void prvYieldForTask( TCB_t * pxTCB,
&*&
// opened predicate `coreLocalInterruptInv_p()`
pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB) &*&
pubTCB_p(gCurrentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
// pubTCB_p(gCurrentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
prvTCB_p(gCurrentTCB, ulFreeBytesOnStack)
prvSeg_TCB_p(gCurrentTCB, ulFreeBytesOnStack)
&*&
// additional knowledge
0 <= uxCurrentPriority &*& uxCurrentPriority <= gTopReadyPriority &*&
@@ -986,19 +987,19 @@ static void prvYieldForTask( TCB_t * pxTCB,
#endif
//@ open taskISRLockInv();
//@ open readyLists_p();
//@ open readyLists_p(?gCellLists);
//@ List_array_p_index_within_limits(&pxReadyTasksLists, uxCurrentPriority);
//@ List_array_get_l(pxReadyTasksLists, uxCurrentPriority);
//@ List_array_split(pxReadyTasksLists, uxCurrentPriority);
//@ List_t* gReadyList = &pxReadyTasksLists[uxCurrentPriority];
/*@ assert( xLIST(gReadyList, ?gNumberOfItems, ?gIndex, ?gListEnd,
?gCells, ?gVals) );
@*/
//@ assert( xLIST(gReadyList, ?gSize, ?gIndex, ?gEnd, ?gCells, ?gVals) );
//@ open xLIST(gReadyList, _, _, _, _, _);
if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE )
{
List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] );
//@ assert( pxReadyList->pxIndex |-> gIndex );
/*@ assert( DLS(gListEnd, ?gEndPrev, gListEnd, gEndPrev,
/*@ assert( DLS(gEnd, ?gEndPrev, gEnd, gEndPrev,
gCells, gVals, gReadyList) );
@*/
@@ -1012,7 +1013,7 @@ static void prvYieldForTask( TCB_t * pxTCB,
//@ assert( mem(pxTaskItem, gCells) == true);
//@ open DLS(gListEnd, gEndPrev, gListEnd, gEndPrev, gCells, gVals, gReadyList);
//@ open DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gReadyList);
//@ assert( xLIST_ITEM(&pxReadyList->xListEnd, _, _, _, gReadyList) );
//@ open xLIST_ITEM(&pxReadyList->xListEnd, _, _, _, gReadyList);
// opening required to prove validity of `&( pxReadyList->xListEnd )`
@@ -1025,10 +1026,8 @@ static void prvYieldForTask( TCB_t * pxTCB,
pxLastTaskItem = pxLastTaskItem->pxPrevious;
//@ close xLIST_ITEM(gOldLastTaskItem, gV, gO, gEndPrev, gReadyList);
}
//@ close DLS(gListEnd, gEndPrev, gListEnd, gEndPrev, gCells, gVals, gReadyList);
/*@ close xLIST(gReadyList, gNumberOfItems, gIndex, gListEnd,
gCells, gVals);
@*/
//@ close DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gReadyList);
//@ close xLIST(gReadyList, _, gIndex, gEnd, gCells, gVals);
/* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority
* must not be decremented any further */
@@ -1036,16 +1035,14 @@ static void prvYieldForTask( TCB_t * pxTCB,
do
/*@ invariant
xLIST(gReadyList, gNumberOfItems, gIndex, gListEnd,
gCells, gVals)
&*&
mem(pxTaskItem, gCells) == true;
mem(pxTaskItem, gCells) == true &*&
xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals);
@*/
{
TCB_t * pxTCB;
//@ open xLIST(gReadyList, gNumberOfItems, gIndex, gListEnd, gCells, gVals);
//@ assert( DLS(gListEnd, ?gEndPrev2, gListEnd, gEndPrev2, gCells, gVals, gReadyList) );
//@ open xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals);
//@ assert( DLS(gEnd, ?gEndPrev2, gEnd, gEndPrev2, gCells, gVals, gReadyList) );
// Building an SSA for important variables helps us to
// refer to the right instances.
@@ -1075,10 +1072,6 @@ static void prvYieldForTask( TCB_t * pxTCB,
//@ close xLIST_ITEM(gTaskItem_3, _, _, _, gReadyList);
//@ DLS_close_2(gTaskItem_3, gCells, gVals);
//@ assume(false);
// Make sure that we covered all cases so far
//@ assume(false);
#ifdef IGNORED
/*debug_printf("Attempting to schedule %s on core %d\n", pxTCB->pcTaskName, portGET_CORE_ID() ); */
@@ -1138,7 +1131,6 @@ static void prvYieldForTask( TCB_t * pxTCB,
vListInsertEnd( pxReadyList, pxTaskItem );
break;
}
#endif /* IGNORED */
} while( pxTaskItem != pxLastTaskItem );
}
else
@@ -4297,11 +4289,11 @@ void vTaskSwitchContext( BaseType_t xCoreID )
&*&
// opened predicate `coreLocalInterruptInv_p()`
pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*&
pubTCB_p(gCurrentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
prvTCB_p(gCurrentTCB, ?ulFreeBytesOnStack);
prvSeg_TCB_p(gCurrentTCB, ?ulFreeBytesOnStack);
@*/
/*@ ensures // all locks are released and interrupts remain disabled
@@ -4312,11 +4304,11 @@ void vTaskSwitchContext( BaseType_t xCoreID )
&*&
// opened predicate `coreLocalInterruptInv_p()`
pointer(&pxCurrentTCBs[coreID_f], ?gNewCurrentTCB) &*&
pubTCB_p(gNewCurrentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
prvTCB_p(gCurrentTCB, ulFreeBytesOnStack);
prvSeg_TCB_p(gCurrentTCB, ulFreeBytesOnStack);
// Remark: the part of the post condition relating to TCBs will have to change.
@*/
{
@@ -4346,10 +4338,10 @@ void vTaskSwitchContext( BaseType_t xCoreID )
// TODO: Inspect reason.
TaskHandle_t currentHandle = pxCurrentTCB;
//@ assert( currentHandle == gCurrentTCB );
//@ open pubTCB_p(gCurrentTCB, 0);
//@ open coreLocalSeg_TCB_p(gCurrentTCB, 0);
UBaseType_t nesting = currentHandle->uxCriticalNesting;
configASSERT( nesting == 0 );
//@ close pubTCB_p(gCurrentTCB, 0);
//@ close coreLocalSeg_TCB_p(gCurrentTCB, 0);
}
#else
configASSERT( pxCurrentTCB->uxCriticalNesting == 0 );

View file

@@ -77,8 +77,8 @@ ensures
/*@
// TODO: We know that the list of priority 0 is never empty.
// It contains the idle task and nothing else.
predicate readyLists_p() =
List_array_p(&pxReadyTasksLists, configMAX_PRIORITIES, ?gCellLists);
predicate readyLists_p(list<list<struct xLIST_ITEM*> > gCellLists) =
List_array_p(&pxReadyTasksLists, configMAX_PRIORITIES, gCellLists);
@*/
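With the new parameter, proof steps that previously opened a bare readyLists_p() now bind the per-priority lists of list cells, as the tasks.c hunks above already do. A hypothetical helper lemma (not part of the commit) shows the intended open/close discipline:

/*@
lemma void readyLists_open_close_sketch()
    requires readyLists_p(?gCellLists);
    ensures  readyLists_p(gCellLists);
{
    open readyLists_p(gCellLists);
    // gCellLists has type list<list<struct xLIST_ITEM*> >: one list of
    // list cells per priority level in pxReadyTasksLists.
    close readyLists_p(gCellLists);
}
@*/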

View file

@@ -66,7 +66,7 @@ predicate TCB_p(TCB_t * tcb, uint32_t ulFreeBytesOnStack) =
tcb->uxPriority |-> _ &*&
tcb->xTaskRunState |-> _ &*&
tcb->xTaskRunState |-> ?gTaskRunState &*&
tcb->xIsIdle |-> _ &*&
// Assumes macro `configMAX_TASK_NAME_LEN` evaluates to 16.
@@ -102,13 +102,17 @@ predicate absTCB_p(TCB_t* tcb) =
//
// The predicates below will be expanded iteratively.
predicate prvTCB_p(TCB_t* tcb, uint32_t ulFreeBytesOnStack) =
predicate prvSeg_TCB_p(TCB_t* tcb, uint32_t ulFreeBytesOnStack) =
tcb->pxStack |-> ?stackPtr &*&
tcb->pxTopOfStack |-> ?topPtr &*&
stack_p_2(stackPtr, ?ulStackDepth, topPtr,
ulFreeBytesOnStack, ?ulUsedCells, ?ulUnalignedBytes);
predicate pubTCB_p(TCB_t* tcb, UBaseType_t uxCriticalNesting) =
predicate sharedSeg_TCB_p(TCB_t* tcb) =
tcb->xTaskRunState |-> ?gTaskRunState &*&
true;
predicate coreLocalSeg_TCB_p(TCB_t* tcb, UBaseType_t uxCriticalNesting) =
tcb->uxCriticalNesting |-> uxCriticalNesting;
@*/
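The split suggests that a full view of a TCB is the separating conjunction of its segments, so each lock or core-local invariant can own exactly the segment it protects. A hypothetical combined predicate (not part of this commit) would read:

/*@
// Hypothetical; not defined in the proof base.
predicate wholeTCB_sketch_p(TCB_t* tcb, uint32_t ulFreeBytesOnStack,
                            UBaseType_t uxCriticalNesting) =
    prvSeg_TCB_p(tcb, ulFreeBytesOnStack) &*&
    sharedSeg_TCB_p(tcb) &*&
    coreLocalSeg_TCB_p(tcb, uxCriticalNesting);
@*/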

View file

@@ -29,8 +29,9 @@ fixpoint bool interruptsDisabled_f(uint32_t);
predicate coreLocalInterruptInv_p() =
pointer(&pxCurrentTCBs[coreID_f], ?currentTCB) &*&
pubTCB_p(currentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _);
//pubTCB_p(currentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(currentTCB, ?gCriticalNesting);
predicate coreLocalLocked(uint32_t coreID);
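The open/close discipline used in vTaskSwitchContext above carries over to this invariant. A hypothetical no-op lemma (not part of the commit) sketches how a proof step would reach the critical-nesting counter of the current task:

/*@
lemma void coreLocalInterruptInv_nesting_sketch()
    requires coreLocalInterruptInv_p();
    ensures  coreLocalInterruptInv_p();
{
    open coreLocalInterruptInv_p();
    assert pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB);
    assert coreLocalSeg_TCB_p(gCurrentTCB, ?gCriticalNesting);
    // gCriticalNesting is the value of gCurrentTCB->uxCriticalNesting.
    close coreLocalInterruptInv_p();
}
@*/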
@@ -94,20 +95,11 @@ predicate taskISRLockInv() =
integer_((void*) &uxTopReadyPriority, sizeof(UBaseType_t), false, ?gTopReadyPriority) &*&
0 <= gTopReadyPriority &*& gTopReadyPriority < configMAX_PRIORITIES
&*&
readyLists_p() &*&
// Update: The current task on this core is interrupt protected.
// TODO: Exclude from `allTasks`.
// `allTasks` stores pointers to all currently valid tasks (i.e. TCB_t instances)
//foreach(?tasks, absTCB_p) &*&
// If a task is scheduled, it must be valid
//[0.5]pointer(&pxCurrentTCBs[coreID_f()], ?scheduledTask) &*&
//scheduledTask != NULL
// ? mem(scheduledTask, tasks) == true
// : true
//&*&
readyLists_p(?gCellLists) &*&
true;
lemma void produce_taskISRLockInv();
requires locked(?heldLocks) &*&
heldLocks == cons(?i, cons(?t, nil)) &*&