mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git (synced 2025-12-12 06:35:19 -05:00)
Added documentation of the locking discipline and renamed some predicates.
parent 3057a186c2
commit 4033b09210
5 changed files with 58 additions and 18 deletions
@@ -95,14 +95,14 @@
void VF__taskCHECK_FOR_STACK_OVERFLOW()
/*@ requires TCB_stack_p(?gCurrentTCB, ?ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(gCurrentTCB, ?uxCriticalNesting) &*&
TCB_criticalNesting_p(gCurrentTCB, ?uxCriticalNesting) &*&
// chunks required by `pxCurrentTCB` aka `xTaskGetCurrentTaskHandle()`
interruptState_p(coreID_f(), ?state) &*&
interruptsDisabled_f(state) == true &*&
pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB);
@*/
/*@ ensures TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(gCurrentTCB, uxCriticalNesting) &*&
TCB_criticalNesting_p(gCurrentTCB, uxCriticalNesting) &*&
// chunks required by `pxCurrentTCB` aka `xTaskGetCurrentTaskHandle()`
interruptState_p(coreID_f(), state) &*&
interruptsDisabled_f(state) == true &*&
@@ -1864,10 +1864,10 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL
void vApplicationStackOverflowHook( TaskHandle_t xTask,
char * pcTaskName );
/*@ requires TCB_stack_p(xTask, ?ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(xTask, ?uxCriticalNesting);
TCB_criticalNesting_p(xTask, ?uxCriticalNesting);
@*/
/*@ ensures TCB_stack_p(xTask, ulFreeBytesOnStack) &*&
coreLocalSeg_TCB_p(xTask, uxCriticalNesting);
TCB_criticalNesting_p(xTask, uxCriticalNesting);
@*/

#endif
@@ -7,6 +7,50 @@
#include "verifast_lists_extended.h"


/* ----------------------------------------------------------------------
* Locking discipline explained:
* FreeRTOS uses the following synchronization mechanisms:
* - Deactivating interrupts:
* Some data is only meant to be accessed on a specific core C. Such data
* may only be accessed after interrupts on core C have been deactivated.
* For instance, the global array `pxCurrentTCBs` in `tasks.c` has an entry for
* every core. `pxCurrentTCBs[C]` stores a pointer to the TCB of the task
* running on core C. Core C is always allowed to read `pxCurrentTCBs[C]`.
* However, writing requires the interrupts on core C to be deactivated.
*
* The resources protected by disabling interrupts are represented by the
* predicate `coreLocalInterruptInv_p` defined below.
*
* - task lock:
* The task lock is used to protect critical sections and resources from
* being accessed by multiple tasks simultaneously. The resources protected
* by the task lock are represented by the abstract predicate `taskLockInv_p`
* defined below. This proof does not deal with resources or code segments
* protected only by the task lock. Hence, we leave the predicate abstract.
*
* - ISR lock:
* The ISR/interrupt lock is used to protect critical sections and resources
* from being accessed by multiple interrupts simultaneously. The resources
* protected by the ISR lock are represented by the abstract predicate
* `isrLockInv_p` defined below. This proof does not deal with resources or
* code segments protected only by the ISR lock. Hence, we leave the predicate
* abstract.
*
* - task lock + ISR lock:
* Access to certain resources and critical sections is protected by both
* the task lock and the ISR lock. For these, it is crucial that we first
* acquire the task lock and then the ISR lock. Likewise, we must release them
* in the opposite order. Failure to comply with this order may lead to deadlocks.
* The resources protected by both locks are the main resources this proof
* deals with. These include the ready lists and certain access rights
* to the tasks' run states. The access rights protected by both locks are
* represented by the predicate `taskISRLockInv_p` defined below.
* Once both locks have been acquired in the right order, this lock invariant
* can be produced by calling the lemma `produce_taskISRLockInv`. Before the
* locks can be released, the invariant must be consumed by calling
* `consume_taskISRLockInv`. Both lemmas are defined below.
*/
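To make the acquisition and release order concrete, here is a minimal, purely illustrative sketch. It is not part of the commit: the lemma names `produce_taskISRLockInv` and `consume_taskISRLockInv` are the ones introduced above, while the port macros (`portDISABLE_INTERRUPTS`, `portGET_TASK_LOCK`, `portGET_ISR_LOCK`, `portRELEASE_ISR_LOCK`, `portRELEASE_TASK_LOCK`, `portENABLE_INTERRUPTS`) are assumed to be the SMP port's primitives and may differ by port and kernel version (some variants take a core ID argument). The function name is hypothetical.

void vExampleAccessSharedSchedulerState( void )   /* hypothetical helper, illustration only */
{
    portDISABLE_INTERRUPTS();      /* protects core-local data, e.g. pxCurrentTCBs[C]        */
    portGET_TASK_LOCK();           /* step 1: always take the task lock first                */
    portGET_ISR_LOCK();            /* step 2: then take the ISR lock                         */
    //@ produce_taskISRLockInv();  /* both locks held in order -> lock invariant available   */

    /* ... access the ready lists / run states protected by taskISRLockInv_p ... */

    //@ consume_taskISRLockInv();  /* give the invariant back before releasing any lock      */
    portRELEASE_ISR_LOCK();        /* step 3: release the ISR lock first                     */
    portRELEASE_TASK_LOCK();       /* step 4: then release the task lock                     */
    portENABLE_INTERRUPTS();
}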

/* ----------------------------------------------------------------------
* Core local data and access restrictions.
@@ -22,9 +66,8 @@ fixpoint bool interruptsDisabled_f(uint32_t);
predicate coreLocalInterruptInv_p() =
[0.5]pointer(&pxCurrentTCBs[coreID_f], ?currentTCB) &*&
//pubTCB_p(currentTCB, 0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(currentTCB, ?gCriticalNesting);
TCB_criticalNesting_p(currentTCB, ?gCriticalNesting);
@*/
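A note on the `[0.5]` fraction above (standard separation-logic reading in VeriFast, added here for illustration and not stated by the commit itself): any fraction of a chunk permits reading the location, but writing requires the full permission. The interrupt invariant thus contributes only half of the permission to `pxCurrentTCBs[coreID_f]`; writing the entry needs the remaining half to be merged in, which in this proof only happens after interrupts on the core have been disabled and the predicate has been opened. A hypothetical sketch of the accounting:

/*@
// Illustration only, not part of the commit:
//   [1/2]pointer(&pxCurrentTCBs[coreID_f], tcb)                         -- read-only share
//   [1/2]pointer(...) &*& [1/2]pointer(...)                             -- merges to the full chunk
//   pointer(&pxCurrentTCBs[coreID_f], tcb)                              -- full permission, write allowed
// Compare the full, unfractioned chunk required by VF__taskCHECK_FOR_STACK_OVERFLOW
// above, which also requires interruptsDisabled_f(state) == true.
@*/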
@@ -39,7 +82,7 @@ predicate locked_p(list< pair<real, int> > lockHistory);

/* ----------------------------------------------------------------------
* Task lock and associated global variables from `tasks.c`
* Task lock
*/

/*@
@@ -54,7 +97,7 @@ predicate taskLockInv_p();
@*/

/* ----------------------------------------------------------------------
* ISR lock and associated global variables from `tasks.c`
* ISR lock
*/

/*@
@@ -63,7 +63,7 @@ predicate TCB_runState_p(TCB_t* tcb, TaskRunning_t state;) =
// This predicate represents write access to the nesting level of a TCB.
// Entering a critical section increases the nesting level. Leaving it
// decreases it.
predicate coreLocalSeg_TCB_p(TCB_t* tcb, UBaseType_t uxCriticalNesting) =
predicate TCB_criticalNesting_p(TCB_t* tcb, UBaseType_t uxCriticalNesting) =
tcb->uxCriticalNesting |-> uxCriticalNesting;
@*/
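For illustration, a simplified sketch of how this counter is driven (this is not the kernel code from the diff; the exact enter/exit paths depend on the configuration): entering a critical section raises `uxCriticalNesting`, leaving lowers it, and interrupts are only re-enabled when the outermost section is left. In the proof, every access to the field would be wrapped by opening and closing `TCB_criticalNesting_p`.

void vExampleNesting( void )   /* hypothetical, simplified sketch */
{
    portDISABLE_INTERRUPTS();
    pxCurrentTCB->uxCriticalNesting++;            /* enter: raise the nesting level        */

    /* ... code protected by the critical section ... */

    pxCurrentTCB->uxCriticalNesting--;            /* leave: lower the nesting level        */
    if( pxCurrentTCB->uxCriticalNesting == 0 )
    {
        portENABLE_INTERRUPTS();                  /* only the outermost exit re-enables    */
    }
}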
@@ -905,7 +905,7 @@ static void prvYieldForTask( TCB_t * pxTCB,
// opened predicate `coreLocalInterruptInv_p()`
[1/2]pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB0) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
// coreLocalSeg_TCB_p(gCurrentTCB0, 0)
// TCB_criticalNesting_p(gCurrentTCB0, 0)
&*&
// read access to current task's stack pointer, etc
// TCB_stack_p(gCurrentTCB0, ?ulFreeBytesOnStack);
@@ -923,7 +923,7 @@ static void prvYieldForTask( TCB_t * pxTCB,
// opened predicate `coreLocalInterruptInv_p()`
[1/2]pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
// coreLocalSeg_TCB_p(gCurrentTCB, 0)
// TCB_criticalNesting_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
// TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack);
@@ -4384,7 +4384,7 @@ void vTaskSwitchContext( BaseType_t xCoreID )
// opened predicate `coreLocalInterruptInv_p()`
pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(gCurrentTCB, 0)
TCB_criticalNesting_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
TCB_stack_p(gCurrentTCB, ?ulFreeBytesOnStack);
@@ -4399,7 +4399,7 @@ void vTaskSwitchContext( BaseType_t xCoreID )
// opened predicate `coreLocalInterruptInv_p()`
pointer(&pxCurrentTCBs[coreID_f], ?gNewCurrentTCB) &*&
integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*&
coreLocalSeg_TCB_p(gCurrentTCB, 0)
TCB_criticalNesting_p(gCurrentTCB, 0)
&*&
// read access to current task's stack pointer, etc
TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack);
@@ -4432,10 +4432,10 @@ void vTaskSwitchContext( BaseType_t xCoreID )
// TODO: Inspect reason.
TaskHandle_t currentHandle = pxCurrentTCB;
//@ assert( currentHandle == gCurrentTCB );
//@ open coreLocalSeg_TCB_p(gCurrentTCB, 0);
//@ open TCB_criticalNesting_p(gCurrentTCB, 0);
UBaseType_t nesting = currentHandle->uxCriticalNesting;
configASSERT( nesting == 0 );
//@ close coreLocalSeg_TCB_p(gCurrentTCB, 0);
//@ close TCB_criticalNesting_p(gCurrentTCB, 0);
}
#else
configASSERT( pxCurrentTCB->uxCriticalNesting == 0 );
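The `open`/`close` pair around the field read in the hunk above follows the usual VeriFast pattern (general tool behaviour, not something introduced by this commit): the points-to chunk `tcb->uxCriticalNesting |-> _` is hidden inside `TCB_criticalNesting_p`, so the predicate must be opened before the C code may read the field and closed again afterwards to restore the symbolic heap. In the abstract, with a hypothetical `tcb`:

//@ open TCB_criticalNesting_p(tcb, ?nesting);    // expose tcb->uxCriticalNesting |-> nesting
UBaseType_t uxLevel = tcb->uxCriticalNesting;     // plain C read of the field is now justified
//@ close TCB_criticalNesting_p(tcb, nesting);    // re-pack the chunk into the predicate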
@@ -5925,11 +5925,8 @@ void vTaskYieldWithinAPI( void )
#if ( portCRITICAL_NESTING_IN_TCB == 1 )

void vTaskEnterCritical( void )
///@ requires interruptState_p(?coreID, _) &*& unprotectedGlobalVars();
///@ ensures false;
{
portDISABLE_INTERRUPTS();
//@ open unprotectedGlobalVars();

if( xSchedulerRunning != pdFALSE )
{