Add uncrustify github workflow (#659)

* Add uncrustify github workflow

* Fix exclusion pattern

* fix find expression

* exclude uncrustify files

* Uncrustify common demo and test files

* exclude whitespace checking files

* Fix EOL whitespace checker

* Remove whitespaces from EOL

* Fix space at EOL

* Fix find spaces at EOL

Co-authored-by: Archit Aggarwal <architag@amazon.com>
alfred gedeon 2021-07-22 14:23:48 -07:00 committed by GitHub
parent dd80d615b5
commit ae92d8c6ee
191 changed files with 17540 additions and 17102 deletions


@@ -30,14 +30,14 @@
#define VERIFAST
#include <stdlib.h>
#include <stdint.h>
//@#include "common.gh"
/*@#include "common.gh" */
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
#define pdTRUE 1
#define pdFALSE 0
#define pdTRUE 1
#define pdFALSE 0
/* Empty/no-op macros */
#define mtCOVERAGE_TEST_MARKER()
@@ -50,323 +50,327 @@ typedef ssize_t BaseType_t;
#define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxListItem )
/* Max value stored in sentinel xListEnd element */
#define portMAX_DELAY UINT_MAX
#define portMAX_DELAY UINT_MAX
struct xLIST;
struct xLIST_ITEM {
TickType_t xItemValue;
struct xLIST_ITEM * pxNext;
struct xLIST_ITEM * pxPrevious;
void * pvOwner;
struct xLIST *pxContainer;
struct xLIST_ITEM
{
TickType_t xItemValue;
struct xLIST_ITEM * pxNext;
struct xLIST_ITEM * pxPrevious;
void * pvOwner;
struct xLIST * pxContainer;
};
typedef struct xLIST_ITEM ListItem_t;
typedef struct xLIST {
UBaseType_t uxNumberOfItems;
struct xLIST_ITEM *pxIndex;
#ifdef VERIFAST /*< ***change MiniList_t to ListItem_t*** */
struct xLIST_ITEM xListEnd;
#else
MiniListItem_t xListEnd;
#endif
typedef struct xLIST
{
UBaseType_t uxNumberOfItems;
struct xLIST_ITEM * pxIndex;
#ifdef VERIFAST /*< ***change MiniList_t to ListItem_t*** */
struct xLIST_ITEM xListEnd;
#else
MiniListItem_t xListEnd;
#endif
} List_t;
/*@
predicate xLIST_ITEM(
struct xLIST_ITEM *n,
TickType_t xItemValue,
struct xLIST_ITEM *pxNext,
struct xLIST_ITEM *pxPrevious,
struct xLIST *pxContainer;) =
n->xItemValue |-> xItemValue &*&
n->pxNext |-> pxNext &*&
n->pxPrevious |-> pxPrevious &*&
n->pvOwner |-> _ &*&
n->pxContainer |-> pxContainer;
@*/
* predicate xLIST_ITEM(
* struct xLIST_ITEM *n,
* TickType_t xItemValue,
* struct xLIST_ITEM *pxNext,
* struct xLIST_ITEM *pxPrevious,
* struct xLIST *pxContainer;) =
* n->xItemValue |-> xItemValue &*&
* n->pxNext |-> pxNext &*&
* n->pxPrevious |-> pxPrevious &*&
* n->pvOwner |-> _ &*&
* n->pxContainer |-> pxContainer;
* @*/
/* Ferreira et al. (STTT'14) doubly-linked list segment (DLS). */
/*@
predicate DLS(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells,
list<TickType_t > vals,
struct xLIST *pxContainer) =
n == m
? cells == cons(n, nil) &*&
vals == cons(?v, nil) &*&
xLIST_ITEM(n, v, mnext, nprev, pxContainer)
: cells == cons(n, ?cells0) &*&
vals == cons(?v, ?vals0) &*&
xLIST_ITEM(n, v, ?o, nprev, pxContainer) &*& DLS(o, n, mnext, m, cells0, vals0, pxContainer);
lemma void dls_star_item(
struct xLIST_ITEM *n,
struct xLIST_ITEM *m,
struct xLIST_ITEM *o)
requires DLS(n, ?nprev, ?mnext, m, ?cells, ?vals, ?l) &*& xLIST_ITEM(o, ?v, ?onext, ?oprev, ?l2);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& xLIST_ITEM(o, v, onext, oprev, l2) &*& mem(o, cells) == false;
{
open DLS(n, nprev, mnext, m, cells, vals, l);
if (n == m) {
assert xLIST_ITEM(n, _, _, _, _);
open xLIST_ITEM(n, _, _, _, _);
open xLIST_ITEM(o, _, _, _, _);
assert n != o;
close xLIST_ITEM(o, _, _, _, _);
close xLIST_ITEM(n, _, _, _, _);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
else {
assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
dls_star_item(nnext, m, o);
open xLIST_ITEM(n, _, _, _, _);
open xLIST_ITEM(o, _, _, _, _);
assert n != o;
close xLIST_ITEM(o, _, _, _, _);
close xLIST_ITEM(n, _, _, _, _);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
}
lemma void dls_distinct(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells)
requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& distinct(cells) == true;
{
if (n == m) {
open DLS(n, nprev, mnext, m, cells, vals, l);
close DLS(n, nprev, mnext, m, cells, vals, l);
} else {
open DLS(n, nprev, mnext, m, cells, vals, l);
assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
dls_distinct(nnext, n, mnext, m, tail(cells));
dls_star_item(nnext, m, n);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
}
predicate xLIST(
struct xLIST *l,
int uxNumberOfItems,
struct xLIST_ITEM *pxIndex,
struct xLIST_ITEM *xListEnd,
list<struct xLIST_ITEM *>cells,
list<TickType_t >vals) =
l->uxNumberOfItems |-> uxNumberOfItems &*&
l->pxIndex |-> pxIndex &*&
mem(pxIndex, cells) == true &*&
xListEnd == &(l->xListEnd) &*&
xListEnd == head(cells) &*&
portMAX_DELAY == head(vals) &*&
struct_xLIST_ITEM_padding(&l->xListEnd) &*&
length(cells) == length(vals) &*&
uxNumberOfItems + 1 == length(cells) &*&
DLS(xListEnd, ?endprev, xListEnd, endprev, cells, vals, l);
lemma void xLIST_distinct_cells(struct xLIST *l)
requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals);
ensures xLIST(l, n, idx, end, cells, vals) &*& distinct(cells) == true;
{
open xLIST(l, n, idx, end, cells, vals);
assert DLS(end, ?endprev, end, _, cells, vals, l);
dls_distinct(end, endprev, end, endprev, cells);
close xLIST(l, n, idx, end, cells, vals);
}
lemma void xLIST_star_item(struct xLIST *l, struct xLIST_ITEM *x)
requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals) &*& xLIST_ITEM(x, ?v, ?xnext, ?xprev, ?l2);
ensures xLIST(l, n, idx, end, cells, vals) &*& xLIST_ITEM(x, v, xnext, xprev, l2) &*& mem(x, cells) == false;
{
open xLIST(l, n, idx, end, cells, vals);
assert DLS(end, ?endprev, end, _, cells, vals, l);
dls_distinct(end, endprev, end, endprev, cells);
dls_star_item(end, endprev, x);
close xLIST(l, n, idx, end, cells, vals);
}
lemma void dls_first_mem(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells)
requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(n, cells) == true &*& index_of(n, cells) == 0;
{
open DLS(n, nprev, mnext, m, cells, vals, l);
if (n == m) {
assert cells == cons(n, nil);
close DLS(n, nprev, mnext, m, cells, vals, l);
} else {
assert cells == cons(n, ?tail);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
}
lemma void dls_not_empty(
struct xLIST_ITEM *n,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells,
struct xLIST_ITEM *x)
requires DLS(n, m, n, m, cells, ?vals, ?l) &*& mem(x, cells) == true &*& x != n;
ensures DLS(n, m, n, m, cells, vals, l) &*& n != m;
{
open DLS(n, m, n, m, cells, vals, l);
close DLS(n, m, n, m, cells, vals, l);
}
lemma void dls_last_mem(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells)
requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(m, cells) == true &*& index_of(m, cells) == length(cells) - 1;
{
open DLS(n, nprev, mnext, m, cells, vals, l);
if (n == m) {
// trivial
} else {
open xLIST_ITEM(n, _, ?nnext, _, l);
assert DLS(?o, n, mnext, m, tail(cells), tail(vals), l);
dls_last_mem(o, n, mnext, m, tail(cells));
close xLIST_ITEM(n, _, nnext, _, l);
}
close DLS(n, nprev, mnext, m, cells, vals, l);
}
lemma void split(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells,
list<TickType_t > vals,
struct xLIST_ITEM *x,
int i)
requires DLS(n, nprev, mnext, m, cells, vals, ?l) &*& x != n &*& mem(x, cells) == true &*& index_of(x,cells) == i;
ensures DLS(n, nprev, x, ?xprev, take(i, cells), take(i, vals), l) &*& DLS(x, xprev, mnext, m, drop(i, cells), drop(i, vals), l) &*& xprev == nth(i-1, cells);
{
open DLS(n, nprev, mnext, m, cells, vals, l);
assert n != m;
assert xLIST_ITEM(n, ?v, ?nnext, _, _);
assert DLS(nnext, n, mnext, m, tail(cells), tail(vals), l);
if (nnext == x) {
close DLS(n, nprev, x, n, singleton(n), singleton(v), l);
open DLS(x, n, mnext, m, tail(cells), tail(vals), l);
open xLIST_ITEM(x, _, ?xnext, ?xprev, l);
close xLIST_ITEM(x, _, xnext, xprev, l);
close DLS(x, n, mnext, m, tail(cells), tail(vals), l);
} else {
assert nnext != x;
split(nnext, n, mnext, m, tail(cells), tail(vals), x, i - 1);
assert DLS(nnext, n, x, ?xprev, take(i-1, tail(cells)), take(i-1, tail(vals)), l);
dls_distinct(nnext, n, x, xprev, take(i-1, tail(cells)));
dls_star_item(nnext, xprev, n);
dls_last_mem(nnext, n, x, xprev, take(i-1, tail(cells)));
assert n != xprev;
close DLS(n, nprev, x, xprev, take(i, cells), take(i, vals), l);
}
}
lemma void join(
struct xLIST_ITEM *n1,
struct xLIST_ITEM *nprev1,
struct xLIST_ITEM *mnext1,
struct xLIST_ITEM *m1,
list<struct xLIST_ITEM * > cells1,
list<TickType_t > vals1,
struct xLIST_ITEM *n2,
struct xLIST_ITEM *nprev2,
struct xLIST_ITEM *mnext2,
struct xLIST_ITEM *m2,
list<struct xLIST_ITEM * > cells2,
list<TickType_t > vals2)
requires
DLS(n1, nprev1, mnext1, m1, cells1, vals1, ?l) &*&
DLS(n2, nprev2, mnext2, m2, cells2, vals2, l) &*&
mnext1 == n2 &*& m1 == nprev2;
ensures DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
{
if (n1 == m1) {
dls_first_mem(n1, nprev1, mnext1, m1, cells1);
dls_last_mem(n2, nprev2, mnext2, m2, cells2);
open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
dls_star_item(n2, m2, n1);
close DLS(n1, nprev1, mnext2, m2, append(singleton(n1), cells2), append(vals1, vals2), l);
} else {
open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
assert DLS(?o, n1, mnext1, m1, ?cells1_tail, ?vals1_tail, l);
join(o, n1, mnext1, m1, cells1_tail, vals1_tail,
n2, nprev2, mnext2, m2, cells2, vals2);
assert DLS(o, n1, mnext2, m2, append(cells1_tail, cells2), append(vals1_tail, vals2), l);
dls_last_mem(o, n1, mnext2, m2, append(cells1_tail, cells2));
dls_star_item(o, m2, n1);
close DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
}
}
lemma void idx_remains_in_list<t>(
list<t> cells,
t idx,
t x,
int ix)
requires
idx != x &*&
mem(idx, cells) == true &*&
mem(x, cells) == true &*&
index_of(x, cells) == ix;
ensures mem(idx, remove_nth(ix, cells)) == true;
{
neq_mem_remove(idx, x, cells);
remove_remove_nth(cells, x);
}
@*/
* predicate DLS(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells,
* list<TickType_t > vals,
* struct xLIST *pxContainer) =
* n == m
* ? cells == cons(n, nil) &*&
* vals == cons(?v, nil) &*&
* xLIST_ITEM(n, v, mnext, nprev, pxContainer)
* : cells == cons(n, ?cells0) &*&
* vals == cons(?v, ?vals0) &*&
* xLIST_ITEM(n, v, ?o, nprev, pxContainer) &*& DLS(o, n, mnext, m, cells0, vals0, pxContainer);
*
* lemma void dls_star_item(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *m,
* struct xLIST_ITEM *o)
* requires DLS(n, ?nprev, ?mnext, m, ?cells, ?vals, ?l) &*& xLIST_ITEM(o, ?v, ?onext, ?oprev, ?l2);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& xLIST_ITEM(o, v, onext, oprev, l2) &*& mem(o, cells) == false;
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* if (n == m) {
* assert xLIST_ITEM(n, _, _, _, _);
* open xLIST_ITEM(n, _, _, _, _);
* open xLIST_ITEM(o, _, _, _, _);
* assert n != o;
* close xLIST_ITEM(o, _, _, _, _);
* close xLIST_ITEM(n, _, _, _, _);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* else {
* assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
* dls_star_item(nnext, m, o);
* open xLIST_ITEM(n, _, _, _, _);
* open xLIST_ITEM(o, _, _, _, _);
* assert n != o;
* close xLIST_ITEM(o, _, _, _, _);
* close xLIST_ITEM(n, _, _, _, _);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* }
*
* lemma void dls_distinct(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells)
* requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& distinct(cells) == true;
* {
* if (n == m) {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* } else {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
* dls_distinct(nnext, n, mnext, m, tail(cells));
* dls_star_item(nnext, m, n);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* }
*
* predicate xLIST(
* struct xLIST *l,
* int uxNumberOfItems,
* struct xLIST_ITEM *pxIndex,
* struct xLIST_ITEM *xListEnd,
* list<struct xLIST_ITEM *>cells,
* list<TickType_t >vals) =
* l->uxNumberOfItems |-> uxNumberOfItems &*&
* l->pxIndex |-> pxIndex &*&
* mem(pxIndex, cells) == true &*&
* xListEnd == &(l->xListEnd) &*&
* xListEnd == head(cells) &*&
* portMAX_DELAY == head(vals) &*&
* struct_xLIST_ITEM_padding(&l->xListEnd) &*&
* length(cells) == length(vals) &*&
* uxNumberOfItems + 1 == length(cells) &*&
* DLS(xListEnd, ?endprev, xListEnd, endprev, cells, vals, l);
*
* lemma void xLIST_distinct_cells(struct xLIST *l)
* requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals);
* ensures xLIST(l, n, idx, end, cells, vals) &*& distinct(cells) == true;
* {
* open xLIST(l, n, idx, end, cells, vals);
* assert DLS(end, ?endprev, end, _, cells, vals, l);
* dls_distinct(end, endprev, end, endprev, cells);
* close xLIST(l, n, idx, end, cells, vals);
* }
*
* lemma void xLIST_star_item(struct xLIST *l, struct xLIST_ITEM *x)
* requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals) &*& xLIST_ITEM(x, ?v, ?xnext, ?xprev, ?l2);
* ensures xLIST(l, n, idx, end, cells, vals) &*& xLIST_ITEM(x, v, xnext, xprev, l2) &*& mem(x, cells) == false;
* {
* open xLIST(l, n, idx, end, cells, vals);
* assert DLS(end, ?endprev, end, _, cells, vals, l);
* dls_distinct(end, endprev, end, endprev, cells);
* dls_star_item(end, endprev, x);
* close xLIST(l, n, idx, end, cells, vals);
* }
*
* lemma void dls_first_mem(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells)
* requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(n, cells) == true &*& index_of(n, cells) == 0;
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* if (n == m) {
* assert cells == cons(n, nil);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* } else {
* assert cells == cons(n, ?tail);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* }
*
* lemma void dls_not_empty(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells,
* struct xLIST_ITEM *x)
* requires DLS(n, m, n, m, cells, ?vals, ?l) &*& mem(x, cells) == true &*& x != n;
* ensures DLS(n, m, n, m, cells, vals, l) &*& n != m;
* {
* open DLS(n, m, n, m, cells, vals, l);
* close DLS(n, m, n, m, cells, vals, l);
* }
*
* lemma void dls_last_mem(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells)
* requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(m, cells) == true &*& index_of(m, cells) == length(cells) - 1;
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* if (n == m) {
* // trivial
* } else {
* open xLIST_ITEM(n, _, ?nnext, _, l);
* assert DLS(?o, n, mnext, m, tail(cells), tail(vals), l);
* dls_last_mem(o, n, mnext, m, tail(cells));
* close xLIST_ITEM(n, _, nnext, _, l);
* }
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
*
* lemma void split(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells,
* list<TickType_t > vals,
* struct xLIST_ITEM *x,
* int i)
* requires DLS(n, nprev, mnext, m, cells, vals, ?l) &*& x != n &*& mem(x, cells) == true &*& index_of(x,cells) == i;
* ensures DLS(n, nprev, x, ?xprev, take(i, cells), take(i, vals), l) &*& DLS(x, xprev, mnext, m, drop(i, cells), drop(i, vals), l) &*& xprev == nth(i-1, cells);
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* assert n != m;
* assert xLIST_ITEM(n, ?v, ?nnext, _, _);
* assert DLS(nnext, n, mnext, m, tail(cells), tail(vals), l);
* if (nnext == x) {
* close DLS(n, nprev, x, n, singleton(n), singleton(v), l);
* open DLS(x, n, mnext, m, tail(cells), tail(vals), l);
* open xLIST_ITEM(x, _, ?xnext, ?xprev, l);
* close xLIST_ITEM(x, _, xnext, xprev, l);
* close DLS(x, n, mnext, m, tail(cells), tail(vals), l);
* } else {
* assert nnext != x;
* split(nnext, n, mnext, m, tail(cells), tail(vals), x, i - 1);
* assert DLS(nnext, n, x, ?xprev, take(i-1, tail(cells)), take(i-1, tail(vals)), l);
* dls_distinct(nnext, n, x, xprev, take(i-1, tail(cells)));
* dls_star_item(nnext, xprev, n);
* dls_last_mem(nnext, n, x, xprev, take(i-1, tail(cells)));
* assert n != xprev;
* close DLS(n, nprev, x, xprev, take(i, cells), take(i, vals), l);
* }
* }
*
* lemma void join(
* struct xLIST_ITEM *n1,
* struct xLIST_ITEM *nprev1,
* struct xLIST_ITEM *mnext1,
* struct xLIST_ITEM *m1,
* list<struct xLIST_ITEM * > cells1,
* list<TickType_t > vals1,
* struct xLIST_ITEM *n2,
* struct xLIST_ITEM *nprev2,
* struct xLIST_ITEM *mnext2,
* struct xLIST_ITEM *m2,
* list<struct xLIST_ITEM * > cells2,
* list<TickType_t > vals2)
* requires
* DLS(n1, nprev1, mnext1, m1, cells1, vals1, ?l) &*&
* DLS(n2, nprev2, mnext2, m2, cells2, vals2, l) &*&
* mnext1 == n2 &*& m1 == nprev2;
* ensures DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
* {
* if (n1 == m1) {
* dls_first_mem(n1, nprev1, mnext1, m1, cells1);
* dls_last_mem(n2, nprev2, mnext2, m2, cells2);
* open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
* dls_star_item(n2, m2, n1);
* close DLS(n1, nprev1, mnext2, m2, append(singleton(n1), cells2), append(vals1, vals2), l);
* } else {
* open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
* assert DLS(?o, n1, mnext1, m1, ?cells1_tail, ?vals1_tail, l);
* join(o, n1, mnext1, m1, cells1_tail, vals1_tail,
* n2, nprev2, mnext2, m2, cells2, vals2);
* assert DLS(o, n1, mnext2, m2, append(cells1_tail, cells2), append(vals1_tail, vals2), l);
* dls_last_mem(o, n1, mnext2, m2, append(cells1_tail, cells2));
* dls_star_item(o, m2, n1);
* close DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
* }
* }
*
* lemma void idx_remains_in_list<t>(
* list<t> cells,
* t idx,
* t x,
* int ix)
* requires
* idx != x &*&
* mem(idx, cells) == true &*&
* mem(x, cells) == true &*&
* index_of(x, cells) == ix;
* ensures mem(idx, remove_nth(ix, cells)) == true;
* {
* neq_mem_remove(idx, x, cells);
* remove_remove_nth(cells, x);
* }
* @*/
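A minimal worked example of how the DLS predicate above unfolds, using a hypothetical two-node cyclic list (sentinel `end` linked to a single item `a`); the names `end`, `a`, `v` and the container `l` are illustrative only and not taken from this change:
/* Illustrative unfolding only:
 *   DLS(end, a, end, a, cons(end, cons(a, nil)), cons(portMAX_DELAY, cons(v, nil)), l)
 * takes the recursive branch (end != a) and yields
 *   xLIST_ITEM(end, portMAX_DELAY, a, a, l) &*&
 *   DLS(a, end, end, a, cons(a, nil), cons(v, nil), l)
 * and the remaining singleton segment takes the base branch (n == m), leaving
 *   xLIST_ITEM(a, v, end, end, l)
 * so the two nodes point at each other in both directions, and the head of
 * vals carries portMAX_DELAY as the xLIST predicate requires of the sentinel. */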
/* Following lemma from `verifast/examples/shared_boxes/concurrentqueue.c`.
Used in the uxListRemove proof to show that the item to remove `x` must
have value `nth(i, vals)` where `i == index_of(x, cells)`. */
/*@
lemma void drop_nth_index_of<t>(list<t> vs, int i)
requires
0 <= i && i < length(vs);
ensures
head(drop(i , vs)) == nth(i, vs);
{
switch(vs) {
case nil:
case cons(h, t):
if (i == 0) {
// trivial
} else {
drop_nth_index_of(t, i - 1);
}
}
}
@*/
* Used in the uxListRemove proof to show that the item to remove `x` must
* have value `nth(i, vals)` where `i == index_of(x, cells)`. */
/*@
lemma void remove_append<t>(t x, list<t> l1, list<t> l2)
requires mem(x, l1) == false;
ensures remove(x, append(l1, l2)) == append(l1, remove(x, l2));
{
switch(l1) {
case nil:
case cons(h1, t1):
remove_append(x, t1, l2);
}
}
@*/
* lemma void drop_nth_index_of<t>(list<t> vs, int i)
* requires
* 0 <= i && i < length(vs);
* ensures
* head(drop(i , vs)) == nth(i, vs);
* {
* switch(vs) {
* case nil:
* case cons(h, t):
* if (i == 0) {
* // trivial
* } else {
* drop_nth_index_of(t, i - 1);
* }
* }
* }
* @*/
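A small concrete instance of drop_nth_index_of, with illustrative values not taken from this change:
/* Example instance: with vs == cons(a, cons(b, cons(c, nil))) and i == 1,
 * drop(1, vs) == cons(b, cons(c, nil)), so head(drop(1, vs)) == b == nth(1, vs).
 * In uxListRemove the lemma is applied with i == index_of(x, cells), so the
 * value stored at the removed cell x is head(drop(i, vals)) == nth(i, vals). */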
/*@
* lemma void remove_append<t>(t x, list<t> l1, list<t> l2)
* requires mem(x, l1) == false;
* ensures remove(x, append(l1, l2)) == append(l1, remove(x, l2));
* {
* switch(l1) {
* case nil:
* case cons(h1, t1):
* remove_append(x, t1, l2);
* }
* }
* @*/
#endif /* LIST_H */


@@ -34,51 +34,51 @@
#include <threading.h>
/*@#include "common.gh"@*/
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
/* Empty/no-op macros */
/* Tracing */
#define traceBLOCKING_ON_QUEUE_PEEK(x)
#define traceBLOCKING_ON_QUEUE_RECEIVE(x)
#define traceBLOCKING_ON_QUEUE_SEND(x)
#define traceQUEUE_CREATE(x)
#define traceQUEUE_CREATE_FAILED(x)
#define traceQUEUE_DELETE(x)
#define traceQUEUE_PEEK(x)
#define traceQUEUE_PEEK_FAILED(x)
#define traceQUEUE_PEEK_FROM_ISR(x)
#define traceQUEUE_PEEK_FROM_ISR_FAILED(x)
#define traceQUEUE_RECEIVE(x)
#define traceQUEUE_RECEIVE_FAILED(x)
#define traceQUEUE_RECEIVE_FROM_ISR(x)
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED(x)
#define traceQUEUE_SEND(x)
#define traceQUEUE_SEND_FAILED(x)
#define traceQUEUE_SEND_FROM_ISR(x)
#define traceQUEUE_SEND_FROM_ISR_FAILED(x)
#define traceBLOCKING_ON_QUEUE_PEEK( x )
#define traceBLOCKING_ON_QUEUE_RECEIVE( x )
#define traceBLOCKING_ON_QUEUE_SEND( x )
#define traceQUEUE_CREATE( x )
#define traceQUEUE_CREATE_FAILED( x )
#define traceQUEUE_DELETE( x )
#define traceQUEUE_PEEK( x )
#define traceQUEUE_PEEK_FAILED( x )
#define traceQUEUE_PEEK_FROM_ISR( x )
#define traceQUEUE_PEEK_FROM_ISR_FAILED( x )
#define traceQUEUE_RECEIVE( x )
#define traceQUEUE_RECEIVE_FAILED( x )
#define traceQUEUE_RECEIVE_FROM_ISR( x )
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( x )
#define traceQUEUE_SEND( x )
#define traceQUEUE_SEND_FAILED( x )
#define traceQUEUE_SEND_FROM_ISR( x )
#define traceQUEUE_SEND_FROM_ISR_FAILED( x )
/* Coverage */
#define mtCOVERAGE_TEST_MARKER()
/* Asserts */
#define configASSERT(x)
#define configASSERT( x )
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
/* Map portable memory management functions */
#define pvPortMalloc malloc
#define vPortFree free
#define pvPortMalloc malloc
#define vPortFree free
#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 )
#define queueOVERWRITE ( ( BaseType_t ) 2 )
#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 )
#define queueOVERWRITE ( ( BaseType_t ) 2 )
#define pdTRUE 1
#define pdFALSE 0
#define pdTRUE 1
#define pdFALSE 0
#define pdPASS pdTRUE
#define pdFAIL pdFALSE
#define errQUEUE_FULL 0
#define errQUEUE_EMPTY 0
#define pdPASS pdTRUE
#define pdFAIL pdFALSE
#define errQUEUE_FULL 0
#define errQUEUE_EMPTY 0
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED ( ( int8_t ) -1 )
@@ -93,26 +93,28 @@ typedef struct QueuePointers
typedef struct SemaphoreData
{
#ifdef VERIFAST /*< do not model xMutexHolder */
void *xMutexHolder;
#else
TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
#endif
#ifdef VERIFAST /*< do not model xMutexHolder */
void * xMutexHolder;
#else
TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
#endif
UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* VeriFast does not support unions so we replace with a struct */
struct fake_union_t {
struct fake_union_t
{
QueuePointers_t xQueue;
SemaphoreData_t xSemaphore;
};
typedef struct xLIST {
typedef struct xLIST
{
UBaseType_t uxNumberOfItems;
#ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
struct xLIST_ITEM *pxIndex;
MiniListItem_t xListEnd;
#endif
#ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
struct xLIST_ITEM * pxIndex;
MiniListItem_t xListEnd;
#endif
} List_t;
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
@@ -120,15 +122,15 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
int8_t * pcHead; /*< Points to the beginning of the queue storage area. */
int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */
#ifdef VERIFAST /*< VeriFast does not model unions */
struct fake_union_t u;
#else
union
{
QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
} u;
#endif
#ifdef VERIFAST /*< VeriFast does not model unions */
struct fake_union_t u;
#else
union
{
QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
} u;
#endif
List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
@@ -158,271 +160,278 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
/*@struct mutex *locked;@*/ /*< Ghost mutex simulates the effect of queue locking */
} xQUEUE;
typedef xQUEUE Queue_t;
typedef xQUEUE Queue_t;
typedef struct QueueDefinition * QueueHandle_t;
typedef struct QueueDefinition * QueueHandle_t;
/*@
#define QUEUE_SHAPE(q, Storage, N, M, K) \
malloc_block_QueueDefinition(q) &*& \
q->pcHead |-> Storage &*& \
q->pcWriteTo |-> ?WPtr &*& \
q->u.xQueue.pcTail |-> ?End &*& \
q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
q->uxItemSize |-> M &*& \
q->uxLength |-> N &*& \
q->uxMessagesWaiting |-> K &*& \
q->cRxLock |-> ?rxLock &*& \
q->cTxLock |-> ?txLock &*& \
struct_QueuePointers_padding(&q->u.xQueue) &*& \
struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
struct_fake_union_t_padding(&q->u) &*& \
struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
q->u.xSemaphore.xMutexHolder |-> _ &*& \
q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
true
predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*&
0 < M &*&
0 <= W &*& W < N &*&
0 <= R &*& R < N &*&
0 <= K &*& K <= N &*&
W == (R + 1 + K) % N &*&
(-1) <= rxLock &*&
(-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*&
abs == take(K, rotate_left((R+1)%N, contents)) &*&
malloc_block(Storage, N*M) &*&
true
;
@*/
#define QUEUE_SHAPE(q, Storage, N, M, K) \
* malloc_block_QueueDefinition(q) &*& \
* q->pcHead |-> Storage &*& \
* q->pcWriteTo |-> ?WPtr &*& \
* q->u.xQueue.pcTail |-> ?End &*& \
* q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
* q->uxItemSize |-> M &*& \
* q->uxLength |-> N &*& \
* q->uxMessagesWaiting |-> K &*& \
* q->cRxLock |-> ?rxLock &*& \
* q->cTxLock |-> ?txLock &*& \
* struct_QueuePointers_padding(&q->u.xQueue) &*& \
* struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
* struct_fake_union_t_padding(&q->u) &*& \
* struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
* struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
* q->u.xSemaphore.xMutexHolder |-> _ &*& \
* q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
* true
*
* predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
* QUEUE_SHAPE(q, Storage, N, M, K) &*&
* 0 < N &*&
* 0 < M &*&
* 0 <= W &*& W < N &*&
* 0 <= R &*& R < N &*&
* 0 <= K &*& K <= N &*&
* W == (R + 1 + K) % N &*&
* (-1) <= rxLock &*&
* (-1) <= txLock &*&
* (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
* (is_locked ? 0 <= txLock : (-1) == txLock) &*&
* WPtr == Storage + (W*M) &*&
* RPtr == Storage + (R*M) &*&
* End == Storage + (N*M) &*&
* buffer(Storage, N, M, ?contents) &*&
* length(contents) == N &*&
* abs == take(K, rotate_left((R+1)%N, contents)) &*&
* malloc_block(Storage, N*M) &*&
* true
* ;
* @*/
/* A buffer allows us to interpret a flat character array of `N*M` bytes as a
list of `N` elements where each element is `M` bytes */
/*@
predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
N == 0
? elements == nil
: chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
* list of `N` elements where each element is `M` bytes */
lemma void buffer_length(char *buffer, size_t N, size_t M)
requires buffer(buffer, N, M, ?elements);
ensures buffer(buffer, N, M, elements) &*& length(elements) == N;
{
if (N == 0) {
open buffer(buffer, N, M, elements);
close buffer(buffer, N, M, elements);
} else {
open buffer(buffer, N, M, elements);
buffer_length(buffer+M, N-1, M);
}
}
@*/
/*@
* predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
* N == 0
* ? elements == nil
* : chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
*
* lemma void buffer_length(char *buffer, size_t N, size_t M)
* requires buffer(buffer, N, M, ?elements);
* ensures buffer(buffer, N, M, elements) &*& length(elements) == N;
* {
* if (N == 0) {
* open buffer(buffer, N, M, elements);
* close buffer(buffer, N, M, elements);
* } else {
* open buffer(buffer, N, M, elements);
* buffer_length(buffer+M, N-1, M);
* }
* }
* @*/
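A short C sketch of the indexing convention the buffer predicate captures; `element_at` is a hypothetical helper introduced here only for illustration:
static int8_t * element_at( int8_t * pcStorage,
                            size_t uxItemSize,
                            size_t i )
{
    /* Under buffer(pcStorage, N, M, elements), element i occupies the M bytes
     * starting at byte offset i * M; this is the same arithmetic the queue
     * predicate uses to relate WPtr and RPtr to the W and R indices. */
    return pcStorage + ( i * uxItemSize );
}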
/*
There is no need in the queue proofs to preserve a relationship between `cs`
and `elements` (i.e., `flatten(elements) == cs`) because we only move in one
direction from `cs` to `elements` during queue creation when the contents is
fresh from `malloc` (i.e., uninitialized). If we needed to do a roundtrip from
elements back to cs then this would require a stronger lemma.
*/
* There is no need in the queue proofs to preserve a relationship between `cs`
* and `elements` (i.e., `flatten(elements) == cs`) because we only move in one
* direction from `cs` to `elements` during queue creation when the contents is
* fresh from `malloc` (i.e., uninitialized). If we needed to do a roundtrip from
* elements back to cs then this would require a stronger lemma.
*/
/*@
lemma void buffer_from_chars(char *buffer, size_t N, size_t M)
requires chars(buffer, N*M, ?cs) &*& 0 <= N &*& 0 < M;
ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
{
if (N == 0) {
close exists(nil);
} else {
int i = 0;
while (i < N)
invariant 0 <= i &*& i <= N &*&
chars(buffer, (N-i)*M, ?xs) &*& xs == take((N-i)*M, cs) &*&
buffer(buffer + (N-i)*M, i, M, ?ys);
decreases N-i;
{
mul_mono_l(0, N-i-1, M);
chars_split(buffer, (N-i-1)*M);
mul_mono_l(i, N, M);
mul_mono_l(N-i, N, M);
take_take((N-i-1)*M, (N-i)*M, cs);
i++;
}
close exists(ys);
buffer_length(buffer, N, M);
}
}
lemma void append_buffer(char *buffer, size_t N1, size_t N2, size_t M)
requires
buffer(buffer, N1, M, ?elements1) &*&
buffer(buffer + N1 * M, N2, M, ?elements2) &*&
0 <= N1 &*& 0 <= N2;
ensures buffer(buffer, N1+N2, M, append(elements1, elements2));
{
if (N1 == 0) {
open buffer(buffer, 0, M, _);
} else if (N2 == 0) {
open buffer(buffer + N1 * M, 0, M, _);
} else {
open buffer(buffer, N1, M, elements1);
append_buffer(buffer + M, N1-1, N2, M);
close buffer(buffer, N1+N2, M, cons(?x, append(xs, elements2)));
}
}
lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i)
requires buffer(buffer, N, M, ?elements) &*& 0 <= i &*& i < N;
ensures
buffer(buffer, i, M, take(i, elements)) &*&
chars(buffer + i * M, M, nth(i, elements)) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
{
if (i == 0) {
// straightforward
} else {
buffer_length(buffer, N, M);
int j = 0;
while (j < i)
invariant 0 <= j &*& j <= i &*&
buffer(buffer, j, M, take(j, elements)) &*&
buffer(buffer + j * M, N-j, M, drop(j, elements));
decreases i-j;
{
drop_drop(1, j, elements);
nth_drop2(elements, j);
open buffer(buffer + j * M, N-j, M, drop(j, elements));
assert chars(buffer + j * M, M, ?x) &*& x == nth(j, elements);
close buffer(buffer + j * M, 1, M, singleton(x));
append_buffer(buffer, j, 1, M);
take_plus_one(j, elements);
j++;
}
drop_drop(1, j, elements);
nth_drop2(elements, i);
}
}
lemma void join_element(char *buffer, size_t N, size_t M, size_t i)
requires
0 <= i &*& i < N &*&
buffer(buffer, i, M, ?prefix) &*&
chars(buffer + i * M, M, ?element) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
{
if (i == 0) {
open buffer(buffer, i, M, prefix);
assert prefix == nil;
close buffer(buffer, N, M, cons(element, suffix));
} else {
close buffer(buffer + i * M, N-i, M, cons(element, suffix));
append_buffer(buffer, i, N-i, M);
}
}
predicate list(List_t *l;) =
l->uxNumberOfItems |-> _;
predicate queuelists(QueueHandle_t q;) =
list(&q->xTasksWaitingToSend) &*&
list(&q->xTasksWaitingToReceive);
@*/
* lemma void buffer_from_chars(char *buffer, size_t N, size_t M)
* requires chars(buffer, N*M, ?cs) &*& 0 <= N &*& 0 < M;
* ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
* {
* if (N == 0) {
* close exists(nil);
* } else {
* int i = 0;
* while (i < N)
* invariant 0 <= i &*& i <= N &*&
* chars(buffer, (N-i)*M, ?xs) &*& xs == take((N-i)*M, cs) &*&
* buffer(buffer + (N-i)*M, i, M, ?ys);
* decreases N-i;
* {
* mul_mono_l(0, N-i-1, M);
* chars_split(buffer, (N-i-1)*M);
* mul_mono_l(i, N, M);
* mul_mono_l(N-i, N, M);
* take_take((N-i-1)*M, (N-i)*M, cs);
* i++;
* }
* close exists(ys);
* buffer_length(buffer, N, M);
* }
* }
*
* lemma void append_buffer(char *buffer, size_t N1, size_t N2, size_t M)
* requires
* buffer(buffer, N1, M, ?elements1) &*&
* buffer(buffer + N1 * M, N2, M, ?elements2) &*&
* 0 <= N1 &*& 0 <= N2;
* ensures buffer(buffer, N1+N2, M, append(elements1, elements2));
* {
* if (N1 == 0) {
* open buffer(buffer, 0, M, _);
* } else if (N2 == 0) {
* open buffer(buffer + N1 * M, 0, M, _);
* } else {
* open buffer(buffer, N1, M, elements1);
* append_buffer(buffer + M, N1-1, N2, M);
* close buffer(buffer, N1+N2, M, cons(?x, append(xs, elements2)));
* }
* }
*
* lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i)
* requires buffer(buffer, N, M, ?elements) &*& 0 <= i &*& i < N;
* ensures
* buffer(buffer, i, M, take(i, elements)) &*&
* chars(buffer + i * M, M, nth(i, elements)) &*&
* buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
* {
* if (i == 0) {
* // straightforward
* } else {
* buffer_length(buffer, N, M);
* int j = 0;
* while (j < i)
* invariant 0 <= j &*& j <= i &*&
* buffer(buffer, j, M, take(j, elements)) &*&
* buffer(buffer + j * M, N-j, M, drop(j, elements));
* decreases i-j;
* {
* drop_drop(1, j, elements);
* nth_drop2(elements, j);
* open buffer(buffer + j * M, N-j, M, drop(j, elements));
* assert chars(buffer + j * M, M, ?x) &*& x == nth(j, elements);
* close buffer(buffer + j * M, 1, M, singleton(x));
* append_buffer(buffer, j, 1, M);
* take_plus_one(j, elements);
* j++;
* }
* drop_drop(1, j, elements);
* nth_drop2(elements, i);
* }
* }
*
* lemma void join_element(char *buffer, size_t N, size_t M, size_t i)
* requires
* 0 <= i &*& i < N &*&
* buffer(buffer, i, M, ?prefix) &*&
* chars(buffer + i * M, M, ?element) &*&
* buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
* ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
* {
* if (i == 0) {
* open buffer(buffer, i, M, prefix);
* assert prefix == nil;
* close buffer(buffer, N, M, cons(element, suffix));
* } else {
* close buffer(buffer + i * M, N-i, M, cons(element, suffix));
* append_buffer(buffer, i, N-i, M);
* }
* }
*
* predicate list(List_t *l;) =
* l->uxNumberOfItems |-> _;
*
* predicate queuelists(QueueHandle_t q;) =
* list(&q->xTasksWaitingToSend) &*&
* list(&q->xTasksWaitingToReceive);
* @*/
/* Because prvCopyDataFromQueue does *not* decrement uxMessagesWaiting (K) the
queue predicate above does not hold as a postcondition. If the caller
subsequently decrements K then the queue predicate can be reinstated. */
* queue predicate above does not hold as a postcondition. If the caller
* subsequently decrements K then the queue predicate can be reinstated. */
/*@
predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*&
0 < M &*&
0 <= W &*& W < N &*&
0 <= R &*& R < N &*&
0 <= K &*& K <= N &*&
W == (R + K) % N &*& //< Differs from queue predicate
(-1) <= rxLock &*&
(-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*&
abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
malloc_block(Storage, N*M) &*&
true
;
@*/
* predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
* QUEUE_SHAPE(q, Storage, N, M, K) &*&
* 0 < N &*&
* 0 < M &*&
* 0 <= W &*& W < N &*&
* 0 <= R &*& R < N &*&
* 0 <= K &*& K <= N &*&
* W == (R + K) % N &*& //< Differs from queue predicate
* (-1) <= rxLock &*&
* (-1) <= txLock &*&
* (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
* (is_locked ? 0 <= txLock : (-1) == txLock) &*&
* WPtr == Storage + (W*M) &*&
* RPtr == Storage + (R*M) &*&
* End == Storage + (N*M) &*&
* buffer(Storage, N, M, ?contents) &*&
* length(contents) == N &*&
* abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
* malloc_block(Storage, N*M) &*&
* true
* ;
* @*/
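A caller-side sketch of the point made above (a simplified, hypothetical receive path, not code from this change): decrementing `uxMessagesWaiting` after the copy is what allows the ordinary `queue` predicate to be re-established.
static void vReceiveSketch( Queue_t * const pxQueue,
                            void * const pvBuffer )
{
    /* Copies out the head element and advances the read index; only
     * queue_after_prvCopyDataFromQueue holds here because K is unchanged. */
    prvCopyDataFromQueue( pxQueue, pvBuffer );

    /* Decrementing K restores W == (R + 1 + K) % N for the updated read index,
     * so the proof can close the ordinary queue predicate again. */
    pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( pxQueue->uxMessagesWaiting - 1U );
}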
/* Can't be called `mutex` as this clashes with VeriFast's predicate */
/*@
predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
QUEUE_SHAPE(q, Storage, N, 0, K) &*&
queuelists(q) &*&
0 < N &*&
0 <= K &*& K <= N &*&
(-1) <= rxLock &*&
(-1) <= txLock &*&
WPtr == Storage &*&
RPtr == Storage &*&
End == Storage &*&
malloc_block(Storage, 0) &*&
chars(Storage, 0, _) &*&
true
;
@*/
* predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
* QUEUE_SHAPE(q, Storage, N, 0, K) &*&
* queuelists(q) &*&
* 0 < N &*&
* 0 <= K &*& K <= N &*&
* (-1) <= rxLock &*&
* (-1) <= txLock &*&
* WPtr == Storage &*&
* RPtr == Storage &*&
* End == Storage &*&
* malloc_block(Storage, 0) &*&
* chars(Storage, 0, _) &*&
* true
* ;
* @*/
/* A queuehandle can be shared between tasks and ISRs. Acquiring the ghost
`irqMask` gives access to the core queue resources. The permissions granted
after masking interrupts depends on the caller:
- A task has access to the queue and the queuelists
- An ISR has access to the queue and, if the queue is unlocked, the queuelists */
/*@
predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
* `irqMask` gives access to the core queue resources. The permissions granted
* after masking interrupts depends on the caller:
* - A task has access to the queue and the queuelists
* - An ISR has access to the queue and, if the queue is unlocked, the queuelists */
predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_isr && is_locked ? true : queuelists(queue));
@*/
/*@
* predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
* q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
*
* predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
* queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
* (is_isr && is_locked ? true : queuelists(queue));
* @*/
/* A queuesuspend can be shared between tasks. Acquiring the ghost `schedulerSuspend` gives access to the `locked` mutex. */
/*@
predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
queue->locked |-> ?m &*&
mutex(m, queue_locked_invariant(queue));
predicate queuesuspend(QueueHandle_t q;) =
q->schedulerSuspend |-> ?m &*&
mutex(m, scheduler_suspended_invariant(q));
@*/
/*@
* predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
* queue->locked |-> ?m &*&
* mutex(m, queue_locked_invariant(queue));
*
* predicate queuesuspend(QueueHandle_t q;) =
* q->schedulerSuspend |-> ?m &*&
* mutex(m, scheduler_suspended_invariant(q));
* @*/
/* A queuelock is exclusively acquired by a task. Acquiring the ghost `queuelock` gives access to the queue list resources. */
/*@
predicate queuelock(QueueHandle_t q;) =
q->locked |-> ?m &*&
mutex(m, queue_locked_invariant(q));
* predicate queuelock(QueueHandle_t q;) =
* q->locked |-> ?m &*&
* mutex(m, queue_locked_invariant(q));
*
* predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =
* queuelists(queue);
* @*/
predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =
queuelists(queue);
@*/
BaseType_t vListInitialise(List_t *list);
BaseType_t vListInitialise( List_t * list );
/*@requires list(list);@*/
/*@ensures list(list);@*/
BaseType_t listLIST_IS_EMPTY(List_t *list);
BaseType_t listLIST_IS_EMPTY( List_t * list );
/*@requires list->uxNumberOfItems |-> ?len;@*/
/*@ensures list->uxNumberOfItems |-> len &*& result == (len == 0 ? pdTRUE : pdFALSE);@*/
@@ -433,25 +442,27 @@ typedef struct xTIME_OUT
} TimeOut_t;
/*@
predicate xTIME_OUT(struct xTIME_OUT *to;) =
to->xOverflowCount |-> _ &*&
to->xTimeOnEntering |-> _ &*&
struct_xTIME_OUT_padding(to);
@*/
* predicate xTIME_OUT(struct xTIME_OUT *to;) =
* to->xOverflowCount |-> _ &*&
* to->xTimeOnEntering |-> _ &*&
* struct_xTIME_OUT_padding(to);
* @*/
void vTaskInternalSetTimeOutState( TimeOut_t * x);
void vTaskInternalSetTimeOutState( TimeOut_t * x );
/*@requires xTIME_OUT(x);@*/
/*@ensures xTIME_OUT(x);@*/
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
TickType_t * const pxTicksToWait );
/*@requires xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/
/*@ensures xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/
BaseType_t xTaskRemoveFromEventList(List_t *list);
BaseType_t xTaskRemoveFromEventList( List_t * list );
/*@requires list(list);@*/
/*@ensures list(list);@*/
void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait );
void vTaskPlaceOnEventList( List_t * const pxEventList,
const TickType_t xTicksToWait );
/*@requires list(pxEventList);@*/
/*@ensures list(pxEventList);@*/
@@ -460,94 +471,107 @@ void vTaskMissedYield();
/*@ensures true;@*/
void vTaskSuspendAll();
/*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
* [1/2]xQueue->schedulerSuspend |-> ?m &*&
* [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/
* mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
* xQueue->locked |-> ?m2 &*&
* mutex(m2, queue_locked_invariant(xQueue));@*/
BaseType_t xTaskResumeAll( void );
/*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/
* [1/2]xQueue->schedulerSuspend |-> ?m &*&
* mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
* xQueue->locked |-> ?m2 &*&
* mutex(m2, queue_locked_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
* [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
void prvLockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuelock(xQueue); @*/
* [1/2]queuelock(xQueue); @*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
* [1/2]xQueue->locked |-> ?m &*&
* mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
* queue_locked_invariant(xQueue)();@*/
void prvUnlockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuelock(xQueue);@*/
void setInterruptMask(QueueHandle_t xQueue)
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
* [1/2]xQueue->locked |-> ?m &*&
* mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
* queue_locked_invariant(xQueue)();@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
* [1/2]queuelock(xQueue);@*/
void setInterruptMask( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
queuelists(xQueue);@*/
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
* queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
* queuelists(xQueue);@*/
{
/*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask);
mutex_acquire( xQueue->irqMask );
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
}
void clearInterruptMask(QueueHandle_t xQueue)
void clearInterruptMask( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
queuelists(xQueue);@*/
* [1/2]xQueue->irqMask |-> ?m &*&
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
* queuelists(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, false);@*/
{
/*@close irqs_masked_invariant(xQueue, N, M, false)();@*/
mutex_release(xQueue->irqMask);
mutex_release( xQueue->irqMask );
/*@close [1/2]queuehandle(xQueue, N, M, false);@*/
}
#define taskENTER_CRITICAL() setInterruptMask(xQueue)
#define taskEXIT_CRITICAL() clearInterruptMask(xQueue)
#define taskENTER_CRITICAL() setInterruptMask( xQueue )
#define taskEXIT_CRITICAL() clearInterruptMask( xQueue )
#define portYIELD_WITHIN_API()
#define queueYIELD_IF_USING_PREEMPTION()
UBaseType_t setInterruptMaskFromISR(QueueHandle_t xQueue)
UBaseType_t setInterruptMaskFromISR( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_locked ? true : queuelists(xQueue));@*/
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
* queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
* (is_locked ? true : queuelists(xQueue));@*/
{
/*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask);
mutex_acquire( xQueue->irqMask );
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
return 0;
}
void clearInterruptMaskFromISR(QueueHandle_t xQueue, UBaseType_t uxSavedInterruptStatus)
void clearInterruptMaskFromISR( QueueHandle_t xQueue,
UBaseType_t uxSavedInterruptStatus )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
(is_locked ? true : queuelists(xQueue));@*/
* [1/2]xQueue->irqMask |-> ?m &*&
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
* (is_locked ? true : queuelists(xQueue));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, true);@*/
{
/*@close irqs_masked_invariant(xQueue, N, M, true)();@*/
mutex_release(xQueue->irqMask);
mutex_release( xQueue->irqMask );
/*@close [1/2]queuehandle(xQueue, N, M, true);@*/
}
#define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR(xQueue)
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus) clearInterruptMaskFromISR(xQueue, uxSavedInterruptStatus)
#define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR( xQueue )
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) clearInterruptMaskFromISR( xQueue, uxSavedInterruptStatus )
#endif /* QUEUE_H */


@@ -29,26 +29,32 @@
#include "queue.h"
void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer );
void prvCopyDataFromQueue( Queue_t * const pxQueue,
void * const pvBuffer );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
chars(pvBuffer, M, head(abs));@*/
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition );
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
* chars(pvBuffer, M, head(abs));@*/
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(K < N || xPosition == queueOVERWRITE) &*&
chars(pvItemToQueue, M, ?x) &*&
(xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
* (K < N || xPosition == queueOVERWRITE) &*&
* chars(pvItemToQueue, M, ?x) &*&
* (xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
(xPosition == queueSEND_TO_BACK
? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
: (xPosition == queueSEND_TO_FRONT
? (R == 0
? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
: queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
: xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
) &*&
chars(pvItemToQueue, M, x);@*/
* (xPosition == queueSEND_TO_BACK
* ? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
* : (xPosition == queueSEND_TO_FRONT
* ? (R == 0
* ? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
* : queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
* : xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
* ) &*&
* chars(pvItemToQueue, M, x);@*/
BaseType_t prvIsQueueEmpty( Queue_t * pxQueue );
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/