Add uncrustify github workflow (#659)

* Add uncrustify github workflow

* Fix exclusion pattern

* fix find expression

* exclude uncrustify files

* Uncrustify common demo and test files

* Exclude white space checking files

* Fix EOL whitespace checker

* Remove whitespaces from EOL

* Fix space at EOL

* Fix find spaces at EOL

Co-authored-by: Archit Aggarwal <architag@amazon.com>
alfred gedeon 2021-07-22 14:23:48 -07:00 committed by GitHub
parent dd80d615b5
commit ae92d8c6ee
191 changed files with 17540 additions and 17102 deletions


@ -1,3 +1,5 @@
eFrameProcessingResult_t publicProcessIPPacket( IPPacket_t * const pxIPPacket, NetworkBufferDescriptor_t * const pxNetworkBuffer ) {
prvProcessIPPacket(pxIPPacket, pxNetworkBuffer);
eFrameProcessingResult_t publicProcessIPPacket( IPPacket_t * const pxIPPacket,
NetworkBufferDescriptor_t * const pxNetworkBuffer )
{
prvProcessIPPacket( pxIPPacket, pxNetworkBuffer );
}


@ -1,12 +1,20 @@
int32_t publicTCPPrepareSend( FreeRTOS_Socket_t *pxSocket, NetworkBufferDescriptor_t **ppxNetworkBuffer, UBaseType_t uxOptionsLength ) {
prvTCPPrepareSend( pxSocket, ppxNetworkBuffer, uxOptionsLength );
int32_t publicTCPPrepareSend( FreeRTOS_Socket_t * pxSocket,
NetworkBufferDescriptor_t ** ppxNetworkBuffer,
UBaseType_t uxOptionsLength )
{
prvTCPPrepareSend( pxSocket, ppxNetworkBuffer, uxOptionsLength );
}
BaseType_t publicTCPHandleState( FreeRTOS_Socket_t *pxSocket, NetworkBufferDescriptor_t **ppxNetworkBuffer ) {
prvTCPHandleState(pxSocket, ppxNetworkBuffer);
BaseType_t publicTCPHandleState( FreeRTOS_Socket_t * pxSocket,
NetworkBufferDescriptor_t ** ppxNetworkBuffer )
{
prvTCPHandleState( pxSocket, ppxNetworkBuffer );
}
void publicTCPReturnPacket( FreeRTOS_Socket_t *pxSocket, NetworkBufferDescriptor_t *pxNetworkBuffer,
uint32_t ulLen, BaseType_t xReleaseAfterSend ) {
prvTCPReturnPacket(pxSocket, pxNetworkBuffer, ulLen, xReleaseAfterSend );
void publicTCPReturnPacket( FreeRTOS_Socket_t * pxSocket,
NetworkBufferDescriptor_t * pxNetworkBuffer,
uint32_t ulLen,
BaseType_t xReleaseAfterSend )
{
prvTCPReturnPacket( pxSocket, pxNetworkBuffer, ulLen, xReleaseAfterSend );
}


@ -18,12 +18,12 @@
#include "NetworkInterface.h"
/*
* CBMC models a pointer as an object id and an offset into that
* object. The top bits of a pointer encode the object id and the
* remaining bits encode the offset. This means there is a bound on
* the maximum offset into an object in CBMC, and hence a bound on the
* size of objects in CBMC.
*/
* CBMC models a pointer as an object id and an offset into that
* object. The top bits of a pointer encode the object id and the
* remaining bits encode the offset. This means there is a bound on
* the maximum offset into an object in CBMC, and hence a bound on the
* size of objects in CBMC.
*/
#define CBMC_BITS 7
#define CBMC_MAX_OBJECT_SIZE ( 0xFFFFFFFF >> ( CBMC_BITS + 1 ) )
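As a quick sanity check of what these two macros imply (a worked example, not part of the diff): the shift amount is CBMC_BITS + 1 = 8, so the largest representable offset, and therefore the largest object CBMC can model here, is 0xFFFFFFFF >> 8 = 0x00FFFFFF bytes, roughly 16 MiB.

#include <assert.h>

#define CBMC_BITS               7
#define CBMC_MAX_OBJECT_SIZE    ( 0xFFFFFFFF >> ( CBMC_BITS + 1 ) )

int main( void )
{
    /* 0xFFFFFFFF >> 8 == 0x00FFFFFF == 16777215 bytes (~16 MiB). */
    assert( CBMC_MAX_OBJECT_SIZE == 0x00FFFFFFu );
    return 0;
}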
@ -70,19 +70,19 @@ enum CBMC_LOOP_CONDITION
#define __CPROVER_printf2_ptr( str, exp ) { uint8_t * ValueOf_ ## str = ( uint8_t * ) ( exp ); }
/*
* An assertion that pvPortMalloc returns NULL when asked to allocate 0 bytes.
* This assertion is used in some of the TaskPool proofs.
*/
* An assertion that pvPortMalloc returns NULL when asked to allocate 0 bytes.
* This assertion is used in some of the TaskPool proofs.
*/
#define __CPROVER_assert_zero_allocation() \
__CPROVER_assert( pvPortMalloc( 0 ) == NULL, \
"pvPortMalloc allows zero-allocated memory." )
/*
* A stub for pvPortMalloc that nondeterministically chooses to return
* either NULL or an allocation of the requested space. The stub is
* guaranteed to return NULL when asked to allocate 0 bytes.
* This stub is used in some of the TaskPool proofs.
*/
* A stub for pvPortMalloc that nondeterministically chooses to return
* either NULL or an allocation of the requested space. The stub is
* guaranteed to return NULL when asked to allocate 0 bytes.
* This stub is used in some of the TaskPool proofs.
*/
void * pvPortMalloc( size_t xWantedSize )
{
if( xWantedSize == 0 )
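The diff cuts this stub off here; the comment above describes its behaviour, and a body matching that description would look roughly like the following (an illustrative sketch assuming a nondet_bool() helper, not necessarily the exact code in this file):

#include <stddef.h>
#include <stdlib.h>

_Bool nondet_bool( void );

void * pvPortMalloc( size_t xWantedSize )
{
    if( xWantedSize == 0 )
    {
        /* Guaranteed NULL for zero-byte requests, matching the assertion macro above. */
        return NULL;
    }

    /* Nondeterministically return either NULL or a fresh allocation, so every
     * proof has to tolerate allocation failure. */
    return nondet_bool() ? malloc( xWantedSize ) : NULL;
}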


@ -3,144 +3,170 @@
#include "queue_datastructure.h"
#ifndef CBMC_OBJECT_BITS
#define CBMC_OBJECT_BITS 7
#define CBMC_OBJECT_BITS 7
#endif
#ifndef CBMC_OBJECT_MAX_SIZE
#define CBMC_OBJECT_MAX_SIZE (UINT32_MAX>>(CBMC_OBJECT_BITS+1))
#define CBMC_OBJECT_MAX_SIZE ( UINT32_MAX >> ( CBMC_OBJECT_BITS + 1 ) )
#endif
/* Using prvCopyDataToQueue together with prvNotifyQueueSetContainer
leads to a problem space explosion. Therefore, we use this stub
and a separate proof on prvCopyDataToQueue to deal with it.
As prvNotifyQueueSetContainer is disabled if configUSE_QUEUE_SETS != 1,
in other cases the original implementation should be used. */
#if( configUSE_QUEUE_SETS == 1 )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
if(pxQueue->uxItemSize > ( UBaseType_t ) 0)
{
__CPROVER_assert(__CPROVER_r_ok(pvItemToQueue, ( size_t ) pxQueue->uxItemSize), "pvItemToQueue region must be readable");
if(xPosition == queueSEND_TO_BACK){
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize), "pxQueue->pcWriteTo region must be writable");
}else{
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize), "pxQueue->u.xQueue.pcReadFrom region must be writable");
}
return pdFALSE;
}else
{
return nondet_BaseType_t();
}
}
#endif
* leads to a problem space explosion. Therefore, we use this stub
* and a separate proof on prvCopyDataToQueue to deal with it.
* As prvNotifyQueueSetContainer is disabled if configUSE_QUEUE_SETS != 1,
* in other cases the original implementation should be used. */
#if ( configUSE_QUEUE_SETS == 1 )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition )
{
if( pxQueue->uxItemSize > ( UBaseType_t ) 0 )
{
__CPROVER_assert( __CPROVER_r_ok( pvItemToQueue, ( size_t ) pxQueue->uxItemSize ), "pvItemToQueue region must be readable" );
if( xPosition == queueSEND_TO_BACK )
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize ), "pxQueue->pcWriteTo region must be writable" );
}
else
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ), "pxQueue->u.xQueue.pcReadFrom region must be writable" );
}
return pdFALSE;
}
else
{
return nondet_BaseType_t();
}
}
#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
/* xQueueCreateSet is compiled out if configUSE_QUEUE_SETS != 1.*/
#if( configUSE_QUEUE_SETS == 1 )
QueueSetHandle_t xUnconstrainedQueueSet()
{
UBaseType_t uxEventQueueLength = 2;
QueueSetHandle_t xSet = xQueueCreateSet(uxEventQueueLength);
if( xSet )
{
xSet->cTxLock = nondet_int8_t();
__CPROVER_assume(xSet->cTxLock != 127);
xSet->cRxLock = nondet_int8_t();
xSet->uxMessagesWaiting = nondet_UBaseType_t();
xSet->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume(xSet->uxMessagesWaiting < xSet->uxLength);
xSet->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
}
return xSet;
}
#endif
#if ( configUSE_QUEUE_SETS == 1 )
QueueSetHandle_t xUnconstrainedQueueSet()
{
UBaseType_t uxEventQueueLength = 2;
QueueSetHandle_t xSet = xQueueCreateSet( uxEventQueueLength );
if( xSet )
{
xSet->cTxLock = nondet_int8_t();
__CPROVER_assume( xSet->cTxLock != 127 );
xSet->cRxLock = nondet_int8_t();
xSet->uxMessagesWaiting = nondet_UBaseType_t();
xSet->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
* If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume( xSet->uxMessagesWaiting < xSet->uxLength );
xSet->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
}
return xSet;
}
#endif /* if ( configUSE_QUEUE_SETS == 1 ) */
/* Create a mostly unconstrained Queue but bound the max item size.
This is required for performance reasons in CBMC at the moment. */
QueueHandle_t xUnconstrainedQueueBoundedItemSize( UBaseType_t uxItemSizeBound ) {
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
__CPROVER_assume(uxQueueLength > 0);
__CPROVER_assume(uxItemSize < uxItemSizeBound);
* This is required for performance reasons in CBMC at the moment. */
QueueHandle_t xUnconstrainedQueueBoundedItemSize( UBaseType_t uxItemSizeBound )
{
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
// QueueGenericCreate method does not check for multiplication overflow
size_t uxQueueStorageSize;
__CPROVER_assume(uxQueueStorageSize < CBMC_OBJECT_MAX_SIZE);
__CPROVER_assume(uxItemSize < uxQueueStorageSize/uxQueueLength);
__CPROVER_assume( uxQueueLength > 0 );
__CPROVER_assume( uxItemSize < uxItemSizeBound );
QueueHandle_t xQueue =
xQueueGenericCreate(uxQueueLength, uxItemSize, ucQueueType);
if(xQueue){
xQueue->cTxLock = nondet_int8_t();
__CPROVER_assume(xQueue->cTxLock != 127);
xQueue->cRxLock = nondet_int8_t();
__CPROVER_assume(xQueue->cRxLock != 127);
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume(xQueue->uxMessagesWaiting < xQueue->uxLength);
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if( configUSE_QUEUE_SETS == 1)
xQueueAddToSet(xQueue, xUnconstrainedQueueSet());
#endif
}
return xQueue;
/* QueueGenericCreate method does not check for multiplication overflow */
size_t uxQueueStorageSize;
__CPROVER_assume( uxQueueStorageSize < CBMC_OBJECT_MAX_SIZE );
__CPROVER_assume( uxItemSize < uxQueueStorageSize / uxQueueLength );
QueueHandle_t xQueue =
xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
if( xQueue )
{
xQueue->cTxLock = nondet_int8_t();
__CPROVER_assume( xQueue->cTxLock != 127 );
xQueue->cRxLock = nondet_int8_t();
__CPROVER_assume( xQueue->cRxLock != 127 );
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
* If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume( xQueue->uxMessagesWaiting < xQueue->uxLength );
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if ( configUSE_QUEUE_SETS == 1 )
xQueueAddToSet( xQueue, xUnconstrainedQueueSet() );
#endif
}
return xQueue;
}
/* Create a mostly unconstrained Queue */
QueueHandle_t xUnconstrainedQueue( void ) {
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
QueueHandle_t xUnconstrainedQueue( void )
{
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
__CPROVER_assume(uxQueueLength > 0);
__CPROVER_assume( uxQueueLength > 0 );
// QueueGenericCreate method does not check for multiplication overflow
size_t uxQueueStorageSize;
__CPROVER_assume(uxQueueStorageSize < CBMC_OBJECT_MAX_SIZE);
__CPROVER_assume(uxItemSize < uxQueueStorageSize/uxQueueLength);
/* QueueGenericCreate method does not check for multiplication overflow */
size_t uxQueueStorageSize;
__CPROVER_assume( uxQueueStorageSize < CBMC_OBJECT_MAX_SIZE );
__CPROVER_assume( uxItemSize < uxQueueStorageSize / uxQueueLength );
QueueHandle_t xQueue =
xQueueGenericCreate(uxQueueLength, uxItemSize, ucQueueType);
QueueHandle_t xQueue =
xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
if(xQueue){
xQueue->cTxLock = nondet_int8_t();
__CPROVER_assume(xQueue->cTxLock != 127);
xQueue->cRxLock = nondet_int8_t();
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume(xQueue->uxMessagesWaiting < xQueue->uxLength);
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if( configUSE_QUEUE_SETS == 1)
xQueueAddToSet(xQueue, xUnconstrainedQueueSet());
#endif
}
return xQueue;
if( xQueue )
{
xQueue->cTxLock = nondet_int8_t();
__CPROVER_assume( xQueue->cTxLock != 127 );
xQueue->cRxLock = nondet_int8_t();
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
* If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume( xQueue->uxMessagesWaiting < xQueue->uxLength );
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if ( configUSE_QUEUE_SETS == 1 )
xQueueAddToSet( xQueue, xUnconstrainedQueueSet() );
#endif
}
return xQueue;
}
/* Create a mostly unconstrained Mutex */
QueueHandle_t xUnconstrainedMutex( void ) {
uint8_t ucQueueType;
QueueHandle_t xQueue =
xQueueCreateMutex(ucQueueType);
if(xQueue){
xQueue->cTxLock = nondet_int8_t();
__CPROVER_assume(xQueue->cTxLock != 127);
xQueue->cRxLock = nondet_int8_t();
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume(xQueue->uxMessagesWaiting < xQueue->uxLength);
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if( configUSE_QUEUE_SETS == 1)
xQueueAddToSet(xQueue, xUnconstrainedQueueSet());
#endif
}
return xQueue;
QueueHandle_t xUnconstrainedMutex( void )
{
uint8_t ucQueueType;
QueueHandle_t xQueue =
xQueueCreateMutex( ucQueueType );
if( xQueue )
{
xQueue->cTxLock = nondet_int8_t();
__CPROVER_assume( xQueue->cTxLock != 127 );
xQueue->cRxLock = nondet_int8_t();
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
* If it is false from the beginning, the CBMC proofs are not able to succeed*/
__CPROVER_assume( xQueue->uxMessagesWaiting < xQueue->uxLength );
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if ( configUSE_QUEUE_SETS == 1 )
xQueueAddToSet( xQueue, xUnconstrainedQueueSet() );
#endif
}
return xQueue;
}


@ -5,6 +5,7 @@
#include "task.h"
BaseType_t xState;
void vInitTaskCheckForTimeOut(BaseType_t maxCounter, BaseType_t maxCounter_limit);
void vInitTaskCheckForTimeOut( BaseType_t maxCounter,
BaseType_t maxCounter_limit );
#endif /* INC_TASK_STUBS_H */


@ -53,10 +53,10 @@
#define configIDLE_SHOULD_YIELD 1
#define configUSE_CO_ROUTINES 0
#ifndef configUSE_MUTEXES
#define configUSE_MUTEXES 1
#define configUSE_MUTEXES 1
#endif
#ifndef configUSE_RECURSIVE_MUTEXES
#define configUSE_RECURSIVE_MUTEXES 1
#define configUSE_RECURSIVE_MUTEXES 1
#endif
#define configQUEUE_REGISTRY_SIZE 0
#define configUSE_APPLICATION_TASK_TAG 1
@ -67,31 +67,31 @@
/* Hook function related definitions. */
#ifndef configUSE_TICK_HOOK
#define configUSE_TICK_HOOK 0
#define configUSE_TICK_HOOK 0
#endif
#define configUSE_IDLE_HOOK 1
#define configUSE_MALLOC_FAILED_HOOK 1
#define configCHECK_FOR_STACK_OVERFLOW 0 /* Not applicable to the Win32 port. */
#define configUSE_IDLE_HOOK 1
#define configUSE_MALLOC_FAILED_HOOK 1
#define configCHECK_FOR_STACK_OVERFLOW 0 /* Not applicable to the Win32 port. */
/* Software timer related definitions. */
#define configUSE_TIMERS 1
#define configTIMER_TASK_PRIORITY ( configMAX_PRIORITIES - 1 )
#define configTIMER_QUEUE_LENGTH 5
#define configTIMER_TASK_STACK_DEPTH ( configMINIMAL_STACK_SIZE * 2 )
#define configUSE_TIMERS 1
#define configTIMER_TASK_PRIORITY ( configMAX_PRIORITIES - 1 )
#define configTIMER_QUEUE_LENGTH 5
#define configTIMER_TASK_STACK_DEPTH ( configMINIMAL_STACK_SIZE * 2 )
/* Event group related definitions. */
#define configUSE_EVENT_GROUPS 1
#define configUSE_EVENT_GROUPS 1
/* Co-routine definitions. */
#define configUSE_CO_ROUTINES 0
#define configMAX_CO_ROUTINE_PRIORITIES ( 2 )
#define configUSE_CO_ROUTINES 0
#define configMAX_CO_ROUTINE_PRIORITIES ( 2 )
/* Memory allocation strategy. */
#ifndef configSUPPORT_DYNAMIC_ALLOCATION
#define configSUPPORT_DYNAMIC_ALLOCATION 1
#define configSUPPORT_DYNAMIC_ALLOCATION 1
#endif
#ifndef configSUPPORT_STATIC_ALLOCATION
#define configSUPPORT_STATIC_ALLOCATION 1
#define configSUPPORT_STATIC_ALLOCATION 1
#endif
@ -102,13 +102,13 @@
#define INCLUDE_vTaskDelete 1
#define INCLUDE_vTaskCleanUpResources 0
#ifndef INCLUDE_vTaskSuspend
#define INCLUDE_vTaskSuspend 1
#define INCLUDE_vTaskSuspend 1
#endif
#define INCLUDE_vTaskDelayUntil 1
#define INCLUDE_vTaskDelay 1
#define INCLUDE_uxTaskGetStackHighWaterMark 1
#ifndef INCLUDE_xTaskGetSchedulerState
#define INCLUDE_xTaskGetSchedulerState 1
#define INCLUDE_xTaskGetSchedulerState 1
#endif
#define INCLUDE_xTimerGetTimerTaskHandle 0
#define INCLUDE_xTaskGetIdleTaskHandle 0
@ -132,7 +132,7 @@
extern void vAssertCalled( const char * pcFile,
uint32_t ulLine );
#ifndef configASSERT
#define configASSERT( x ) if( ( x ) == 0 ) vAssertCalled( __FILE__, __LINE__ )
#define configASSERT( x ) if( ( x ) == 0 ) vAssertCalled( __FILE__, __LINE__ )
#endif
/* Remove logging in formal verification */
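The define this comment introduces is cut off by the diff; in proof configurations the usual pattern is to compile the logging hook down to nothing, along these lines (illustrative only, the actual macro name used in this file is not shown here):

/* No-op logging hook for formal verification builds (hypothetical example). */
#define configPRINTF( X )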


@ -1,27 +1,27 @@
/*
FreeRTOS V202104.00
Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
http://aws.amazon.com/freertos
http://www.FreeRTOS.org
*/
* FreeRTOS V202104.00
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://aws.amazon.com/freertos
* http://www.FreeRTOS.org
*/
/*****************************************************************************
@ -156,19 +156,19 @@ extern uint32_t ulRand();
* cache then the UDP message is replaced by a ARP message that solicits the
* required MAC address information. ipconfigARP_CACHE_ENTRIES defines the maximum
* number of entries that can exist in the ARP table at any one time. */
#define ipconfigARP_CACHE_ENTRIES 6
#define ipconfigARP_CACHE_ENTRIES 6
/* ARP requests that do not result in an ARP response will be re-transmitted a
* maximum of ipconfigMAX_ARP_RETRANSMISSIONS times before the ARP request is
* aborted. */
#define ipconfigMAX_ARP_RETRANSMISSIONS ( 5 )
#define ipconfigMAX_ARP_RETRANSMISSIONS ( 5 )
/* ipconfigMAX_ARP_AGE defines the maximum time between an entry in the ARP
* table being created or refreshed and the entry being removed because it is stale.
* New ARP requests are sent for ARP cache entries that are nearing their maximum
* age. ipconfigMAX_ARP_AGE is specified in tens of seconds, so a value of 150 is
* equal to 1500 seconds (or 25 minutes). */
#define ipconfigMAX_ARP_AGE 150
#define ipconfigMAX_ARP_AGE 150
/* Implementing FreeRTOS_inet_addr() necessitates the use of string handling
* routines, which are relatively large. To save code space the full
@ -180,14 +180,14 @@ extern uint32_t ulRand();
* ipconfigINCLUDE_FULL_INET_ADDR is set to 1 then both FreeRTOS_inet_addr() and
* FreeRTOS_indet_addr_quick() are available. If ipconfigINCLUDE_FULL_INET_ADDR is
* not set to 1 then only FreeRTOS_indet_addr_quick() is available. */
#define ipconfigINCLUDE_FULL_INET_ADDR 1
#define ipconfigINCLUDE_FULL_INET_ADDR 1
/* ipconfigNUM_NETWORK_BUFFER_DESCRIPTORS defines the total number of network buffer that
* are available to the IP stack. The total number of network buffers is limited
* to ensure the total amount of RAM that can be consumed by the IP stack is capped
* to a pre-determinable value. */
#ifndef ipconfigNUM_NETWORK_BUFFER_DESCRIPTORS
#define ipconfigNUM_NETWORK_BUFFER_DESCRIPTORS 60
#define ipconfigNUM_NETWORK_BUFFER_DESCRIPTORS 60
#endif
/* A FreeRTOS queue is used to send events from application tasks to the IP


@ -3,15 +3,15 @@
#include "tasksStubs.h"
#ifndef TASK_STUB_COUNTER
#define TASK_STUB_COUNTER 0;
#define TASK_STUB_COUNTER 0;
#endif
/* 5 is a magic number, but we need some number here as a default value.
This value is used to bound any loop depending on xTaskCheckForTimeOut
as a loop bound. It should be overwritten in the Makefile.json adapting
to the performance requirements of the harness. */
* This value is used to bound any loop depending on xTaskCheckForTimeOut
* as a loop bound. It should be overwritten in the Makefile.json adapting
* to the performance requirements of the harness. */
#ifndef TASK_STUB_COUNTER_LIMIT
#define TASK_STUB_COUNTER_LIMIT 5;
#define TASK_STUB_COUNTER_LIMIT 5;
#endif
@ -20,28 +20,32 @@ static BaseType_t xCounterLimit = TASK_STUB_COUNTER_LIMIT;
BaseType_t xTaskGetSchedulerState( void )
{
return xState;
return xState;
}
/* This function is another method apart from overwriting the defines to init the max
loop bound. */
void vInitTaskCheckForTimeOut(BaseType_t maxCounter, BaseType_t maxCounter_limit)
* loop bound. */
void vInitTaskCheckForTimeOut( BaseType_t maxCounter,
BaseType_t maxCounter_limit )
{
xCounter = maxCounter;
xCounterLimit = maxCounter_limit;
xCounter = maxCounter;
xCounterLimit = maxCounter_limit;
}
/* This is mostly called in a loop. For CBMC, we have to bound the loop
to a max limits of calls. Therefore this Stub models a nondet timeout in
max TASK_STUB_COUNTER_LIMIT iterations.*/
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) {
++xCounter;
if(xCounter == xCounterLimit)
{
return pdTRUE;
}
else
{
return nondet_basetype();
}
* to a max limits of calls. Therefore this Stub models a nondet timeout in
* max TASK_STUB_COUNTER_LIMIT iterations.*/
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
TickType_t * const pxTicksToWait )
{
++xCounter;
if( xCounter == xCounterLimit )
{
return pdTRUE;
}
else
{
return nondet_basetype();
}
}
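To see why this stub bounds the proofs, consider a hypothetical wait loop of the kind queue.c executes while blocking (a sketch for illustration, not code from this commit): once xCounter reaches xCounterLimit the stub returns pdTRUE, so the loop below unwinds in at most TASK_STUB_COUNTER_LIMIT iterations.

/* Hypothetical caller, for illustration only. */
static void prvExampleBoundedWait( TickType_t xTicksToWait )
{
    TimeOut_t xTimeOut;

    /* Real callers record the start time first, e.g. via vTaskSetTimeOutState(). */
    do
    {
        /* ... attempt the queue operation, yield, and so on ... */
    } while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE );
}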


@ -17,7 +17,7 @@ list(APPEND cbmc_compile_includes
${CMAKE_SOURCE_DIR}/Source/include
${CMAKE_SOURCE_DIR}/Source/portable/MSVC-MingW
${CMAKE_SOURCE_DIR}/Source/../../FreeRTOS-Plus/Source/FreeRTOS-Plus-TCP/portable/BufferManagement
${CMAKE_SOURCE_DIR}/Source/../../FreeRTOS-Plus/Source/FreeRTOS-Plus-TCP/include
${CMAKE_SOURCE_DIR}/Source/../../FreeRTOS-Plus/Source/FreeRTOS-Plus-TCP/include
${CMAKE_SOURCE_DIR}/Source/../../FreeRTOS-Plus/Source/FreeRTOS-Plus-TCP/portable/Compiler/MSVC
${cbmc_dir}/include
${cbmc_dir}/windows


@ -31,8 +31,9 @@
#include "cbmc.h"
void harness() {
uint8_t ucQueueType;
void harness()
{
uint8_t ucQueueType;
xQueueCreateMutex(ucQueueType);
xQueueCreateMutex( ucQueueType );
}


@ -34,94 +34,112 @@
#include "cbmc.h"
#ifndef LOCK_BOUND
#define LOCK_BOUND 4
#define LOCK_BOUND 4
#endif
#ifndef QUEUE_SEND_BOUND
#define QUEUE_SEND_BOUND 4
#define QUEUE_SEND_BOUND 4
#endif
#if( configUSE_QUEUE_SETS == 0 )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
#if ( configUSE_QUEUE_SETS == 0 )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition )
{
if( pxQueue->uxItemSize > ( UBaseType_t ) 0 )
{
__CPROVER_assert( __CPROVER_r_ok( pvItemToQueue, ( size_t ) pxQueue->uxItemSize ), "pvItemToQueue region must be readable" );
if( xPosition == queueSEND_TO_BACK )
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize ), "pxQueue->pcWriteTo region must be writable" );
}
else
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ), "pxQueue->u.xQueue.pcReadFrom region must be writable" );
}
return pdFALSE;
}
else
{
return nondet_BaseType_t();
}
}
#else /* if ( configUSE_QUEUE_SETS == 0 ) */
BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
{
Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
configASSERT( pxQueueSetContainer );
}
void prvUnlockQueue( Queue_t * const pxQueue )
{
configASSERT( pxQueue );
if( pxQueue->pxQueueSetContainer != NULL )
{
prvNotifyQueueSetContainer( pxQueue );
}
listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) );
pxQueue->cTxLock = queueUNLOCKED;
listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) );
pxQueue->cRxLock = queueUNLOCKED;
}
#endif /* if ( configUSE_QUEUE_SETS == 0 ) */
void harness()
{
if(pxQueue->uxItemSize > ( UBaseType_t ) 0)
{
__CPROVER_assert(__CPROVER_r_ok(pvItemToQueue, ( size_t ) pxQueue->uxItemSize), "pvItemToQueue region must be readable");
if(xPosition == queueSEND_TO_BACK){
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize), "pxQueue->pcWriteTo region must be writable");
}else{
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize), "pxQueue->u.xQueue.pcReadFrom region must be writable");
}
return pdFALSE;
}else
{
return nondet_BaseType_t();
}
/*Initialise the tasksStubs */
vInitTaskCheckForTimeOut( 0, QUEUE_SEND_BOUND - 1 );
xState = nondet_basetype();
QueueHandle_t xQueue =
xUnconstrainedQueueBoundedItemSize( 2 );
TickType_t xTicksToWait;
if( xState == taskSCHEDULER_SUSPENDED )
{
xTicksToWait = 0;
}
if( xQueue )
{
void * pvItemToQueue = pvPortMalloc( xQueue->uxItemSize );
BaseType_t xCopyPosition;
if( xCopyPosition == queueOVERWRITE )
{
xQueue->uxLength = 1;
}
if( xQueue->uxItemSize == 0 )
{
/* uxQueue->xQueueType is a pointer to the head of the queue storage area.
* If an item has a size, this pointer must not be modified after init.
* Otherwise some of the write statements will fail. */
xQueue->uxQueueType = nondet_int8_t();
pvItemToQueue = 0;
}
/* This code checks explicitly for violations of the pxQueue->uxMessagesWaiting < pxQueue->uxLength
* invariant. */
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* These values are decremented during a while loop interacting with task.c.
* This interaction is currently abstracted away.*/
xQueue->cTxLock = LOCK_BOUND - 1;
xQueue->cRxLock = LOCK_BOUND - 1;
if( !pvItemToQueue )
{
xQueue->uxItemSize = 0;
}
xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );
}
}
#else
BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
{
Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
configASSERT( pxQueueSetContainer );
}
void prvUnlockQueue( Queue_t * const pxQueue ) {
configASSERT( pxQueue );
if( pxQueue->pxQueueSetContainer != NULL )
{
prvNotifyQueueSetContainer(pxQueue);
}
listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) );
pxQueue->cTxLock = queueUNLOCKED;
listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) );
pxQueue->cRxLock = queueUNLOCKED;
}
#endif
void harness(){
//Initialise the tasksStubs
vInitTaskCheckForTimeOut(0, QUEUE_SEND_BOUND - 1);
xState = nondet_basetype();
QueueHandle_t xQueue =
xUnconstrainedQueueBoundedItemSize(2);
TickType_t xTicksToWait;
if(xState == taskSCHEDULER_SUSPENDED){
xTicksToWait = 0;
}
if(xQueue){
void *pvItemToQueue = pvPortMalloc(xQueue->uxItemSize);
BaseType_t xCopyPosition;
if(xCopyPosition == queueOVERWRITE){
xQueue->uxLength = 1;
}
if(xQueue->uxItemSize == 0)
{
/* uxQueue->xQueueType is a pointer to the head of the queue storage area.
If an item has a size, this pointer must not be modified after init.
Otherwise some of the write statements will fail. */
xQueue->uxQueueType = nondet_int8_t();
pvItemToQueue = 0;
}
/* This code checks explicitly for violations of the pxQueue->uxMessagesWaiting < pxQueue->uxLength
invariant. */
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* These values are decremented during a while loop interacting with task.c.
This interaction is currently abstracted away.*/
xQueue->cTxLock = LOCK_BOUND - 1;
xQueue->cRxLock = LOCK_BOUND - 1;
if(!pvItemToQueue){
xQueue->uxItemSize = 0;
}
xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );
}
}


@ -33,53 +33,70 @@
#include "cbmc.h"
#ifndef ITEM_BOUND
#define ITEM_BOUND 10
#define ITEM_BOUND 10
#endif
#if( configUSE_QUEUE_SETS == 0 )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
if(pxQueue->uxItemSize > ( UBaseType_t ) 0)
{
__CPROVER_assert(__CPROVER_r_ok(pvItemToQueue, ( size_t ) pxQueue->uxItemSize), "pvItemToQueue region must be readable");
if(xPosition == queueSEND_TO_BACK){
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize), "pxQueue->pcWriteTo region must be writable");
}else{
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize), "pxQueue->u.xQueue.pcReadFrom region must be writable");
}
return pdFALSE;
}else
{
return nondet_BaseType_t();
}
}
#endif
#if ( configUSE_QUEUE_SETS == 0 )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition )
{
if( pxQueue->uxItemSize > ( UBaseType_t ) 0 )
{
__CPROVER_assert( __CPROVER_r_ok( pvItemToQueue, ( size_t ) pxQueue->uxItemSize ), "pvItemToQueue region must be readable" );
void harness(){
QueueHandle_t xQueue = xUnconstrainedQueueBoundedItemSize(ITEM_BOUND);
if( xPosition == queueSEND_TO_BACK )
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize ), "pxQueue->pcWriteTo region must be writable" );
}
else
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ), "pxQueue->u.xQueue.pcReadFrom region must be writable" );
}
return pdFALSE;
}
else
{
return nondet_BaseType_t();
}
}
#endif /* if ( configUSE_QUEUE_SETS == 0 ) */
void harness()
{
QueueHandle_t xQueue = xUnconstrainedQueueBoundedItemSize( ITEM_BOUND );
if( xQueue ){
void *pvItemToQueue = pvPortMalloc(xQueue->uxItemSize);
BaseType_t *xHigherPriorityTaskWoken = pvPortMalloc(sizeof(BaseType_t));
BaseType_t xCopyPosition;
if(xQueue->uxItemSize == 0)
{
/* uxQueue->xQueueType is a pointer to the head of the queue storage area.
If an item has a size, this pointer must not be modified after init.
Otherwise some of the write statements will fail. */
xQueue->uxQueueType = nondet_int8_t();
pvItemToQueue = 0;
}
/* This code checks explicitly for violations of the pxQueue->uxMessagesWaiting < pxQueue->uxLength
invariant. */
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
if(!pvItemToQueue){
xQueue->uxItemSize = 0;
}
if(xCopyPosition == 2 ){
__CPROVER_assume(xQueue->uxLength == 1);
}
xQueueGenericSendFromISR( xQueue, pvItemToQueue, xHigherPriorityTaskWoken, xCopyPosition );
}
if( xQueue )
{
void * pvItemToQueue = pvPortMalloc( xQueue->uxItemSize );
BaseType_t * xHigherPriorityTaskWoken = pvPortMalloc( sizeof( BaseType_t ) );
BaseType_t xCopyPosition;
if( xQueue->uxItemSize == 0 )
{
/* uxQueue->xQueueType is a pointer to the head of the queue storage area.
* If an item has a size, this pointer must not be modified after init.
* Otherwise some of the write statements will fail. */
xQueue->uxQueueType = nondet_int8_t();
pvItemToQueue = 0;
}
/* This code checks explicitly for violations of the pxQueue->uxMessagesWaiting < pxQueue->uxLength
* invariant. */
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
if( !pvItemToQueue )
{
xQueue->uxItemSize = 0;
}
if( xCopyPosition == 2 )
{
__CPROVER_assume( xQueue->uxLength == 1 );
}
xQueueGenericSendFromISR( xQueue, pvItemToQueue, xHigherPriorityTaskWoken, xCopyPosition );
}
}


@ -32,10 +32,13 @@
#include "cbmc.h"
void harness() {
QueueHandle_t xSemaphore = xUnconstrainedQueue();
if (xSemaphore) {
xSemaphore->uxQueueType = nondet_uint8_t();
xQueueGetMutexHolder(xSemaphore);
}
void harness()
{
QueueHandle_t xSemaphore = xUnconstrainedQueue();
if( xSemaphore )
{
xSemaphore->uxQueueType = nondet_uint8_t();
xQueueGetMutexHolder( xSemaphore );
}
}


@ -32,10 +32,12 @@
#include "cbmc.h"
void harness(){
QueueHandle_t xSemaphore = pvPortMalloc(sizeof(Queue_t));
if (xSemaphore) {
xQueueGetMutexHolderFromISR( xSemaphore );
}
}
void harness()
{
QueueHandle_t xSemaphore = pvPortMalloc( sizeof( Queue_t ) );
if( xSemaphore )
{
xQueueGetMutexHolderFromISR( xSemaphore );
}
}


@ -32,12 +32,14 @@
#include "cbmc.h"
void harness(){
QueueHandle_t xQueue = xUnconstrainedMutex();
BaseType_t *xHigherPriorityTaskWoken = pvPortMalloc(sizeof(BaseType_t));
if(xQueue){
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
xQueueGiveFromISR( xQueue, xHigherPriorityTaskWoken );
}
void harness()
{
QueueHandle_t xQueue = xUnconstrainedMutex();
BaseType_t * xHigherPriorityTaskWoken = pvPortMalloc( sizeof( BaseType_t ) );
if( xQueue )
{
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
xQueueGiveFromISR( xQueue, xHigherPriorityTaskWoken );
}
}


@ -32,17 +32,21 @@
#include "cbmc.h"
void harness() {
uint8_t ucQueueType;
QueueHandle_t xMutex =
xQueueCreateMutex( ucQueueType);
if (xMutex) {
xMutex->uxQueueType = ucQueueType;
UBaseType_t uxCounter;
/* This assumption is explained in the queue.c file inside the method body
xQueueGiveMutexRecursive and guards against an underflow error. */
__CPROVER_assume(uxCounter > 0);
xMutex->u.xSemaphore.uxRecursiveCallCount = uxCounter;
xQueueGiveMutexRecursive(xMutex);
}
void harness()
{
uint8_t ucQueueType;
QueueHandle_t xMutex =
xQueueCreateMutex( ucQueueType );
if( xMutex )
{
xMutex->uxQueueType = ucQueueType;
UBaseType_t uxCounter;
/* This assumption is explained in the queue.c file inside the method body
* xQueueGiveMutexRecursive and guards against an underflow error. */
__CPROVER_assume( uxCounter > 0 );
xMutex->u.xSemaphore.uxRecursiveCallCount = uxCounter;
xQueueGiveMutexRecursive( xMutex );
}
}


@ -32,10 +32,12 @@
#include "cbmc.h"
void harness(){
QueueHandle_t xQueue = pvPortMalloc(sizeof(Queue_t));
void harness()
{
QueueHandle_t xQueue = pvPortMalloc( sizeof( Queue_t ) );
if(xQueue){
uxQueueMessagesWaiting( xQueue );
}
if( xQueue )
{
uxQueueMessagesWaiting( xQueue );
}
}


@ -34,46 +34,52 @@
#include "cbmc.h"
#ifndef LOCK_BOUND
#define LOCK_BOUND 4
#define LOCK_BOUND 4
#endif
#ifndef QUEUE_PEEK_BOUND
#define QUEUE_PEEK_BOUND 4
#define QUEUE_PEEK_BOUND 4
#endif
QueueHandle_t xQueue;
/* This method is called to initialize pxTimeOut.
Setting up the data structure is not interesting for the proof,
but the harness uses it to model a release
on the queue after first check. */
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ){
xQueue-> uxMessagesWaiting = nondet_BaseType_t();
* Setting up the data structure is not interesting for the proof,
* but the harness uses it to model a release
* on the queue after first check. */
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
xQueue->uxMessagesWaiting = nondet_BaseType_t();
}
void harness(){
xQueue = xUnconstrainedQueueBoundedItemSize(10);
void harness()
{
xQueue = xUnconstrainedQueueBoundedItemSize( 10 );
//Initialise the tasksStubs
vInitTaskCheckForTimeOut(0, QUEUE_PEEK_BOUND - 1);
/*Initialise the tasksStubs */
vInitTaskCheckForTimeOut( 0, QUEUE_PEEK_BOUND - 1 );
TickType_t xTicksToWait;
if(xState == taskSCHEDULER_SUSPENDED){
xTicksToWait = 0;
}
TickType_t xTicksToWait;
if(xQueue){
__CPROVER_assume(xQueue->cTxLock < LOCK_BOUND - 1);
__CPROVER_assume(xQueue->cRxLock < LOCK_BOUND - 1);
if( xState == taskSCHEDULER_SUSPENDED )
{
xTicksToWait = 0;
}
void *pvItemToQueue = pvPortMalloc(xQueue->uxItemSize);
if( xQueue )
{
__CPROVER_assume( xQueue->cTxLock < LOCK_BOUND - 1 );
__CPROVER_assume( xQueue->cRxLock < LOCK_BOUND - 1 );
/* In case malloc fails as this is otherwise an invariant violation. */
if(!pvItemToQueue){
xQueue->uxItemSize = 0;
}
void * pvItemToQueue = pvPortMalloc( xQueue->uxItemSize );
xQueuePeek( xQueue, pvItemToQueue, xTicksToWait );
}
/* In case malloc fails as this is otherwise an invariant violation. */
if( !pvItemToQueue )
{
xQueue->uxItemSize = 0;
}
xQueuePeek( xQueue, pvItemToQueue, xTicksToWait );
}
}


@ -33,56 +33,63 @@
#include "cbmc.h"
/* prvUnlockQueue is going to decrement this value to 0 in the loop.
We need a bound for the loop. Using 4 has a reasonable performance resulting
in 3 unwinding iterations of the loop. The loop is mostly modifying a
data structure in task.c that is not in the scope of the proof. */
* We need a bound for the loop. Using 4 has a reasonable performance resulting
* in 3 unwinding iterations of the loop. The loop is mostly modifying a
* data structure in task.c that is not in the scope of the proof. */
#ifndef LOCK_BOUND
#define LOCK_BOUND 4
#define LOCK_BOUND 4
#endif
/* This code checks for time outs. This value is used to bound the time out
wait period. The stub function xTaskCheckForTimeOut used to model
this wait time will be bounded to this define. */
* wait period. The stub function xTaskCheckForTimeOut used to model
* this wait time will be bounded to this define. */
#ifndef QUEUE_RECEIVE_BOUND
#define QUEUE_RECEIVE_BOUND 4
#define QUEUE_RECEIVE_BOUND 4
#endif
/* If the item size is not bounded, the proof does not finish in a reasonable
time due to the involved memcpy commands. */
* time due to the involved memcpy commands. */
#ifndef MAX_ITEM_SIZE
#define MAX_ITEM_SIZE 20
#define MAX_ITEM_SIZE 20
#endif
QueueHandle_t xQueue;
/* This method is used to model side effects of concurrency.
The initialization of pxTimeOut is not relevant for this harness. */
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ){
__CPROVER_assert(__CPROVER_w_ok(&(pxTimeOut->xOverflowCount), sizeof(BaseType_t)), "pxTimeOut should be a valid pointer and xOverflowCount writable");
__CPROVER_assert(__CPROVER_w_ok(&(pxTimeOut->xTimeOnEntering), sizeof(TickType_t)), "pxTimeOut should be a valid pointer and xTimeOnEntering writable");
xQueue->uxMessagesWaiting = nondet_BaseType_t();
* The initialization of pxTimeOut is not relevant for this harness. */
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
__CPROVER_assert( __CPROVER_w_ok( &( pxTimeOut->xOverflowCount ), sizeof( BaseType_t ) ), "pxTimeOut should be a valid pointer and xOverflowCount writable" );
__CPROVER_assert( __CPROVER_w_ok( &( pxTimeOut->xTimeOnEntering ), sizeof( TickType_t ) ), "pxTimeOut should be a valid pointer and xTimeOnEntering writable" );
xQueue->uxMessagesWaiting = nondet_BaseType_t();
}
void harness(){
vInitTaskCheckForTimeOut(0, QUEUE_RECEIVE_BOUND - 1);
void harness()
{
vInitTaskCheckForTimeOut( 0, QUEUE_RECEIVE_BOUND - 1 );
xQueue = xUnconstrainedQueueBoundedItemSize(MAX_ITEM_SIZE);
xQueue = xUnconstrainedQueueBoundedItemSize( MAX_ITEM_SIZE );
TickType_t xTicksToWait;
if(xState == taskSCHEDULER_SUSPENDED){
xTicksToWait = 0;
}
TickType_t xTicksToWait;
if(xQueue){
xQueue->cTxLock = LOCK_BOUND - 1;
xQueue->cRxLock = LOCK_BOUND - 1;
void *pvBuffer = pvPortMalloc(xQueue->uxItemSize);
if(!pvBuffer){
xQueue->uxItemSize = 0;
}
xQueueReceive( xQueue, pvBuffer, xTicksToWait );
if( xState == taskSCHEDULER_SUSPENDED )
{
xTicksToWait = 0;
}
if( xQueue )
{
xQueue->cTxLock = LOCK_BOUND - 1;
xQueue->cRxLock = LOCK_BOUND - 1;
void * pvBuffer = pvPortMalloc( xQueue->uxItemSize );
if( !pvBuffer )
{
xQueue->uxItemSize = 0;
}
xQueueReceive( xQueue, pvBuffer, xTicksToWait );
}
}


@ -32,22 +32,27 @@
#include "cbmc.h"
/* If the item size is not bounded, the proof does not finish in a reasonable
time due to the involved memcpy commands. */
* time due to the involved memcpy commands. */
#ifndef MAX_ITEM_SIZE
#define MAX_ITEM_SIZE 10
#define MAX_ITEM_SIZE 10
#endif
void harness(){
QueueHandle_t xQueue =
xUnconstrainedQueueBoundedItemSize(MAX_ITEM_SIZE);
void harness()
{
QueueHandle_t xQueue =
xUnconstrainedQueueBoundedItemSize( MAX_ITEM_SIZE );
BaseType_t *xHigherPriorityTaskWoken = pvPortMalloc(sizeof(BaseType_t));
BaseType_t * xHigherPriorityTaskWoken = pvPortMalloc( sizeof( BaseType_t ) );
if(xQueue){
void *pvBuffer = pvPortMalloc(xQueue->uxItemSize);
if(!pvBuffer){
xQueue->uxItemSize = 0;
}
xQueueReceiveFromISR( xQueue, pvBuffer, xHigherPriorityTaskWoken );
}
if( xQueue )
{
void * pvBuffer = pvPortMalloc( xQueue->uxItemSize );
if( !pvBuffer )
{
xQueue->uxItemSize = 0;
}
xQueueReceiveFromISR( xQueue, pvBuffer, xHigherPriorityTaskWoken );
}
}


@ -36,53 +36,57 @@ BaseType_t state;
QueueHandle_t xQueue;
BaseType_t counter;
BaseType_t xTaskGetSchedulerState(void)
BaseType_t xTaskGetSchedulerState( void )
{
return state;
return state;
}
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
/* QueueSemaphoreTake might be blocked to wait for
another process to release a token to the semaphore.
This is currently not in the CBMC model. Anyhow,
vTaskInternalSetTimeOutState is set a timeout for
QueueSemaphoreTake operation. We use this to model a successful
release during wait time. */
UBaseType_t bound;
__CPROVER_assume((bound >= 0 && xQueue->uxLength >= bound));
xQueue->uxMessagesWaiting = bound;
/* QueueSemaphoreTake might be blocked to wait for
* another process to release a token to the semaphore.
* This is currently not in the CBMC model. Anyhow,
* vTaskInternalSetTimeOutState is set a timeout for
* QueueSemaphoreTake operation. We use this to model a successful
* release during wait time. */
UBaseType_t bound;
__CPROVER_assume( ( bound >= 0 && xQueue->uxLength >= bound ) );
xQueue->uxMessagesWaiting = bound;
}
void harness()
{
/* Init task stub to make sure that the third loop iteration
simulates a time out */
vInitTaskCheckForTimeOut(0, 3);
/* Init task stub to make sure that the third loop iteration
* simulates a time out */
vInitTaskCheckForTimeOut( 0, 3 );
xQueue = xUnconstrainedMutex();
TickType_t xTicksToWait;
xQueue = xUnconstrainedMutex();
TickType_t xTicksToWait;
if(state == taskSCHEDULER_SUSPENDED){
xTicksToWait = 0;
}
if (xQueue) {
/* Bounding the loop in prvUnlockQueue to
PRV_UNLOCK_QUEUE_BOUND. As the loop is not relevant
in this proof the value might be set to any
positive 8-bit integer value. We subtract one,
because the bound must be one greater than the
amount of loop iterations. */
__CPROVER_assert(PRV_UNLOCK_QUEUE_BOUND > 0, "Make sure, a valid macro value is chosen.");
xQueue->cTxLock = PRV_UNLOCK_QUEUE_BOUND - 1;
xQueue->cRxLock = PRV_UNLOCK_QUEUE_BOUND - 1;
((&(xQueue->xTasksWaitingToReceive))->xListEnd).pxNext->xItemValue = nondet_ticktype();
if( state == taskSCHEDULER_SUSPENDED )
{
xTicksToWait = 0;
}
/* This assumption is required to prevent an overflow in l. 2057 of queue.c
in the prvGetDisinheritPriorityAfterTimeout() function. */
__CPROVER_assume( (
( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( xQueue->xTasksWaitingToReceive ) )
<= ( ( UBaseType_t ) configMAX_PRIORITIES)));
xQueueSemaphoreTake(xQueue, xTicksToWait);
}
if( xQueue )
{
/* Bounding the loop in prvUnlockQueue to
* PRV_UNLOCK_QUEUE_BOUND. As the loop is not relevant
* in this proof the value might be set to any
* positive 8-bit integer value. We subtract one,
* because the bound must be one greater than the
* amount of loop iterations. */
__CPROVER_assert( PRV_UNLOCK_QUEUE_BOUND > 0, "Make sure, a valid macro value is chosen." );
xQueue->cTxLock = PRV_UNLOCK_QUEUE_BOUND - 1;
xQueue->cRxLock = PRV_UNLOCK_QUEUE_BOUND - 1;
( ( &( xQueue->xTasksWaitingToReceive ) )->xListEnd ).pxNext->xItemValue = nondet_ticktype();
/* This assumption is required to prevent an overflow in l. 2057 of queue.c
* in the prvGetDisinheritPriorityAfterTimeout() function. */
__CPROVER_assume( (
( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( xQueue->xTasksWaitingToReceive ) )
<= ( ( UBaseType_t ) configMAX_PRIORITIES ) ) );
xQueueSemaphoreTake( xQueue, xTicksToWait );
}
}


@ -31,11 +31,12 @@
#include "queue_init.h"
#include "cbmc.h"
void harness(){
QueueHandle_t xQueue = xUnconstrainedQueue();
void harness()
{
QueueHandle_t xQueue = xUnconstrainedQueue();
// QueueSpacesAvailable asserts nonnull pointer
__CPROVER_assume(xQueue);
/* QueueSpacesAvailable asserts nonnull pointer */
__CPROVER_assume( xQueue );
uxQueueSpacesAvailable( xQueue );
uxQueueSpacesAvailable( xQueue );
}


@ -36,36 +36,42 @@ QueueHandle_t xMutex;
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
/* QueueSemaphoreTake might be blocked to wait for
another process to release a token to the semaphore.
This is currently not in the CBMC model. Anyhow,
vTaskInternalSetTimeOutState is set a timeout for
QueueSemaphoreTake operation. We use this to model a successful
release during wait time. */
UBaseType_t bound;
__CPROVER_assume((bound >= 0 && xMutex->uxLength >= bound));
xMutex->uxMessagesWaiting = bound;
/* QueueSemaphoreTake might be blocked to wait for
* another process to release a token to the semaphore.
* This is currently not in the CBMC model. Anyhow,
* vTaskInternalSetTimeOutState is set a timeout for
* QueueSemaphoreTake operation. We use this to model a successful
* release during wait time. */
UBaseType_t bound;
__CPROVER_assume( ( bound >= 0 && xMutex->uxLength >= bound ) );
xMutex->uxMessagesWaiting = bound;
}
BaseType_t xTaskGetSchedulerState( void ) {
BaseType_t ret;
__CPROVER_assume(ret != taskSCHEDULER_SUSPENDED);
return ret;
BaseType_t xTaskGetSchedulerState( void )
{
BaseType_t ret;
__CPROVER_assume( ret != taskSCHEDULER_SUSPENDED );
return ret;
}
void harness() {
uint8_t ucQueueType;
xMutex = xQueueCreateMutex(ucQueueType);
TickType_t xTicksToWait;
void harness()
{
uint8_t ucQueueType;
/* Init task stub to make sure that the QueueSemaphoreTake_BOUND - 1
loop iteration simulates a time out */
vInitTaskCheckForTimeOut(0, QueueSemaphoreTake_BOUND - 1);
xMutex = xQueueCreateMutex( ucQueueType );
TickType_t xTicksToWait;
if(xMutex){
xMutex->cTxLock = PRV_UNLOCK_UNWINDING_BOUND - 1;
xMutex->cRxLock = PRV_UNLOCK_UNWINDING_BOUND - 1;
xMutex->uxMessagesWaiting = nondet_UBaseType_t();
xQueueTakeMutexRecursive(xMutex, xTicksToWait);
}
/* Init task stub to make sure that the QueueSemaphoreTake_BOUND - 1
* loop iteration simulates a time out */
vInitTaskCheckForTimeOut( 0, QueueSemaphoreTake_BOUND - 1 );
if( xMutex )
{
xMutex->cTxLock = PRV_UNLOCK_UNWINDING_BOUND - 1;
xMutex->cRxLock = PRV_UNLOCK_UNWINDING_BOUND - 1;
xMutex->uxMessagesWaiting = nondet_UBaseType_t();
xQueueTakeMutexRecursive( xMutex, xTicksToWait );
}
}


@ -31,24 +31,30 @@
#include "queue_init.h"
#include "cbmc.h"
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition );
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition );
void harness(){
QueueHandle_t xQueue = xUnconstrainedQueueBoundedItemSize(10);
void harness()
{
QueueHandle_t xQueue = xUnconstrainedQueueBoundedItemSize( 10 );
if( xQueue ){
void *pvItemToQueue = pvPortMalloc(xQueue->uxItemSize);
if( !pvItemToQueue )
{
xQueue->uxItemSize = 0;
}
if(xQueue->uxItemSize == 0)
{
xQueue->uxQueueType = nondet_int8_t();
}
BaseType_t xPosition;
prvCopyDataToQueue( xQueue, pvItemToQueue, xPosition );
}
if( xQueue )
{
void * pvItemToQueue = pvPortMalloc( xQueue->uxItemSize );
if( !pvItemToQueue )
{
xQueue->uxItemSize = 0;
}
if( xQueue->uxItemSize == 0 )
{
xQueue->uxQueueType = nondet_int8_t();
}
BaseType_t xPosition;
prvCopyDataToQueue( xQueue, pvItemToQueue, xPosition );
}
}


@ -32,63 +32,81 @@
#include "cbmc.h"
#ifndef LOCK_BOUND
#define LOCK_BOUND 4
#define LOCK_BOUND 4
#endif
BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition );
BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue,
const BaseType_t xCopyPosition );
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition )
{
if(pxQueue->uxItemSize > ( UBaseType_t ) 0)
{
__CPROVER_assert(__CPROVER_r_ok(pvItemToQueue, ( size_t ) pxQueue->uxItemSize), "pvItemToQueue region must be readable");
if(xPosition == queueSEND_TO_BACK){
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize), "pxQueue->pcWriteTo region must be writable");
}else{
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize), "pxQueue->u.xQueue.pcReadFrom region must be writable");
}
return pdFALSE;
}else
{
return nondet_BaseType_t();
}
if( pxQueue->uxItemSize > ( UBaseType_t ) 0 )
{
__CPROVER_assert( __CPROVER_r_ok( pvItemToQueue, ( size_t ) pxQueue->uxItemSize ), "pvItemToQueue region must be readable" );
if( xPosition == queueSEND_TO_BACK )
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize ), "pxQueue->pcWriteTo region must be writable" );
}
else
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ), "pxQueue->u.xQueue.pcReadFrom region must be writable" );
}
return pdFALSE;
}
else
{
return nondet_BaseType_t();
}
}
QueueSetHandle_t xUnconstrainedQueueSet()
{
UBaseType_t uxEventQueueLength = 2;
QueueSetHandle_t xSet = xQueueCreateSet(uxEventQueueLength);
if( xSet )
{
xSet->cTxLock = nondet_int8_t();
__CPROVER_assume(xSet->cTxLock != 127);
xSet->cRxLock = nondet_int8_t();
xSet->uxMessagesWaiting = nondet_UBaseType_t();
xSet->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xSet->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
}
return xSet;
UBaseType_t uxEventQueueLength = 2;
QueueSetHandle_t xSet = xQueueCreateSet( uxEventQueueLength );
if( xSet )
{
xSet->cTxLock = nondet_int8_t();
__CPROVER_assume( xSet->cTxLock != 127 );
xSet->cRxLock = nondet_int8_t();
xSet->uxMessagesWaiting = nondet_UBaseType_t();
xSet->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xSet->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
}
return xSet;
}
void harness(){
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
__CPROVER_assume(uxQueueLength > 0);
__CPROVER_assume(uxItemSize < 10);
/* The implicit assumption for the QueueGenericCreate method is,
that there are no overflows in the computation and the inputs are safe.
There is no check for this in the code base */
UBaseType_t upper_bound = portMAX_DELAY - sizeof(Queue_t);
__CPROVER_assume(uxItemSize < (upper_bound)/uxQueueLength);
QueueHandle_t xQueue =
xQueueGenericCreate(uxQueueLength, uxItemSize, ucQueueType);
if( xQueue ){
xQueueAddToSet(xQueue, xUnconstrainedQueueSet());
if(xQueue->pxQueueSetContainer) {
__CPROVER_assume(xQueue->pxQueueSetContainer->uxMessagesWaiting < xQueue->pxQueueSetContainer->uxLength);
BaseType_t xCopyPosition = nondet_BaseType_t();
prvNotifyQueueSetContainer(xQueue, xCopyPosition );
}
}
void harness()
{
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
__CPROVER_assume( uxQueueLength > 0 );
__CPROVER_assume( uxItemSize < 10 );
/* The implicit assumption for the QueueGenericCreate method is,
* that there are no overflows in the computation and the inputs are safe.
* There is no check for this in the code base */
UBaseType_t upper_bound = portMAX_DELAY - sizeof( Queue_t );
__CPROVER_assume( uxItemSize < ( upper_bound ) / uxQueueLength );
QueueHandle_t xQueue =
xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
if( xQueue )
{
xQueueAddToSet( xQueue, xUnconstrainedQueueSet() );
if( xQueue->pxQueueSetContainer )
{
__CPROVER_assume( xQueue->pxQueueSetContainer->uxMessagesWaiting < xQueue->pxQueueSetContainer->uxLength );
BaseType_t xCopyPosition = nondet_BaseType_t();
prvNotifyQueueSetContainer( xQueue, xCopyPosition );
}
}
}


@ -33,72 +33,89 @@
#include "cbmc.h"
#ifndef LOCK_BOUND
#define LOCK_BOUND 4
#define LOCK_BOUND 4
#endif
void prvUnlockQueue( Queue_t * const pxQueue );
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition )
{
if(pxQueue->uxItemSize > ( UBaseType_t ) 0)
{
__CPROVER_assert(__CPROVER_r_ok(pvItemToQueue, ( size_t ) pxQueue->uxItemSize), "pvItemToQueue region must be readable");
if(xPosition == queueSEND_TO_BACK){
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize), "pxQueue->pcWriteTo region must be writable");
}else{
__CPROVER_assert(__CPROVER_w_ok(( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize), "pxQueue->u.xQueue.pcReadFrom region must be writable");
}
return pdFALSE;
}else
{
return nondet_BaseType_t();
}
if( pxQueue->uxItemSize > ( UBaseType_t ) 0 )
{
__CPROVER_assert( __CPROVER_r_ok( pvItemToQueue, ( size_t ) pxQueue->uxItemSize ), "pvItemToQueue region must be readable" );
if( xPosition == queueSEND_TO_BACK )
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->pcWriteTo, ( size_t ) pxQueue->uxItemSize ), "pxQueue->pcWriteTo region must be writable" );
}
else
{
__CPROVER_assert( __CPROVER_w_ok( ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ), "pxQueue->u.xQueue.pcReadFrom region must be writable" );
}
return pdFALSE;
}
else
{
return nondet_BaseType_t();
}
}
QueueSetHandle_t xUnconstrainedQueueSet()
{
UBaseType_t uxEventQueueLength = 2;
QueueSetHandle_t xSet = xQueueCreateSet(uxEventQueueLength);
if( xSet )
{
xSet->cTxLock = nondet_int8_t();
__CPROVER_assume( xSet->cTxLock != 127 );
xSet->cRxLock = nondet_int8_t();
xSet->uxMessagesWaiting = nondet_UBaseType_t();
xSet->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
If it is false from the beginning, there is no chance for the proof to succeed*/
__CPROVER_assume(xSet->uxMessagesWaiting < xSet->uxLength);
xSet->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
}
return xSet;
UBaseType_t uxEventQueueLength = 2;
QueueSetHandle_t xSet = xQueueCreateSet( uxEventQueueLength );
if( xSet )
{
xSet->cTxLock = nondet_int8_t();
__CPROVER_assume( xSet->cTxLock != 127 );
xSet->cRxLock = nondet_int8_t();
xSet->uxMessagesWaiting = nondet_UBaseType_t();
xSet->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
* If it is false from the beginning, there is no chance for the proof to succeed*/
__CPROVER_assume( xSet->uxMessagesWaiting < xSet->uxLength );
xSet->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
}
return xSet;
}
void harness(){
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
__CPROVER_assume(uxQueueLength > 0);
__CPROVER_assume(uxItemSize < 10);
/* The implicit assumption for the QueueGenericCreate method is,
that there are no overflows in the computation and the inputs are safe.
There is no check for this in the code base */
UBaseType_t upper_bound = portMAX_DELAY - sizeof(Queue_t);
__CPROVER_assume(uxItemSize < (upper_bound)/uxQueueLength);
QueueHandle_t xQueue =
xQueueGenericCreate(uxQueueLength, uxItemSize, ucQueueType);
if(xQueue){
xQueue->cTxLock = LOCK_BOUND - 1;
xQueue->cRxLock = LOCK_BOUND - 1;
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
If it is false from the beginning, there is no chance for the proof to succeed*/
__CPROVER_assume(xQueue->uxMessagesWaiting < xQueue->uxLength);
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if( configUSE_QUEUE_SETS == 1)
xQueueAddToSet(xQueue, xUnconstrainedQueueSet());
#endif
prvUnlockQueue(xQueue);
}
void harness()
{
UBaseType_t uxQueueLength;
UBaseType_t uxItemSize;
uint8_t ucQueueType;
__CPROVER_assume( uxQueueLength > 0 );
__CPROVER_assume( uxItemSize < 10 );
    /* The implicit assumption for the QueueGenericCreate method is that there
     * are no overflows in the computation and that the inputs are safe.
     * There is no check for this in the code base. */
UBaseType_t upper_bound = portMAX_DELAY - sizeof( Queue_t );
__CPROVER_assume( uxItemSize < ( upper_bound ) / uxQueueLength );
QueueHandle_t xQueue =
xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
if( xQueue )
{
xQueue->cTxLock = LOCK_BOUND - 1;
xQueue->cRxLock = LOCK_BOUND - 1;
xQueue->uxMessagesWaiting = nondet_UBaseType_t();
/* This is an invariant checked with a couple of asserts in the code base.
         * If it is false from the beginning, there is no chance for the proof to succeed. */
__CPROVER_assume( xQueue->uxMessagesWaiting < xQueue->uxLength );
xQueue->xTasksWaitingToReceive.uxNumberOfItems = nondet_UBaseType_t();
xQueue->xTasksWaitingToSend.uxNumberOfItems = nondet_UBaseType_t();
#if ( configUSE_QUEUE_SETS == 1 )
xQueueAddToSet( xQueue, xUnconstrainedQueueSet() );
#endif
prvUnlockQueue( xQueue );
}
}
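As a rough sanity check of the bound assumed above: given uxQueueLength >= 1, the assumption uxItemSize < ( portMAX_DELAY - sizeof( Queue_t ) ) / uxQueueLength implies uxQueueLength * uxItemSize < portMAX_DELAY - sizeof( Queue_t ), so sizeof( Queue_t ) + uxQueueLength * uxItemSize stays below portMAX_DELAY and the storage size computed inside xQueueGenericCreate cannot wrap around. portMAX_DELAY is simply used here as a convenient upper bound, not a value taken from the queue implementation.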

View file

@ -41,14 +41,14 @@ BaseType_t xPrepareCurrentTCB( void );
*/
void harness()
{
UBaseType_t xTasksPrepared;
TimeOut_t pxTimeOut;
TickType_t pxTicksToWait;
UBaseType_t xTasksPrepared;
TimeOut_t pxTimeOut;
TickType_t pxTicksToWait;
xTasksPrepared = xPrepareCurrentTCB();
xTasksPrepared = xPrepareCurrentTCB();
if ( xTasksPrepared != pdFAIL )
{
xTaskCheckForTimeOut( &pxTimeOut, &pxTicksToWait );
}
if( xTasksPrepared != pdFAIL )
{
xTaskCheckForTimeOut( &pxTimeOut, &pxTicksToWait );
}
}

View file

@ -33,9 +33,9 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
return pxTCB;
return pxTCB;
}
/*
@ -43,9 +43,9 @@ TaskHandle_t xUnconstrainedTCB( void )
*/
BaseType_t xPrepareCurrentTCB( void )
{
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
pxCurrentTCB = xUnconstrainedTCB();
pxCurrentTCB = xUnconstrainedTCB();
return pxCurrentTCB == NULL ? pdFAIL : pdPASS;
return pxCurrentTCB == NULL ? pdFAIL : pdPASS;
}

View file

@ -33,31 +33,32 @@
void vNondetSetCurrentTCB( void );
void vSetGlobalVariables( void );
void vPrepareTaskLists( void );
TaskHandle_t *pxNondetSetTaskHandle( void );
char *pcNondetSetString( size_t xSizeLength );
TaskHandle_t * pxNondetSetTaskHandle( void );
char * pcNondetSetString( size_t xSizeLength );
void harness()
{
TaskFunction_t pxTaskCode;
char * pcName;
configSTACK_DEPTH_TYPE usStackDepth = STACK_DEPTH;
void * pvParameters;
TaskHandle_t * pxCreatedTask;
TaskFunction_t pxTaskCode;
char * pcName;
configSTACK_DEPTH_TYPE usStackDepth = STACK_DEPTH;
void * pvParameters;
TaskHandle_t * pxCreatedTask;
UBaseType_t uxPriority;
UBaseType_t uxPriority;
__CPROVER_assume( uxPriority < configMAX_PRIORITIES );
vNondetSetCurrentTCB();
vSetGlobalVariables();
vPrepareTaskLists();
vNondetSetCurrentTCB();
vSetGlobalVariables();
vPrepareTaskLists();
pxCreatedTask = pxNondetSetTaskHandle();
pcName = pcNondetSetString( configMAX_TASK_NAME_LEN );
pxCreatedTask = pxNondetSetTaskHandle();
pcName = pcNondetSetString( configMAX_TASK_NAME_LEN );
xTaskCreate(pxTaskCode,
pcName,
usStackDepth,
pvParameters,
uxPriority,
pxCreatedTask );
xTaskCreate( pxTaskCode,
pcName,
usStackDepth,
pvParameters,
uxPriority,
pxCreatedTask );
}

View file

@ -34,16 +34,17 @@
*/
void vNondetSetCurrentTCB( void )
{
pxCurrentTCB = pvPortMalloc( sizeof(TCB_t) );
pxCurrentTCB = pvPortMalloc( sizeof( TCB_t ) );
}
/*
* We just require task lists to be initialized for this proof
*/
void vPrepareTaskLists( void )
{
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
}
/*
@ -52,33 +53,35 @@ void vPrepareTaskLists( void )
*/
void vSetGlobalVariables( void )
{
xSchedulerRunning = nondet_basetype();
uxCurrentNumberOfTasks = nondet_ubasetype();
xSchedulerRunning = nondet_basetype();
uxCurrentNumberOfTasks = nondet_ubasetype();
}
/*
* pvPortMalloc is nondeterministic by definition, thus we do not need
* to check for NULL allocation in this function
*/
TaskHandle_t *pxNondetSetTaskHandle( void )
TaskHandle_t * pxNondetSetTaskHandle( void )
{
TaskHandle_t *pxNondetTaskHandle = pvPortMalloc( sizeof(TaskHandle_t) );
return pxNondetTaskHandle;
TaskHandle_t * pxNondetTaskHandle = pvPortMalloc( sizeof( TaskHandle_t ) );
return pxNondetTaskHandle;
}
/*
* Tries to allocate a string of size xStringLength and sets the string
* to be terminated using a nondeterministic index if allocation was successful
*/
char *pcNondetSetString( size_t xStringLength )
char * pcNondetSetString( size_t xStringLength )
{
char *pcName = pvPortMalloc( xStringLength );
char * pcName = pvPortMalloc( xStringLength );
if ( pcName != NULL ) {
size_t uNondetIndex;
__CPROVER_assume( uNondetIndex < xStringLength );
pcName[uNondetIndex] = '\0';
}
if( pcName != NULL )
{
size_t uNondetIndex;
__CPROVER_assume( uNondetIndex < xStringLength );
pcName[ uNondetIndex ] = '\0';
}
return pcName;
return pcName;
}

View file

@ -40,8 +40,9 @@ BaseType_t xTaskResumeAllStub( void );
* This is a trick to overcome the "redefined twice" error
* when stubbing out the `xTaskResumeAll` function in the header
*/
BaseType_t xTaskResumeAll( void ) {
return xTaskResumeAllStub();
BaseType_t xTaskResumeAll( void )
{
return xTaskResumeAllStub();
}
/*
@ -50,16 +51,14 @@ BaseType_t xTaskResumeAll( void ) {
*/
void harness()
{
TickType_t xTicksToDelay;
BaseType_t xTasksPrepared;
TickType_t xTicksToDelay;
BaseType_t xTasksPrepared;
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
if ( xTasksPrepared != pdFAIL )
{
vTaskDelay( xTicksToDelay );
}
if( xTasksPrepared != pdFAIL )
{
vTaskDelay( xTicksToDelay );
}
}

View file

@ -33,37 +33,40 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
}
/*
@ -74,9 +77,9 @@ TaskHandle_t xUnconstrainedTCB( void )
void vSetGlobalVariables( void )
{
uxSchedulerSuspended = pdFALSE;
xTickCount = nondet_ticktype();
xNextTaskUnblockTime = nondet_ticktype();
uxSchedulerSuspended = pdFALSE;
xTickCount = nondet_ticktype();
xNextTaskUnblockTime = nondet_ticktype();
}
/*
@ -86,39 +89,42 @@ void vSetGlobalVariables( void )
*/
BaseType_t xPrepareTaskLists( void )
{
TCB_t * pxTCB = NULL;
TCB_t * pxTCB = NULL;
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
/* The current task will be moved to the delayed list */
pxCurrentTCB = xUnconstrainedTCB();
if ( pxCurrentTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
/* The current task will be moved to the delayed list */
pxCurrentTCB = xUnconstrainedTCB();
/*
* Nondeterministic insertion of a task in the ready tasks list
* guarantees coverage in line 5104 (tasks.c)
*/
if ( nondet_bool() )
{
pxTCB = xUnconstrainedTCB();
if ( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxTCB->uxPriority ], &( pxTCB->xStateListItem ) );
if( pxCurrentTCB == NULL )
{
return pdFAIL;
}
/* Use of this macro ensures coverage on line 185 (list.c) */
listGET_OWNER_OF_NEXT_ENTRY( pxTCB , &pxReadyTasksLists[ pxTCB->uxPriority ] );
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
/*
* Nondeterministic insertion of a task in the ready tasks list
* guarantees coverage in line 5104 (tasks.c)
*/
if( nondet_bool() )
{
pxTCB = xUnconstrainedTCB();
return pdPASS;
if( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxTCB->uxPriority ], &( pxTCB->xStateListItem ) );
/* Use of this macro ensures coverage on line 185 (list.c) */
listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &pxReadyTasksLists[ pxTCB->uxPriority ] );
}
return pdPASS;
}
/*
@ -129,17 +135,17 @@ BaseType_t xPrepareTaskLists( void )
*/
BaseType_t xTaskResumeAllStub( void )
{
BaseType_t xAlreadyYielded;
BaseType_t xAlreadyYielded;
configASSERT( uxSchedulerSuspended );
configASSERT( uxSchedulerSuspended );
taskENTER_CRITICAL();
{
--uxSchedulerSuspended;
__CPROVER_assert( listLIST_IS_EMPTY( &xPendingReadyList ), "Pending ready tasks list not empty." );
__CPROVER_assert( xPendedTicks == 0 , "xPendedTicks is not equal to zero.");
}
taskEXIT_CRITICAL();
taskENTER_CRITICAL();
{
--uxSchedulerSuspended;
__CPROVER_assert( listLIST_IS_EMPTY( &xPendingReadyList ), "Pending ready tasks list not empty." );
__CPROVER_assert( xPendedTicks == 0, "xPendedTicks is not equal to zero." );
}
taskEXIT_CRITICAL();
return xAlreadyYielded;
return xAlreadyYielded;
}

View file

@ -41,14 +41,14 @@ BaseType_t xPrepareTaskLists( TaskHandle_t * xTask );
*/
void harness()
{
TaskHandle_t xTaskToDelete;
BaseType_t xTasksPrepared;
TaskHandle_t xTaskToDelete;
BaseType_t xTasksPrepared;
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists( &xTaskToDelete );
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists( &xTaskToDelete );
if ( xTasksPrepared != pdFAIL )
{
vTaskDelete( xTaskToDelete );
}
if( xTasksPrepared != pdFAIL )
{
vTaskDelete( xTaskToDelete );
}
}

View file

@ -33,50 +33,53 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
uint8_t ucStaticAllocationFlag;
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
uint8_t ucStaticAllocationFlag;
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
pxTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) STACK_DEPTH ) * sizeof( StackType_t ) ) );
if ( pxTCB->pxStack == NULL )
{
vPortFree( pxTCB );
return NULL;
}
pxTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) STACK_DEPTH ) * sizeof( StackType_t ) ) );
__CPROVER_assume( ucStaticAllocationFlag <= tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
__CPROVER_assume( ucStaticAllocationFlag >= tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB );
pxTCB->ucStaticallyAllocated = ucStaticAllocationFlag;
if( pxTCB->pxStack == NULL )
{
vPortFree( pxTCB );
return NULL;
}
return pxTCB;
__CPROVER_assume( ucStaticAllocationFlag <= tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
__CPROVER_assume( ucStaticAllocationFlag >= tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB );
pxTCB->ucStaticallyAllocated = ucStaticAllocationFlag;
return pxTCB;
}
/*
@ -85,7 +88,7 @@ TaskHandle_t xUnconstrainedTCB( void )
*/
void vSetGlobalVariables()
{
xSchedulerRunning = nondet_basetype();
xSchedulerRunning = nondet_basetype();
}
/*
@ -95,78 +98,84 @@ void vSetGlobalVariables()
*/
BaseType_t xPrepareTaskLists( TaskHandle_t * xTask )
{
TCB_t * pxTCB = NULL;
TCB_t * pxTCB = NULL;
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
/*
* The task handle passed to TaskDelete can be NULL. In that case, the
* task to delete is the one in `pxCurrentTCB`, see the macro `prvGetTCBFromHandle`
* in line 1165 (tasks.c) for reference. For that reason, we provide a similar
* initialization for an arbitrary task `pxTCB` and `pxCurrentTCB`.
*/
/*
* The task handle passed to TaskDelete can be NULL. In that case, the
     * task to delete is the one in `pxCurrentTCB`; see the macro `prvGetTCBFromHandle`
* in line 1165 (tasks.c) for reference. For that reason, we provide a similar
* initialization for an arbitrary task `pxTCB` and `pxCurrentTCB`.
*/
pxTCB = xUnconstrainedTCB();
if ( pxTCB != NULL )
{
if ( nondet_bool() )
{
TCB_t * pxOtherTCB;
pxOtherTCB = xUnconstrainedTCB();
/*
* Nondeterministic insertion of another TCB in the same list
* to guarantee coverage in line 1174 (tasks.c)
*/
if ( pxOtherTCB != NULL )
{
vListInsert( &xPendingReadyList, &( pxOtherTCB->xStateListItem ) );
}
}
vListInsert( &xPendingReadyList, &( pxTCB->xStateListItem ) );
pxTCB = xUnconstrainedTCB();
/*
* Nondeterministic insertion of an event list item to guarantee
* coverage in lines 1180-1184 (tasks.c)
*/
if ( nondet_bool() )
{
vListInsert( pxDelayedTaskList, &( pxTCB->xEventListItem ) );
}
}
if( pxTCB != NULL )
{
if( nondet_bool() )
{
TCB_t * pxOtherTCB;
pxOtherTCB = xUnconstrainedTCB();
/* Note that `*xTask = NULL` can happen here, but this is fine -- `pxCurrentTCB` will be used instead */
*xTask = pxTCB;
/*
* Nondeterministic insertion of another TCB in the same list
* to guarantee coverage in line 1174 (tasks.c)
*/
if( pxOtherTCB != NULL )
{
vListInsert( &xPendingReadyList, &( pxOtherTCB->xStateListItem ) );
}
}
/*
* `pxCurrentTCB` must be initialized the same way as the previous task, but an
* allocation failure cannot happen in this case (i.e., if the previous task is NULL)
*/
pxCurrentTCB = xUnconstrainedTCB();
if ( pxCurrentTCB == NULL )
{
return pdFAIL;
}
vListInsert( &xPendingReadyList, &( pxTCB->xStateListItem ) );
if ( nondet_bool() )
{
TCB_t * pxOtherTCB;
pxOtherTCB = xUnconstrainedTCB();
if ( pxOtherTCB != NULL )
{
vListInsert( &pxReadyTasksLists[ pxOtherTCB->uxPriority ], &( pxOtherTCB->xStateListItem ) );
}
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
/*
* Nondeterministic insertion of an event list item to guarantee
* coverage in lines 1180-1184 (tasks.c)
*/
if( nondet_bool() )
{
vListInsert( pxDelayedTaskList, &( pxTCB->xEventListItem ) );
}
}
/* Use of this macro ensures coverage on line 185 (list.c) */
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB , &pxReadyTasksLists[ pxCurrentTCB->uxPriority ] );
/* Note that `*xTask = NULL` can happen here, but this is fine -- `pxCurrentTCB` will be used instead */
*xTask = pxTCB;
if ( nondet_bool() )
{
vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xEventListItem ) );
}
/*
* `pxCurrentTCB` must be initialized the same way as the previous task, but an
* allocation failure cannot happen in this case (i.e., if the previous task is NULL)
*/
pxCurrentTCB = xUnconstrainedTCB();
return pdPASS;
if( pxCurrentTCB == NULL )
{
return pdFAIL;
}
if( nondet_bool() )
{
TCB_t * pxOtherTCB;
pxOtherTCB = xUnconstrainedTCB();
if( pxOtherTCB != NULL )
{
vListInsert( &pxReadyTasksLists[ pxOtherTCB->uxPriority ], &( pxOtherTCB->xStateListItem ) );
}
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
/* Use of this macro ensures coverage on line 185 (list.c) */
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &pxReadyTasksLists[ pxCurrentTCB->uxPriority ] );
if( nondet_bool() )
{
vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xEventListItem ) );
}
return pdPASS;
}

View file

@ -40,15 +40,15 @@ BaseType_t xPrepareCurrentTCB( void );
*/
void harness()
{
TaskHandle_t xTask;
BaseType_t xTasksPrepared;
TaskHandle_t xTask;
BaseType_t xTasksPrepared;
xTasksPrepared = xPrepareCurrentTCB();
xTasksPrepared = xPrepareCurrentTCB();
if ( xTasksPrepared != pdFAIL )
{
xTask = xTaskGetCurrentTaskHandle();
if( xTasksPrepared != pdFAIL )
{
xTask = xTaskGetCurrentTaskHandle();
__CPROVER_assert( xTask != NULL , "Current task handle is NULL!");
}
__CPROVER_assert( xTask != NULL, "Current task handle is NULL!" );
}
}

View file

@ -33,12 +33,14 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
return pxTCB;
return pxTCB;
}
/*
@ -46,13 +48,14 @@ TaskHandle_t xUnconstrainedTCB( void )
*/
BaseType_t xPrepareCurrentTCB( void )
{
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
pxCurrentTCB = xUnconstrainedTCB();
if ( pxCurrentTCB == NULL )
{
return pdFAIL;
}
pxCurrentTCB = xUnconstrainedTCB();
return pdPASS;
if( pxCurrentTCB == NULL )
{
return pdFAIL;
}
return pdPASS;
}

View file

@ -40,9 +40,9 @@ void vSetGlobalVariables( void );
*/
void harness()
{
BaseType_t xResult;
BaseType_t xResult;
vSetGlobalVariables();
vSetGlobalVariables();
xResult = xTaskGetSchedulerState();
xResult = xTaskGetSchedulerState();
}

View file

@ -32,6 +32,6 @@
*/
void vSetGlobalVariables( void )
{
xSchedulerRunning = nondet_basetype();
uxSchedulerSuspended = nondet_ubasetype();
xSchedulerRunning = nondet_basetype();
uxSchedulerSuspended = nondet_ubasetype();
}

View file

@ -40,10 +40,10 @@ void vPrepareTask( TaskHandle_t * xTask );
*/
void harness()
{
TaskHandle_t xTask;
UBaseType_t uxTaskNumber;
TaskHandle_t xTask;
UBaseType_t uxTaskNumber;
vPrepareTask( &xTask );
vPrepareTask( &xTask );
uxTaskNumber = uxTaskGetTaskNumber( xTask );
uxTaskNumber = uxTaskGetTaskNumber( xTask );
}

View file

@ -33,12 +33,14 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
return pxTCB;
return pxTCB;
}
/*
@ -47,7 +49,7 @@ TaskHandle_t xUnconstrainedTCB( void )
*/
void vPrepareTask( TaskHandle_t * xTask )
{
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
*xTask = xUnconstrainedTCB();
*xTask = xUnconstrainedTCB();
}

View file

@ -34,7 +34,7 @@
void harness()
{
TickType_t xTickCount;
TickType_t xTickCount;
xTickCount = xTaskGetTickCount();
xTickCount = xTaskGetTickCount();
}

View file

@ -41,13 +41,13 @@ BaseType_t xPrepareTaskLists( void );
*/
void harness()
{
BaseType_t xTasksPrepared;
BaseType_t xTasksPrepared;
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
if ( xTasksPrepared != pdFAIL )
{
xTaskIncrementTick();
}
if( xTasksPrepared != pdFAIL )
{
xTaskIncrementTick();
}
}

View file

@ -33,37 +33,40 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
}
/*
@ -72,10 +75,10 @@ TaskHandle_t xUnconstrainedTCB( void )
*/
void vSetGlobalVariables()
{
xPendedTicks = nondet_ubasetype();
uxSchedulerSuspended = nondet_ubasetype();
xYieldPending = nondet_basetype();
xTickCount = nondet_ticktype();
xPendedTicks = nondet_ubasetype();
uxSchedulerSuspended = nondet_ubasetype();
xYieldPending = nondet_basetype();
xTickCount = nondet_ticktype();
}
/*
@ -85,42 +88,48 @@ void vSetGlobalVariables()
*/
BaseType_t xPrepareTaskLists( void )
{
TCB_t * pxTCB = NULL;
TCB_t * pxTCB = NULL;
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
/* Needed for coverage: This task will be moved to a ready list */
pxTCB = xUnconstrainedTCB();
if ( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xStateListItem ) );
/* Needed for coverage: This task will be moved to a ready list */
pxTCB = xUnconstrainedTCB();
/* Needed for coverage. */
listGET_OWNER_OF_NEXT_ENTRY( pxTCB , pxOverflowDelayedTaskList );
if( pxTCB == NULL )
{
return pdFAIL;
}
pxTCB = xUnconstrainedTCB();
if ( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( &xPendingReadyList, &( pxTCB->xStateListItem ) );
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xStateListItem ) );
/* Needed for coverage: A nondeterministic choice */
if ( nondet_bool() )
{
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xEventListItem ) );
}
/* Needed for coverage. */
listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxOverflowDelayedTaskList );
pxCurrentTCB = xUnconstrainedTCB();
if ( pxCurrentTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
pxTCB = xUnconstrainedTCB();
return pdPASS;
if( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( &xPendingReadyList, &( pxTCB->xStateListItem ) );
/* Needed for coverage: A nondeterministic choice */
if( nondet_bool() )
{
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xEventListItem ) );
}
pxCurrentTCB = xUnconstrainedTCB();
if( pxCurrentTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
return pdPASS;
}

View file

@ -41,17 +41,17 @@ BaseType_t xPrepareTaskLists( TaskHandle_t * xTask );
*/
void harness()
{
TaskHandle_t xTask;
UBaseType_t uxNewPriority;
BaseType_t xTasksPrepared;
TaskHandle_t xTask;
UBaseType_t uxNewPriority;
BaseType_t xTasksPrepared;
__CPROVER_assume( uxNewPriority < configMAX_PRIORITIES );
__CPROVER_assume( uxNewPriority < configMAX_PRIORITIES );
xTasksPrepared = xPrepareTaskLists( &xTask );
xTasksPrepared = xPrepareTaskLists( &xTask );
/* Check that this second invocation of xPrepareTaskLists is needed. */
if ( xPrepareTaskLists( &xTask ) != pdFAIL )
{
vTaskPrioritySet( xTask, uxNewPriority );
}
/* Check that this second invocation of xPrepareTaskLists is needed. */
if( xPrepareTaskLists( &xTask ) != pdFAIL )
{
vTaskPrioritySet( xTask, uxNewPriority );
}
}

View file

@ -33,39 +33,41 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
uint8_t ucStaticAllocationFlag;
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
uint8_t ucStaticAllocationFlag;
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
return pxTCB;
}
/*
@ -75,51 +77,53 @@ TaskHandle_t xUnconstrainedTCB( void )
*/
BaseType_t xPrepareTaskLists( TaskHandle_t * xTask )
{
TCB_t * pxTCB = NULL;
TCB_t * pxTCB = NULL;
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
pxTCB = xUnconstrainedTCB();
pxTCB = xUnconstrainedTCB();
/* Needed for coverage: nondet insertion of task. */
if ( nondet_bool() )
{
TCB_t * pxOtherTCB;
pxOtherTCB = xUnconstrainedTCB();
if ( pxOtherTCB != NULL )
{
vListInsert( &pxReadyTasksLists[ pxOtherTCB->uxPriority ], &( pxOtherTCB->xStateListItem ) );
}
}
/* Needed for coverage: nondet insertion of task. */
if( nondet_bool() )
{
TCB_t * pxOtherTCB;
pxOtherTCB = xUnconstrainedTCB();
if ( pxTCB != NULL )
{
/* Needed for coverage: nondeterministic insertion of task */
if ( nondet_bool() )
{
vListInsert( &pxReadyTasksLists[ pxTCB->uxPriority ], &( pxTCB->xStateListItem ) );
}
}
if( pxOtherTCB != NULL )
{
vListInsert( &pxReadyTasksLists[ pxOtherTCB->uxPriority ], &( pxOtherTCB->xStateListItem ) );
}
}
/* Note that `*xTask = NULL` can happen here, but this is fine -- `pxCurrentTCB` will be used instead */
*xTask = pxTCB;
if( pxTCB != NULL )
{
/* Needed for coverage: nondeterministic insertion of task */
if( nondet_bool() )
{
vListInsert( &pxReadyTasksLists[ pxTCB->uxPriority ], &( pxTCB->xStateListItem ) );
}
}
pxCurrentTCB = xUnconstrainedTCB();
if ( pxCurrentTCB == NULL )
{
return pdFAIL;
}
/* Note that `*xTask = NULL` can happen here, but this is fine -- `pxCurrentTCB` will be used instead */
*xTask = pxTCB;
/* Needed for coverage: nondeterministic insertion of task */
if ( nondet_bool() )
{
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
pxCurrentTCB = xUnconstrainedTCB();
/* Needed for coverage. */
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB , &pxReadyTasksLists[ pxCurrentTCB->uxPriority ] );
}
if( pxCurrentTCB == NULL )
{
return pdFAIL;
}
return pdPASS;
/* Needed for coverage: nondeterministic insertion of task */
if( nondet_bool() )
{
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
/* Needed for coverage. */
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &pxReadyTasksLists[ pxCurrentTCB->uxPriority ] );
}
return pdPASS;
}

View file

@ -41,13 +41,13 @@ BaseType_t xPrepareTaskLists( void );
*/
void harness()
{
BaseType_t xTasksPrepared;
BaseType_t xTasksPrepared;
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
if ( xTasksPrepared != pdFAIL )
{
xTaskResumeAll();
}
if( xTasksPrepared != pdFAIL )
{
xTaskResumeAll();
}
}

View file

@ -33,52 +33,56 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
__CPROVER_assume( pxTCB->uxPriority < configMAX_PRIORITIES );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
}
/*
* We set xPendedTicks since __CPROVER_assume does not work
* well with statically initialised variables
*/
void vSetGlobalVariables( void ) {
UBaseType_t uxNonZeroValue;
void vSetGlobalVariables( void )
{
UBaseType_t uxNonZeroValue;
__CPROVER_assume( uxNonZeroValue != 0 );
__CPROVER_assume( uxNonZeroValue != 0 );
uxSchedulerSuspended = uxNonZeroValue;
xPendedTicks = nondet_bool() ? PENDED_TICKS : 0;
uxCurrentNumberOfTasks = nondet_ubasetype();
xTickCount = nondet_ticktype();
uxSchedulerSuspended = uxNonZeroValue;
xPendedTicks = nondet_bool() ? PENDED_TICKS : 0;
uxCurrentNumberOfTasks = nondet_ubasetype();
xTickCount = nondet_ticktype();
}
/*
@ -88,51 +92,59 @@ void vSetGlobalVariables( void ) {
*/
BaseType_t xPrepareTaskLists( void )
{
TCB_t * pxTCB = NULL;
TCB_t * pxTCB = NULL;
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
/* This task will be moved to a ready list, granting coverage
* on lines 2780-2786 (tasks.c) */
pxTCB = xUnconstrainedTCB();
if ( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xStateListItem ) );
/* This task will be moved to a ready list, granting coverage
* on lines 2780-2786 (tasks.c) */
pxTCB = xUnconstrainedTCB();
/* Use of this macro ensures coverage on line 185 (list.c) */
listGET_OWNER_OF_NEXT_ENTRY( pxTCB , pxOverflowDelayedTaskList );
if( pxTCB == NULL )
{
return pdFAIL;
}
pxTCB = xUnconstrainedTCB();
if ( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( &xPendingReadyList, &( pxTCB->xStateListItem ) );
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xEventListItem ) );
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xStateListItem ) );
pxTCB = xUnconstrainedTCB();
if ( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xStateListItem ) );
/* Use of this macro ensures coverage on line 185 (list.c) */
listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxOverflowDelayedTaskList );
/* This nondeterministic choice ensure coverage in line 2746 (tasks.c) */
if ( nondet_bool() )
{
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xEventListItem ) );
}
pxTCB = xUnconstrainedTCB();
pxCurrentTCB = xUnconstrainedTCB();
if ( pxCurrentTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
if( pxTCB == NULL )
{
return pdFAIL;
}
return pdPASS;
vListInsert( &xPendingReadyList, &( pxTCB->xStateListItem ) );
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xEventListItem ) );
pxTCB = xUnconstrainedTCB();
if( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xStateListItem ) );
    /* This nondeterministic choice ensures coverage in line 2746 (tasks.c) */
if( nondet_bool() )
{
vListInsert( pxOverflowDelayedTaskList, &( pxTCB->xEventListItem ) );
}
pxCurrentTCB = xUnconstrainedTCB();
if( pxCurrentTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxCurrentTCB->uxPriority ], &( pxCurrentTCB->xStateListItem ) );
return pdPASS;
}

View file

@ -38,7 +38,7 @@
*/
void harness()
{
TimeOut_t xTime;
TimeOut_t xTime;
vTaskSetTimeOutState( &xTime );
vTaskSetTimeOutState( &xTime );
}

View file

@ -40,12 +40,12 @@ BaseType_t xPrepareTasks( void );
*/
void harness()
{
BaseType_t xTasksPrepared;
BaseType_t xTasksPrepared;
xTasksPrepared = xPrepareTasks();
xTasksPrepared = xPrepareTasks();
if ( xTasksPrepared != pdFAIL )
{
vTaskStartScheduler();
}
if( xTasksPrepared != pdFAIL )
{
vTaskStartScheduler();
}
}

View file

@ -35,18 +35,20 @@
*/
TaskHandle_t xUnconstrainedTCB( void )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
return pxTCB;
return pxTCB;
}
StaticTask_t *pxIdleTaskTCB;
StaticTask_t *pxTimerTaskTCB;
StackType_t *pxIdleTaskStack;
StackType_t *pxTimerTaskStack;
StaticTask_t * pxIdleTaskTCB;
StaticTask_t * pxTimerTaskTCB;
StackType_t * pxIdleTaskStack;
StackType_t * pxTimerTaskStack;
/*
* `pxCurrentTCB` allocation is allowed to fail. The global variables above
@ -56,37 +58,41 @@ StackType_t *pxTimerTaskStack;
*/
BaseType_t xPrepareTasks( void )
{
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
pxCurrentTCB = xUnconstrainedTCB();
pxCurrentTCB = xUnconstrainedTCB();
pxIdleTaskTCB = pvPortMalloc(sizeof(StaticTask_t));
if (pxIdleTaskTCB == NULL )
{
return pdFAIL;
}
pxIdleTaskTCB = pvPortMalloc( sizeof( StaticTask_t ) );
pxIdleTaskStack = pvPortMalloc( sizeof(StackType_t) * configMINIMAL_STACK_SIZE );
if ( pxIdleTaskStack == NULL )
{
return pdFAIL;
}
if( pxIdleTaskTCB == NULL )
{
return pdFAIL;
}
pxTimerTaskTCB = pvPortMalloc( sizeof(StaticTask_t) );
if ( pxTimerTaskTCB == NULL )
{
return pdFAIL;
}
pxIdleTaskStack = pvPortMalloc( sizeof( StackType_t ) * configMINIMAL_STACK_SIZE );
pxTimerTaskStack = pvPortMalloc( sizeof(StackType_t) * configTIMER_TASK_STACK_DEPTH );
if ( pxTimerTaskStack == NULL )
{
return pdFAIL;
}
if( pxIdleTaskStack == NULL )
{
return pdFAIL;
}
return pdPASS;
pxTimerTaskTCB = pvPortMalloc( sizeof( StaticTask_t ) );
if( pxTimerTaskTCB == NULL )
{
return pdFAIL;
}
pxTimerTaskStack = pvPortMalloc( sizeof( StackType_t ) * configTIMER_TASK_STACK_DEPTH );
if( pxTimerTaskStack == NULL )
{
return pdFAIL;
}
return pdPASS;
}
/*
@ -96,17 +102,19 @@ void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
StackType_t ** ppxIdleTaskStackBuffer,
uint32_t * pulIdleTaskStackSize )
{
*ppxIdleTaskTCBBuffer = pxIdleTaskTCB;
*ppxIdleTaskStackBuffer = pxIdleTaskStack;
*ppxIdleTaskTCBBuffer = pxIdleTaskTCB;
*ppxIdleTaskStackBuffer = pxIdleTaskStack;
*pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
}
/*
* The buffers used here have been successfully allocated before (global variables)
*/
void vApplicationGetTimerTaskMemory( StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize )
void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer,
StackType_t ** ppxTimerTaskStackBuffer,
uint32_t * pulTimerTaskStackSize )
{
*ppxTimerTaskTCBBuffer = pxTimerTaskTCB;
*ppxTimerTaskStackBuffer = pxTimerTaskStack;
*pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
*ppxTimerTaskTCBBuffer = pxTimerTaskTCB;
*ppxTimerTaskStackBuffer = pxTimerTaskStack;
*pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
}

View file

@ -38,5 +38,5 @@
*/
void harness()
{
vTaskSuspendAll();
vTaskSuspendAll();
}

View file

@ -42,13 +42,13 @@ BaseType_t xPrepareTaskLists( void );
*/
void harness()
{
BaseType_t xTasksPrepared;
BaseType_t xTasksPrepared;
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
vSetGlobalVariables();
xTasksPrepared = xPrepareTaskLists();
if ( xTasksPrepared != pdFAIL )
{
vTaskSwitchContext();
}
if( xTasksPrepared != pdFAIL )
{
vTaskSwitchContext();
}
}

View file

@ -33,38 +33,41 @@
*/
TaskHandle_t xUnconstrainedTCB( UBaseType_t uxPriority )
{
TCB_t * pxTCB = pvPortMalloc(sizeof(TCB_t));
TCB_t * pxTCB = pvPortMalloc( sizeof( TCB_t ) );
if ( pxTCB == NULL )
return NULL;
if( pxTCB == NULL )
{
return NULL;
}
/* uxPriority is set to a specific priority */
pxTCB->uxPriority = uxPriority;
/* uxPriority is set to a specific priority */
pxTCB->uxPriority = uxPriority;
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
vListInitialiseItem( &( pxTCB->xStateListItem ) );
vListInitialiseItem( &( pxTCB->xEventListItem ) );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xStateListItem ), pxTCB );
listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB );
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ), portMAX_DELAY );
}
if ( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
if( nondet_bool() )
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
}
else
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), portMAX_DELAY );
}
return pxTCB;
}
/*
@ -73,7 +76,7 @@ TaskHandle_t xUnconstrainedTCB( UBaseType_t uxPriority )
*/
void vSetGlobalVariables( void )
{
uxSchedulerSuspended = nondet_ubasetype();
uxSchedulerSuspended = nondet_ubasetype();
}
/*
@ -82,22 +85,25 @@ void vSetGlobalVariables( void )
*/
BaseType_t xPrepareTaskLists( void )
{
TCB_t * pxTCB = NULL;
TCB_t * pxTCB = NULL;
__CPROVER_assert_zero_allocation();
__CPROVER_assert_zero_allocation();
prvInitialiseTaskLists();
prvInitialiseTaskLists();
for ( int i = 0; i < configMAX_PRIORITIES; ++i )
{
pxTCB = xUnconstrainedTCB( i );
if ( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxTCB->uxPriority ], &( pxTCB->xStateListItem ) );
}
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &pxReadyTasksLists[ configMAX_PRIORITIES - 1 ] );
for( int i = 0; i < configMAX_PRIORITIES; ++i )
{
pxTCB = xUnconstrainedTCB( i );
return pdPASS;
if( pxTCB == NULL )
{
return pdFAIL;
}
vListInsert( &pxReadyTasksLists[ pxTCB->uxPriority ], &( pxTCB->xStateListItem ) );
}
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &pxReadyTasksLists[ configMAX_PRIORITIES - 1 ] );
return pdPASS;
}

View file

@ -1,24 +1,30 @@
#define ensure_memory_is_valid( px, length ) (px != NULL) && __CPROVER_w_ok((px), length)
#define ensure_memory_is_valid( px, length ) ( px != NULL ) && __CPROVER_w_ok( ( px ), length )
/* Implementation of safe malloc which returns NULL if the requested size is 0.
Warning: The behavior of malloc(0) is platform dependent.
It is possible for malloc(0) to return an address without allocating memory.*/
void *safeMalloc(size_t xWantedSize) {
return nondet_bool() ? malloc(xWantedSize) : NULL;
* Warning: The behavior of malloc(0) is platform dependent.
 * It is possible for malloc(0) to return an address without allocating memory. */
void * safeMalloc( size_t xWantedSize )
{
return nondet_bool() ? malloc( xWantedSize ) : NULL;
}
/* Memory assignment for FreeRTOS_Socket_t */
FreeRTOS_Socket_t * ensure_FreeRTOS_Socket_t_is_allocated () {
FreeRTOS_Socket_t *pxSocket = safeMalloc(sizeof(FreeRTOS_Socket_t));
if (ensure_memory_is_valid(pxSocket, sizeof(FreeRTOS_Socket_t))) {
pxSocket->u.xTCP.rxStream = safeMalloc(sizeof(StreamBuffer_t));
pxSocket->u.xTCP.txStream = safeMalloc(sizeof(StreamBuffer_t));
pxSocket->u.xTCP.pxPeerSocket = safeMalloc(sizeof(FreeRTOS_Socket_t));
}
return pxSocket;
FreeRTOS_Socket_t * ensure_FreeRTOS_Socket_t_is_allocated()
{
FreeRTOS_Socket_t * pxSocket = safeMalloc( sizeof( FreeRTOS_Socket_t ) );
if( ensure_memory_is_valid( pxSocket, sizeof( FreeRTOS_Socket_t ) ) )
{
pxSocket->u.xTCP.rxStream = safeMalloc( sizeof( StreamBuffer_t ) );
pxSocket->u.xTCP.txStream = safeMalloc( sizeof( StreamBuffer_t ) );
pxSocket->u.xTCP.pxPeerSocket = safeMalloc( sizeof( FreeRTOS_Socket_t ) );
}
return pxSocket;
}
/* Memory assignment for FreeRTOS_Network_Buffer */
NetworkBufferDescriptor_t * ensure_FreeRTOS_NetworkBuffer_is_allocated () {
return safeMalloc(sizeof(NetworkBufferDescriptor_t));
NetworkBufferDescriptor_t * ensure_FreeRTOS_NetworkBuffer_is_allocated()
{
return safeMalloc( sizeof( NetworkBufferDescriptor_t ) );
}
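A typical harness built on these helpers (sketched below; the function name vHypotheticalTCPProof and the non-NULL assumptions are illustrative placeholders rather than code from this repository) allocates the objects nondeterministically, keeps only the runs in which allocation succeeded, and then calls the code under proof so CBMC checks every memory access it performs:
void harness()
{
    FreeRTOS_Socket_t * pxSocket = ensure_FreeRTOS_Socket_t_is_allocated();
    NetworkBufferDescriptor_t * pxNetworkBuffer = ensure_FreeRTOS_NetworkBuffer_is_allocated();

    /* Discard the runs in which either allocation failed. */
    __CPROVER_assume( pxSocket != NULL );
    __CPROVER_assume( pxNetworkBuffer != NULL );

    /* Placeholder for the API actually being proved memory safe. */
    vHypotheticalTCPProof( pxSocket, pxNetworkBuffer );
}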

View file

@ -1 +0,0 @@

View file

@ -126,7 +126,7 @@ void vConfigureTimerForRunTimeStats( void ); /* Prototype of function that in
{ \
vFakeAssert( false, __FILE__, __LINE__ ); \
} \
} while ( 0 )
} while( 0 )
#define mtCOVERAGE_TEST_MARKER() __asm volatile ( "NOP" )

View file

@ -194,7 +194,7 @@ void test_xEventGroupStaticCreate_Success( void )
/*!
 * @brief validate statically creating and deleting a new RTOS event group.
*
*
*/
void test_xEventGroupStaticCreate_InvalidInput_Failed( void )
{
@ -435,7 +435,7 @@ void test_xEventGroupWaitBits_WhenNoBitWasSet_WaitForBoth_ClearBit_Success( void
vListInitialise_Expect( 0 );
vListInitialise_IgnoreArg_pxList();
vListInitialise_ReturnThruPtr_pxList( pxListTemp );
/* Expectation of Function: xEventGroupWaitBits */
vTaskSuspendAll_Ignore();
xTaskGetSchedulerState_IgnoreAndReturn( taskSCHEDULER_SUSPENDED );
@ -463,7 +463,7 @@ void test_xEventGroupWaitBits_WhenNoBitWasSet_WaitForBoth_ClearBit_Success( void
}
/*!
* @brief validate non-block waiting on for either one bits is set when currently no bits are set.
 * @brief validate a non-blocking wait for either one of the bits to be set when currently no bits are set.
* Don't clear the bit before return.
* @coverage xEventGroupWaitBits
*/
@ -486,7 +486,7 @@ void test_xEventGroupWaitBits_WhenNoBitWasSet_NonBlock_WaitForEither_NoClear_Suc
StaticEventGroup_t xCreatedEventGroup = { 0 };
xEventGroupHandle = xEventGroupCreateStatic( &xCreatedEventGroup );
uxBitsSetVal = xEventGroupGetBits( xEventGroupHandle );
/* API to Test */
uxBitsGetVal = xEventGroupWaitBits(
xEventGroupHandle, /* The event group being tested. */
@ -500,7 +500,7 @@ void test_xEventGroupWaitBits_WhenNoBitWasSet_NonBlock_WaitForEither_NoClear_Suc
}
/*!
* @brief validate waiting on for either one bits. The function should return when one bits are set.
 * @brief validate waiting for either one of the bits. The function should return when one of the bits is set.
* Don't clear the bit before return.
* @coverage xEventGroupWaitBits
*/
@ -528,7 +528,7 @@ void test_xEventGroupWaitBits_WhenBitWasSet_WaitForEither_NoClear_Success( void
const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
StaticEventGroup_t xCreatedEventGroup = { 0 };
xEventGroupHandle = xEventGroupCreateStatic( &xCreatedEventGroup );
uxBitsSetVal = xEventGroupSetBits( xEventGroupHandle, BIT_0 ); /* BIT_0 was set */
uxBitsSetVal = xEventGroupSetBits( xEventGroupHandle, BIT_0 ); /* BIT_0 was set */
/* API to Test */
uxBitsSetVal = xEventGroupWaitBits(
@ -574,8 +574,8 @@ void test_xEventGroupWaitBits_WhenBitWasSet_WaitForBoth_WithClear_Success( void
const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
StaticEventGroup_t xCreatedEventGroup = { 0 };
xEventGroupHandle = xEventGroupCreateStatic( &xCreatedEventGroup );
uxBitsSetVal = xEventGroupSetBits( xEventGroupHandle, BIT_0 ); /* BIT_0 was set */
uxBitsSetVal = xEventGroupSetBits( xEventGroupHandle, BIT_4 ); /* BIT_4 was set */
uxBitsSetVal = xEventGroupSetBits( xEventGroupHandle, BIT_0 ); /* BIT_0 was set */
uxBitsSetVal = xEventGroupSetBits( xEventGroupHandle, BIT_4 ); /* BIT_4 was set */
TEST_ASSERT_EQUAL( BIT_0 | BIT_4, uxBitsSetVal );
/* API to Test */
@ -594,7 +594,7 @@ void test_xEventGroupWaitBits_WhenBitWasSet_WaitForBoth_WithClear_Success( void
/*!
* @brief validate tasks sync on event bits:
* Set BIT_0 before reach the sync point and wait for all sync bits are set.
 *        Set BIT_0 before reaching the sync point and wait until all sync bits are set.
* Should return due to timeout.
* @coverage xEventGroupSync
*/
@ -611,7 +611,7 @@ void test_xEventGroupSync_SetBits_BlockWait_NotSynced_Success( void )
listGET_NEXT_ExpectAnyArgsAndReturn( ( ListItem_t * ) NULL );
xTaskResumeAll_IgnoreAndReturn( 1 );
listGET_LIST_ITEM_VALUE_IgnoreAndReturn( 0 );
/* Expectation of Function: xEventGroupSync */
xTaskGetSchedulerState_IgnoreAndReturn( taskSCHEDULER_SUSPENDED );
vTaskPlaceOnUnorderedEventList_Ignore();

View file

@ -98,7 +98,7 @@
{ \
TEST_ASSERT_EQUAL( configASSERT_E, e ); \
} \
} while ( 0 )
} while( 0 )
/* ============================ GLOBAL VARIABLES =========================== */

View file

@ -127,7 +127,7 @@ void vConfigureTimerForRunTimeStats( void ); /* Prototype of function that in
{ \
vFakeAssert( false, __FILE__, __LINE__ ); \
} \
} while ( 0 )
} while( 0 )
#define mtCOVERAGE_TEST_MARKER() __asm volatile ( "NOP" )

View file

@ -126,7 +126,7 @@ void vConfigureTimerForRunTimeStats( void ); /* Prototype of function that in
{ \
vFakeAssert( false, __FILE__, __LINE__ ); \
} \
} while ( 0 )
} while( 0 )
#define mtCOVERAGE_TEST_MARKER() __asm volatile ( "NOP" )

View file

@ -57,9 +57,9 @@ static uint32_t ulNumMallocCalls = 0;
/* =========================== HELPER FUNCTIONS =========================== */
void setxMaskAssertAndAbort( bool mask )
{
xMaskAssertAndAbort = mask;
xMaskAssertAndAbort = mask;
}
bool getxMaskAssertAndAbort( )
bool getxMaskAssertAndAbort()
{
return xMaskAssertAndAbort;
}

View file

@ -70,7 +70,7 @@
/* =========================== FUNCTION PROTOTYPES ======================== */
void setxMaskAssertAndAbort( bool mask );
bool getxMaskAssertAndAbort( );
bool getxMaskAssertAndAbort();
/* ============================ GLOBAL VARIABLES =========================== */
/* ================================= MACROS ================================ */
@ -93,7 +93,7 @@ bool getxMaskAssertAndAbort( );
} \
Catch( e ) \
TEST_ASSERT_EQUAL( configASSERT_E, e ); \
} while ( 0 )
} while( 0 )
/* ========================== CALLBACK FUNCTIONS =========================== */

View file

@ -126,7 +126,7 @@ void vConfigureTimerForRunTimeStats( void ); /* Prototype of function that in
{ \
vFakeAssert( false, __FILE__, __LINE__ ); \
} \
} while ( 0 )
} while( 0 )
#define mtCOVERAGE_TEST_MARKER() __asm volatile ( "NOP" )

View file

@ -126,7 +126,7 @@ void vConfigureTimerForRunTimeStats( void ); /* Prototype of function that in
{ \
vFakeAssert( false, __FILE__, __LINE__ ); \
} \
} while ( 0 )
} while( 0 )
#define mtCOVERAGE_TEST_MARKER() __asm volatile ( "NOP" )

View file

@ -127,7 +127,7 @@ void vConfigureTimerForRunTimeStats( void ); /* Prototype of function that in
{ \
vFakeAssert( false, __FILE__, __LINE__ ); \
} \
} while ( 0 )
} while( 0 )
#define mtCOVERAGE_TEST_MARKER() __asm volatile ( "NOP" )

View file

@ -126,7 +126,7 @@ void vConfigureTimerForRunTimeStats( void ); /* Prototype of function that in
{ \
vFakeAssert( false, __FILE__, __LINE__ ); \
} \
} while ( 0 )
} while( 0 )
#define mtCOVERAGE_TEST_MARKER() __asm volatile ( "NOP" )

View file

@ -99,7 +99,7 @@
{ \
TEST_ASSERT_EQUAL( configASSERT_E, e ); \
} \
} while ( 0 )
} while( 0 )
/* ============================ GLOBAL VARIABLES =========================== */

View file

@ -1,10 +1,10 @@
## Testing in FreeRTOS
FreeRTOS kernel consists of common code and porting layer. Extensive [static analysis](https://en.wikipedia.org/wiki/Static_program_analysis) and [dynamic analysis](https://en.wikipedia.org/wiki/Dynamic_program_analysis) are done on both to ensure functional correctness of FreeRTOS kernel.
The FreeRTOS kernel consists of common code and a porting layer. Extensive [static analysis](https://en.wikipedia.org/wiki/Static_program_analysis) and [dynamic analysis](https://en.wikipedia.org/wiki/Dynamic_program_analysis) are done on both to ensure the functional correctness of the FreeRTOS kernel.
For more information on FreeRTOS testing please refer to https://www.freertos.org/FreeRTOS-Coding-Standard-and-Style-Guide.html.
## Directory structure
This directory is in working progress -- we are migrating scattered test cases to this directory. Here only lists what's currently under this directory.
This directory is a work in progress -- we are migrating scattered test cases into it. Only what is currently under this directory is listed here.
- ```./CBMC```: This directory contains automated proofs of the memory safety of various parts of the FreeRTOS code base.
- ```./VeriFast```: This directory contains automated proofs of the functional correctness of various parts of the FreeRTOS code base.
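The proofs under ```./CBMC``` follow a common harness pattern; a minimal sketch (with placeholder names, so ```vSomeKernelApiUnderProof``` is not a real function) is shown here. Inputs are left nondeterministic, constrained with ```__CPROVER_assume``` only as far as the property requires, and the API under proof is then called so CBMC can check every reachable memory access.
void harness()
{
    UBaseType_t uxValue; /* deliberately unconstrained: CBMC explores all possible values */

    /* Constrain the input only as much as the proof needs. */
    __CPROVER_assume( uxValue < configMAX_PRIORITIES );

    /* Placeholder for the kernel API whose memory safety is being checked. */
    vSomeKernelApiUnderProof( uxValue );
}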

View file

@ -30,14 +30,14 @@
#define VERIFAST
#include <stdlib.h>
#include <stdint.h>
//@#include "common.gh"
/*@#include "common.gh" */
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
#define pdTRUE 1
#define pdFALSE 0
#define pdTRUE 1
#define pdFALSE 0
/* Empty/no-op macros */
#define mtCOVERAGE_TEST_MARKER()
@ -50,323 +50,327 @@ typedef ssize_t BaseType_t;
#define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxListItem )
/* Max value stored in sentinel xListEnd element */
#define portMAX_DELAY UINT_MAX
#define portMAX_DELAY UINT_MAX
struct xLIST;
struct xLIST_ITEM {
TickType_t xItemValue;
struct xLIST_ITEM * pxNext;
struct xLIST_ITEM * pxPrevious;
void * pvOwner;
struct xLIST *pxContainer;
struct xLIST_ITEM
{
TickType_t xItemValue;
struct xLIST_ITEM * pxNext;
struct xLIST_ITEM * pxPrevious;
void * pvOwner;
struct xLIST * pxContainer;
};
typedef struct xLIST_ITEM ListItem_t;
typedef struct xLIST {
UBaseType_t uxNumberOfItems;
struct xLIST_ITEM *pxIndex;
#ifdef VERIFAST /*< ***change MiniList_t to ListItem_t*** */
struct xLIST_ITEM xListEnd;
#else
MiniListItem_t xListEnd;
#endif
typedef struct xLIST
{
UBaseType_t uxNumberOfItems;
struct xLIST_ITEM * pxIndex;
#ifdef VERIFAST /*< ***change MiniList_t to ListItem_t*** */
struct xLIST_ITEM xListEnd;
#else
MiniListItem_t xListEnd;
#endif
} List_t;
/*@
predicate xLIST_ITEM(
struct xLIST_ITEM *n,
TickType_t xItemValue,
struct xLIST_ITEM *pxNext,
struct xLIST_ITEM *pxPrevious,
struct xLIST *pxContainer;) =
n->xItemValue |-> xItemValue &*&
n->pxNext |-> pxNext &*&
n->pxPrevious |-> pxPrevious &*&
n->pvOwner |-> _ &*&
n->pxContainer |-> pxContainer;
@*/
* predicate xLIST_ITEM(
* struct xLIST_ITEM *n,
* TickType_t xItemValue,
* struct xLIST_ITEM *pxNext,
* struct xLIST_ITEM *pxPrevious,
* struct xLIST *pxContainer;) =
* n->xItemValue |-> xItemValue &*&
* n->pxNext |-> pxNext &*&
* n->pxPrevious |-> pxPrevious &*&
* n->pvOwner |-> _ &*&
* n->pxContainer |-> pxContainer;
* @*/
/* Ferreira et al. (STTT'14) doubly-linked list segment (DLS). */
/*@
predicate DLS(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells,
list<TickType_t > vals,
struct xLIST *pxContainer) =
n == m
? cells == cons(n, nil) &*&
vals == cons(?v, nil) &*&
xLIST_ITEM(n, v, mnext, nprev, pxContainer)
: cells == cons(n, ?cells0) &*&
vals == cons(?v, ?vals0) &*&
xLIST_ITEM(n, v, ?o, nprev, pxContainer) &*& DLS(o, n, mnext, m, cells0, vals0, pxContainer);
lemma void dls_star_item(
struct xLIST_ITEM *n,
struct xLIST_ITEM *m,
struct xLIST_ITEM *o)
requires DLS(n, ?nprev, ?mnext, m, ?cells, ?vals, ?l) &*& xLIST_ITEM(o, ?v, ?onext, ?oprev, ?l2);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& xLIST_ITEM(o, v, onext, oprev, l2) &*& mem(o, cells) == false;
{
open DLS(n, nprev, mnext, m, cells, vals, l);
if (n == m) {
assert xLIST_ITEM(n, _, _, _, _);
open xLIST_ITEM(n, _, _, _, _);
open xLIST_ITEM(o, _, _, _, _);
assert n != o;
close xLIST_ITEM(o, _, _, _, _);
close xLIST_ITEM(n, _, _, _, _);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
else {
assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
dls_star_item(nnext, m, o);
open xLIST_ITEM(n, _, _, _, _);
open xLIST_ITEM(o, _, _, _, _);
assert n != o;
close xLIST_ITEM(o, _, _, _, _);
close xLIST_ITEM(n, _, _, _, _);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
}
lemma void dls_distinct(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells)
requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& distinct(cells) == true;
{
if (n == m) {
open DLS(n, nprev, mnext, m, cells, vals, l);
close DLS(n, nprev, mnext, m, cells, vals, l);
} else {
open DLS(n, nprev, mnext, m, cells, vals, l);
assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
dls_distinct(nnext, n, mnext, m, tail(cells));
dls_star_item(nnext, m, n);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
}
predicate xLIST(
struct xLIST *l,
int uxNumberOfItems,
struct xLIST_ITEM *pxIndex,
struct xLIST_ITEM *xListEnd,
list<struct xLIST_ITEM *>cells,
list<TickType_t >vals) =
l->uxNumberOfItems |-> uxNumberOfItems &*&
l->pxIndex |-> pxIndex &*&
mem(pxIndex, cells) == true &*&
xListEnd == &(l->xListEnd) &*&
xListEnd == head(cells) &*&
portMAX_DELAY == head(vals) &*&
struct_xLIST_ITEM_padding(&l->xListEnd) &*&
length(cells) == length(vals) &*&
uxNumberOfItems + 1 == length(cells) &*&
DLS(xListEnd, ?endprev, xListEnd, endprev, cells, vals, l);
lemma void xLIST_distinct_cells(struct xLIST *l)
requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals);
ensures xLIST(l, n, idx, end, cells, vals) &*& distinct(cells) == true;
{
open xLIST(l, n, idx, end, cells, vals);
assert DLS(end, ?endprev, end, _, cells, vals, l);
dls_distinct(end, endprev, end, endprev, cells);
close xLIST(l, n, idx, end, cells, vals);
}
lemma void xLIST_star_item(struct xLIST *l, struct xLIST_ITEM *x)
requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals) &*& xLIST_ITEM(x, ?v, ?xnext, ?xprev, ?l2);
ensures xLIST(l, n, idx, end, cells, vals) &*& xLIST_ITEM(x, v, xnext, xprev, l2) &*& mem(x, cells) == false;
{
open xLIST(l, n, idx, end, cells, vals);
assert DLS(end, ?endprev, end, _, cells, vals, l);
dls_distinct(end, endprev, end, endprev, cells);
dls_star_item(end, endprev, x);
close xLIST(l, n, idx, end, cells, vals);
}
lemma void dls_first_mem(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells)
requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(n, cells) == true &*& index_of(n, cells) == 0;
{
open DLS(n, nprev, mnext, m, cells, vals, l);
if (n == m) {
assert cells == cons(n, nil);
close DLS(n, nprev, mnext, m, cells, vals, l);
} else {
assert cells == cons(n, ?tail);
close DLS(n, nprev, mnext, m, cells, vals, l);
}
}
lemma void dls_not_empty(
struct xLIST_ITEM *n,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells,
struct xLIST_ITEM *x)
requires DLS(n, m, n, m, cells, ?vals, ?l) &*& mem(x, cells) == true &*& x != n;
ensures DLS(n, m, n, m, cells, vals, l) &*& n != m;
{
open DLS(n, m, n, m, cells, vals, l);
close DLS(n, m, n, m, cells, vals, l);
}
lemma void dls_last_mem(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells)
requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(m, cells) == true &*& index_of(m, cells) == length(cells) - 1;
{
open DLS(n, nprev, mnext, m, cells, vals, l);
if (n == m) {
// trivial
} else {
open xLIST_ITEM(n, _, ?nnext, _, l);
assert DLS(?o, n, mnext, m, tail(cells), tail(vals), l);
dls_last_mem(o, n, mnext, m, tail(cells));
close xLIST_ITEM(n, _, nnext, _, l);
}
close DLS(n, nprev, mnext, m, cells, vals, l);
}
lemma void split(
struct xLIST_ITEM *n,
struct xLIST_ITEM *nprev,
struct xLIST_ITEM *mnext,
struct xLIST_ITEM *m,
list<struct xLIST_ITEM * > cells,
list<TickType_t > vals,
struct xLIST_ITEM *x,
int i)
requires DLS(n, nprev, mnext, m, cells, vals, ?l) &*& x != n &*& mem(x, cells) == true &*& index_of(x,cells) == i;
ensures DLS(n, nprev, x, ?xprev, take(i, cells), take(i, vals), l) &*& DLS(x, xprev, mnext, m, drop(i, cells), drop(i, vals), l) &*& xprev == nth(i-1, cells);
{
open DLS(n, nprev, mnext, m, cells, vals, l);
assert n != m;
assert xLIST_ITEM(n, ?v, ?nnext, _, _);
assert DLS(nnext, n, mnext, m, tail(cells), tail(vals), l);
if (nnext == x) {
close DLS(n, nprev, x, n, singleton(n), singleton(v), l);
open DLS(x, n, mnext, m, tail(cells), tail(vals), l);
open xLIST_ITEM(x, _, ?xnext, ?xprev, l);
close xLIST_ITEM(x, _, xnext, xprev, l);
close DLS(x, n, mnext, m, tail(cells), tail(vals), l);
} else {
assert nnext != x;
split(nnext, n, mnext, m, tail(cells), tail(vals), x, i - 1);
assert DLS(nnext, n, x, ?xprev, take(i-1, tail(cells)), take(i-1, tail(vals)), l);
dls_distinct(nnext, n, x, xprev, take(i-1, tail(cells)));
dls_star_item(nnext, xprev, n);
dls_last_mem(nnext, n, x, xprev, take(i-1, tail(cells)));
assert n != xprev;
close DLS(n, nprev, x, xprev, take(i, cells), take(i, vals), l);
}
}
lemma void join(
struct xLIST_ITEM *n1,
struct xLIST_ITEM *nprev1,
struct xLIST_ITEM *mnext1,
struct xLIST_ITEM *m1,
list<struct xLIST_ITEM * > cells1,
list<TickType_t > vals1,
struct xLIST_ITEM *n2,
struct xLIST_ITEM *nprev2,
struct xLIST_ITEM *mnext2,
struct xLIST_ITEM *m2,
list<struct xLIST_ITEM * > cells2,
list<TickType_t > vals2)
requires
DLS(n1, nprev1, mnext1, m1, cells1, vals1, ?l) &*&
DLS(n2, nprev2, mnext2, m2, cells2, vals2, l) &*&
mnext1 == n2 &*& m1 == nprev2;
ensures DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
{
if (n1 == m1) {
dls_first_mem(n1, nprev1, mnext1, m1, cells1);
dls_last_mem(n2, nprev2, mnext2, m2, cells2);
open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
dls_star_item(n2, m2, n1);
close DLS(n1, nprev1, mnext2, m2, append(singleton(n1), cells2), append(vals1, vals2), l);
} else {
open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
assert DLS(?o, n1, mnext1, m1, ?cells1_tail, ?vals1_tail, l);
join(o, n1, mnext1, m1, cells1_tail, vals1_tail,
n2, nprev2, mnext2, m2, cells2, vals2);
assert DLS(o, n1, mnext2, m2, append(cells1_tail, cells2), append(vals1_tail, vals2), l);
dls_last_mem(o, n1, mnext2, m2, append(cells1_tail, cells2));
dls_star_item(o, m2, n1);
close DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
}
}
lemma void idx_remains_in_list<t>(
list<t> cells,
t idx,
t x,
int ix)
requires
idx != x &*&
mem(idx, cells) == true &*&
mem(x, cells) == true &*&
index_of(x, cells) == ix;
ensures mem(idx, remove_nth(ix, cells)) == true;
{
neq_mem_remove(idx, x, cells);
remove_remove_nth(cells, x);
}
@*/
* predicate DLS(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells,
* list<TickType_t > vals,
* struct xLIST *pxContainer) =
* n == m
* ? cells == cons(n, nil) &*&
* vals == cons(?v, nil) &*&
* xLIST_ITEM(n, v, mnext, nprev, pxContainer)
* : cells == cons(n, ?cells0) &*&
* vals == cons(?v, ?vals0) &*&
* xLIST_ITEM(n, v, ?o, nprev, pxContainer) &*& DLS(o, n, mnext, m, cells0, vals0, pxContainer);
*
* lemma void dls_star_item(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *m,
* struct xLIST_ITEM *o)
* requires DLS(n, ?nprev, ?mnext, m, ?cells, ?vals, ?l) &*& xLIST_ITEM(o, ?v, ?onext, ?oprev, ?l2);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& xLIST_ITEM(o, v, onext, oprev, l2) &*& mem(o, cells) == false;
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* if (n == m) {
* assert xLIST_ITEM(n, _, _, _, _);
* open xLIST_ITEM(n, _, _, _, _);
* open xLIST_ITEM(o, _, _, _, _);
* assert n != o;
* close xLIST_ITEM(o, _, _, _, _);
* close xLIST_ITEM(n, _, _, _, _);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* else {
* assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
* dls_star_item(nnext, m, o);
* open xLIST_ITEM(n, _, _, _, _);
* open xLIST_ITEM(o, _, _, _, _);
* assert n != o;
* close xLIST_ITEM(o, _, _, _, _);
* close xLIST_ITEM(n, _, _, _, _);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* }
*
* lemma void dls_distinct(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells)
* requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& distinct(cells) == true;
* {
* if (n == m) {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* } else {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l);
* dls_distinct(nnext, n, mnext, m, tail(cells));
* dls_star_item(nnext, m, n);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* }
*
* predicate xLIST(
* struct xLIST *l,
* int uxNumberOfItems,
* struct xLIST_ITEM *pxIndex,
* struct xLIST_ITEM *xListEnd,
* list<struct xLIST_ITEM *>cells,
* list<TickType_t >vals) =
* l->uxNumberOfItems |-> uxNumberOfItems &*&
* l->pxIndex |-> pxIndex &*&
* mem(pxIndex, cells) == true &*&
* xListEnd == &(l->xListEnd) &*&
* xListEnd == head(cells) &*&
* portMAX_DELAY == head(vals) &*&
* struct_xLIST_ITEM_padding(&l->xListEnd) &*&
* length(cells) == length(vals) &*&
* uxNumberOfItems + 1 == length(cells) &*&
* DLS(xListEnd, ?endprev, xListEnd, endprev, cells, vals, l);
*
* lemma void xLIST_distinct_cells(struct xLIST *l)
* requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals);
* ensures xLIST(l, n, idx, end, cells, vals) &*& distinct(cells) == true;
* {
* open xLIST(l, n, idx, end, cells, vals);
* assert DLS(end, ?endprev, end, _, cells, vals, l);
* dls_distinct(end, endprev, end, endprev, cells);
* close xLIST(l, n, idx, end, cells, vals);
* }
*
* lemma void xLIST_star_item(struct xLIST *l, struct xLIST_ITEM *x)
* requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals) &*& xLIST_ITEM(x, ?v, ?xnext, ?xprev, ?l2);
* ensures xLIST(l, n, idx, end, cells, vals) &*& xLIST_ITEM(x, v, xnext, xprev, l2) &*& mem(x, cells) == false;
* {
* open xLIST(l, n, idx, end, cells, vals);
* assert DLS(end, ?endprev, end, _, cells, vals, l);
* dls_distinct(end, endprev, end, endprev, cells);
* dls_star_item(end, endprev, x);
* close xLIST(l, n, idx, end, cells, vals);
* }
*
* lemma void dls_first_mem(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells)
* requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(n, cells) == true &*& index_of(n, cells) == 0;
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* if (n == m) {
* assert cells == cons(n, nil);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* } else {
* assert cells == cons(n, ?tail);
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
* }
*
* lemma void dls_not_empty(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells,
* struct xLIST_ITEM *x)
* requires DLS(n, m, n, m, cells, ?vals, ?l) &*& mem(x, cells) == true &*& x != n;
* ensures DLS(n, m, n, m, cells, vals, l) &*& n != m;
* {
* open DLS(n, m, n, m, cells, vals, l);
* close DLS(n, m, n, m, cells, vals, l);
* }
*
* lemma void dls_last_mem(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells)
* requires DLS(n, nprev, mnext, m, cells, ?vals, ?l);
* ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(m, cells) == true &*& index_of(m, cells) == length(cells) - 1;
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* if (n == m) {
* // trivial
* } else {
* open xLIST_ITEM(n, _, ?nnext, _, l);
* assert DLS(?o, n, mnext, m, tail(cells), tail(vals), l);
* dls_last_mem(o, n, mnext, m, tail(cells));
* close xLIST_ITEM(n, _, nnext, _, l);
* }
* close DLS(n, nprev, mnext, m, cells, vals, l);
* }
*
* lemma void split(
* struct xLIST_ITEM *n,
* struct xLIST_ITEM *nprev,
* struct xLIST_ITEM *mnext,
* struct xLIST_ITEM *m,
* list<struct xLIST_ITEM * > cells,
* list<TickType_t > vals,
* struct xLIST_ITEM *x,
* int i)
* requires DLS(n, nprev, mnext, m, cells, vals, ?l) &*& x != n &*& mem(x, cells) == true &*& index_of(x,cells) == i;
* ensures DLS(n, nprev, x, ?xprev, take(i, cells), take(i, vals), l) &*& DLS(x, xprev, mnext, m, drop(i, cells), drop(i, vals), l) &*& xprev == nth(i-1, cells);
* {
* open DLS(n, nprev, mnext, m, cells, vals, l);
* assert n != m;
* assert xLIST_ITEM(n, ?v, ?nnext, _, _);
* assert DLS(nnext, n, mnext, m, tail(cells), tail(vals), l);
* if (nnext == x) {
* close DLS(n, nprev, x, n, singleton(n), singleton(v), l);
* open DLS(x, n, mnext, m, tail(cells), tail(vals), l);
* open xLIST_ITEM(x, _, ?xnext, ?xprev, l);
* close xLIST_ITEM(x, _, xnext, xprev, l);
* close DLS(x, n, mnext, m, tail(cells), tail(vals), l);
* } else {
* assert nnext != x;
* split(nnext, n, mnext, m, tail(cells), tail(vals), x, i - 1);
* assert DLS(nnext, n, x, ?xprev, take(i-1, tail(cells)), take(i-1, tail(vals)), l);
* dls_distinct(nnext, n, x, xprev, take(i-1, tail(cells)));
* dls_star_item(nnext, xprev, n);
* dls_last_mem(nnext, n, x, xprev, take(i-1, tail(cells)));
* assert n != xprev;
* close DLS(n, nprev, x, xprev, take(i, cells), take(i, vals), l);
* }
* }
*
* lemma void join(
* struct xLIST_ITEM *n1,
* struct xLIST_ITEM *nprev1,
* struct xLIST_ITEM *mnext1,
* struct xLIST_ITEM *m1,
* list<struct xLIST_ITEM * > cells1,
* list<TickType_t > vals1,
* struct xLIST_ITEM *n2,
* struct xLIST_ITEM *nprev2,
* struct xLIST_ITEM *mnext2,
* struct xLIST_ITEM *m2,
* list<struct xLIST_ITEM * > cells2,
* list<TickType_t > vals2)
* requires
* DLS(n1, nprev1, mnext1, m1, cells1, vals1, ?l) &*&
* DLS(n2, nprev2, mnext2, m2, cells2, vals2, l) &*&
* mnext1 == n2 &*& m1 == nprev2;
* ensures DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
* {
* if (n1 == m1) {
* dls_first_mem(n1, nprev1, mnext1, m1, cells1);
* dls_last_mem(n2, nprev2, mnext2, m2, cells2);
* open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
* dls_star_item(n2, m2, n1);
* close DLS(n1, nprev1, mnext2, m2, append(singleton(n1), cells2), append(vals1, vals2), l);
* } else {
* open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l);
* assert DLS(?o, n1, mnext1, m1, ?cells1_tail, ?vals1_tail, l);
* join(o, n1, mnext1, m1, cells1_tail, vals1_tail,
* n2, nprev2, mnext2, m2, cells2, vals2);
* assert DLS(o, n1, mnext2, m2, append(cells1_tail, cells2), append(vals1_tail, vals2), l);
* dls_last_mem(o, n1, mnext2, m2, append(cells1_tail, cells2));
* dls_star_item(o, m2, n1);
* close DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l);
* }
* }
*
* lemma void idx_remains_in_list<t>(
* list<t> cells,
* t idx,
* t x,
* int ix)
* requires
* idx != x &*&
* mem(idx, cells) == true &*&
* mem(x, cells) == true &*&
* index_of(x, cells) == ix;
* ensures mem(idx, remove_nth(ix, cells)) == true;
* {
* neq_mem_remove(idx, x, cells);
* remove_remove_nth(cells, x);
* }
* @*/
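To make the DLS definition above concrete, a minimal two-item segment (the names a, b, va and vb below are purely illustrative) unfolds as follows:

/* Illustrative unfolding of a two-item segment a -> b, with a != b:
 *   DLS(a, nprev, mnext, b, cons(a, cons(b, nil)), cons(va, cons(vb, nil)), l)
 * opens (recursive case once, then the base case) to
 *   xLIST_ITEM(a, va, b, nprev, l) &*& xLIST_ITEM(b, vb, mnext, a, l)
 * so a's pxNext is b, b's pxPrevious is a, and the segment's outer links are
 * nprev (before a) and mnext (after b). */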
/* Following lemma from `verifast/examples/shared_boxes/concurrentqueue.c`.
Used in the uxListRemove proof to show that the item to remove `x` must
have value `nth(i, vals)` where `i == index_of(x, cells)`. */
/*@
lemma void drop_nth_index_of<t>(list<t> vs, int i)
requires
0 <= i && i < length(vs);
ensures
head(drop(i , vs)) == nth(i, vs);
{
switch(vs) {
case nil:
case cons(h, t):
if (i == 0) {
// trivial
} else {
drop_nth_index_of(t, i - 1);
}
}
}
@*/
* Used in the uxListRemove proof to show that the item to remove `x` must
* have value `nth(i, vals)` where `i == index_of(x, cells)`. */
/*@
lemma void remove_append<t>(t x, list<t> l1, list<t> l2)
requires mem(x, l1) == false;
ensures remove(x, append(l1, l2)) == append(l1, remove(x, l2));
{
switch(l1) {
case nil:
case cons(h1, t1):
remove_append(x, t1, l2);
}
}
@*/
* lemma void drop_nth_index_of<t>(list<t> vs, int i)
* requires
* 0 <= i && i < length(vs);
* ensures
* head(drop(i , vs)) == nth(i, vs);
* {
* switch(vs) {
* case nil:
* case cons(h, t):
* if (i == 0) {
* // trivial
* } else {
* drop_nth_index_of(t, i - 1);
* }
* }
* }
* @*/
/*@
* lemma void remove_append<t>(t x, list<t> l1, list<t> l2)
* requires mem(x, l1) == false;
* ensures remove(x, append(l1, l2)) == append(l1, remove(x, l2));
* {
* switch(l1) {
* case nil:
* case cons(h1, t1):
* remove_append(x, t1, l2);
* }
* }
* @*/
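As a quick sanity check on the two helper lemmas above, small concrete lists (chosen here only for illustration) behave as the contracts state:

/* drop_nth_index_of: with vs == cons(10, cons(20, cons(30, nil))) and i == 1,
 *   drop(1, vs) == cons(20, cons(30, nil)), so head(drop(1, vs)) == 20 == nth(1, vs).
 * remove_append: with x == c, l1 == cons(a, cons(b, nil)), l2 == cons(c, cons(d, nil)),
 *   remove(c, append(l1, l2)) == cons(a, cons(b, cons(d, nil))) == append(l1, remove(c, l2)),
 *   which relies on mem(c, l1) == false. */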
#endif /* LIST_H */


@@ -34,51 +34,51 @@
#include <threading.h>
/*@#include "common.gh"@*/
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
/* Empty/no-op macros */
/* Tracing */
#define traceBLOCKING_ON_QUEUE_PEEK(x)
#define traceBLOCKING_ON_QUEUE_RECEIVE(x)
#define traceBLOCKING_ON_QUEUE_SEND(x)
#define traceQUEUE_CREATE(x)
#define traceQUEUE_CREATE_FAILED(x)
#define traceQUEUE_DELETE(x)
#define traceQUEUE_PEEK(x)
#define traceQUEUE_PEEK_FAILED(x)
#define traceQUEUE_PEEK_FROM_ISR(x)
#define traceQUEUE_PEEK_FROM_ISR_FAILED(x)
#define traceQUEUE_RECEIVE(x)
#define traceQUEUE_RECEIVE_FAILED(x)
#define traceQUEUE_RECEIVE_FROM_ISR(x)
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED(x)
#define traceQUEUE_SEND(x)
#define traceQUEUE_SEND_FAILED(x)
#define traceQUEUE_SEND_FROM_ISR(x)
#define traceQUEUE_SEND_FROM_ISR_FAILED(x)
#define traceBLOCKING_ON_QUEUE_PEEK( x )
#define traceBLOCKING_ON_QUEUE_RECEIVE( x )
#define traceBLOCKING_ON_QUEUE_SEND( x )
#define traceQUEUE_CREATE( x )
#define traceQUEUE_CREATE_FAILED( x )
#define traceQUEUE_DELETE( x )
#define traceQUEUE_PEEK( x )
#define traceQUEUE_PEEK_FAILED( x )
#define traceQUEUE_PEEK_FROM_ISR( x )
#define traceQUEUE_PEEK_FROM_ISR_FAILED( x )
#define traceQUEUE_RECEIVE( x )
#define traceQUEUE_RECEIVE_FAILED( x )
#define traceQUEUE_RECEIVE_FROM_ISR( x )
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( x )
#define traceQUEUE_SEND( x )
#define traceQUEUE_SEND_FAILED( x )
#define traceQUEUE_SEND_FROM_ISR( x )
#define traceQUEUE_SEND_FROM_ISR_FAILED( x )
/* Coverage */
#define mtCOVERAGE_TEST_MARKER()
/* Asserts */
#define configASSERT(x)
#define configASSERT( x )
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
/* Map portable memory management functions */
#define pvPortMalloc malloc
#define vPortFree free
#define pvPortMalloc malloc
#define vPortFree free
#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 )
#define queueOVERWRITE ( ( BaseType_t ) 2 )
#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 )
#define queueOVERWRITE ( ( BaseType_t ) 2 )
#define pdTRUE 1
#define pdFALSE 0
#define pdTRUE 1
#define pdFALSE 0
#define pdPASS pdTRUE
#define pdFAIL pdFALSE
#define errQUEUE_FULL 0
#define errQUEUE_EMPTY 0
#define pdPASS pdTRUE
#define pdFAIL pdFALSE
#define errQUEUE_FULL 0
#define errQUEUE_EMPTY 0
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED ( ( int8_t ) -1 )
@@ -93,26 +93,28 @@ typedef struct QueuePointers
typedef struct SemaphoreData
{
#ifdef VERIFAST /*< do not model xMutexHolder */
void *xMutexHolder;
#else
TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
#endif
#ifdef VERIFAST /*< do not model xMutexHolder */
void * xMutexHolder;
#else
TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
#endif
UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* VeriFast does not support unions so we replace with a struct */
struct fake_union_t {
struct fake_union_t
{
QueuePointers_t xQueue;
SemaphoreData_t xSemaphore;
};
typedef struct xLIST {
typedef struct xLIST
{
UBaseType_t uxNumberOfItems;
#ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
struct xLIST_ITEM *pxIndex;
MiniListItem_t xListEnd;
#endif
#ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
struct xLIST_ITEM * pxIndex;
MiniListItem_t xListEnd;
#endif
} List_t;
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
@@ -120,15 +122,15 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
int8_t * pcHead; /*< Points to the beginning of the queue storage area. */
int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */
#ifdef VERIFAST /*< VeriFast does not model unions */
struct fake_union_t u;
#else
union
{
QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
} u;
#endif
#ifdef VERIFAST /*< VeriFast does not model unions */
struct fake_union_t u;
#else
union
{
QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
} u;
#endif
List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
@@ -158,271 +160,278 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
/*@struct mutex *locked;@*/ /*< Ghost mutex simulates the effect of queue locking */
} xQUEUE;
typedef xQUEUE Queue_t;
typedef xQUEUE Queue_t;
typedef struct QueueDefinition * QueueHandle_t;
typedef struct QueueDefinition * QueueHandle_t;
/*@
#define QUEUE_SHAPE(q, Storage, N, M, K) \
malloc_block_QueueDefinition(q) &*& \
q->pcHead |-> Storage &*& \
q->pcWriteTo |-> ?WPtr &*& \
q->u.xQueue.pcTail |-> ?End &*& \
q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
q->uxItemSize |-> M &*& \
q->uxLength |-> N &*& \
q->uxMessagesWaiting |-> K &*& \
q->cRxLock |-> ?rxLock &*& \
q->cTxLock |-> ?txLock &*& \
struct_QueuePointers_padding(&q->u.xQueue) &*& \
struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
struct_fake_union_t_padding(&q->u) &*& \
struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
q->u.xSemaphore.xMutexHolder |-> _ &*& \
q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
true
predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*&
0 < M &*&
0 <= W &*& W < N &*&
0 <= R &*& R < N &*&
0 <= K &*& K <= N &*&
W == (R + 1 + K) % N &*&
(-1) <= rxLock &*&
(-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*&
abs == take(K, rotate_left((R+1)%N, contents)) &*&
malloc_block(Storage, N*M) &*&
true
;
@*/
#define QUEUE_SHAPE(q, Storage, N, M, K) \
* malloc_block_QueueDefinition(q) &*& \
* q->pcHead |-> Storage &*& \
* q->pcWriteTo |-> ?WPtr &*& \
* q->u.xQueue.pcTail |-> ?End &*& \
* q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
* q->uxItemSize |-> M &*& \
* q->uxLength |-> N &*& \
* q->uxMessagesWaiting |-> K &*& \
* q->cRxLock |-> ?rxLock &*& \
* q->cTxLock |-> ?txLock &*& \
* struct_QueuePointers_padding(&q->u.xQueue) &*& \
* struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
* struct_fake_union_t_padding(&q->u) &*& \
* struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
* struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
* q->u.xSemaphore.xMutexHolder |-> _ &*& \
* q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
* true
*
* predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
* QUEUE_SHAPE(q, Storage, N, M, K) &*&
* 0 < N &*&
* 0 < M &*&
* 0 <= W &*& W < N &*&
* 0 <= R &*& R < N &*&
* 0 <= K &*& K <= N &*&
* W == (R + 1 + K) % N &*&
* (-1) <= rxLock &*&
* (-1) <= txLock &*&
* (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
* (is_locked ? 0 <= txLock : (-1) == txLock) &*&
* WPtr == Storage + (W*M) &*&
* RPtr == Storage + (R*M) &*&
* End == Storage + (N*M) &*&
* buffer(Storage, N, M, ?contents) &*&
* length(contents) == N &*&
* abs == take(K, rotate_left((R+1)%N, contents)) &*&
* malloc_block(Storage, N*M) &*&
* true
* ;
* @*/
/* A buffer allows us to interpret a flat character array of `N*M` bytes as a
list of `N` elements where each element is `M` bytes */
/*@
predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
N == 0
? elements == nil
: chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
* list of `N` elements where each element is `M` bytes */
lemma void buffer_length(char *buffer, size_t N, size_t M)
requires buffer(buffer, N, M, ?elements);
ensures buffer(buffer, N, M, elements) &*& length(elements) == N;
{
if (N == 0) {
open buffer(buffer, N, M, elements);
close buffer(buffer, N, M, elements);
} else {
open buffer(buffer, N, M, elements);
buffer_length(buffer+M, N-1, M);
}
}
@*/
/*@
* predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
* N == 0
* ? elements == nil
* : chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
*
* lemma void buffer_length(char *buffer, size_t N, size_t M)
* requires buffer(buffer, N, M, ?elements);
* ensures buffer(buffer, N, M, elements) &*& length(elements) == N;
* {
* if (N == 0) {
* open buffer(buffer, N, M, elements);
* close buffer(buffer, N, M, elements);
* } else {
* open buffer(buffer, N, M, elements);
* buffer_length(buffer+M, N-1, M);
* }
* }
* @*/
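For intuition, instantiating the buffer predicate at small sizes (N == 2, M == 3, picked only for illustration) shows the intended reading of a flat array as fixed-size elements:

/* buffer(p, 2, 3, elements) unfolds to
 *   chars(p, 3, ?x0) &*& buffer(p + 3, 1, 3, ?xs) &*& elements == cons(x0, xs)
 * and then to
 *   chars(p, 3, ?x0) &*& chars(p + 3, 3, ?x1) &*& elements == cons(x0, cons(x1, nil)),
 * i.e. six raw bytes viewed as two 3-byte list elements. */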
/*
There is no need in the queue proofs to preserve a relationship between `cs`
and `elements` (i.e., `flatten(elements) == cs`) because we only move in one
direction from `cs` to `elements` during queue creation when the contents is
fresh from `malloc` (i.e., uninitialized). If we needed to do a roundtrip from
elements back to cs then this would require a stronger lemma.
*/
* There is no need in the queue proofs to preserve a relationship between `cs`
* and `elements` (i.e., `flatten(elements) == cs`) because we only move in one
* direction from `cs` to `elements` during queue creation when the contents is
* fresh from `malloc` (i.e., uninitialized). If we needed to do a roundtrip from
* elements back to cs then this would require a stronger lemma.
*/
/*@
lemma void buffer_from_chars(char *buffer, size_t N, size_t M)
requires chars(buffer, N*M, ?cs) &*& 0 <= N &*& 0 < M;
ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
{
if (N == 0) {
close exists(nil);
} else {
int i = 0;
while (i < N)
invariant 0 <= i &*& i <= N &*&
chars(buffer, (N-i)*M, ?xs) &*& xs == take((N-i)*M, cs) &*&
buffer(buffer + (N-i)*M, i, M, ?ys);
decreases N-i;
{
mul_mono_l(0, N-i-1, M);
chars_split(buffer, (N-i-1)*M);
mul_mono_l(i, N, M);
mul_mono_l(N-i, N, M);
take_take((N-i-1)*M, (N-i)*M, cs);
i++;
}
close exists(ys);
buffer_length(buffer, N, M);
}
}
lemma void append_buffer(char *buffer, size_t N1, size_t N2, size_t M)
requires
buffer(buffer, N1, M, ?elements1) &*&
buffer(buffer + N1 * M, N2, M, ?elements2) &*&
0 <= N1 &*& 0 <= N2;
ensures buffer(buffer, N1+N2, M, append(elements1, elements2));
{
if (N1 == 0) {
open buffer(buffer, 0, M, _);
} else if (N2 == 0) {
open buffer(buffer + N1 * M, 0, M, _);
} else {
open buffer(buffer, N1, M, elements1);
append_buffer(buffer + M, N1-1, N2, M);
close buffer(buffer, N1+N2, M, cons(?x, append(xs, elements2)));
}
}
lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i)
requires buffer(buffer, N, M, ?elements) &*& 0 <= i &*& i < N;
ensures
buffer(buffer, i, M, take(i, elements)) &*&
chars(buffer + i * M, M, nth(i, elements)) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
{
if (i == 0) {
// straightforward
} else {
buffer_length(buffer, N, M);
int j = 0;
while (j < i)
invariant 0 <= j &*& j <= i &*&
buffer(buffer, j, M, take(j, elements)) &*&
buffer(buffer + j * M, N-j, M, drop(j, elements));
decreases i-j;
{
drop_drop(1, j, elements);
nth_drop2(elements, j);
open buffer(buffer + j * M, N-j, M, drop(j, elements));
assert chars(buffer + j * M, M, ?x) &*& x == nth(j, elements);
close buffer(buffer + j * M, 1, M, singleton(x));
append_buffer(buffer, j, 1, M);
take_plus_one(j, elements);
j++;
}
drop_drop(1, j, elements);
nth_drop2(elements, i);
}
}
lemma void join_element(char *buffer, size_t N, size_t M, size_t i)
requires
0 <= i &*& i < N &*&
buffer(buffer, i, M, ?prefix) &*&
chars(buffer + i * M, M, ?element) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
{
if (i == 0) {
open buffer(buffer, i, M, prefix);
assert prefix == nil;
close buffer(buffer, N, M, cons(element, suffix));
} else {
close buffer(buffer + i * M, N-i, M, cons(element, suffix));
append_buffer(buffer, i, N-i, M);
}
}
predicate list(List_t *l;) =
l->uxNumberOfItems |-> _;
predicate queuelists(QueueHandle_t q;) =
list(&q->xTasksWaitingToSend) &*&
list(&q->xTasksWaitingToReceive);
@*/
* lemma void buffer_from_chars(char *buffer, size_t N, size_t M)
* requires chars(buffer, N*M, ?cs) &*& 0 <= N &*& 0 < M;
* ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
* {
* if (N == 0) {
* close exists(nil);
* } else {
* int i = 0;
* while (i < N)
* invariant 0 <= i &*& i <= N &*&
* chars(buffer, (N-i)*M, ?xs) &*& xs == take((N-i)*M, cs) &*&
* buffer(buffer + (N-i)*M, i, M, ?ys);
* decreases N-i;
* {
* mul_mono_l(0, N-i-1, M);
* chars_split(buffer, (N-i-1)*M);
* mul_mono_l(i, N, M);
* mul_mono_l(N-i, N, M);
* take_take((N-i-1)*M, (N-i)*M, cs);
* i++;
* }
* close exists(ys);
* buffer_length(buffer, N, M);
* }
* }
*
* lemma void append_buffer(char *buffer, size_t N1, size_t N2, size_t M)
* requires
* buffer(buffer, N1, M, ?elements1) &*&
* buffer(buffer + N1 * M, N2, M, ?elements2) &*&
* 0 <= N1 &*& 0 <= N2;
* ensures buffer(buffer, N1+N2, M, append(elements1, elements2));
* {
* if (N1 == 0) {
* open buffer(buffer, 0, M, _);
* } else if (N2 == 0) {
* open buffer(buffer + N1 * M, 0, M, _);
* } else {
* open buffer(buffer, N1, M, elements1);
* append_buffer(buffer + M, N1-1, N2, M);
* close buffer(buffer, N1+N2, M, cons(?x, append(xs, elements2)));
* }
* }
*
* lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i)
* requires buffer(buffer, N, M, ?elements) &*& 0 <= i &*& i < N;
* ensures
* buffer(buffer, i, M, take(i, elements)) &*&
* chars(buffer + i * M, M, nth(i, elements)) &*&
* buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
* {
* if (i == 0) {
* // straightforward
* } else {
* buffer_length(buffer, N, M);
* int j = 0;
* while (j < i)
* invariant 0 <= j &*& j <= i &*&
* buffer(buffer, j, M, take(j, elements)) &*&
* buffer(buffer + j * M, N-j, M, drop(j, elements));
* decreases i-j;
* {
* drop_drop(1, j, elements);
* nth_drop2(elements, j);
* open buffer(buffer + j * M, N-j, M, drop(j, elements));
* assert chars(buffer + j * M, M, ?x) &*& x == nth(j, elements);
* close buffer(buffer + j * M, 1, M, singleton(x));
* append_buffer(buffer, j, 1, M);
* take_plus_one(j, elements);
* j++;
* }
* drop_drop(1, j, elements);
* nth_drop2(elements, i);
* }
* }
*
* lemma void join_element(char *buffer, size_t N, size_t M, size_t i)
* requires
* 0 <= i &*& i < N &*&
* buffer(buffer, i, M, ?prefix) &*&
* chars(buffer + i * M, M, ?element) &*&
* buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
* ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
* {
* if (i == 0) {
* open buffer(buffer, i, M, prefix);
* assert prefix == nil;
* close buffer(buffer, N, M, cons(element, suffix));
* } else {
* close buffer(buffer + i * M, N-i, M, cons(element, suffix));
* append_buffer(buffer, i, N-i, M);
* }
* }
*
* predicate list(List_t *l;) =
* l->uxNumberOfItems |-> _;
*
* predicate queuelists(QueueHandle_t q;) =
* list(&q->xTasksWaitingToSend) &*&
* list(&q->xTasksWaitingToReceive);
* @*/
/* Because prvCopyDataFromQueue does *not* decrement uxMessagesWaiting (K) the
queue predicate above does not hold as a postcondition. If the caller
subsequently decrements K then the queue predicate can be reinstated. */
* queue predicate above does not hold as a postcondition. If the caller
* subsequently decrements K then the queue predicate can be reinstated. */
/*@
predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*&
0 < M &*&
0 <= W &*& W < N &*&
0 <= R &*& R < N &*&
0 <= K &*& K <= N &*&
W == (R + K) % N &*& //< Differs from queue predicate
(-1) <= rxLock &*&
(-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*&
abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
malloc_block(Storage, N*M) &*&
true
;
@*/
* predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
* QUEUE_SHAPE(q, Storage, N, M, K) &*&
* 0 < N &*&
* 0 < M &*&
* 0 <= W &*& W < N &*&
* 0 <= R &*& R < N &*&
* 0 <= K &*& K <= N &*&
* W == (R + K) % N &*& //< Differs from queue predicate
* (-1) <= rxLock &*&
* (-1) <= txLock &*&
* (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
* (is_locked ? 0 <= txLock : (-1) == txLock) &*&
* WPtr == Storage + (W*M) &*&
* RPtr == Storage + (R*M) &*&
* End == Storage + (N*M) &*&
* buffer(Storage, N, M, ?contents) &*&
* length(contents) == N &*&
* abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
* malloc_block(Storage, N*M) &*&
* true
* ;
* @*/
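A small worked example (numbers chosen only for illustration) shows how this relation fits the comment above. Take N == 4, R == 1, K == 2, so the queue predicate gives W == (R + 1 + K) % N == 0. prvCopyDataFromQueue advances the read index to R' == (R + 1) % N == 2 without touching K, and W == (R' + K) % N == (2 + 2) % 4 == 0 still holds, which is exactly what this predicate records. Once the caller decrements K to 1, W == (R' + 1 + 1) % 4 == 0 again and the original queue predicate can be re-established.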
/* Can't be called `mutex` as this clashes with VeriFast's predicate */
/*@
predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
QUEUE_SHAPE(q, Storage, N, 0, K) &*&
queuelists(q) &*&
0 < N &*&
0 <= K &*& K <= N &*&
(-1) <= rxLock &*&
(-1) <= txLock &*&
WPtr == Storage &*&
RPtr == Storage &*&
End == Storage &*&
malloc_block(Storage, 0) &*&
chars(Storage, 0, _) &*&
true
;
@*/
* predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
* QUEUE_SHAPE(q, Storage, N, 0, K) &*&
* queuelists(q) &*&
* 0 < N &*&
* 0 <= K &*& K <= N &*&
* (-1) <= rxLock &*&
* (-1) <= txLock &*&
* WPtr == Storage &*&
* RPtr == Storage &*&
* End == Storage &*&
* malloc_block(Storage, 0) &*&
* chars(Storage, 0, _) &*&
* true
* ;
* @*/
/* A queuehandle can be shared between tasks and ISRs. Acquiring the ghost
`irqMask` gives access to the core queue resources. The permissions granted
after masking interrupts depend on the caller:
- A task has access to the queue and the queuelists
- An ISR has access to the queue and, if the queue is unlocked, the queuelists */
/*@
predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
* `irqMask` gives access to the core queue resources. The permissions granted
* after masking interrupts depend on the caller:
* - A task has access to the queue and the queuelists
* - An ISR has access to the queue and, if the queue is unlocked, the queuelists */
predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_isr && is_locked ? true : queuelists(queue));
@*/
/*@
* predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
* q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
*
* predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
* queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
* (is_isr && is_locked ? true : queuelists(queue));
* @*/
/* A queuesuspend can be shared between tasks. Acquiring the ghost `schedulerSuspend` gives access to the `locked` mutex. */
/*@
predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
queue->locked |-> ?m &*&
mutex(m, queue_locked_invariant(queue));
predicate queuesuspend(QueueHandle_t q;) =
q->schedulerSuspend |-> ?m &*&
mutex(m, scheduler_suspended_invariant(q));
@*/
/*@
* predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
* queue->locked |-> ?m &*&
* mutex(m, queue_locked_invariant(queue));
*
* predicate queuesuspend(QueueHandle_t q;) =
* q->schedulerSuspend |-> ?m &*&
* mutex(m, scheduler_suspended_invariant(q));
* @*/
/* A queuelock is exclusively acquired by a task. Acquiring the ghost `queuelock` gives access to the queue list resources. */
/*@
predicate queuelock(QueueHandle_t q;) =
q->locked |-> ?m &*&
mutex(m, queue_locked_invariant(q));
* predicate queuelock(QueueHandle_t q;) =
* q->locked |-> ?m &*&
* mutex(m, queue_locked_invariant(q));
*
* predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =
* queuelists(queue);
* @*/
predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =
queuelists(queue);
@*/
BaseType_t vListInitialise(List_t *list);
BaseType_t vListInitialise( List_t * list );
/*@requires list(list);@*/
/*@ensures list(list);@*/
BaseType_t listLIST_IS_EMPTY(List_t *list);
BaseType_t listLIST_IS_EMPTY( List_t * list );
/*@requires list->uxNumberOfItems |-> ?len;@*/
/*@ensures list->uxNumberOfItems |-> len &*& result == (len == 0 ? pdTRUE : pdFALSE);@*/
@@ -433,25 +442,27 @@ typedef struct xTIME_OUT
} TimeOut_t;
/*@
predicate xTIME_OUT(struct xTIME_OUT *to;) =
to->xOverflowCount |-> _ &*&
to->xTimeOnEntering |-> _ &*&
struct_xTIME_OUT_padding(to);
@*/
* predicate xTIME_OUT(struct xTIME_OUT *to;) =
* to->xOverflowCount |-> _ &*&
* to->xTimeOnEntering |-> _ &*&
* struct_xTIME_OUT_padding(to);
* @*/
void vTaskInternalSetTimeOutState( TimeOut_t * x);
void vTaskInternalSetTimeOutState( TimeOut_t * x );
/*@requires xTIME_OUT(x);@*/
/*@ensures xTIME_OUT(x);@*/
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
TickType_t * const pxTicksToWait );
/*@requires xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/
/*@ensures xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/
BaseType_t xTaskRemoveFromEventList(List_t *list);
BaseType_t xTaskRemoveFromEventList( List_t * list );
/*@requires list(list);@*/
/*@ensures list(list);@*/
void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait );
void vTaskPlaceOnEventList( List_t * const pxEventList,
const TickType_t xTicksToWait );
/*@requires list(pxEventList);@*/
/*@ensures list(pxEventList);@*/
@@ -460,94 +471,107 @@ void vTaskMissedYield();
/*@ensures true;@*/
void vTaskSuspendAll();
/*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
* [1/2]xQueue->schedulerSuspend |-> ?m &*&
* [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/
* mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
* xQueue->locked |-> ?m2 &*&
* mutex(m2, queue_locked_invariant(xQueue));@*/
BaseType_t xTaskResumeAll( void );
/*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/
* [1/2]xQueue->schedulerSuspend |-> ?m &*&
* mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
* xQueue->locked |-> ?m2 &*&
* mutex(m2, queue_locked_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
* [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
void prvLockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuelock(xQueue); @*/
* [1/2]queuelock(xQueue); @*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
* [1/2]xQueue->locked |-> ?m &*&
* mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
* queue_locked_invariant(xQueue)();@*/
void prvUnlockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuelock(xQueue);@*/
void setInterruptMask(QueueHandle_t xQueue)
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
* [1/2]xQueue->locked |-> ?m &*&
* mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
* queue_locked_invariant(xQueue)();@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
* [1/2]queuelock(xQueue);@*/
void setInterruptMask( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
queuelists(xQueue);@*/
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
* queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
* queuelists(xQueue);@*/
{
/*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask);
mutex_acquire( xQueue->irqMask );
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
}
void clearInterruptMask(QueueHandle_t xQueue)
void clearInterruptMask( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
queuelists(xQueue);@*/
* [1/2]xQueue->irqMask |-> ?m &*&
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
* queuelists(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, false);@*/
{
/*@close irqs_masked_invariant(xQueue, N, M, false)();@*/
mutex_release(xQueue->irqMask);
mutex_release( xQueue->irqMask );
/*@close [1/2]queuehandle(xQueue, N, M, false);@*/
}
#define taskENTER_CRITICAL() setInterruptMask(xQueue)
#define taskEXIT_CRITICAL() clearInterruptMask(xQueue)
#define taskENTER_CRITICAL() setInterruptMask( xQueue )
#define taskEXIT_CRITICAL() clearInterruptMask( xQueue )
#define portYIELD_WITHIN_API()
#define queueYIELD_IF_USING_PREEMPTION()
UBaseType_t setInterruptMaskFromISR(QueueHandle_t xQueue)
UBaseType_t setInterruptMaskFromISR( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_locked ? true : queuelists(xQueue));@*/
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
* queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
* (is_locked ? true : queuelists(xQueue));@*/
{
/*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask);
mutex_acquire( xQueue->irqMask );
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
return 0;
}
void clearInterruptMaskFromISR(QueueHandle_t xQueue, UBaseType_t uxSavedInterruptStatus)
void clearInterruptMaskFromISR( QueueHandle_t xQueue,
UBaseType_t uxSavedInterruptStatus )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
(is_locked ? true : queuelists(xQueue));@*/
* [1/2]xQueue->irqMask |-> ?m &*&
* mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
* (is_locked ? true : queuelists(xQueue));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, true);@*/
{
/*@close irqs_masked_invariant(xQueue, N, M, true)();@*/
mutex_release(xQueue->irqMask);
mutex_release( xQueue->irqMask );
/*@close [1/2]queuehandle(xQueue, N, M, true);@*/
}
#define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR(xQueue)
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus) clearInterruptMaskFromISR(xQueue, uxSavedInterruptStatus)
#define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR( xQueue )
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) clearInterruptMaskFromISR( xQueue, uxSavedInterruptStatus )
#endif /* QUEUE_H */


@@ -29,26 +29,32 @@
#include "queue.h"
void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer );
void prvCopyDataFromQueue( Queue_t * const pxQueue,
void * const pvBuffer );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
chars(pvBuffer, M, head(abs));@*/
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition );
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
* chars(pvBuffer, M, head(abs));@*/
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(K < N || xPosition == queueOVERWRITE) &*&
chars(pvItemToQueue, M, ?x) &*&
(xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
* (K < N || xPosition == queueOVERWRITE) &*&
* chars(pvItemToQueue, M, ?x) &*&
* (xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
(xPosition == queueSEND_TO_BACK
? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
: (xPosition == queueSEND_TO_FRONT
? (R == 0
? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
: queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
: xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
) &*&
chars(pvItemToQueue, M, x);@*/
* (xPosition == queueSEND_TO_BACK
* ? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
* : (xPosition == queueSEND_TO_FRONT
* ? (R == 0
* ? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
* : queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
* : xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
* ) &*&
* chars(pvItemToQueue, M, x);@*/
BaseType_t prvIsQueueEmpty( Queue_t * pxQueue );
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/


@@ -27,12 +27,13 @@
#include "proof/list.h"
/* Wrap the macro in a function call so we can give a function contract */
#define listLIST_IS_EMPTY( pxList ) ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ? pdTRUE : pdFALSE )
#define listLIST_IS_EMPTY( pxList ) ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ? pdTRUE : pdFALSE )
BaseType_t wrapper_listLIST_IS_EMPTY( List_t * pxList )
/*@requires xLIST(pxList, ?len, ?idx, ?end, ?cells, ?vals);@*/
/*@ensures xLIST(pxList, len, idx, end, cells, vals) &*&
result == ((len == 0) ? pdTRUE : pdFALSE); @*/
* result == ((len == 0) ? pdTRUE : pdFALSE); @*/
{
/*@open xLIST(pxList, len, _, _, _, _);@*/
return listLIST_IS_EMPTY( pxList );


@@ -27,16 +27,18 @@
#include "proof/list.h"
UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
/*@requires
exists<struct xLIST * >(?l) &*&
xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
end != pxItemToRemove &*&
mem(pxItemToRemove, cells) == true;@*/
* exists<struct xLIST * >(?l) &*&
* xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
* end != pxItemToRemove &*&
* mem(pxItemToRemove, cells) == true;@*/
/*@ensures
result == len-1 &*&
xLIST_ITEM(pxItemToRemove, nth(index_of(pxItemToRemove, cells), vals), _, ?pxItemToRemovePrevious, NULL) &*&
pxItemToRemovePrevious == nth(index_of(pxItemToRemove, cells)-1, cells) &*&
xLIST(l, result, idx == pxItemToRemove ? pxItemToRemovePrevious : idx, end, remove(pxItemToRemove, cells), remove_nth(index_of(pxItemToRemove, cells), vals));@*/
* result == len-1 &*&
* xLIST_ITEM(pxItemToRemove, nth(index_of(pxItemToRemove, cells), vals), _, ?pxItemToRemovePrevious, NULL) &*&
* pxItemToRemovePrevious == nth(index_of(pxItemToRemove, cells)-1, cells) &*&
* xLIST(l, result, idx == pxItemToRemove ? pxItemToRemovePrevious : idx, end, remove(pxItemToRemove, cells), remove_nth(index_of(pxItemToRemove, cells), vals));@*/
{
/* For brevity we alias x to pxItemToRemove */
/*@struct xLIST_ITEM *x = pxItemToRemove;@*/
@@ -48,7 +50,7 @@ UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
/*@dls_not_empty(end, endprev, cells, x);@*/
/* We know the xLIST is a DLS: end...endprev
Split this into DLS1:end...xprev and DLS2:x...endprev */
* Split this into DLS1:end...xprev and DLS2:x...endprev */
/*@int i = index_of(x, cells);@*/
/*@split(end, endprev, end, endprev, cells, vals, x, i);@*/
/*@list<struct xLIST_ITEM *> ys = take(i, cells);@*/
@@ -61,106 +63,107 @@ UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
/*@assert DLS(x, xprev, end, endprev, zs, ws, l);@*/ /*< DLS2 (zs, ws) */
/* Now case split to open DLS1 and DLS2 appropriately */
/*@
if (end == xprev)
{
if (x == endprev)
{
//Case A
//DLS1: extract end=prev=next
open DLS(end, endprev, x, xprev, ys, vs, l);
open xLIST_ITEM(end, portMAX_DELAY, x, endprev, l);
//DLS2: extract x
open DLS(x, xprev, end, endprev, zs, ws, l);
//Lengths
assert length(ys) == 1;
assert length(zs) == 1;
}
else
{
//Case B
//DLS1: extract end=prev
open DLS(end, endprev, x, xprev, ys, vs, l);
open xLIST_ITEM(end, portMAX_DELAY, x, endprev, l);
//DLS2: extract next and x
open DLS(x, end, end, endprev, zs, ws, l);
assert DLS(?xnext, x, end, endprev, tail(zs), tail(ws), l);
open DLS(xnext, x, end, endprev, tail(zs), tail(ws), l);
open xLIST_ITEM(xnext, _, _, x, l);
//Lengths
assert length(ys) == 1;
}
}
else
{
if (x == endprev)
{
//Case C
//DLS1: extract end=next and prev
dls_last_mem(end, endprev, x, xprev, ys);
assert mem(xprev, ys) == true;
open DLS(end, endprev, x, xprev, ys, vs, l);
open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, l);
if (endnext == xprev)
{
open DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
open xLIST_ITEM(xprev, _, x, _, l);
}
else
{
assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
int k = index_of(xprev, tail(ys));
dls_last_mem(endnext, end, x, xprev, tail(ys));
split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k);
open DLS(xprev, _, x, xprev, _, _, l);
open xLIST_ITEM(xprev, _, x, _, l);
}
//DLS2: extract x
open DLS(x, xprev, end, endprev, zs, ws, l);
//Lengths
assert length(zs) == 1;
}
else
{
//Case D
//DLS1: extract prev
dls_last_mem(end, endprev, x, xprev, ys);
int j = index_of(xprev, ys);
open DLS(end, endprev, x, xprev, ys, vs, l);
open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, l);
if (endnext == xprev)
{
open DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
assert tail(ys) == singleton(xprev);
open xLIST_ITEM(xprev, _, x, _, l);
}
else
{
assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
int k = index_of(xprev, tail(ys));
dls_last_mem(endnext, end, x, xprev, tail(ys));
split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k);
open DLS(xprev, _, x, xprev, _, _, l);
open xLIST_ITEM(xprev, _, x, _, l);
}
//DLS2: extract next and x
open DLS(x, xprev, end, endprev, zs, ws, l);
assert xLIST_ITEM(x, _, ?xnext, _, l);
open DLS(xnext, x, end, endprev, tail(zs), tail(ws), l);
open xLIST_ITEM(xnext, _, _, x, l);
}
}
@*/
* if (end == xprev)
* {
* if (x == endprev)
* {
* //Case A
* //DLS1: extract end=prev=next
* open DLS(end, endprev, x, xprev, ys, vs, l);
* open xLIST_ITEM(end, portMAX_DELAY, x, endprev, l);
* //DLS2: extract x
* open DLS(x, xprev, end, endprev, zs, ws, l);
* //Lengths
* assert length(ys) == 1;
* assert length(zs) == 1;
* }
* else
* {
* //Case B
* //DLS1: extract end=prev
* open DLS(end, endprev, x, xprev, ys, vs, l);
* open xLIST_ITEM(end, portMAX_DELAY, x, endprev, l);
* //DLS2: extract next and x
* open DLS(x, end, end, endprev, zs, ws, l);
* assert DLS(?xnext, x, end, endprev, tail(zs), tail(ws), l);
* open DLS(xnext, x, end, endprev, tail(zs), tail(ws), l);
* open xLIST_ITEM(xnext, _, _, x, l);
* //Lengths
* assert length(ys) == 1;
* }
* }
* else
* {
* if (x == endprev)
* {
* //Case C
* //DLS1: extract end=next and prev
* dls_last_mem(end, endprev, x, xprev, ys);
* assert mem(xprev, ys) == true;
* open DLS(end, endprev, x, xprev, ys, vs, l);
* open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, l);
* if (endnext == xprev)
* {
* open DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
* open xLIST_ITEM(xprev, _, x, _, l);
* }
* else
* {
* assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
* int k = index_of(xprev, tail(ys));
* dls_last_mem(endnext, end, x, xprev, tail(ys));
* split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k);
* open DLS(xprev, _, x, xprev, _, _, l);
* open xLIST_ITEM(xprev, _, x, _, l);
* }
* //DLS2: extract x
* open DLS(x, xprev, end, endprev, zs, ws, l);
* //Lengths
* assert length(zs) == 1;
* }
* else
* {
* //Case D
* //DLS1: extract prev
* dls_last_mem(end, endprev, x, xprev, ys);
* int j = index_of(xprev, ys);
* open DLS(end, endprev, x, xprev, ys, vs, l);
* open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, l);
* if (endnext == xprev)
* {
* open DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
* assert tail(ys) == singleton(xprev);
* open xLIST_ITEM(xprev, _, x, _, l);
* }
* else
* {
* assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), l);
* int k = index_of(xprev, tail(ys));
* dls_last_mem(endnext, end, x, xprev, tail(ys));
* split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k);
* open DLS(xprev, _, x, xprev, _, _, l);
* open xLIST_ITEM(xprev, _, x, _, l);
* }
* //DLS2: extract next and x
* open DLS(x, xprev, end, endprev, zs, ws, l);
* assert xLIST_ITEM(x, _, ?xnext, _, l);
* open DLS(xnext, x, end, endprev, tail(zs), tail(ws), l);
* open xLIST_ITEM(xnext, _, _, x, l);
* }
* }
* @*/
/*@drop_nth_index_of(vals, i);@*/
/*@open xLIST_ITEM(x, nth(i, vals), ?xnext, xprev, l);@*/
/* The list item knows which list it is in. Obtain the list from the list
* item. */
#ifdef VERIFAST /*< const pointer declaration */
List_t * pxList = pxItemToRemove->pxContainer;
#else
List_t * const pxList = pxItemToRemove->pxContainer;
#endif
#ifdef VERIFAST /*< const pointer declaration */
List_t * pxList = pxItemToRemove->pxContainer;
#else
List_t * const pxList = pxItemToRemove->pxContainer;
#endif
pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious;
pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext;
@@ -184,101 +187,105 @@ UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
return pxList->uxNumberOfItems;
/*@
// Reassemble DLS1 and a modified DLS2, which no longer includes x
if (end == xprev)
{
if (x == endprev)
{
//Case A
close xLIST_ITEM(end, portMAX_DELAY, _, _, _);
close DLS(end, end, end, end, singleton(end), singleton(portMAX_DELAY), l);
}
else
{
//Case B
close xLIST_ITEM(xprev, _, xnext, endprev, l);
close DLS(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY), l);
close xLIST_ITEM(xnext, _, _, xprev, l);
close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), l);
join(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY),
xnext, xprev, end, endprev, tail(zs), tail(ws));
}
}
else
{
if (x == endprev)
{
//Case C
close xLIST_ITEM(end, _, ?endnext, xprev, l);
close xLIST_ITEM(xprev, ?xprev_val, end, _, l);
if (endnext == xprev)
{
close DLS(xprev, end, end, xprev, singleton(xprev), singleton(xprev_val), l);
close DLS(end, xprev, end, xprev, cons(end, singleton(xprev)), cons(portMAX_DELAY, singleton(xprev_val)), l);
}
else
{
close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), l);
assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, l);
join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev,
xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val));
close DLS(end, xprev, end, xprev, ys, vs, l);
}
}
else
{
//Case D
close xLIST_ITEM(xnext, _, ?xnextnext, xprev, l);
close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), l);
close xLIST_ITEM(end, _, ?endnext, endprev, l);
close xLIST_ITEM(xprev, ?xprev_val, xnext, _, l);
if (endnext == xprev)
{
close DLS(xprev, _, xnext, xprev, singleton(xprev), singleton(xprev_val), l);
close DLS(end, endprev, xnext, xprev, ys, vs, l);
join(end, endprev, xnext, xprev, ys, vs,
xnext, xprev, end, endprev, tail(zs), tail(ws));
}
else
{
close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), l);
assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, l);
join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev,
xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val));
close DLS(end, endprev, xnext, xprev, ys, vs, l);
join(end, endprev, xnext, xprev, ys, vs,
xnext, xprev, end, endprev, tail(zs), tail(ws));
}
}
}
@*/
* // Reassemble DLS1 and a modified DLS2, which no longer includes x
* if (end == xprev)
* {
* if (x == endprev)
* {
* //Case A
* close xLIST_ITEM(end, portMAX_DELAY, _, _, _);
* close DLS(end, end, end, end, singleton(end), singleton(portMAX_DELAY), l);
* }
* else
* {
* //Case B
* close xLIST_ITEM(xprev, _, xnext, endprev, l);
* close DLS(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY), l);
* close xLIST_ITEM(xnext, _, _, xprev, l);
* close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), l);
* join(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY),
* xnext, xprev, end, endprev, tail(zs), tail(ws));
* }
* }
* else
* {
* if (x == endprev)
* {
* //Case C
* close xLIST_ITEM(end, _, ?endnext, xprev, l);
* close xLIST_ITEM(xprev, ?xprev_val, end, _, l);
* if (endnext == xprev)
* {
* close DLS(xprev, end, end, xprev, singleton(xprev), singleton(xprev_val), l);
* close DLS(end, xprev, end, xprev, cons(end, singleton(xprev)), cons(portMAX_DELAY, singleton(xprev_val)), l);
* }
* else
* {
* close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), l);
* assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, l);
* join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev,
* xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val));
* close DLS(end, xprev, end, xprev, ys, vs, l);
* }
* }
* else
* {
* //Case D
* close xLIST_ITEM(xnext, _, ?xnextnext, xprev, l);
* close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), l);
* close xLIST_ITEM(end, _, ?endnext, endprev, l);
* close xLIST_ITEM(xprev, ?xprev_val, xnext, _, l);
* if (endnext == xprev)
* {
* close DLS(xprev, _, xnext, xprev, singleton(xprev), singleton(xprev_val), l);
* close DLS(end, endprev, xnext, xprev, ys, vs, l);
* join(end, endprev, xnext, xprev, ys, vs,
* xnext, xprev, end, endprev, tail(zs), tail(ws));
* }
* else
* {
* close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), l);
* assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, l);
* join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev,
* xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val));
* close DLS(end, endprev, xnext, xprev, ys, vs, l);
* join(end, endprev, xnext, xprev, ys, vs,
* xnext, xprev, end, endprev, tail(zs), tail(ws));
* }
* }
* }
* @*/
/*@remove_remove_nth(cells, x);@*/
/*@
if (idx == x)
{
close xLIST(l, len-1, xprev, end, append(ys, tail(zs)), append(vs, tail(ws)));
}
else
{
idx_remains_in_list(cells, idx, x, i);
close xLIST(l, len-1, idx, end, append(ys, tail(zs)), append(vs, tail(ws)));
}
@*/
* if (idx == x)
* {
* close xLIST(l, len-1, xprev, end, append(ys, tail(zs)), append(vs, tail(ws)));
* }
* else
* {
* idx_remains_in_list(cells, idx, x, i);
* close xLIST(l, len-1, idx, end, append(ys, tail(zs)), append(vs, tail(ws)));
* }
* @*/
/*@close xLIST_ITEM(x, nth(i, vals), xnext, xprev, NULL);@*/
}
ListItem_t * client_example( List_t * l )
/*@requires
xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
idx != end &*&
cells == cons(end, cons(idx, ?cells_tl)) &*&
vals == cons(portMAX_DELAY, cons(42, ?vals_tl));@*/
* xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
* idx != end &*&
* cells == cons(end, cons(idx, ?cells_tl)) &*&
* vals == cons(portMAX_DELAY, cons(42, ?vals_tl));@*/
/*@ensures
xLIST(l, len - 1, _, end, cons(end, cells_tl), cons(portMAX_DELAY, vals_tl)) &*&
xLIST_ITEM(result, 42, _, _, NULL);@*/
* xLIST(l, len - 1, _, end, cons(end, cells_tl), cons(portMAX_DELAY, vals_tl)) &*&
* xLIST_ITEM(result, 42, _, _, NULL);@*/
{
/*@open xLIST(l, len, idx, end, cells, vals);@*/
ListItem_t *index = l->pxIndex;
ListItem_t * index = l->pxIndex;
/*@close xLIST(l, len, idx, end, cells, vals);@*/
/*@close exists(l);@*/
uxListRemove( index );
@ -286,19 +293,22 @@ ListItem_t * client_example( List_t * l )
}
void client_example2( List_t * l )
/*@requires
xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
cells == cons(end, cons(?x1, cons(?x2, ?cells_tl))) &*&
idx == x2 &*&
vals == cons(portMAX_DELAY, cons(1, cons(2, ?vals_tl)));@*/
* xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
* cells == cons(end, cons(?x1, cons(?x2, ?cells_tl))) &*&
* idx == x2 &*&
* vals == cons(portMAX_DELAY, cons(1, cons(2, ?vals_tl)));@*/
/*@ensures
xLIST(l, len-2, end, end, cons(end, cells_tl), cons(portMAX_DELAY, vals_tl)) &*&
xLIST_ITEM(_, 1, _, _, NULL) &*&
xLIST_ITEM(_, 2, _, _, NULL);@*/
* xLIST(l, len-2, end, end, cons(end, cells_tl), cons(portMAX_DELAY, vals_tl)) &*&
* xLIST_ITEM(_, 1, _, _, NULL) &*&
* xLIST_ITEM(_, 2, _, _, NULL);@*/
{
/*@xLIST_distinct_cells(l);@*/
/*@open xLIST(l, len, idx, end, cells, vals);@*/
ListItem_t *index = l->pxIndex;
ListItem_t * index = l->pxIndex;
/*@close xLIST(l, len, idx, end, cells, vals);@*/
/*@close exists(l);@*/
uxListRemove( index );
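
For readers less used to the separation-logic contracts, here is a minimal, unverified C sketch of the behaviour that the two client examples above capture formally. It assumes only the public FreeRTOS list API from list.h and a working configASSERT(); the helper name prvInformalClientExample is ours and is not part of the proof.

#include "FreeRTOS.h"
#include "list.h"

/* uxListRemove() unlinks the item from whichever list contains it and
 * returns the number of items left in that list. */
static void prvInformalClientExample( void )
{
    List_t xList;
    ListItem_t xItemA, xItemB;

    vListInitialise( &xList );
    vListInitialiseItem( &xItemA );
    vListInitialiseItem( &xItemB );
    listSET_LIST_ITEM_VALUE( &xItemA, 1 );
    listSET_LIST_ITEM_VALUE( &xItemB, 2 );

    vListInsert( &xList, &xItemA );
    vListInsert( &xList, &xItemB );

    configASSERT( uxListRemove( &xItemA ) == 1 ); /* one item remains */
    configASSERT( uxListRemove( &xItemB ) == 0 ); /* list is now empty */
    configASSERT( listLIST_IS_EMPTY( &xList ) );
}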

View file

@ -27,16 +27,16 @@
#include "proof/list.h"
/*@
predicate xLIST_uninitialised(struct xLIST *l) =
l->uxNumberOfItems |-> _ &*&
l->pxIndex |-> _ &*&
l->xListEnd.xItemValue |-> _ &*&
l->xListEnd.pxNext |-> _ &*&
l->xListEnd.pxPrevious |-> _ &*&
l->xListEnd.pvOwner |-> _ &*&
l->xListEnd.pxContainer |-> _ &*&
struct_xLIST_ITEM_padding(&l->xListEnd);
@*/
* predicate xLIST_uninitialised(struct xLIST *l) =
* l->uxNumberOfItems |-> _ &*&
* l->pxIndex |-> _ &*&
* l->xListEnd.xItemValue |-> _ &*&
* l->xListEnd.pxNext |-> _ &*&
* l->xListEnd.pxPrevious |-> _ &*&
* l->xListEnd.pvOwner |-> _ &*&
* l->xListEnd.pxContainer |-> _ &*&
* struct_xLIST_ITEM_padding(&l->xListEnd);
* @*/
void vListInitialise( List_t * const pxList )
/*@requires xLIST_uninitialised(pxList);@*/
@ -65,9 +65,9 @@ void vListInitialise( List_t * const pxList )
listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList );
listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList );
#ifdef VERIFAST /*< ***change MiniList_t to ListItem_t*** */
pxList->xListEnd.pxContainer = pxList;
#endif
#ifdef VERIFAST /*< ***change MiniList_t to ListItem_t*** */
pxList->xListEnd.pxContainer = pxList;
#endif
/*@ListItem_t *end = &(pxList->xListEnd);@*/
/*@close xLIST_ITEM(end, portMAX_DELAY, _, _, pxList);@*/
/*@close DLS(end, end, end, end, singleton(end), singleton(portMAX_DELAY), pxList);@*/
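
As an informal counterpart to the DLS predicate closed above, the following unverified sketch spells out the concrete state vListInitialise() establishes: the list is a one-element cycle through its end marker, whose item value is portMAX_DELAY. It assumes the stock FreeRTOS list.h layout; the helper name prvCheckInitialisedList is ours.

#include "FreeRTOS.h"
#include "list.h"

static void prvCheckInitialisedList( void )
{
    List_t xList;

    vListInitialise( &xList );

    /* Empty list: the index and both links of the end marker point back
     * at the end marker itself, and its item value is portMAX_DELAY. */
    configASSERT( xList.uxNumberOfItems == ( UBaseType_t ) 0 );
    configASSERT( xList.pxIndex == ( ListItem_t * ) &( xList.xListEnd ) );
    configASSERT( xList.xListEnd.xItemValue == portMAX_DELAY );
    configASSERT( xList.xListEnd.pxNext == ( ListItem_t * ) &( xList.xListEnd ) );
    configASSERT( xList.xListEnd.pxPrevious == ( ListItem_t * ) &( xList.xListEnd ) );
}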

View file

@ -27,18 +27,21 @@
#include "proof/list.h"
ListItem_t * choose(List_t * list);
ListItem_t * choose( List_t * list );
/*@ requires DLS(&(list->xListEnd), ?endprev, &(list->xListEnd), endprev, ?cells, ?vals, ?container);@*/
/*@ ensures DLS(&(list->xListEnd), endprev, &(list->xListEnd), endprev, cells, vals, container) &*&
mem(result, cells) == true;@*/
* mem(result, cells) == true;@*/
void vListInsert( List_t * const pxList,
ListItem_t * const pxNewListItem )
/*@requires xLIST(pxList, ?len, ?idx, ?end, ?cells, ?vals) &*&
xLIST_ITEM(pxNewListItem, ?val, _, _, _);@*/
* xLIST_ITEM(pxNewListItem, ?val, _, _, _);@*/
/*@ensures xLIST(pxList, len+1, idx, end, ?new_cells, ?new_vals) &*&
remove(pxNewListItem, new_cells) == cells
;@*/
* remove(pxNewListItem, new_cells) == cells
* ;@*/
{
/*@xLIST_star_item(pxList, pxNewListItem);@*/
/*@open xLIST_ITEM(pxNewListItem, _, _, _, _);@*/
@ -65,23 +68,24 @@ void vListInsert( List_t * const pxList,
{
/*@open DLS(end, endprev, end, endprev, cells, vals, pxList);@*/
/*@open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, pxList);@*/
/*@
if (end != endprev)
{
assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), pxList);
if (endnext == endprev)
{
// done
}
else
{
dls_last_mem(endnext, end, end, endprev, tail(cells));
split(endnext, end, end, endprev, tail(cells), tail(vals), endprev, index_of(endprev, tail(cells)));
}
open DLS(endprev, _, _, _, _, _, _);
open xLIST_ITEM(endprev, _, _, _, _);
}
@*/
* if (end != endprev)
* {
* assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), pxList);
* if (endnext == endprev)
* {
* // done
* }
* else
* {
* dls_last_mem(endnext, end, end, endprev, tail(cells));
* split(endnext, end, end, endprev, tail(cells), tail(vals), endprev, index_of(endprev, tail(cells)));
* }
* open DLS(endprev, _, _, _, _, _, _);
* open xLIST_ITEM(endprev, _, _, _, _);
* }
* @*/
pxIterator = pxList->xListEnd.pxPrevious;
}
else
@ -108,52 +112,53 @@ void vListInsert( List_t * const pxList,
* before vTaskStartScheduler() has been called?).
**********************************************************************/
#ifdef VERIFAST /*< ***over-approximate list insert loop*** */
pxIterator = choose(pxList);
#else
for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */
{
/* There is nothing to do here, just iterating to the wanted
* insertion position. */
}
#endif
#ifdef VERIFAST /*< ***over-approximate list insert loop*** */
pxIterator = choose( pxList );
#else
for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */
{
/* There is nothing to do here, just iterating to the wanted
* insertion position. */
}
#endif
/*@int i = index_of(pxIterator, cells);@*/
/*@
if (pxIterator == end)
{
open DLS(end, endprev, end, endprev, cells, vals, pxList);
open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, pxList);
if (end != endprev)
{
assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), pxList);
open DLS(endnext, _, _, _, _, _, _);
open xLIST_ITEM(endnext, _, _, _, _);
}
}
else
{
assert DLS(end, endprev, end, endprev, cells, vals, pxList);
dls_first_mem(end, endprev, end, endprev, cells);
assert pxIterator != end;
assert index_of(end, cells) == 0;
split(end, endprev, end, endprev, cells, vals, pxIterator, i);
assert DLS(end, endprev, pxIterator, ?iterprev, take(i, cells), take(i, vals), pxList);
assert DLS(pxIterator, iterprev, end, endprev, drop(i, cells), drop(i, vals), pxList);
open DLS(pxIterator, iterprev, end, endprev, drop(i, cells), drop(i, vals), pxList);
open xLIST_ITEM(pxIterator, _, ?iternext, iterprev, pxList);
if (pxIterator == endprev)
{
open DLS(end, endprev, pxIterator, iterprev, take(i, cells), take(i, vals), pxList);
take_take(1, i, vals);
assert xLIST_ITEM(end, portMAX_DELAY, _, _, _);
open xLIST_ITEM(iternext, _, _, pxIterator, _);
}
else
{
open DLS(iternext, pxIterator, end, endprev, _, _, _);
open xLIST_ITEM(iternext, _, _, pxIterator, _);
}
}@*/
* if (pxIterator == end)
* {
* open DLS(end, endprev, end, endprev, cells, vals, pxList);
* open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, pxList);
* if (end != endprev)
* {
* assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), pxList);
* open DLS(endnext, _, _, _, _, _, _);
* open xLIST_ITEM(endnext, _, _, _, _);
* }
* }
* else
* {
* assert DLS(end, endprev, end, endprev, cells, vals, pxList);
* dls_first_mem(end, endprev, end, endprev, cells);
* assert pxIterator != end;
* assert index_of(end, cells) == 0;
* split(end, endprev, end, endprev, cells, vals, pxIterator, i);
* assert DLS(end, endprev, pxIterator, ?iterprev, take(i, cells), take(i, vals), pxList);
* assert DLS(pxIterator, iterprev, end, endprev, drop(i, cells), drop(i, vals), pxList);
* open DLS(pxIterator, iterprev, end, endprev, drop(i, cells), drop(i, vals), pxList);
* open xLIST_ITEM(pxIterator, _, ?iternext, iterprev, pxList);
* if (pxIterator == endprev)
* {
* open DLS(end, endprev, pxIterator, iterprev, take(i, cells), take(i, vals), pxList);
* take_take(1, i, vals);
* assert xLIST_ITEM(end, portMAX_DELAY, _, _, _);
* open xLIST_ITEM(iternext, _, _, pxIterator, _);
* }
* else
* {
* open DLS(iternext, pxIterator, end, endprev, _, _, _);
* open xLIST_ITEM(iternext, _, _, pxIterator, _);
* }
* }@*/
}
pxNewListItem->pxNext = pxIterator->pxNext;
@ -169,152 +174,153 @@ void vListInsert( List_t * const pxList,
/*@close xLIST_ITEM(pxNewListItem, val, ?iternext, pxIterator, pxList);@*/
/*@close xLIST_ITEM(pxIterator, ?iterval, pxNewListItem, ?iterprev, pxList);@*/
/*@
if( xValueOfInsertion == portMAX_DELAY )
{
assert iternext == end;
assert pxIterator == endprev;
if (end == endprev)
{
close DLS(end, pxNewListItem, pxNewListItem, end, cells, vals, pxList);
close DLS(pxNewListItem, end, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
join(end, pxNewListItem, pxNewListItem, end, cells, vals,
pxNewListItem, end, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
}
else
{
close xLIST_ITEM(end, portMAX_DELAY, ?endnext, pxNewListItem, pxList);
close DLS(pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
if (endnext == endprev)
{
assert xLIST_ITEM(endnext, ?endnextval, pxNewListItem, end, pxList);
close DLS(end, pxNewListItem, endnext, end, singleton(end), singleton(portMAX_DELAY), pxList);
close DLS(endnext, end, pxNewListItem, endnext, singleton(endnext), singleton(endnextval), pxList);
join(end, pxNewListItem, endnext, end, singleton(end), singleton(portMAX_DELAY),
endnext, end, pxNewListItem, endnext, singleton(endnext), singleton(endnextval));
assert DLS(end, pxNewListItem, pxNewListItem, endnext, cells, vals, pxList);
}
else
{
assert DLS(endnext, end, endprev, ?endprevprev, ?cells_endnext_to_endprevprev, ?vals_endnext_to_endprevprev, pxList);
assert cells_endnext_to_endprevprev == take(index_of(endprev, tail(cells)), tail(cells));
assert index_of(endprev, tail(cells)) == length(tail(cells)) - 1;
assert cells_endnext_to_endprevprev == take(length(tail(cells)) - 1, tail(cells));
assert xLIST_ITEM(endprev, ?endprevval, pxNewListItem, endprevprev, pxList);
close DLS(endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList);
dls_last_mem(endnext, end, endprev, endprevprev, cells_endnext_to_endprevprev);
dls_star_item(endnext, endprevprev, end);
close DLS(end, pxNewListItem, endprev, endprevprev, cons(end, cells_endnext_to_endprevprev), cons(portMAX_DELAY, vals_endnext_to_endprevprev), pxList);
join(end, pxNewListItem, endprev, endprevprev, cons(end, cells_endnext_to_endprevprev), cons(portMAX_DELAY, vals_endnext_to_endprevprev),
endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval));
assert DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList);
}
join(end, pxNewListItem, pxNewListItem, endprev, cells, vals,
pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
remove_append(pxNewListItem, cells, singleton(pxNewListItem));
close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
}
}
else
{
if (pxIterator == end)
{
if (iternext == end)
{
close DLS(end, pxNewListItem, pxNewListItem, end, cells, vals, pxList);
close DLS(pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
join(end, pxNewListItem, pxNewListItem, end, cells, vals,
pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
}
else
{
close xLIST_ITEM(iternext, ?iternextval, _, pxNewListItem, pxList);
if (iternext == endprev)
{
close DLS(iternext, pxNewListItem, end, endprev, singleton(iternext), singleton(iternextval), pxList);
dls_last_mem(iternext, pxNewListItem, end, endprev, singleton(iternext));
}
else
{
assert DLS(?iternextnext, iternext, end, endprev, ?cells_iternextnext_to_endprev, ?vals_iternextnext_to_endprev, pxList);
close DLS(iternext, pxNewListItem, end, endprev, cons(iternext, cells_iternextnext_to_endprev), cons(iternextval, vals_iternextnext_to_endprev), pxList);
dls_last_mem(iternext, pxNewListItem, end, endprev, cons(iternext, cells_iternextnext_to_endprev));
}
close DLS(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY), pxList);
assert DLS(iternext, pxNewListItem, end, endprev, ?cells_iternext_to_endprev, ?vals_iternext_to_endprev, pxList);
dls_star_item(iternext, endprev, pxNewListItem);
close DLS(pxNewListItem, pxIterator, end, endprev, cons(pxNewListItem, cells_iternext_to_endprev), cons(val, vals_iternext_to_endprev), pxList);
join(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY),
pxNewListItem, pxIterator, end, endprev, cons(pxNewListItem, cells_iternext_to_endprev), cons(val, vals_iternext_to_endprev));
close xLIST(pxList, len+1, idx, end, cons(end, cons(pxNewListItem, cells_iternext_to_endprev)), cons(portMAX_DELAY, cons(val, vals_iternext_to_endprev)));
}
}
else
{
close xLIST_ITEM(iternext, ?iternextval, _, pxNewListItem, pxList);
if (pxIterator == endprev)
{
if (iterprev == end)
{
close DLS(end, pxNewListItem, pxIterator, end, singleton(end), singleton(portMAX_DELAY), pxList);
}
else
{
assert DLS(_, iternext, pxIterator, iterprev, ?cells1, ?vals1, _);
close DLS(end, pxNewListItem, pxIterator, iterprev, cons(end, cells1), cons(portMAX_DELAY, vals1), pxList);
}
int i = index_of(pxIterator, cells);
assert DLS(end, pxNewListItem, pxIterator, iterprev, take(i, cells), take(i, vals), pxList);
close DLS(pxIterator, iterprev, pxNewListItem, pxIterator, drop(i, cells), drop(i, vals), pxList);
close DLS(pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
join(end, pxNewListItem, pxIterator, iterprev, take(i, cells), take(i, vals),
pxIterator, iterprev, pxNewListItem, pxIterator, drop(i, cells), drop(i, vals));
join(end, pxNewListItem, pxNewListItem, pxIterator, cells, vals,
pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
remove_append(pxNewListItem, cells, singleton(pxNewListItem));
}
else
{
int i = index_of(pxIterator, cells);
if (iternext == endprev)
{
close DLS(iternext, pxNewListItem, end, endprev, singleton(iternext), singleton(iternextval), pxList);
}
else
{
assert DLS(_, iternext, end, endprev, ?cells0, ?vals0, pxList);
dls_star_item(end, iterprev, iternext);
close DLS(iternext, pxNewListItem, end, endprev, tail(drop(i, cells)), tail(drop(i, vals)), pxList);
}
drop_drop(1, i, cells);
drop_drop(1, i, vals);
assert DLS(iternext, pxNewListItem, end, endprev, drop(i+1, cells), drop(i+1, vals), pxList);
assert DLS(end, endprev, pxIterator, iterprev, take(i, cells), take(i, vals), pxList);
dls_star_item(iternext, endprev, pxNewListItem);
dls_last_mem(iternext, pxNewListItem, end, endprev, drop(i+1, cells));
close DLS(pxNewListItem, pxIterator, end, endprev, cons(pxNewListItem, drop(i+1, cells)), cons(val, drop(i+1, vals)), pxList);
close DLS(pxIterator, iterprev, end, endprev, cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))), cons(iterval, cons(val, drop(i+1, vals))), pxList);
join(end, endprev, pxIterator, iterprev, take(i, cells), take(i, vals),
pxIterator, iterprev, end, endprev, cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))), cons(iterval, cons(val, drop(i+1, vals))));
list<struct xLIST_ITEM * >new_cells = append(take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))));
list<TickType_t >new_vals = append(take(i, vals), cons(iterval, cons(val, drop(i+1, vals))));
head_append(take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))));
take_head(take(i, cells));
take_take(1, i, cells);
assert( end == head(new_cells) );
head_append(take(i, vals), cons(iterval, cons(val, drop(i+1, vals))));
take_head(take(i, vals));
take_take(1, i, vals);
assert( portMAX_DELAY == head(new_vals) );
append_take_drop_n(cells, index_of(pxIterator, cells));
close xLIST(pxList, len+1, idx, end, append(take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells)))), append(take(i, vals), cons(iterval, cons(val, drop(i+1, vals)))));
mem_take_false(pxNewListItem, i, cells);
remove_append(pxNewListItem, take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))));
}
}
}@*/
/*@
* if( xValueOfInsertion == portMAX_DELAY )
* {
* assert iternext == end;
* assert pxIterator == endprev;
* if (end == endprev)
* {
* close DLS(end, pxNewListItem, pxNewListItem, end, cells, vals, pxList);
* close DLS(pxNewListItem, end, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
* join(end, pxNewListItem, pxNewListItem, end, cells, vals,
* pxNewListItem, end, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
* close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
* }
* else
* {
* close xLIST_ITEM(end, portMAX_DELAY, ?endnext, pxNewListItem, pxList);
* close DLS(pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
* if (endnext == endprev)
* {
* assert xLIST_ITEM(endnext, ?endnextval, pxNewListItem, end, pxList);
* close DLS(end, pxNewListItem, endnext, end, singleton(end), singleton(portMAX_DELAY), pxList);
* close DLS(endnext, end, pxNewListItem, endnext, singleton(endnext), singleton(endnextval), pxList);
* join(end, pxNewListItem, endnext, end, singleton(end), singleton(portMAX_DELAY),
* endnext, end, pxNewListItem, endnext, singleton(endnext), singleton(endnextval));
* assert DLS(end, pxNewListItem, pxNewListItem, endnext, cells, vals, pxList);
* }
* else
* {
* assert DLS(endnext, end, endprev, ?endprevprev, ?cells_endnext_to_endprevprev, ?vals_endnext_to_endprevprev, pxList);
* assert cells_endnext_to_endprevprev == take(index_of(endprev, tail(cells)), tail(cells));
* assert index_of(endprev, tail(cells)) == length(tail(cells)) - 1;
* assert cells_endnext_to_endprevprev == take(length(tail(cells)) - 1, tail(cells));
* assert xLIST_ITEM(endprev, ?endprevval, pxNewListItem, endprevprev, pxList);
* close DLS(endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList);
* dls_last_mem(endnext, end, endprev, endprevprev, cells_endnext_to_endprevprev);
* dls_star_item(endnext, endprevprev, end);
* close DLS(end, pxNewListItem, endprev, endprevprev, cons(end, cells_endnext_to_endprevprev), cons(portMAX_DELAY, vals_endnext_to_endprevprev), pxList);
* join(end, pxNewListItem, endprev, endprevprev, cons(end, cells_endnext_to_endprevprev), cons(portMAX_DELAY, vals_endnext_to_endprevprev),
* endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval));
* assert DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList);
*
* }
* join(end, pxNewListItem, pxNewListItem, endprev, cells, vals,
* pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
* remove_append(pxNewListItem, cells, singleton(pxNewListItem));
* close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
* }
* }
* else
* {
* if (pxIterator == end)
* {
* if (iternext == end)
* {
* close DLS(end, pxNewListItem, pxNewListItem, end, cells, vals, pxList);
* close DLS(pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
* join(end, pxNewListItem, pxNewListItem, end, cells, vals,
* pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
* close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
* }
* else
* {
* close xLIST_ITEM(iternext, ?iternextval, _, pxNewListItem, pxList);
* if (iternext == endprev)
* {
* close DLS(iternext, pxNewListItem, end, endprev, singleton(iternext), singleton(iternextval), pxList);
* dls_last_mem(iternext, pxNewListItem, end, endprev, singleton(iternext));
* }
* else
* {
* assert DLS(?iternextnext, iternext, end, endprev, ?cells_iternextnext_to_endprev, ?vals_iternextnext_to_endprev, pxList);
* close DLS(iternext, pxNewListItem, end, endprev, cons(iternext, cells_iternextnext_to_endprev), cons(iternextval, vals_iternextnext_to_endprev), pxList);
* dls_last_mem(iternext, pxNewListItem, end, endprev, cons(iternext, cells_iternextnext_to_endprev));
* }
* close DLS(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY), pxList);
* assert DLS(iternext, pxNewListItem, end, endprev, ?cells_iternext_to_endprev, ?vals_iternext_to_endprev, pxList);
* dls_star_item(iternext, endprev, pxNewListItem);
* close DLS(pxNewListItem, pxIterator, end, endprev, cons(pxNewListItem, cells_iternext_to_endprev), cons(val, vals_iternext_to_endprev), pxList);
* join(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY),
* pxNewListItem, pxIterator, end, endprev, cons(pxNewListItem, cells_iternext_to_endprev), cons(val, vals_iternext_to_endprev));
* close xLIST(pxList, len+1, idx, end, cons(end, cons(pxNewListItem, cells_iternext_to_endprev)), cons(portMAX_DELAY, cons(val, vals_iternext_to_endprev)));
* }
* }
* else
* {
* close xLIST_ITEM(iternext, ?iternextval, _, pxNewListItem, pxList);
* if (pxIterator == endprev)
* {
* if (iterprev == end)
* {
* close DLS(end, pxNewListItem, pxIterator, end, singleton(end), singleton(portMAX_DELAY), pxList);
* }
* else
* {
* assert DLS(_, iternext, pxIterator, iterprev, ?cells1, ?vals1, _);
* close DLS(end, pxNewListItem, pxIterator, iterprev, cons(end, cells1), cons(portMAX_DELAY, vals1), pxList);
* }
* int i = index_of(pxIterator, cells);
* assert DLS(end, pxNewListItem, pxIterator, iterprev, take(i, cells), take(i, vals), pxList);
* close DLS(pxIterator, iterprev, pxNewListItem, pxIterator, drop(i, cells), drop(i, vals), pxList);
* close DLS(pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
* join(end, pxNewListItem, pxIterator, iterprev, take(i, cells), take(i, vals),
* pxIterator, iterprev, pxNewListItem, pxIterator, drop(i, cells), drop(i, vals));
* join(end, pxNewListItem, pxNewListItem, pxIterator, cells, vals,
* pxNewListItem, pxIterator, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
* close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
* remove_append(pxNewListItem, cells, singleton(pxNewListItem));
* }
* else
* {
* int i = index_of(pxIterator, cells);
* if (iternext == endprev)
* {
* close DLS(iternext, pxNewListItem, end, endprev, singleton(iternext), singleton(iternextval), pxList);
* }
* else
* {
* assert DLS(_, iternext, end, endprev, ?cells0, ?vals0, pxList);
* dls_star_item(end, iterprev, iternext);
* close DLS(iternext, pxNewListItem, end, endprev, tail(drop(i, cells)), tail(drop(i, vals)), pxList);
* }
* drop_drop(1, i, cells);
* drop_drop(1, i, vals);
* assert DLS(iternext, pxNewListItem, end, endprev, drop(i+1, cells), drop(i+1, vals), pxList);
* assert DLS(end, endprev, pxIterator, iterprev, take(i, cells), take(i, vals), pxList);
* dls_star_item(iternext, endprev, pxNewListItem);
* dls_last_mem(iternext, pxNewListItem, end, endprev, drop(i+1, cells));
* close DLS(pxNewListItem, pxIterator, end, endprev, cons(pxNewListItem, drop(i+1, cells)), cons(val, drop(i+1, vals)), pxList);
* close DLS(pxIterator, iterprev, end, endprev, cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))), cons(iterval, cons(val, drop(i+1, vals))), pxList);
* join(end, endprev, pxIterator, iterprev, take(i, cells), take(i, vals),
* pxIterator, iterprev, end, endprev, cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))), cons(iterval, cons(val, drop(i+1, vals))));
* list<struct xLIST_ITEM * >new_cells = append(take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))));
* list<TickType_t >new_vals = append(take(i, vals), cons(iterval, cons(val, drop(i+1, vals))));
* head_append(take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))));
* take_head(take(i, cells));
* take_take(1, i, cells);
* assert( end == head(new_cells) );
* head_append(take(i, vals), cons(iterval, cons(val, drop(i+1, vals))));
* take_head(take(i, vals));
* take_take(1, i, vals);
* assert( portMAX_DELAY == head(new_vals) );
* append_take_drop_n(cells, index_of(pxIterator, cells));
* close xLIST(pxList, len+1, idx, end, append(take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells)))), append(take(i, vals), cons(iterval, cons(val, drop(i+1, vals)))));
* mem_take_false(pxNewListItem, i, cells);
* remove_append(pxNewListItem, take(i, cells), cons(pxIterator, cons(pxNewListItem, drop(i+1, cells))));
* }
* }
* }@*/
}
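
The choose() abstraction above deliberately forgets where the insertion loop stops; what the contract still guarantees is that exactly one cell is added, and concretely vListInsert() keeps items sorted by ascending xItemValue. The following is a small unverified sketch of that concrete behaviour, using only the public list API; the helper name prvCheckSortedInsert is ours.

#include "FreeRTOS.h"
#include "list.h"

static void prvCheckSortedInsert( void )
{
    List_t xList;
    ListItem_t xItems[ 3 ];
    const TickType_t xValues[ 3 ] = { 30, 10, 20 };
    ListItem_t * pxWalk;
    UBaseType_t i;

    vListInitialise( &xList );

    for( i = 0; i < 3; i++ )
    {
        vListInitialiseItem( &xItems[ i ] );
        listSET_LIST_ITEM_VALUE( &xItems[ i ], xValues[ i ] );
        vListInsert( &xList, &xItems[ i ] );
    }

    /* Walking pxNext from the end marker visits items in ascending
     * xItemValue order, regardless of the insertion order. */
    pxWalk = xList.xListEnd.pxNext;
    configASSERT( listGET_LIST_ITEM_VALUE( pxWalk ) == 10 );
    pxWalk = pxWalk->pxNext;
    configASSERT( listGET_LIST_ITEM_VALUE( pxWalk ) == 20 );
    pxWalk = pxWalk->pxNext;
    configASSERT( listGET_LIST_ITEM_VALUE( pxWalk ) == 30 );
}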

View file

@ -28,90 +28,93 @@
void vListInsertEnd( List_t * const pxList,
ListItem_t * const pxNewListItem )
/*@requires xLIST(pxList, ?len, ?idx, ?end, ?cells, ?vals) &*&
xLIST_ITEM(pxNewListItem, ?val, _, _, _);@*/
* xLIST_ITEM(pxNewListItem, ?val, _, _, _);@*/
/*@ensures xLIST(pxList, len+1, idx, end, ?new_cells, ?new_vals) &*&
idx == end
? (new_cells == append(cells, singleton(pxNewListItem)) &*&
new_vals == append(vals, singleton(val)))
: (new_cells == append(take(index_of(idx, cells), cells), append(singleton(pxNewListItem), drop(index_of(idx, cells), cells))) &*&
new_vals == append(take(index_of(idx, cells), vals), append(singleton(val), drop(index_of(idx, cells), vals))));@*/
* idx == end
* ? (new_cells == append(cells, singleton(pxNewListItem)) &*&
* new_vals == append(vals, singleton(val)))
* : (new_cells == append(take(index_of(idx, cells), cells), append(singleton(pxNewListItem), drop(index_of(idx, cells), cells))) &*&
* new_vals == append(take(index_of(idx, cells), vals), append(singleton(val), drop(index_of(idx, cells), vals))));@*/
{
/*@xLIST_star_item(pxList, pxNewListItem);@*/
/*@assert mem(pxNewListItem, cells) == false;@*/
/*@open xLIST(pxList, len, idx, end, cells, vals);@*/
#ifdef VERIFAST /*< const pointer declaration */
ListItem_t * pxIndex = pxList->pxIndex;
#else
ListItem_t * const pxIndex = pxList->pxIndex;
#ifdef VERIFAST /*< const pointer declaration */
ListItem_t * pxIndex = pxList->pxIndex;
#else
ListItem_t * const pxIndex = pxList->pxIndex;
/* Only effective when configASSERT() is also defined, these tests may catch
* the list data structures being overwritten in memory. They will not catch
* data errors caused by incorrect configuration or use of FreeRTOS. */
listTEST_LIST_INTEGRITY( pxList );
listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
#endif
/* Only effective when configASSERT() is also defined, these tests may catch
* the list data structures being overwritten in memory. They will not catch
* data errors caused by incorrect configuration or use of FreeRTOS. */
listTEST_LIST_INTEGRITY( pxList );
listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
#endif
/*@open xLIST_ITEM(pxNewListItem, _, _, _, _);@*/
/*@assert DLS(end, ?endprev, end, _, cells, vals, pxList);@*/
/*@dls_first_mem(end, endprev, end, endprev, cells);@*/
/*@dls_last_mem(end, endprev, end, endprev, cells);@*/
/*@
if (end == idx)
{
open DLS(end, endprev, end, endprev, cells, vals, pxList);
open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, pxList);
if (end == endprev)
{
// Case A (singleton): idx==end==endprev
}
else
{
assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), pxList);
if (endnext == endprev)
{
// Case B (two): idx==end and endnext==endprev
open DLS(endnext, end, end, endnext, _, _, _);
open xLIST_ITEM(endnext, _, _, _, _);
}
else
{
// Case C: idx==end and DLS:endnext...endprev
split(endnext, end, end, endprev, tail(cells), tail(vals), endprev, index_of(endprev, tail(cells)));
open DLS(endprev, _, _, _, _, _, _);
open xLIST_ITEM(endprev, _, _, _, _);
}
}
}
else
{
int i = index_of(idx, cells);
split(end, endprev, end, endprev, cells, vals, idx, i);
assert DLS(end, endprev, idx, ?idxprev, take(i, cells), take(i, vals), pxList);
assert DLS(idx, idxprev, end, endprev, drop(i, cells), drop(i, vals), pxList);
open DLS(idx, idxprev, end, endprev, _, _, _);
open xLIST_ITEM(idx, _, _, _, _);
if (end == idxprev)
{
// Case D: end==idxprev and DLS:idx...endprev
take_take(1, i, vals);
take_head(vals);
open DLS(end, endprev, idx, idxprev, take(i, cells), take(i, vals), pxList);
open xLIST_ITEM(end, portMAX_DELAY, _, _, _);
assert length(take(i, cells)) == 1;
}
else
{
// Case E: DLS:end...idxprev and DLS:idx...endprev
dls_last_mem(end, endprev, idx, idxprev, take(i, cells));
split(end, endprev, idx, idxprev, take(i, cells), take(i, vals), idxprev, index_of(idxprev, take(i, cells)));
open DLS(idxprev, _, _, idxprev, _, _, _);
length_take(i, cells);
drop_take_singleton(i, vals);
open xLIST_ITEM(idxprev, nth(i-1, vals), _, _, _);
}
}
@*/
* if (end == idx)
* {
* open DLS(end, endprev, end, endprev, cells, vals, pxList);
* open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, pxList);
* if (end == endprev)
* {
* // Case A (singleton): idx==end==endprev
* }
* else
* {
* assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), pxList);
* if (endnext == endprev)
* {
* // Case B (two): idx==end and endnext==endprev
* open DLS(endnext, end, end, endnext, _, _, _);
* open xLIST_ITEM(endnext, _, _, _, _);
* }
* else
* {
* // Case C: idx==end and DLS:endnext...endprev
* split(endnext, end, end, endprev, tail(cells), tail(vals), endprev, index_of(endprev, tail(cells)));
* open DLS(endprev, _, _, _, _, _, _);
* open xLIST_ITEM(endprev, _, _, _, _);
* }
* }
* }
* else
* {
* int i = index_of(idx, cells);
* split(end, endprev, end, endprev, cells, vals, idx, i);
* assert DLS(end, endprev, idx, ?idxprev, take(i, cells), take(i, vals), pxList);
* assert DLS(idx, idxprev, end, endprev, drop(i, cells), drop(i, vals), pxList);
* open DLS(idx, idxprev, end, endprev, _, _, _);
* open xLIST_ITEM(idx, _, _, _, _);
* if (end == idxprev)
* {
* // Case D: end==idxprev and DLS:idx...endprev
* take_take(1, i, vals);
* take_head(vals);
* open DLS(end, endprev, idx, idxprev, take(i, cells), take(i, vals), pxList);
* open xLIST_ITEM(end, portMAX_DELAY, _, _, _);
* assert length(take(i, cells)) == 1;
* }
* else
* {
* // Case E: DLS:end...idxprev and DLS:idx...endprev
* dls_last_mem(end, endprev, idx, idxprev, take(i, cells));
* split(end, endprev, idx, idxprev, take(i, cells), take(i, vals), idxprev, index_of(idxprev, take(i, cells)));
* open DLS(idxprev, _, _, idxprev, _, _, _);
* length_take(i, cells);
* drop_take_singleton(i, vals);
* open xLIST_ITEM(idxprev, nth(i-1, vals), _, _, _);
* }
* }
* @*/
/* Insert a new list item into pxList, but rather than sort the list,
* makes the new list item the last item to be removed by a call to
@ -131,108 +134,111 @@ void vListInsertEnd( List_t * const pxList,
( pxList->uxNumberOfItems )++;
/*@
if (end == idx)
{
close xLIST_ITEM(pxNewListItem, val, end, endprev, pxList);
close DLS(pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
close xLIST_ITEM(end, portMAX_DELAY, ?endnext, pxNewListItem, pxList);
if (end == endprev)
{
// Case A (singleton): idx==end==endprev
close DLS(end, pxNewListItem, endnext, end, cells, vals, pxList);
join(end, pxNewListItem, endnext, end, cells, vals,
pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
}
else
{
close xLIST_ITEM(endprev, ?endprevval, pxNewListItem, ?endprevprev, _);
if (endnext == endprev)
{
// Case B (two): idx==end and endnext==endprev
close DLS(endprev, end, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList);
close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList);
join(end, pxNewListItem, pxNewListItem, endprev, cells, vals,
pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
}
else
{
// Case C: idx==end and DLS:endnext...endprev
close DLS(endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList);
assert DLS(endnext, end, endprev, endprevprev, ?cells_endnext_to_endprevprev, ?vals_endnext_to_endprevprev, pxList);
join(endnext, end, endprev, endprevprev, cells_endnext_to_endprevprev, vals_endnext_to_endprevprev,
endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval));
close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList);
join(end, pxNewListItem, pxNewListItem, endprev, cells, vals,
pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
}
}
}
else
{
// Case D: end==idxprev and DLS:idx...endprev
// Case E: DLS:end...idxprev and DLS:idx...endprev
int i = index_of(idx, cells);
close xLIST_ITEM(pxNewListItem, val, idx, ?idxprev, pxList);
close xLIST_ITEM(idx, ?idxval, ?idxnext, pxNewListItem, pxList);
nth_drop2(vals, i);
assert idxval == nth(i, vals);
close xLIST_ITEM(idxprev, ?idxprevval, pxNewListItem, ?idxprevprev, pxList);
if (end == idxprev)
{
close DLS(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY), pxList);
}
else
{
length_take(i, cells);
take_take(i-1, i, vals);
take_singleton(i-1, vals);
take_singleton(i, vals);
assert DLS(end, endprev, idxprev, idxprevprev, ?cells_end_to_idxprevprev, take(i-1, vals), pxList);
close DLS(idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval), pxList);
join(end, endprev, idxprev, idxprevprev, cells_end_to_idxprevprev, take(i-1, vals),
idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval));
}
if (idx == endprev)
{
close DLS(idx, pxNewListItem, end, idx, singleton(idx), singleton(idxval), pxList);
}
else
{
assert DLS(end, endprev, pxNewListItem, idxprev, ?cells_end_to_idxprev, ?vals_end_to_idxprev, pxList);
close DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), pxList);
}
assert DLS(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals), pxList);
assert DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), pxList);
assert xLIST_ITEM(pxNewListItem, val, idx, idxprev, pxList);
dls_star_item(idx, endprev, pxNewListItem);
close DLS(pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals)), pxList);
join(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals),
pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals)));
assert DLS(end, endprev, end, endprev, ?cells_new, ?vals_new, pxList);
assert cells_new == append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells)));
assert vals_new == append(take(i, vals) , append(singleton(val), drop(i, vals)));
head_append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells)));
take_take(1, i, cells);
head_append(take(i, vals), append(singleton(val), drop(i, vals)));
take_take(1, i, vals);
close xLIST(pxList, len+1, idx, end, cells_new, vals_new);
}
@*/
* if (end == idx)
* {
* close xLIST_ITEM(pxNewListItem, val, end, endprev, pxList);
* close DLS(pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList);
* close xLIST_ITEM(end, portMAX_DELAY, ?endnext, pxNewListItem, pxList);
* if (end == endprev)
* {
* // Case A (singleton): idx==end==endprev
* close DLS(end, pxNewListItem, endnext, end, cells, vals, pxList);
* join(end, pxNewListItem, endnext, end, cells, vals,
* pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
* close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
* }
* else
* {
* close xLIST_ITEM(endprev, ?endprevval, pxNewListItem, ?endprevprev, _);
* if (endnext == endprev)
* {
* // Case B (two): idx==end and endnext==endprev
* close DLS(endprev, end, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList);
* close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList);
* join(end, pxNewListItem, pxNewListItem, endprev, cells, vals,
* pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
* close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
* }
* else
* {
* // Case C: idx==end and DLS:endnext...endprev
* close DLS(endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList);
* assert DLS(endnext, end, endprev, endprevprev, ?cells_endnext_to_endprevprev, ?vals_endnext_to_endprevprev, pxList);
* join(endnext, end, endprev, endprevprev, cells_endnext_to_endprevprev, vals_endnext_to_endprevprev,
* endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval));
* close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList);
* join(end, pxNewListItem, pxNewListItem, endprev, cells, vals,
* pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val));
* close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)));
* }
* }
* }
* else
* {
* // Case D: end==idxprev and DLS:idx...endprev
* // Case E: DLS:end...idxprev and DLS:idx...endprev
* int i = index_of(idx, cells);
* close xLIST_ITEM(pxNewListItem, val, idx, ?idxprev, pxList);
* close xLIST_ITEM(idx, ?idxval, ?idxnext, pxNewListItem, pxList);
* nth_drop2(vals, i);
* assert idxval == nth(i, vals);
* close xLIST_ITEM(idxprev, ?idxprevval, pxNewListItem, ?idxprevprev, pxList);
*
* if (end == idxprev)
* {
* close DLS(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY), pxList);
* }
* else
* {
* length_take(i, cells);
* take_take(i-1, i, vals);
* take_singleton(i-1, vals);
* take_singleton(i, vals);
* assert DLS(end, endprev, idxprev, idxprevprev, ?cells_end_to_idxprevprev, take(i-1, vals), pxList);
* close DLS(idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval), pxList);
* join(end, endprev, idxprev, idxprevprev, cells_end_to_idxprevprev, take(i-1, vals),
* idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval));
* }
*
* if (idx == endprev)
* {
* close DLS(idx, pxNewListItem, end, idx, singleton(idx), singleton(idxval), pxList);
* }
* else
* {
* assert DLS(end, endprev, pxNewListItem, idxprev, ?cells_end_to_idxprev, ?vals_end_to_idxprev, pxList);
* close DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), pxList);
* }
*
* assert DLS(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals), pxList);
* assert DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), pxList);
* assert xLIST_ITEM(pxNewListItem, val, idx, idxprev, pxList);
* dls_star_item(idx, endprev, pxNewListItem);
* close DLS(pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals)), pxList);
* join(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals),
* pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals)));
* assert DLS(end, endprev, end, endprev, ?cells_new, ?vals_new, pxList);
* assert cells_new == append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells)));
* assert vals_new == append(take(i, vals) , append(singleton(val), drop(i, vals)));
* head_append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells)));
* take_take(1, i, cells);
* head_append(take(i, vals), append(singleton(val), drop(i, vals)));
* take_take(1, i, vals);
* close xLIST(pxList, len+1, idx, end, cells_new, vals_new);
* }
* @*/
}
void client_example1( List_t * const l, ListItem_t * const pxNewListItem )
void client_example1( List_t * const l,
ListItem_t * const pxNewListItem )
/*@requires
xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
xLIST_ITEM(pxNewListItem, ?val, _, _, _) &*&
idx == end;@*/
* xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*&
* xLIST_ITEM(pxNewListItem, ?val, _, _, _) &*&
* idx == end;@*/
/*@ensures
xLIST(l, len + 1, idx, end, _, append(vals, singleton(val)));@*/
* xLIST(l, len + 1, idx, end, _, append(vals, singleton(val)));@*/
{
vListInsertEnd(l, pxNewListItem);
vListInsertEnd( l, pxNewListItem );
}
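
client_example1 exercises the idx == end case of the contract. In concrete terms that case means vListInsertEnd() appends at the back of the cycle, so each new item becomes xListEnd.pxPrevious. A short unverified sketch of that behaviour follows; the helper name prvCheckInsertEndAppends is ours.

#include "FreeRTOS.h"
#include "list.h"

static void prvCheckInsertEndAppends( void )
{
    List_t xList;
    ListItem_t xItemA, xItemB;

    vListInitialise( &xList );
    vListInitialiseItem( &xItemA );
    vListInitialiseItem( &xItemB );

    vListInsertEnd( &xList, &xItemA );
    vListInsertEnd( &xList, &xItemB );

    configASSERT( xList.xListEnd.pxNext == &xItemA );     /* first cell after end */
    configASSERT( xList.xListEnd.pxPrevious == &xItemB ); /* last cell before end */
    configASSERT( listCURRENT_LIST_LENGTH( &xList ) == 2 );
}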

View file

@ -37,35 +37,37 @@
/* The following intermediate queue predicates summarise states used during
 * queue initialisation but not elsewhere, so they are kept local to these
 * proofs. */
/*@
predicate queue_init1(QueueHandle_t q;) =
QUEUE_SHAPE(q, _, _, _, _) &*&
queuelists(q)
;
predicate queue_init2(QueueHandle_t q, int8_t *Storage, size_t N, size_t M;) =
QUEUE_SHAPE(q, Storage, N, M, _) &*&
queuelists(q) &*&
0 < N &*&
chars(Storage, (N*M), _) &*&
malloc_block(Storage, N*M) &*&
Storage + N * M <= (int8_t *)UINTPTR_MAX &*&
true
;
@*/
/*@
* predicate queue_init1(QueueHandle_t q;) =
* QUEUE_SHAPE(q, _, _, _, _) &*&
* queuelists(q)
* ;
*
* predicate queue_init2(QueueHandle_t q, int8_t *Storage, size_t N, size_t M;) =
* QUEUE_SHAPE(q, Storage, N, M, _) &*&
* queuelists(q) &*&
* 0 < N &*&
* chars(Storage, (N*M), _) &*&
* malloc_block(Storage, N*M) &*&
* Storage + N * M <= (int8_t *)UINTPTR_MAX &*&
* true
* ;
* @*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
BaseType_t xNewQueue )
/*@requires queue_init2(xQueue, ?Storage, ?N, ?M);@*/
/*@ensures 0 == M
? freertos_mutex(xQueue, Storage, N, 0)
: queue(xQueue, Storage, N, M, 0, (N-1), 0, false, nil) &*& queuelists(xQueue);@*/
* ? freertos_mutex(xQueue, Storage, N, 0)
* : queue(xQueue, Storage, N, M, 0, (N-1), 0, false, nil) &*& queuelists(xQueue);@*/
{
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
@ -125,18 +127,20 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
Queue_t * pxNewQueue )
/*@requires queue_init1(pxNewQueue) &*&
0 < uxQueueLength &*& 0 < uxItemSize &*&
malloc_block(pucQueueStorage, uxQueueLength * uxItemSize) &*&
pucQueueStorage + uxQueueLength * uxItemSize <= (uint8_t *)UINTPTR_MAX &*&
uchars(pucQueueStorage, uxQueueLength * uxItemSize,_);@*/
* 0 < uxQueueLength &*& 0 < uxItemSize &*&
* malloc_block(pucQueueStorage, uxQueueLength * uxItemSize) &*&
* pucQueueStorage + uxQueueLength * uxItemSize <= (uint8_t *)UINTPTR_MAX &*&
* uchars(pucQueueStorage, uxQueueLength * uxItemSize,_);@*/
/*@ensures queue(pxNewQueue, ((int8_t *)(void *)pucQueueStorage), uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
queuelists(pxNewQueue);@*/
* queuelists(pxNewQueue);@*/
{
#ifndef VERIFAST /*< void cast of unused var */
/* Remove compiler warnings about unused parameters should
* configUSE_TRACE_FACILITY not be set to 1. */
( void ) ucQueueType;
#endif
#ifndef VERIFAST /*< void cast of unused var */
/* Remove compiler warnings about unused parameters should
* configUSE_TRACE_FACILITY not be set to 1. */
( void ) ucQueueType;
#endif
if( uxItemSize == ( UBaseType_t ) 0 )
{
@ -144,20 +148,20 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* be set to NULL because NULL is used as a key to say the queue is used as
* a mutex. Therefore just set pcHead to point to the queue as a benign
* value that is known to be within the memory map. */
#ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pxNewQueue;
#else
pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
#endif
#ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pxNewQueue;
#else
pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
#endif
}
else
{
/* Set the head to the start of the queue storage area. */
#ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pucQueueStorage;
#else
pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
#endif
#ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pucQueueStorage;
#else
pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
#endif
}
/* Initialise the queue members as described where the queue type is
@ -165,11 +169,11 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
pxNewQueue->uxLength = uxQueueLength;
pxNewQueue->uxItemSize = uxItemSize;
/*@close queue_init2(pxNewQueue, _, uxQueueLength, uxItemSize);@*/
#ifdef VERIFAST /*< void cast of unused return value */
xQueueGenericReset( pxNewQueue, pdTRUE );
#else
( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
#endif
#ifdef VERIFAST /*< void cast of unused return value */
xQueueGenericReset( pxNewQueue, pdTRUE );
#else
( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
{
@ -187,41 +191,44 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
}
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
const UBaseType_t uxItemSize,
const uint8_t ucQueueType )
/*@requires 0 < uxQueueLength &*&
0 < uxItemSize &*&
0 < uxQueueLength * uxItemSize &*&
uxQueueLength * uxItemSize <= UINT_MAX;@*/
/*@ensures result == NULL
? true
: queue(result, _, uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
queuelists(result) &*&
result->irqMask |-> _ &*&
result->schedulerSuspend |-> _ &*&
result->locked |-> _;@*/
{
Queue_t * pxNewQueue;
size_t xQueueSizeInBytes;
uint8_t * pucQueueStorage;
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
const UBaseType_t uxItemSize,
const uint8_t ucQueueType )
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
/*@requires 0 < uxQueueLength &*&
* 0 < uxItemSize &*&
* 0 < uxQueueLength * uxItemSize &*&
* uxQueueLength * uxItemSize <= UINT_MAX;@*/
/* Allocate enough space to hold the maximum number of items that
* can be in the queue at any time. It is valid for uxItemSize to be
* zero in the case the queue is used as a semaphore. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
/*@ensures result == NULL
* ? true
* : queue(result, _, uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
* queuelists(result) &*&
* result->irqMask |-> _ &*&
* result->schedulerSuspend |-> _ &*&
* result->locked |-> _;@*/
{
Queue_t * pxNewQueue;
size_t xQueueSizeInBytes;
uint8_t * pucQueueStorage;
/* Check for multiplication overflow. */
configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
/* Check for addition overflow. */
configASSERT( ( sizeof( Queue_t ) + xQueueSizeInBytes ) > xQueueSizeInBytes );
/* Allocate enough space to hold the maximum number of items that
* can be in the queue at any time. It is valid for uxItemSize to be
* zero in the case the queue is used as a semaphore. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
/* Check for multiplication overflow. */
configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
/* Check for addition overflow. */
configASSERT( ( sizeof( Queue_t ) + xQueueSizeInBytes ) > xQueueSizeInBytes );
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
#else
#else
/* Allocate the queue and storage area. Justification for MISRA
* deviation as follows: pvPortMalloc() always ensures returned memory
* blocks are aligned per the requirements of the MCU stack. In this case
@ -232,11 +239,11 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* is safe. In other cases alignment requirements are not strict (one or
* two bytes). */
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */
#endif
#endif
if( pxNewQueue != NULL )
{
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
if( pxNewQueue != NULL )
{
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
pucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes );
if( pucQueueStorage == NULL )
@ -246,29 +253,30 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
}
/*@malloc_block_limits(pucQueueStorage);@*/
#else
#else
/* Jump past the queue structure to find the location of the queue
* storage area. */
pucQueueStorage = ( uint8_t * ) pxNewQueue;
pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
#endif
#endif /* ifdef VERIFAST */
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
{
/* Queues can be created either statically or dynamically, so
* note this task was created dynamically in case it is later
* deleted. */
pxNewQueue->ucStaticallyAllocated = pdFALSE;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
{
/* Queues can be created either statically or dynamically, so
* note this task was created dynamically in case it is later
* deleted. */
pxNewQueue->ucStaticallyAllocated = pdFALSE;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
}
else
{
traceQUEUE_CREATE_FAILED( ucQueueType );
mtCOVERAGE_TEST_MARKER();
}
return pxNewQueue;
prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
}
else
{
traceQUEUE_CREATE_FAILED( ucQueueType );
mtCOVERAGE_TEST_MARKER();
}
return pxNewQueue;
}
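
For orientation, the precondition on xQueueGenericCreate() (positive length and item size, no overflow in uxQueueLength * uxItemSize) is what an ordinary, well-formed call already satisfies, and the result == NULL branch of the ensures clause corresponds to allocation failure. Below is a minimal usage sketch with the standard queue API (xQueueCreate(), xQueueSend(), xQueueReceive()); the function name prvCreateSmallQueue is ours.

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

static void prvCreateSmallQueue( void )
{
    /* 8 items of 4 bytes: both factors positive, product far below UINT_MAX. */
    QueueHandle_t xQueue = xQueueCreate( 8, sizeof( uint32_t ) );

    if( xQueue != NULL ) /* NULL corresponds to the allocation-failure case */
    {
        uint32_t ulValue = 42;

        ( void ) xQueueSend( xQueue, &ulValue, 0 );
        ( void ) xQueueReceive( xQueue, &ulValue, 0 );
    }
}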

View file

@ -29,8 +29,9 @@
static void prvCopyDataFromQueue( Queue_t * const pxQueue,
void * const pvBuffer )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
chars(pvBuffer, M, head(abs));@*/
* chars(pvBuffer, M, head(abs));@*/
{
if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
{
@ -46,44 +47,48 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
else
{
/*@{
div_lt(R+1, N, M); // now we know R+1 < N
mod_lt(R+1, N); // so, R+1 == (R+1)%N
note(pxQueue->u.xQueue.pcReadFrom == Storage + ((R + 1) * M));
note( Storage + ((R + 1) * M) == Storage + (((R + 1) % N) * M));
}@*/
* div_lt(R+1, N, M); // now we know R+1 < N
* mod_lt(R+1, N); // so, R+1 == (R+1)%N
* note(pxQueue->u.xQueue.pcReadFrom == Storage + ((R + 1) * M));
* note( Storage + ((R + 1) * M) == Storage + (((R + 1) % N) * M));
* }@*/
mtCOVERAGE_TEST_MARKER();
}
/*@mod_plus(R+1, K, N);@*/
/*@mod_mod(R+1, N);@*/
/*@split_element(Storage, N, M, (R+1)%N);@*/
/*@assert
buffer(Storage, (R+1)%N, M, ?prefix) &*&
chars(Storage + ((R+1)%N) * M, M, ?element) &*&
buffer(Storage + ((R+1)%N + 1) * M, (N-1-(R+1)%N), M, ?suffix);@*/
#ifdef VERIFAST /*< void cast of unused return value */
memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
#else
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
#endif
* buffer(Storage, (R+1)%N, M, ?prefix) &*&
* chars(Storage + ((R+1)%N) * M, M, ?element) &*&
* buffer(Storage + ((R+1)%N + 1) * M, (N-1-(R+1)%N), M, ?suffix);@*/
#ifdef VERIFAST /*< void cast of unused return value */
memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
#else
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
#endif
/*@{
combine_list_no_change(prefix, element, suffix, (R+1)%N, contents);
join_element(Storage, N, M, (R+1)%N);
length_take(K, contents);
take_length_eq(K, rotate_left((R+1)%N, contents), abs);
deq_value_lemma(K, (R+1)%N, contents, abs);
}@*/
* combine_list_no_change(prefix, element, suffix, (R+1)%N, contents);
* join_element(Storage, N, M, (R+1)%N);
* length_take(K, contents);
* take_length_eq(K, rotate_left((R+1)%N, contents), abs);
* deq_value_lemma(K, (R+1)%N, contents, abs);
* }@*/
}
}
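
The div_lt/mod_lt steps above justify the wrap-around of the read pointer in modular terms. Stripped of the pointer arithmetic, the update is the plain-integer calculation below, shown only as an unverified illustration of why the postcondition's read index is (R+1)%N; the helper name prvNextReadIndex is ours.

#include <assert.h>
#include <stddef.h>

static size_t prvNextReadIndex( size_t R, size_t N )
{
    size_t xNext;

    assert( ( 0 < N ) && ( R < N ) ); /* a valid slot in an N-slot buffer */

    /* Mirror of the C code: advance one slot, then wrap at the end of
     * storage (pcReadFrom >= pcTail  ->  pcReadFrom = pcHead). */
    xNext = R + 1;

    if( xNext >= N )
    {
        xNext = 0;
    }

    assert( xNext == ( R + 1 ) % N ); /* the index used in the contract */
    return xNext;
}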
void caller_reinstates_queue_predicate( Queue_t * const pxQueue,
void * const pvBuffer )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
0 < K &*&
chars(pvBuffer, M, _);@*/
* 0 < K &*&
* chars(pvBuffer, M, _);@*/
/*@ensures
queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs)) &*&
chars(pvBuffer, M, head(abs));@*/
* queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs)) &*&
* chars(pvBuffer, M, head(abs));@*/
{
prvCopyDataFromQueue( pxQueue, pvBuffer );
/*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/

View file

@ -29,20 +29,22 @@
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
const void * pvItemToQueue,
const BaseType_t xPosition )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(K < N || xPosition == queueOVERWRITE) &*&
chars(pvItemToQueue, M, ?x) &*&
(xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
* (K < N || xPosition == queueOVERWRITE) &*&
* chars(pvItemToQueue, M, ?x) &*&
* (xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
(xPosition == queueSEND_TO_BACK
? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
: (xPosition == queueSEND_TO_FRONT
? (R == 0
? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
: queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
: xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
) &*&
chars(pvItemToQueue, M, x);@*/
* (xPosition == queueSEND_TO_BACK
* ? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
* : (xPosition == queueSEND_TO_FRONT
* ? (R == 0
* ? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
* : queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
* : xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
* ) &*&
* chars(pvItemToQueue, M, x);@*/
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;
@@ -74,27 +76,29 @@ static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
}
else if( xPosition == queueSEND_TO_BACK )
{
#ifdef VERIFAST /*< void cast of unused return value */
/* Now we focus the proof on the logical element of the buffer that
* will be updated using the following lemma to split the buffer into 3
* parts: a prefix, the element we want to update, and the suffix. This
* enables the subsequent memcpy to verify. */
/*@split_element(Storage, N, M, W);@*/
/*@assert
buffer(Storage, W, M, ?prefix) &*&
chars(Storage + W * M, M, _) &*&
buffer(Storage + (W + 1) * M, (N-1-W), M, ?suffix);@*/
memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
/* After the update we stitch the buffer back together */
/*@join_element(Storage, N, M, W);@*/
/*@combine_list_update(prefix, x, suffix, W, contents);@*/
#else
( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
#endif
/*@mul_mono_l(W, N-1, M);@*/
pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
#ifdef VERIFAST /*< void cast of unused return value */
if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
/* Now we focus the proof on the logical element of the buffer that
* will be updated using the following lemma to split the buffer into 3
* parts: a prefix, the element we want to update, and the suffix. This
* enables the subsequent memcpy to verify. */
/*@split_element(Storage, N, M, W);@*/
/*@assert
* buffer(Storage, W, M, ?prefix) &*&
* chars(Storage + W * M, M, _) &*&
* buffer(Storage + (W + 1) * M, (N-1-W), M, ?suffix);@*/
memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
/* After the update we stitch the buffer back together */
/*@join_element(Storage, N, M, W);@*/
/*@combine_list_update(prefix, x, suffix, W, contents);@*/
#else
( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
#endif /* ifdef VERIFAST */
/*@mul_mono_l(W, N-1, M);@*/
pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
{
/*@div_leq(N, W+1, M);@*/ /* now we know W == N-1 so (W+1)%N == 0 */
pxQueue->pcWriteTo = pxQueue->pcHead;
@@ -102,28 +106,29 @@ static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
else
{
/*@{
div_lt(W+1, N, M); // now we know W+1 < N
mod_lt(W+1, N); // so, W+1 == (W+1)%N
note(pxQueue->pcWriteTo == Storage + ((W + 1) * M));
note( Storage + ((W + 1) * M) == Storage + (((W + 1) % N) * M));
}@*/
* div_lt(W+1, N, M); // now we know W+1 < N
* mod_lt(W+1, N); // so, W+1 == (W+1)%N
* note(pxQueue->pcWriteTo == Storage + ((W + 1) * M));
* note( Storage + ((W + 1) * M) == Storage + (((W + 1) % N) * M));
* }@*/
mtCOVERAGE_TEST_MARKER();
}
}
else
{
#ifdef VERIFAST /*< void cast of unused return value */
/*@split_element(Storage, N, M, R);@*/
/*@assert
buffer(Storage, R, M, ?prefix) &*&
chars(Storage + R * M, M, _) &*&
buffer(Storage + (R + 1) * M, (N-1-R), M, ?suffix);@*/
memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
/*@join_element(Storage, N, M, R);@*/
/*@combine_list_update(prefix, x, suffix, R, contents);@*/
#else
( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@split_element(Storage, N, M, R);@*/
/*@assert
* buffer(Storage, R, M, ?prefix) &*&
* chars(Storage + R * M, M, _) &*&
* buffer(Storage + (R + 1) * M, (N-1-R), M, ?suffix);@*/
memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
/*@join_element(Storage, N, M, R);@*/
/*@combine_list_update(prefix, x, suffix, R, contents);@*/
#else
( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
#endif
pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
@@ -141,12 +146,12 @@ static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
}
/*@
if (R == 0)
{
mod_plus(N, (K+1), N); mod_same(N); mod_mod(K+1, N);
assert W == ((N-1) + 1 + (K+1)) % N;
}
@*/
* if (R == 0)
* {
* mod_plus(N, (K+1), N); mod_same(N); mod_mod(K+1, N);
* assert W == ((N-1) + 1 + (K+1)) % N;
* }
* @*/
if( xPosition == queueOVERWRITE )
{
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
@@ -171,20 +176,20 @@ static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
/*@
if (xPosition == queueSEND_TO_BACK)
{
enq_lemma(K, (R+1)%N, contents, abs, x);
mod_plus_one(W, R + 1 + K, N);
mod_plus_distr(R+1, K, N);
}
else if (xPosition == queueSEND_TO_FRONT)
{
front_enq_lemma(K, R, contents, abs, x);
if (0 < R)
{
mod_lt(R, N);
}
}
@*/
* if (xPosition == queueSEND_TO_BACK)
* {
* enq_lemma(K, (R+1)%N, contents, abs, x);
* mod_plus_one(W, R + 1 + K, N);
* mod_plus_distr(R+1, K, N);
* }
* else if (xPosition == queueSEND_TO_FRONT)
* {
* front_enq_lemma(K, R, contents, abs, x);
* if (0 < R)
* {
* mod_lt(R, N);
* }
* }
* @*/
return xReturn;
}
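The ensures clause of prvCopyDataToQueue pins down how each copy position moves the write cursor W, the read cursor R and the item count K. Below is a hedged, self-contained model of just that index bookkeeping; the struct and all sketch_* names are invented for illustration, and the real queue copies bytes rather than updating a toy struct.

#include <stdio.h>

/* Stand-ins for queueSEND_TO_BACK, queueSEND_TO_FRONT and queueOVERWRITE,
 * local to this sketch. */
enum { SKETCH_SEND_TO_BACK, SKETCH_SEND_TO_FRONT, SKETCH_OVERWRITE };

typedef struct { unsigned W, R, K, N; } sketch_queue_t;

static void sketch_copy_to_queue( sketch_queue_t * q,
                                  int position )
{
    if( position == SKETCH_SEND_TO_BACK )
    {
        q->W = ( q->W + 1U ) % q->N;                           /* append at the back: W' == (W+1)%N */
        q->K += 1U;
    }
    else if( position == SKETCH_SEND_TO_FRONT )
    {
        q->R = ( q->R == 0U ) ? ( q->N - 1U ) : ( q->R - 1U ); /* prepend: R' == N-1 or R-1 */
        q->K += 1U;
    }
    else /* SKETCH_OVERWRITE, only meaningful when N == 1 */
    {
        q->K = 1U;                                             /* the queue holds exactly the new item */
    }
}

int main( void )
{
    sketch_queue_t q = { 0U, 4U, 0U, 5U };                     /* W=0, R=4, K=0, N=5 */

    sketch_copy_to_queue( &q, SKETCH_SEND_TO_BACK );
    sketch_copy_to_queue( &q, SKETCH_SEND_TO_FRONT );
    printf( "W=%u R=%u K=%u\n", q.W, q.R, q.K );               /* prints W=1 R=3 K=2 */
    return 0;
}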

View file

@@ -46,11 +46,12 @@
void wrapper_prvLockQueue( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuelock(xQueue);@*/
* [1/2]queuelock(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
* [1/2]xQueue->locked |-> ?m &*&
* mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
* queue_locked_invariant(xQueue)();@*/
{
taskENTER_CRITICAL();
/*@open queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
@@ -67,7 +68,7 @@ void wrapper_prvLockQueue( QueueHandle_t xQueue )
}
/*@close queue(xQueue, Storage, N, M, W, R, K, true, abs);@*/
taskEXIT_CRITICAL();
#ifdef VERIFAST /*< ghost action */
mutex_acquire( xQueue->locked );
#endif
#ifdef VERIFAST /*< ghost action */
mutex_acquire( xQueue->locked );
#endif
}

View file

@@ -32,12 +32,14 @@
* decrementing `cTxLock` and `cRxLock`. */
static void prvUnlockQueue( Queue_t * const pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]pxQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(pxQueue), currentThread, 1/2) &*&
queue_locked_invariant(pxQueue)();@*/
* [1/2]pxQueue->locked |-> ?m &*&
* mutex_held(m, queue_locked_invariant(pxQueue), currentThread, 1/2) &*&
* queue_locked_invariant(pxQueue)();@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr) &*&
[1/2]queuelock(pxQueue);@*/
* [1/2]queuelock(pxQueue);@*/
{
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
@@ -125,12 +127,12 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cTxLock = queueUNLOCKED;
}
#ifndef VERIFAST /*< ***merge cTxLock and cRxLock critical regions*** */
taskEXIT_CRITICAL();
#ifndef VERIFAST /*< ***merge cTxLock and cRxLock critical regions*** */
taskEXIT_CRITICAL();
/* Do the same for the Rx lock. */
taskENTER_CRITICAL();
#endif
/* Do the same for the Rx lock. */
taskENTER_CRITICAL();
#endif
{
int8_t cRxLock = pxQueue->cRxLock;
@@ -160,7 +162,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, false, abs);@*/
taskEXIT_CRITICAL();
#ifdef VERIFAST /*< ghost action */
mutex_release( pxQueue->locked );
#endif
#ifdef VERIFAST /*< ghost action */
mutex_release( pxQueue->locked );
#endif
}
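prvUnlockQueue replays work that interrupt handlers deferred while the queue was locked, which is what the cTxLock/cRxLock handling above implements. The following self-contained toy model shows the idea of such a deferral counter; the SKETCH_* names, their values and the exact counting discipline are assumptions made for illustration rather than details taken from this excerpt.

#include <stdio.h>

#define SKETCH_UNLOCKED    ( ( signed char ) -1 ) /* plays the role of queueUNLOCKED */
#define SKETCH_LOCKED      ( ( signed char ) 0 )  /* queue locked, nothing deferred yet */

typedef struct { signed char cTxLock; } sketch_lock_t;

/* While the queue is locked, a send from an ISR only bumps the counter. */
static void sketch_isr_send( sketch_lock_t * q )
{
    if( q->cTxLock != SKETCH_UNLOCKED )
    {
        q->cTxLock++;
    }
}

/* Unlocking replays one deferred wake-up per count, then marks the queue unlocked. */
static int sketch_unlock( sketch_lock_t * q )
{
    int deferred = 0;

    while( q->cTxLock > SKETCH_LOCKED )
    {
        deferred++;
        q->cTxLock--;
    }

    q->cTxLock = SKETCH_UNLOCKED;
    return deferred;
}

int main( void )
{
    sketch_lock_t q = { SKETCH_LOCKED };

    sketch_isr_send( &q );
    sketch_isr_send( &q );
    printf( "deferred wake-ups replayed: %d\n", sketch_unlock( &q ) ); /* prints 2 */
    return 0;
}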

View file

@@ -41,11 +41,11 @@ UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
uxReturn = pxQueue->uxMessagesWaiting;

View file

@@ -32,11 +32,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );

View file

@@ -29,18 +29,19 @@
#define configSUPPORT_STATIC_ALLOCATION 0
void vQueueDelete( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
queuelists(xQueue) &*&
xQueue->irqMask |-> _ &*&
xQueue->schedulerSuspend |-> _ &*&
xQueue->locked |-> _;@*/
* queuelists(xQueue) &*&
* xQueue->irqMask |-> _ &*&
* xQueue->schedulerSuspend |-> _ &*&
* xQueue->locked |-> _;@*/
/*@ensures true;@*/
{
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
traceQUEUE_DELETE( pxQueue );
@@ -56,10 +57,10 @@ void vQueueDelete( QueueHandle_t xQueue )
/* The queue can only have been allocated dynamically - free it
* again. */
vPortFree( pxQueue );
#ifdef VERIFAST /*< leak ghost state on deletion */
/*@leak buffer(_, _, _, _);@*/
/*@leak malloc_block(_, _);@*/
#endif
#ifdef VERIFAST /*< leak ghost state on deletion */
/*@leak buffer(_, _, _, _);@*/
/*@leak malloc_block(_, _);@*/
#endif
}
#elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{

View file

@@ -31,45 +31,50 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
const void * const pvItemToQueue,
TickType_t xTicksToWait,
const BaseType_t xCopyPosition )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, ?x) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
* [1/2]queuesuspend(xQueue) &*&
* chars(pvItemToQueue, M, ?x) &*&
* (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, x);@*/
* [1/2]queuesuspend(xQueue) &*&
* chars(pvItemToQueue, M, x);@*/
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif /* ifdef VERIFAST */
/*lint -save -e904 This function relaxes the coding standard somewhat to
* allow return statements within the function itself. This is done in the
* interest of execution time efficiency. */
for( ; ; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, x) &*&
u_integer(&xTicksToWait, _) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1)) &*&
xTIME_OUT(&xTimeOut);@*/
* [1/2]queuesuspend(xQueue) &*&
* chars(pvItemToQueue, M, x) &*&
* u_integer(&xTicksToWait, _) &*&
* (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1)) &*&
* xTIME_OUT(&xTimeOut);@*/
{
taskENTER_CRITICAL();
{
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/* Is there room on the queue now? The running task must be the
* highest priority task wanting to access the queue. If the head item
* in the queue is to be overwritten then it does not matter if the
@@ -177,19 +182,19 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
#endif /* configUSE_QUEUE_SETS */
/*@
if (xCopyPosition == queueSEND_TO_BACK)
{
close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
}
else if (xCopyPosition == queueSEND_TO_FRONT)
{
close queue(pxQueue, Storage, N, M, W, (R == 0 ? (N-1) : (R-1)), (K+1), is_locked, cons(x, abs));
}
else if (xCopyPosition == queueOVERWRITE)
{
close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
}
@*/
* if (xCopyPosition == queueSEND_TO_BACK)
* {
* close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
* }
* else if (xCopyPosition == queueSEND_TO_FRONT)
* {
* close queue(pxQueue, Storage, N, M, W, (R == 0 ? (N-1) : (R-1)), (K+1), is_locked, cons(x, abs));
* }
* else if (xCopyPosition == queueOVERWRITE)
* {
* close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
* }
* @*/
taskEXIT_CRITICAL();
return pdPASS;
}
@@ -198,6 +203,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
if( xTicksToWait == ( TickType_t ) 0 )
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
/* The queue was full and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
@@ -220,6 +226,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
}
taskEXIT_CRITICAL();
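The xTimeOut and xTicksToWait bookkeeping carried through the loop invariant follows the usual FreeRTOS timeout idiom. A hedged sketch of that idiom, using the public vTaskSetTimeOutState/xTaskCheckForTimeOut API, is shown below; the operation being retried is elided and assumed.

#include "FreeRTOS.h"
#include "task.h"

/* Illustrative only: capture the entry time once (the role of xTimeOut and
 * xEntryTimeSet above), then on every wake-up ask the kernel whether the
 * caller's block time has expired; xTicksToWait is updated in place. */
static BaseType_t sketch_wait_with_timeout( TickType_t xTicksToWait )
{
    TimeOut_t xTimeOut;

    vTaskSetTimeOutState( &xTimeOut );

    for( ; ; )
    {
        /* ... try the queue operation here; on success return pdPASS ... */

        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
        {
            return errQUEUE_FULL; /* block time expired */
        }

        /* ... otherwise block for at most xTicksToWait ticks and loop ... */
    }
}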
@@ -263,24 +270,24 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
{
/* Try again. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
}
}
else
{
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
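For context, a hedged caller-side sketch of the contract verified above: xQueueGenericSend returns pdPASS on success and errQUEUE_FULL once the block time expires on a full queue. The queue handle, the item type and the 10-tick block time are assumptions of the example.

#include "FreeRTOS.h"
#include "queue.h"

static void sketch_producer( QueueHandle_t xQueue )
{
    uint32_t ulValue = 42UL;

    if( xQueueGenericSend( xQueue, &ulValue, ( TickType_t ) 10, queueSEND_TO_BACK ) != pdPASS )
    {
        /* Still full after 10 ticks: the call returned errQUEUE_FULL. */
    }
}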

View file

@@ -31,28 +31,30 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
const void * const pvItemToQueue,
BaseType_t * const pxHigherPriorityTaskWoken,
const BaseType_t xCopyPosition )
/*@requires
[1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
chars(pvItemToQueue, M, ?x) &*&
integer(pxHigherPriorityTaskWoken, _) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
* [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
* chars(pvItemToQueue, M, ?x) &*&
* integer(pxHigherPriorityTaskWoken, _) &*&
* (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
[1/2]queuehandle(xQueue, N, M, is_isr) &*&
chars(pvItemToQueue, M, x) &*&
integer(pxHigherPriorityTaskWoken, _);@*/
* [1/2]queuehandle(xQueue, N, M, is_isr) &*&
* chars(pvItemToQueue, M, x) &*&
* integer(pxHigherPriorityTaskWoken, _);@*/
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#endif
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#endif
/* RTOS ports that support interrupt nesting have the concept of a maximum
* system call (or maximum API call) interrupt priority. Interrupts that are
@@ -90,12 +92,12 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
* in a task disinheriting a priority and prvCopyDataToQueue() can be
* called here even though the disinherit function does not check if
* the scheduler is suspended before accessing the ready lists. */
#ifdef VERIFAST /*< void cast of unused return value */
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#else
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#else
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#endif
/*@open queue(pxQueue, _, N, M, _, _, _, _, _);@*/
/* The event list is not altered if the queue is locked. This will
@@ -189,9 +191,9 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
}
/* Not used in this path. */
#ifndef VERIFAST /*< void cast of unused var */
( void ) uxPreviousMessagesWaiting;
#endif
#ifndef VERIFAST /*< void cast of unused var */
( void ) uxPreviousMessagesWaiting;
#endif
}
#endif /* configUSE_QUEUE_SETS */
}
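A hedged sketch of the caller-side protocol for this ISR-safe send: the pxHigherPriorityTaskWoken flag starts at pdFALSE and, if the send unblocks a higher-priority task, the ISR requests a context switch on exit. portYIELD_FROM_ISR is the customary port-level macro for that last step and is assumed to be provided by the port in use.

#include "FreeRTOS.h"
#include "queue.h"

void sketch_isr_handler( QueueHandle_t xQueue )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint32_t ulEvent = 1UL;

    ( void ) xQueueGenericSendFromISR( xQueue, &ulEvent, &xHigherPriorityTaskWoken, queueSEND_TO_BACK );

    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}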
@@ -205,26 +207,27 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
}
xReturn = pdPASS;
/*@
if (xCopyPosition == queueSEND_TO_BACK)
{
close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
}
else if (xCopyPosition == queueSEND_TO_FRONT)
{
if (R == 0)
{
close queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs));
}
else
{
close queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs));
}
} else if (xCopyPosition == queueOVERWRITE)
{
close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
}
@*/
* if (xCopyPosition == queueSEND_TO_BACK)
* {
* close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
* }
* else if (xCopyPosition == queueSEND_TO_FRONT)
* {
* if (R == 0)
* {
* close queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs));
* }
* else
* {
* close queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs));
* }
* } else if (xCopyPosition == queueOVERWRITE)
* {
* close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
* }
* @*/
}
else
{

View file

@@ -28,16 +28,17 @@
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
result == ((K == 0) ? pdTRUE : pdFALSE);@*/
* result == ((K == 0) ? pdTRUE : pdFALSE);@*/
{
BaseType_t xReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );

View file

@@ -28,16 +28,17 @@
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
result == ((K == N) ? pdTRUE : pdFALSE);@*/
* result == ((K == N) ? pdTRUE : pdFALSE);@*/
{
BaseType_t xReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
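Per their contracts, the two ISR-safe predicates above reduce to checks on K, the number of items currently stored. A hedged usage sketch follows, with the queue handle and the surrounding interrupt handler assumed.

#include "FreeRTOS.h"
#include "queue.h"

void sketch_isr_poll( QueueHandle_t xQueue )
{
    if( xQueueIsQueueFullFromISR( xQueue ) == pdFALSE )
    {
        /* K < N: there is room for at least one more item. */
    }

    if( xQueueIsQueueEmptyFromISR( xQueue ) == pdFALSE )
    {
        /* K > 0: at least one item is waiting to be received. */
    }
}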

View file

@@ -30,45 +30,49 @@
BaseType_t xQueuePeek( QueueHandle_t xQueue,
void * const pvBuffer,
TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, ?x);@*/
* [1/2]queuesuspend(xQueue) &*&
* chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
* [1/2]queuesuspend(xQueue) &*&
* (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t * pcOriginalReadPosition;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
/* Check the pointer is not NULL. */
configASSERT( ( pxQueue ) );
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
/* The buffer into which data is received can only be NULL if the data size
* is zero (so no data is copied into the buffer). */
configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Check the pointer is not NULL. */
configASSERT( ( pxQueue ) );
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif
/* The buffer into which data is received can only be NULL if the data size
* is zero (so no data is copied into the buffer). */
configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif /* ifdef VERIFAST */
/*lint -save -e904 This function relaxes the coding standard somewhat to
* allow return statements within the function itself. This is done in the
* interest of execution time efficiency. */
for( ; ; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, x) &*&
u_integer(&xTicksToWait, _) &*&
xTIME_OUT(&xTimeOut);@*/
* [1/2]queuesuspend(xQueue) &*&
* chars(pvBuffer, M, x) &*&
* u_integer(&xTicksToWait, _) &*&
* xTIME_OUT(&xTimeOut);@*/
{
taskENTER_CRITICAL();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
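A hedged sketch of the behavioural difference the xQueuePeek and xQueueReceive contracts capture: peeking copies the head item while leaving the queue's contents unchanged (the read position is restored via pcOriginalReadPosition), whereas receiving removes it. The handle and the uint32_t item type are assumptions of the example.

#include "FreeRTOS.h"
#include "queue.h"

static void sketch_peek_then_receive( QueueHandle_t xQueue )
{
    uint32_t ulItem;

    if( xQueuePeek( xQueue, &ulItem, ( TickType_t ) 0 ) == pdPASS )
    {
        /* ulItem holds a copy of the head element; the queue is unchanged. */
    }

    if( xQueueReceive( xQueue, &ulItem, ( TickType_t ) 0 ) == pdPASS )
    {
        /* The same element has now been dequeued. */
    }
}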
@@ -178,12 +182,12 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
/* There is data in the queue now, so don't enter the blocked
* state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
}
}
else
@@ -191,12 +195,12 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
/* The timeout has expired. If there is still no data in the queue
* exit, otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{

View file

@@ -29,23 +29,26 @@
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
void * const pvBuffer )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
chars(pvBuffer, M, ?x);@*/
* chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
* (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t * pcOriginalReadPosition;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
#endif
/* RTOS ports that support interrupt nesting have the concept of a maximum
* system call (or maximum API call) interrupt priority. Interrupts that are

View file

@@ -30,44 +30,48 @@
BaseType_t xQueueReceive( QueueHandle_t xQueue,
void * const pvBuffer,
TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, ?x);@*/
* [1/2]queuesuspend(xQueue) &*&
* chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
* [1/2]queuesuspend(xQueue) &*&
* (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
/* Check the pointer is not NULL. */
configASSERT( ( pxQueue ) );
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
/* The buffer into which data is received can only be NULL if the data size
* is zero (so no data is copied into the buffer). */
configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Check the pointer is not NULL. */
configASSERT( ( pxQueue ) );
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif
/* The buffer into which data is received can only be NULL if the data size
* is zero (so no data is copied into the buffer). */
configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif /* ifdef VERIFAST */
/*lint -save -e904 This function relaxes the coding standard somewhat to
* allow return statements within the function itself. This is done in the
* interest of execution time efficiency. */
for( ; ; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, x) &*&
u_integer(&xTicksToWait, _) &*&
xTIME_OUT(&xTimeOut);@*/
* [1/2]queuesuspend(xQueue) &*&
* chars(pvBuffer, M, x) &*&
* u_integer(&xTicksToWait, _) &*&
* xTIME_OUT(&xTimeOut);@*/
{
taskENTER_CRITICAL();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
@@ -86,9 +90,10 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
/*@assert
pxQueue->pcHead |-> ?buffer &*&
buffer(buffer, N, M, ?contents);@*/
* pxQueue->pcHead |-> ?buffer &*&
* buffer(buffer, N, M, ?contents);@*/
/*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/
/* There is now space in the queue; were any tasks waiting to
* post to the queue? If so, unblock the highest priority waiting
* task. */
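The deq_lemma step corresponds to a simple abstract-state update: the caller's buffer receives head(abs), the queue keeps tail(abs), K drops by one and the read cursor advances to (R+1)%N. A self-contained toy model of that update follows; the struct, the stored values and all names are illustrative only.

#include <stdio.h>

typedef struct { unsigned R, K, N; int items[ 8 ]; } sketch_model_t;

/* head(abs) lives at index (R+1)%N; receiving returns it, advances the read
 * cursor and leaves the K-1 items of tail(abs) behind. */
static int sketch_receive( sketch_model_t * q )
{
    int head = q->items[ ( q->R + 1U ) % q->N ];

    q->R = ( q->R + 1U ) % q->N;
    q->K -= 1U;
    return head;
}

int main( void )
{
    sketch_model_t q = { 3U, 2U, 4U, { 11, 22 } }; /* N=4, R=3, K=2; head(abs) at index 0 */
    int head = sketch_receive( &q );

    printf( "head(abs) = %d, K now %u\n", head, q.K ); /* prints: head(abs) = 11, K now 1 */
    return 0;
}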
@@ -118,6 +123,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
if( xTicksToWait == ( TickType_t ) 0 )
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
@@ -176,12 +182,12 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
/* The queue contains data again. Loop back to try and read the
* data. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
}
}
else
@@ -189,12 +195,12 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
/* Timed out. If there is no data in the queue exit, otherwise loop
* back and attempt to read the data. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{

View file

@@ -30,23 +30,26 @@
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
void * const pvBuffer,
BaseType_t * const pxHigherPriorityTaskWoken )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
chars(pvBuffer, M, ?x) &*&
pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _);@*/
* chars(pvBuffer, M, ?x) &*&
* pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x)) &*&
(pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _));@*/
* (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x)) &*&
* (pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _));@*/
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
#endif
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
#endif
/* RTOS ports that support interrupt nesting have the concept of a maximum
* system call (or maximum API call) interrupt priority. Interrupts that are