Prove buffer lemmas (#124)

* Prove buffer lemmas

* Update queue proofs to latest kernel source

All changes were syntactic, resulting from uncrustify code formatting

* Strengthen prvCopyDataToQueue proof

* Add extract script for diff comparison

Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Nathan Chong 2020-07-21 12:51:20 -04:00 committed by GitHub
parent c720c18ada
commit 8e36bee30e
26 changed files with 2021 additions and 1762 deletions
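The extract script added by this commit is not part of the hunk shown below. As a rough, hypothetical sketch of the idea only (this is not the repository's actual script, and its behaviour here is an assumption): a filter that strips VeriFast annotation comments so an annotated proof source can be diffed against the upstream kernel file might look like this:

/* Hypothetical sketch only -- the commit's real extract script is not
 * shown in this excerpt. Idea: remove VeriFast annotation comments from
 * an annotated source file so it can be diffed against upstream. */
#include <stdio.h>
#include <string.h>

int main( void )
{
    char line[ 1024 ];
    int inAnnotation = 0; /* nonzero while inside a VeriFast annotation block */

    while( fgets( line, sizeof( line ), stdin ) != NULL )
    {
        const char * start = strstr( line, "/*@" );
        const char * end = strstr( line, "@*/" );

        if( inAnnotation )
        {
            if( end != NULL )
            {
                inAnnotation = 0; /* annotation block closes on this line */
            }

            continue; /* drop lines inside the annotation */
        }

        if( start != NULL )
        {
            if( end == NULL )
            {
                inAnnotation = 1; /* block continues on later lines */
            }

            continue; /* simplification: drop the whole line holding the marker */
        }

        fputs( line, stdout );
    }

    return 0;
}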


@@ -23,256 +23,263 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"
BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
const void * const pvItemToQueue,
TickType_t xTicksToWait,
const BaseType_t xCopyPosition )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, ?x) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, x);@*/
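/* Informal reading of the contract above: the [1/2] fractions of
 * queuehandle and queuesuspend grant shared access to the handle (with
 * is_isr == false recording that the caller is a task, not an ISR),
 * chars(pvItemToQueue, M, ?x) transfers ownership of the M-byte item
 * being sent, and the last conjunct restricts xCopyPosition to its
 * three legal values. The ensures clause hands the same resources
 * back, with the caller's item buffer unchanged. */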
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif
/*lint -save -e904 This function relaxes the coding standard somewhat to
* allow return statements within the function itself. This is done in the
* interest of execution time efficiency. */
for( ; ; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, x) &*&
u_integer(&xTicksToWait, _) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1)) &*&
xTIME_OUT(&xTimeOut);@*/
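/* The loop invariant above restates the resources that must be
 * available at the top of every iteration of the retry loop: the
 * shared handle fractions, the caller's item buffer, the local
 * xTicksToWait variable (u_integer), the xCopyPosition constraint,
 * and the xTimeOut structure. */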
{
taskENTER_CRITICAL();
{
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
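/* The assert above names the queue state made available inside the
 * critical section, binding the logical variables Storage, W (write
 * index), R (read index), K (number of items), is_locked and abs
 * (abstract contents) used by the close steps further down. */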
/* Is there room on the queue now? The running task must be the
* highest priority task wanting to access the queue. If the head item
* in the queue is to be overwritten then it does not matter if the
* queue is full. */
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
traceQUEUE_SEND( pxQueue );
/* VeriFast: we do not verify this configuration option */
#if ( configUSE_QUEUE_SETS == 1 )
{
const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
if( pxQueue->pxQueueSetContainer != NULL )
{
if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
{
/* Do not notify the queue set as an existing item
* was overwritten in the queue so the number of items
* in the queue has not changed. */
mtCOVERAGE_TEST_MARKER();
}
else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting
* to the queue set caused a higher priority task to
* unblock. A context switch is required. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* If there was a task waiting for data to arrive on the
* queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
* our own so yield immediately. Yes it is ok to
* do this from within the critical section - the
* kernel takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
* executed if the task was holding multiple mutexes
* and the mutexes were given back in an order that is
* different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#else /* configUSE_QUEUE_SETS */
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
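/* The close above repackages the queue fields into the queue
 * predicate consumed by prvCopyDataToQueue, whose (strengthened)
 * contract copies the item into Storage and reports whether a
 * yield is required. */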
/* If there was a task waiting for data to arrive on the
* queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
* our own so yield immediately. Yes it is ok to do
* this from within the critical section - the kernel
* takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
* executed if the task was holding multiple mutexes and
* the mutexes were given back in an order that is
* different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
/*@
if (xCopyPosition == queueSEND_TO_BACK)
{
close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
}
else if (xCopyPosition == queueSEND_TO_FRONT)
{
close queue(pxQueue, Storage, N, M, W, (R == 0 ? (N-1) : (R-1)), (K+1), is_locked, cons(x, abs));
}
else if (xCopyPosition == queueOVERWRITE)
{
close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
}
@*/
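/* Each branch above records the effect of the send on the abstract
 * state: queueSEND_TO_BACK advances the write index modulo N and
 * appends x, queueSEND_TO_FRONT moves the read index back one slot
 * (wrapping to N-1) and prepends x, and queueOVERWRITE keeps the
 * indices and replaces the single-slot contents with x. */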
taskEXIT_CRITICAL();
return pdPASS;
}
else
{
if( xTicksToWait == ( TickType_t ) 0 )
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
/* The queue was full and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
/* Return to the original privilege level before exiting
* the function. */
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
else if( xEntryTimeSet == pdFALSE )
{
/* The queue was full and a block time was specified so
* configure the timeout structure. */
vTaskInternalSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE;
}
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
}
taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
/*@close exists(pxQueue);@*/
vTaskSuspendAll();
prvLockQueue( pxQueue );
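/* The close exists(pxQueue) step presumably supplies the existential
 * witness expected by the vTaskSuspendAll contract, identifying which
 * queue's resources are in play while the scheduler is suspended. */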
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
if( prvIsQueueFull( pxQueue ) != pdFALSE )
{
traceBLOCKING_ON_QUEUE_SEND( pxQueue );
/*@open queue_locked_invariant(xQueue)();@*/
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
/* Unlocking the queue means queue events can affect the
* event list. It is possible that interrupts occurring now
* remove this task from the event list again - but as the
* scheduler is suspended the task will go onto the pending
* ready list instead of the actual ready list. */
/*@close queue_locked_invariant(xQueue)();@*/
prvUnlockQueue( pxQueue );
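/* While the queue is locked its state lives behind
 * queue_locked_invariant(xQueue): it is opened above so the task can
 * be placed on xTasksWaitingToSend, then closed again so
 * prvUnlockQueue can reclaim it. */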
/* Resuming the scheduler will move tasks from the pending
* ready list into the ready list - so it is feasible that this
* task is already in a ready list before it yields - in which
* case the yield will not cause a context switch unless there
* is also a higher priority task in the pending ready list. */
/*@close exists(pxQueue);@*/
if( xTaskResumeAll() == pdFALSE )
{
portYIELD_WITHIN_API();
}
}
else
{
/* Try again. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
}
}
else
{
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
} /*lint -restore */
}