Prove buffer lemmas ()

* Prove buffer lemmas

* Update queue proofs to latest kernel source

All changes were syntactic due to uncrustify code-formatting

* Strengthen prvCopyDataToQueue proof

* Add extract script for diff comparison

Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
This commit is contained in:
Nathan Chong
2020-07-21 12:51:20 -04:00
committed by GitHub
parent c720c18ada
commit 8e36bee30e
26 changed files with 2021 additions and 1762 deletions

@ -18,35 +18,35 @@ all: queue
.PHONY: queue .PHONY: queue
queue: queue:
@$(VERIFAST) $(VERIFAST_ARGS) queue/create.c | $(call check_coverage,269) @$(VERIFAST) $(VERIFAST_ARGS) queue/create.c | $(call check_coverage,317)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvCopyDataFromQueue.c | $(call check_coverage,253) @$(VERIFAST) $(VERIFAST_ARGS) queue/prvCopyDataFromQueue.c | $(call check_coverage,301)
@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/prvCopyDataToQueue.c | $(call check_coverage,280) @$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/prvCopyDataToQueue.c | $(call check_coverage,329)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueEmpty.c | $(call check_coverage,234) @$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueEmpty.c | $(call check_coverage,282)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueFull.c | $(call check_coverage,234) @$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueFull.c | $(call check_coverage,282)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvLockQueue.c | $(call check_coverage,235) @$(VERIFAST) $(VERIFAST_ARGS) queue/prvLockQueue.c | $(call check_coverage,283)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvUnlockQueue.c | $(call check_coverage,249) @$(VERIFAST) $(VERIFAST_ARGS) queue/prvUnlockQueue.c | $(call check_coverage,297)
@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueMessagesWaiting.c | $(call check_coverage,237) @$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueMessagesWaiting.c | $(call check_coverage,285)
@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueSpacesAvailable.c | $(call check_coverage,235) @$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueSpacesAvailable.c | $(call check_coverage,283)
@$(VERIFAST) $(VERIFAST_ARGS) queue/vQueueDelete.c | $(call check_coverage,232) @$(VERIFAST) $(VERIFAST_ARGS) queue/vQueueDelete.c | $(call check_coverage,280)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueGenericSend.c | $(call check_coverage,280) @$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueGenericSend.c | $(call check_coverage,328)
@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueGenericSendFromISR.c | $(call check_coverage,262) @$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueGenericSendFromISR.c | $(call check_coverage,310)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueEmptyFromISR.c | $(call check_coverage,232) @$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueEmptyFromISR.c | $(call check_coverage,280)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueFullFromISR.c | $(call check_coverage,232) @$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueFullFromISR.c | $(call check_coverage,280)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeek.c | $(call check_coverage,280) @$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeek.c | $(call check_coverage,328)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeekFromISR.c | $(call check_coverage,245) @$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeekFromISR.c | $(call check_coverage,293)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueReceive.c | $(call check_coverage,282) @$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueReceive.c | $(call check_coverage,330)
@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueReceiveFromISR.c | $(call check_coverage,259) @$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueReceiveFromISR.c | $(call check_coverage,307)
.PHONY: proof_changes .PHONY: proof_changes
proof_changes: proof_changes:
@git grep "if[n]*def VERIFAST" -- '*.c' | cut -f 3- -d ' ' | sort | uniq @git grep "if[n]*def VERIFAST" | cut -f 3- -d ' ' | sort | uniq
GIT?=git GIT?=git
NO_CHANGE_CHECKOUT_DIR=no-change-check-freertos-kernel NO_CHANGE_CHECKOUT_DIR=no-change-check-freertos-kernel
NO_CHANGE_EXPECTED_HASH=4a61f9ff7e2 NO_CHANGE_EXPECTED_HASH=587a83d6476
.PHONY: synced_with_source_check .PHONY: synced_with_source_check
synced_with_source_check: synced_with_source_check:
@rm -rf $(NO_CHANGE_CHECKOUT_DIR) @rm -rf $(NO_CHANGE_CHECKOUT_DIR)
@$(GIT) clone --shallow-since="30-06-2020" https://github.com/FreeRTOS/FreeRTOS-Kernel.git $(NO_CHANGE_CHECKOUT_DIR) @$(GIT) clone https://github.com/FreeRTOS/FreeRTOS-Kernel.git $(NO_CHANGE_CHECKOUT_DIR)
@cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) queue.c @cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) queue.c
@cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) include/queue.h @cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) include/queue.h

@ -49,7 +49,7 @@ and uncheck `Check arithmetic overflow`).
- `queue/xQueueReceiveFromISR.c` - `queue/xQueueReceiveFromISR.c`
A successful proof results in the top banner turning green with a statement A successful proof results in the top banner turning green with a statement
similar to: `0 errors found (286 statements verified)`. similar to: `0 errors found (328 statements verified)`.
## Proof checking a single proof at the command-line ## Proof checking a single proof at the command-line
@ -65,7 +65,7 @@ A successful proof results in output similar to:
``` ```
queue/xQueueGenericSend.c queue/xQueueGenericSend.c
0 errors found (286 statements verified) 0 errors found (328 statements verified)
``` ```
## Running proof regression ## Running proof regression
@ -87,7 +87,7 @@ $ VERIFAST=/path/to/verifast NO_COVERAGE=1 make
## Annotation burden ## Annotation burden
VeriFast can emit statistics about the number of source code lines and VeriFast can emit statistics about the number of source code lines and
annotations. annotations. These range from .3-2x annotations per line of source code.
``` ```
$ VERIFAST=/path/to/verifast ./scripts/annotation_overhead.sh $ VERIFAST=/path/to/verifast ./scripts/annotation_overhead.sh

@ -328,6 +328,22 @@ lemma void remove_remove_nth<t>(list<t> xs, t x)
} }
} }
/* Following lemma from `verifast/bin/rt/_list.java`. Renamed to
avoid clash with listex.c's nth_drop lemma. */
lemma void nth_drop2<t>(list<t> vs, int i)
requires 0 <= i && i < length(vs);
ensures nth(i, vs) == head(drop(i, vs));
{
switch (vs) {
case nil:
case cons(v, vs0):
if (i == 0) {
} else {
nth_drop2(vs0, i - 1);
}
}
}
lemma void enq_lemma<t>(int k, int i, list<t> xs, list<t> ys, t z) lemma void enq_lemma<t>(int k, int i, list<t> xs, list<t> ys, t z)
requires 0 <= k && 0 <= i && 0 < length(xs) && k < length(xs) && i < length(xs) && take(k, rotate_left(i, xs)) == ys; requires 0 <= k && 0 <= i && 0 < length(xs) && k < length(xs) && i < length(xs) && take(k, rotate_left(i, xs)) == ys;
ensures take(k+1, rotate_left(i, update((i+k)%length(xs), z, xs))) == append(ys, cons(z, nil)); ensures take(k+1, rotate_left(i, update((i+k)%length(xs), z, xs))) == append(ys, cons(z, nil));

@ -64,79 +64,94 @@ typedef ssize_t BaseType_t;
#define pvPortMalloc malloc #define pvPortMalloc malloc
#define vPortFree free #define vPortFree free
#define queueSEND_TO_BACK 0 #define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
#define queueSEND_TO_FRONT 1 #define queueSEND_TO_FRONT ( ( BaseType_t ) 1 )
#define queueOVERWRITE 2 #define queueOVERWRITE ( ( BaseType_t ) 2 )
#define pdTRUE 1 #define pdTRUE 1
#define pdFALSE 0 #define pdFALSE 0
#define pdPASS pdTRUE #define pdPASS pdTRUE
#define pdFAIL pdFALSE #define pdFAIL pdFALSE
#define errQUEUE_FULL 0 #define errQUEUE_FULL 0
#define errQUEUE_EMPTY 0 #define errQUEUE_EMPTY 0
#define queueUNLOCKED ( ( int8_t ) -1 ) /* Constants used with the cRxLock and cTxLock structure members. */
#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) #define queueUNLOCKED ( ( int8_t ) -1 )
#define queueINT8_MAX ( ( int8_t ) 127 ) #define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 )
#define queueINT8_MAX ( ( int8_t ) 127 )
typedef struct QueuePointers typedef struct QueuePointers
{ {
int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ int8_t * pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */ int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t; } QueuePointers_t;
typedef struct SemaphoreData typedef struct SemaphoreData
{ {
#ifdef VERIFAST /*< do not model xMutexHolder */ #ifdef VERIFAST /*< do not model xMutexHolder */
void *xMutexHolder; void *xMutexHolder;
#else #else
TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */ TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
#endif #endif
UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t; } SemaphoreData_t;
/* VeriFast does not support unions so we replace with a struct */ /* VeriFast does not support unions so we replace with a struct */
struct fake_union_t { struct fake_union_t {
QueuePointers_t xQueue; QueuePointers_t xQueue;
SemaphoreData_t xSemaphore; SemaphoreData_t xSemaphore;
}; };
typedef struct xLIST { typedef struct xLIST {
UBaseType_t uxNumberOfItems; UBaseType_t uxNumberOfItems;
#ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */ #ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
struct xLIST_ITEM *pxIndex; struct xLIST_ITEM *pxIndex;
MiniListItem_t xListEnd; MiniListItem_t xListEnd;
#endif #endif
} List_t; } List_t;
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
int8_t *pcHead; /*< Points to the beginning of the queue storage area. */ {
int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */ int8_t * pcHead; /*< Points to the beginning of the queue storage area. */
int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */
#ifdef VERIFAST /*< VeriFast does not model unions */ #ifdef VERIFAST /*< VeriFast does not model unions */
struct fake_union_t u; struct fake_union_t u;
#else #else
union union
{ {
QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */ QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */ SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
} u; } u;
#endif #endif
List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */ volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */
UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
/*@struct mutex *irqMask;@*/ /*< Ghost mutex simulates the effect of irq masking */ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
/*@struct mutex *schedulerSuspend;@*/ /*< Ghost mutex simulates the effect of scheduler suspension */ uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
/*@struct mutex *locked;@*/ /*< Ghost mutex simulates the effect of queue locking */ #endif
#if ( configUSE_QUEUE_SETS == 1 )
struct QueueDefinition * pxQueueSetContainer;
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxQueueNumber;
uint8_t ucQueueType;
#endif
/*@struct mutex *irqMask;@*/ /*< Ghost mutex simulates the effect of irq masking */
/*@struct mutex *schedulerSuspend;@*/ /*< Ghost mutex simulates the effect of scheduler suspension */
/*@struct mutex *locked;@*/ /*< Ghost mutex simulates the effect of queue locking */
} xQUEUE; } xQUEUE;
typedef xQUEUE Queue_t; typedef xQUEUE Queue_t;
@ -144,80 +159,180 @@ typedef xQUEUE Queue_t;
typedef struct QueueDefinition * QueueHandle_t; typedef struct QueueDefinition * QueueHandle_t;
/*@ /*@
#define QUEUE_SHAPE(q, Storage, N, M, K) \ #define QUEUE_SHAPE(q, Storage, N, M, K) \
malloc_block_QueueDefinition(q) &*& \ malloc_block_QueueDefinition(q) &*& \
q->pcHead |-> Storage &*& \ q->pcHead |-> Storage &*& \
q->pcWriteTo |-> ?WPtr &*& \ q->pcWriteTo |-> ?WPtr &*& \
q->u.xQueue.pcTail |-> ?End &*& \ q->u.xQueue.pcTail |-> ?End &*& \
q->u.xQueue.pcReadFrom |-> ?RPtr &*& \ q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
q->uxItemSize |-> M &*& \ q->uxItemSize |-> M &*& \
q->uxLength |-> N &*& \ q->uxLength |-> N &*& \
q->uxMessagesWaiting |-> K &*& \ q->uxMessagesWaiting |-> K &*& \
q->cRxLock |-> ?rxLock &*& \ q->cRxLock |-> ?rxLock &*& \
q->cTxLock |-> ?txLock &*& \ q->cTxLock |-> ?txLock &*& \
struct_QueuePointers_padding(&q->u.xQueue) &*& \ struct_QueuePointers_padding(&q->u.xQueue) &*& \
struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \ struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
struct_fake_union_t_padding(&q->u) &*& \ struct_fake_union_t_padding(&q->u) &*& \
struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \ struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \ struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
q->u.xSemaphore.xMutexHolder |-> _ &*& \ q->u.xSemaphore.xMutexHolder |-> _ &*& \
q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \ q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
true true
predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) = predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*& QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*& 0 < N &*&
0 < M &*& 0 < M &*&
0 <= W &*& W < N &*& 0 <= W &*& W < N &*&
0 <= R &*& R < N &*& 0 <= R &*& R < N &*&
0 <= K &*& K <= N &*& 0 <= K &*& K <= N &*&
W == (R + 1 + K) % N &*& W == (R + 1 + K) % N &*&
(-1) <= rxLock &*& (-1) <= rxLock &*&
(-1) <= txLock &*& (-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*& (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*& (is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*& WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*& RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*& End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*& buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*& length(contents) == N &*&
abs == take(K, rotate_left((R+1)%N, contents)) &*& abs == take(K, rotate_left((R+1)%N, contents)) &*&
malloc_block(Storage, N*M) &*& malloc_block(Storage, N*M) &*&
true true
; ;
@*/
/* A buffer allows us to interpret a flat character array of `N*M` bytes as a
list of `N` elements where each element is `M` bytes */
/*@
predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) = predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
N == 0 N == 0
? elements == nil ? elements == nil
: chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs); : chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
// TODO: buffer_from_chars proof lemma void buffer_length(char *buffer, size_t N, size_t M)
lemma void buffer_from_chars(char *buffer, size_t N, size_t M); requires buffer(buffer, N, M, ?elements);
requires chars(buffer, N*M, _); ensures buffer(buffer, N, M, elements) &*& length(elements) == N;
{
if (N == 0) {
open buffer(buffer, N, M, elements);
close buffer(buffer, N, M, elements);
} else {
open buffer(buffer, N, M, elements);
buffer_length(buffer+M, N-1, M);
}
}
@*/
/*
There is no need in the queue proofs to preserve a relationship between `cs`
and `elements` (i.e., `flatten(elements) == cs`) because we only move in one
direction from `cs` to `elements` during queue creation when the contents is
fresh from `malloc` (i.e., uninitialized). If we needed to do a roundtrip from
elements back to cs then this would require a stronger lemma.
*/
/*@
lemma void buffer_from_chars(char *buffer, size_t N, size_t M)
requires chars(buffer, N*M, ?cs) &*& 0 <= N &*& 0 < M;
ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N; ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
{
if (N == 0) {
close exists(nil);
} else {
int i = 0;
while (i < N)
invariant 0 <= i &*& i <= N &*&
chars(buffer, (N-i)*M, ?xs) &*& xs == take((N-i)*M, cs) &*&
buffer(buffer + (N-i)*M, i, M, ?ys);
decreases N-i;
{
mul_mono_l(0, N-i-1, M);
chars_split(buffer, (N-i-1)*M);
mul_mono_l(i, N, M);
mul_mono_l(N-i, N, M);
take_take((N-i-1)*M, (N-i)*M, cs);
i++;
}
close exists(ys);
buffer_length(buffer, N, M);
}
}
// TODO: split_element proof lemma void append_buffer(char *buffer, size_t N1, size_t N2, size_t M)
lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i);
requires buffer(buffer, N, M, ?elements) &*& i < N;
ensures
buffer(buffer, i, M, take(i, elements)) &*&
chars(buffer + i * M, M, nth(i, elements)) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
// TODO: join_element proof
lemma void join_element(char *buffer, size_t N, size_t M, size_t i);
requires requires
buffer(buffer, i, M, ?prefix) &*& buffer(buffer, N1, M, ?elements1) &*&
chars(buffer + i * M, M, ?element) &*& buffer(buffer + N1 * M, N2, M, ?elements2) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix); 0 <= N1 &*& 0 <= N2;
ensures buffer(buffer, N1+N2, M, append(elements1, elements2));
{
if (N1 == 0) {
open buffer(buffer, 0, M, _);
} else if (N2 == 0) {
open buffer(buffer + N1 * M, 0, M, _);
} else {
open buffer(buffer, N1, M, elements1);
append_buffer(buffer + M, N1-1, N2, M);
close buffer(buffer, N1+N2, M, cons(?x, append(xs, elements2)));
}
}
lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i)
requires buffer(buffer, N, M, ?elements) &*& 0 <= i &*& i < N;
ensures
buffer(buffer, i, M, take(i, elements)) &*&
chars(buffer + i * M, M, nth(i, elements)) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
{
if (i == 0) {
// straightforward
} else {
buffer_length(buffer, N, M);
int j = 0;
while (j < i)
invariant 0 <= j &*& j <= i &*&
buffer(buffer, j, M, take(j, elements)) &*&
buffer(buffer + j * M, N-j, M, drop(j, elements));
decreases i-j;
{
drop_drop(1, j, elements);
nth_drop2(elements, j);
open buffer(buffer + j * M, N-j, M, drop(j, elements));
assert chars(buffer + j * M, M, ?x) &*& x == nth(j, elements);
close buffer(buffer + j * M, 1, M, singleton(x));
append_buffer(buffer, j, 1, M);
take_plus_one(j, elements);
j++;
}
drop_drop(1, j, elements);
nth_drop2(elements, i);
open buffer(buffer + (i+1) * M, (N-1-i), M, _);
}
}
lemma void join_element(char *buffer, size_t N, size_t M, size_t i)
requires
0 <= i &*& i < N &*&
buffer(buffer, i, M, ?prefix) &*&
chars(buffer + i * M, M, ?element) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
ensures buffer(buffer, N, M, append(prefix, cons(element, suffix))); ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
{
if (i == 0) {
open buffer(buffer, i, M, prefix);
assert prefix == nil;
close buffer(buffer, N, M, cons(element, suffix));
} else {
close buffer(buffer + i * M, N-i, M, cons(element, suffix));
append_buffer(buffer, i, N-i, M);
}
}
predicate list(List_t *l;) = predicate list(List_t *l;) =
l->uxNumberOfItems |-> _; l->uxNumberOfItems |-> _;
predicate queuelists(QueueHandle_t q;) = predicate queuelists(QueueHandle_t q;) =
list(&q->xTasksWaitingToSend) &*& list(&q->xTasksWaitingToSend) &*&
list(&q->xTasksWaitingToReceive); list(&q->xTasksWaitingToReceive);
@*/ @*/
/* Because prvCopyDataFromQueue does *not* decrement uxMessagesWaiting (K) the /* Because prvCopyDataFromQueue does *not* decrement uxMessagesWaiting (K) the
@ -225,44 +340,44 @@ queue predicate above does not hold as a postcondition. If the caller
subsequently decrements K then the queue predicate can be reinstated. */ subsequently decrements K then the queue predicate can be reinstated. */
/*@ /*@
predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) = predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*& QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*& 0 < N &*&
0 < M &*& 0 < M &*&
0 <= W &*& W < N &*& 0 <= W &*& W < N &*&
0 <= R &*& R < N &*& 0 <= R &*& R < N &*&
0 <= K &*& K <= N &*& 0 <= K &*& K <= N &*&
W == (R + K) % N &*& //< Differs from queue predicate W == (R + K) % N &*& //< Differs from queue predicate
(-1) <= rxLock &*& (-1) <= rxLock &*&
(-1) <= txLock &*& (-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*& (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*& (is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*& WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*& RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*& End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*& buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*& length(contents) == N &*&
abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
malloc_block(Storage, N*M) &*& malloc_block(Storage, N*M) &*&
true true
; ;
@*/ @*/
/* Can't be called `mutex` as this clashes with VeriFast's predicate */ /* Can't be called `mutex` as this clashes with VeriFast's predicate */
/*@ /*@
predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) = predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
QUEUE_SHAPE(q, Storage, N, 0, K) &*& QUEUE_SHAPE(q, Storage, N, 0, K) &*&
queuelists(q) &*& queuelists(q) &*&
0 < N &*& 0 < N &*&
0 <= K &*& K <= N &*& 0 <= K &*& K <= N &*&
(-1) <= rxLock &*& (-1) <= rxLock &*&
(-1) <= txLock &*& (-1) <= txLock &*&
WPtr == Storage &*& WPtr == Storage &*&
RPtr == Storage &*& RPtr == Storage &*&
End == Storage &*& End == Storage &*&
malloc_block(Storage, 0) &*& malloc_block(Storage, 0) &*&
chars(Storage, 0, _) &*& chars(Storage, 0, _) &*&
true true
; ;
@*/ @*/
/* A queuehandle can be shared between tasks and ISRs. Acquiring the ghost /* A queuehandle can be shared between tasks and ISRs. Acquiring the ghost
@ -272,32 +387,32 @@ after masking interrupts depends on the caller:
- An ISR has access to the queue and, if the queue is unlocked, the queuelists */ - An ISR has access to the queue and, if the queue is unlocked, the queuelists */
/*@ /*@
predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) = predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr)); q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() = predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*& queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_isr && is_locked ? true : queuelists(queue)); (is_isr && is_locked ? true : queuelists(queue));
@*/ @*/
/* A queuesuspend can be shared between tasks. Acquiring the ghost `schedulerSuspend` gives access to the `locked` mutex. */ /* A queuesuspend can be shared between tasks. Acquiring the ghost `schedulerSuspend` gives access to the `locked` mutex. */
/*@ /*@
predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() = predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
queue->locked |-> ?m &*& queue->locked |-> ?m &*&
mutex(m, queue_locked_invariant(queue)); mutex(m, queue_locked_invariant(queue));
predicate queuesuspend(QueueHandle_t q;) = predicate queuesuspend(QueueHandle_t q;) =
q->schedulerSuspend |-> ?m &*& q->schedulerSuspend |-> ?m &*&
mutex(m, scheduler_suspended_invariant(q)); mutex(m, scheduler_suspended_invariant(q));
@*/ @*/
/* A queuelock is exclusively acquired by a task. Acquiring the ghost `queuelock` gives access to the queue list resources. */ /* A queuelock is exclusively acquired by a task. Acquiring the ghost `queuelock` gives access to the queue list resources. */
/*@ /*@
predicate queuelock(QueueHandle_t q;) = predicate queuelock(QueueHandle_t q;) =
q->locked |-> ?m &*& q->locked |-> ?m &*&
mutex(m, queue_locked_invariant(q)); mutex(m, queue_locked_invariant(q));
predicate_ctor queue_locked_invariant(QueueHandle_t queue)() = predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =
queuelists(queue); queuelists(queue);
@*/ @*/
BaseType_t vListInitialise(List_t *list); BaseType_t vListInitialise(List_t *list);
@ -310,15 +425,15 @@ BaseType_t listLIST_IS_EMPTY(List_t *list);
typedef struct xTIME_OUT typedef struct xTIME_OUT
{ {
BaseType_t xOverflowCount; BaseType_t xOverflowCount;
TickType_t xTimeOnEntering; TickType_t xTimeOnEntering;
} TimeOut_t; } TimeOut_t;
/*@ /*@
predicate xTIME_OUT(struct xTIME_OUT *to;) = predicate xTIME_OUT(struct xTIME_OUT *to;) =
to->xOverflowCount |-> _ &*& to->xOverflowCount |-> _ &*&
to->xTimeOnEntering |-> _ &*& to->xTimeOnEntering |-> _ &*&
struct_xTIME_OUT_padding(to); struct_xTIME_OUT_padding(to);
@*/ @*/
void vTaskInternalSetTimeOutState( TimeOut_t * x); void vTaskInternalSetTimeOutState( TimeOut_t * x);
@ -343,60 +458,60 @@ void vTaskMissedYield();
void vTaskSuspendAll(); void vTaskSuspendAll();
/*@requires exists<QueueHandle_t>(?xQueue) &*& /*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*& [1/2]xQueue->schedulerSuspend |-> ?m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/ [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*& /*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*& mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*& xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/ mutex(m2, queue_locked_invariant(xQueue));@*/
BaseType_t xTaskResumeAll( void ); BaseType_t xTaskResumeAll( void );
/*@requires exists<QueueHandle_t>(?xQueue) &*& /*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*& [1/2]xQueue->schedulerSuspend |-> ?m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*& mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*& xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/ mutex(m2, queue_locked_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*& /*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/ [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
void prvLockQueue( QueueHandle_t xQueue ); void prvLockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*& /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuelock(xQueue); @*/ [1/2]queuelock(xQueue); @*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*& /*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]xQueue->locked |-> ?m &*& [1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*& mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/ queue_locked_invariant(xQueue)();@*/
void prvUnlockQueue( QueueHandle_t xQueue ); void prvUnlockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*& /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]xQueue->locked |-> ?m &*& [1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*& mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/ queue_locked_invariant(xQueue)();@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*& /*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuelock(xQueue);@*/ [1/2]queuelock(xQueue);@*/
void setInterruptMask(QueueHandle_t xQueue) void setInterruptMask(QueueHandle_t xQueue)
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/ /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*& /*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*& mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*& queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
queuelists(xQueue);@*/ queuelists(xQueue);@*/
{ {
/*@open queuehandle(xQueue, N, M, is_isr);@*/ /*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask); mutex_acquire(xQueue->irqMask);
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/ /*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
} }
void clearInterruptMask(QueueHandle_t xQueue) void clearInterruptMask(QueueHandle_t xQueue)
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& /*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*& [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*& mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
queuelists(xQueue);@*/ queuelists(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, false);@*/ /*@ensures [1/2]queuehandle(xQueue, N, M, false);@*/
{ {
/*@close irqs_masked_invariant(xQueue, N, M, false)();@*/ /*@close irqs_masked_invariant(xQueue, N, M, false)();@*/
mutex_release(xQueue->irqMask); mutex_release(xQueue->irqMask);
/*@close [1/2]queuehandle(xQueue, N, M, false);@*/ /*@close [1/2]queuehandle(xQueue, N, M, false);@*/
} }
#define taskENTER_CRITICAL() setInterruptMask(xQueue) #define taskENTER_CRITICAL() setInterruptMask(xQueue)
@ -407,26 +522,26 @@ void clearInterruptMask(QueueHandle_t xQueue)
UBaseType_t setInterruptMaskFromISR(QueueHandle_t xQueue) UBaseType_t setInterruptMaskFromISR(QueueHandle_t xQueue)
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true;@*/ /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*& /*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*& mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*& queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_locked ? true : queuelists(xQueue));@*/ (is_locked ? true : queuelists(xQueue));@*/
{ {
/*@open queuehandle(xQueue, N, M, is_isr);@*/ /*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask); mutex_acquire(xQueue->irqMask);
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/ /*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
return 0; return 0;
} }
void clearInterruptMaskFromISR(QueueHandle_t xQueue, UBaseType_t uxSavedInterruptStatus) void clearInterruptMaskFromISR(QueueHandle_t xQueue, UBaseType_t uxSavedInterruptStatus)
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& /*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*& [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*& mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
(is_locked ? true : queuelists(xQueue));@*/ (is_locked ? true : queuelists(xQueue));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, true);@*/ /*@ensures [1/2]queuehandle(xQueue, N, M, true);@*/
{ {
/*@close irqs_masked_invariant(xQueue, N, M, true)();@*/ /*@close irqs_masked_invariant(xQueue, N, M, true)();@*/
mutex_release(xQueue->irqMask); mutex_release(xQueue->irqMask);
/*@close [1/2]queuehandle(xQueue, N, M, true);@*/ /*@close [1/2]queuehandle(xQueue, N, M, true);@*/
} }
#define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR(xQueue) #define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR(xQueue)

@ -28,23 +28,23 @@
void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ); void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/ /*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*& /*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
chars(pvBuffer, M, head(abs));@*/ chars(pvBuffer, M, head(abs));@*/
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ); BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& /*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(K < N || xPosition == queueOVERWRITE) &*& (K < N || xPosition == queueOVERWRITE) &*&
chars(pvItemToQueue, M, ?x) &*& chars(pvItemToQueue, M, ?x) &*&
(xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/ (xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures /*@ensures
(xPosition == queueSEND_TO_BACK (xPosition == queueSEND_TO_BACK
? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x))) ? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
: (xPosition == queueSEND_TO_FRONT : (xPosition == queueSEND_TO_FRONT
? (R == 0 ? (R == 0
? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs)) ? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
: queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs))) : queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
: xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x))) : xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
) &*& ) &*&
chars(pvItemToQueue, M, x);@*/ chars(pvItemToQueue, M, x);@*/
BaseType_t prvIsQueueEmpty( Queue_t * pxQueue ); BaseType_t prvIsQueueEmpty( Queue_t * pxQueue );
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/ /*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/

@ -2,24 +2,27 @@
In the queue predicates and proofs we use the following variable names. In the queue predicates and proofs we use the following variable names.
- `N` : the queue length (i.e., the maximum number of items the queue can - `Storage` : The concrete queue storage of `N*M` bytes. The `buffer`
store) predicate, defined in `include/proof/queue.h` allows us to treat the
- `M` : the size in bytes of each element storage as a list `contents` of `N` items, each of which is `M` bytes.
- `W` : the logical index of the write pointer, necessarily between `0..(N-1)` - `N` : queue length (i.e., the maximum number of items the queue can store)
- `R` : the logical index of the read pointer, necessarily between `0..(N-1)` - `M` : size in bytes of each element
- `K` : the number of items currently in the queue - `W` : logical index of the write pointer, necessarily between
`0..(N-1)` such that the write pointer `pcWriteTo == Storage + W * M`.
Consequently, the size of the concrete queue storage is `N*M` bytes. The - `R` : logical index of the read pointer, necessarily between
`buffer` predicate, defined in `include/proof/queue.h` allows us to treat the `0..(N-1)` such that the read pointer `pcReadFrom == Storage + R * M`.
queue storage as a list `contents` of `N` items, each of which is `M` bytes. - `K` : number of items currently in the queue corresponding to
`uxMessagesWaiting`
The `queue` predicate, defined in `include/proof/queue.h`, relates the concrete The `queue` predicate, defined in `include/proof/queue.h`, relates the concrete
representation to an abstract list `abs` of `K` items. More precisely, the main queue storage to an abstract list `abs` of `K` items. More precisely, the key
queue invariant is: queue invariant is:
``` ```
abs == take(K, rotate_left((R+1)%N, contents)) abs == take(K, rotate_left((R+1)%N, contents)) &*&
W == (R + 1 + K) % N
``` ```
where `(R+1)%N` is the front of the queue, `rotate_left` allows for the where `(R+1)%N` is the front of the queue, `W` is the back of the queue,
wraparound of queue storage, and `take` gives the first `K` elements. `rotate_left` allows for the wraparound of queue storage, and `take` gives the
first `K` elements.

@ -23,236 +23,245 @@
#include "proof/queue.h" #include "proof/queue.h"
/* Simplifying assumption: we do not verify queue initialisation in a /* Simplifying assumption: we do not verify queue initialisation in a
concurrent environment. We assume the queue initialization (including reset) * concurrent environment. We assume the queue initialization (including reset)
happens-before all concurrent send/receives. */ * happens-before all concurrent send/receives. */
#ifdef VERIFAST /*< ***xQueueGenericReset happens-before concurrent behavior*** */ #ifdef VERIFAST /*< ***xQueueGenericReset happens-before concurrent behavior*** */
#define taskENTER_CRITICAL() #define taskENTER_CRITICAL()
#define taskEXIT_CRITICAL() #define taskEXIT_CRITICAL()
#endif #endif
/* The following intermediate queue predicates summarise states used by queue /* The following intermediate queue predicates summarise states used by queue
initialization but not used elsewhere so we confine them to these proofs * initialization but not used elsewhere so we confine them to these proofs
locally. */ * locally. */
/*@ /*@
predicate queue_init1(QueueHandle_t q;) = predicate queue_init1(QueueHandle_t q;) =
QUEUE_SHAPE(q, _, _, _, _) &*& QUEUE_SHAPE(q, _, _, _, _) &*&
queuelists(q) queuelists(q)
; ;
predicate queue_init2(QueueHandle_t q, int8_t *Storage, size_t N, size_t M;) = predicate queue_init2(QueueHandle_t q, int8_t *Storage, size_t N, size_t M;) =
QUEUE_SHAPE(q, Storage, N, M, _) &*& QUEUE_SHAPE(q, Storage, N, M, _) &*&
queuelists(q) &*& queuelists(q) &*&
0 < N &*& 0 < N &*&
chars(Storage, (N*M), _) &*& chars(Storage, (N*M), _) &*&
malloc_block(Storage, N*M) &*& malloc_block(Storage, N*M) &*&
Storage + N * M <= (int8_t *)UINTPTR_MAX &*& Storage + N * M <= (int8_t *)UINTPTR_MAX &*&
true true
; ;
@*/ @*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
BaseType_t xNewQueue )
/*@requires queue_init2(xQueue, ?Storage, ?N, ?M);@*/ /*@requires queue_init2(xQueue, ?Storage, ?N, ?M);@*/
/*@ensures 0 == M /*@ensures 0 == M
? freertos_mutex(xQueue, Storage, N, 0) ? freertos_mutex(xQueue, Storage, N, 0)
: queue(xQueue, Storage, N, M, 0, (N-1), 0, false, nil) &*& queuelists(xQueue);@*/ : queue(xQueue, Storage, N, M, 0, (N-1), 0, false, nil) &*& queuelists(xQueue);@*/
{ {
#ifdef VERIFAST /*< const pointer declaration */ #ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue; Queue_t * pxQueue = xQueue;
#else #else
Queue_t * const pxQueue = xQueue; Queue_t * const pxQueue = xQueue;
#endif #endif
configASSERT( pxQueue ); configASSERT( pxQueue );
taskENTER_CRITICAL(); taskENTER_CRITICAL();
{ {
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
pxQueue->pcWriteTo = pxQueue->pcHead; pxQueue->pcWriteTo = pxQueue->pcHead;
/*@mul_mono_l(0, N-1, M);@*/ /*@mul_mono_l(0, N-1, M);@*/
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
pxQueue->cRxLock = queueUNLOCKED; pxQueue->cRxLock = queueUNLOCKED;
pxQueue->cTxLock = queueUNLOCKED; pxQueue->cTxLock = queueUNLOCKED;
if( xNewQueue == pdFALSE ) if( xNewQueue == pdFALSE )
{ {
/* If there are tasks blocked waiting to read from the queue, then /* If there are tasks blocked waiting to read from the queue, then
the tasks will remain blocked as after this function exits the queue * the tasks will remain blocked as after this function exits the queue
will still be empty. If there are tasks blocked waiting to write to * will still be empty. If there are tasks blocked waiting to write to
the queue, then one should be unblocked as after this function exits * the queue, then one should be unblocked as after this function exits
it will be possible to write to it. */ * it will be possible to write to it. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{ {
queueYIELD_IF_USING_PREEMPTION(); queueYIELD_IF_USING_PREEMPTION();
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
/* Ensure the event queues start in the correct state. */ /* Ensure the event queues start in the correct state. */
vListInitialise( &( pxQueue->xTasksWaitingToSend ) ); vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
} }
/* Logically, we move from a flat character array of `N*M` bytes (using
the `chars` predicate) to an array of `N` elements where each element
is `M` bytes (using the `buffer` predicate) */
/*@if (M != 0) { buffer_from_chars(pxQueue->pcHead, N, M); }@*/
}
taskEXIT_CRITICAL();
/* A value is returned for calling semantic consistency with previous /*@if (M != 0) { buffer_from_chars(pxQueue->pcHead, N, M); }@*/
versions. */ }
return pdPASS; taskEXIT_CRITICAL();
/* A value is returned for calling semantic consistency with previous
* versions. */
return pdPASS;
} }
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
const UBaseType_t uxItemSize,
uint8_t * pucQueueStorage,
const uint8_t ucQueueType,
Queue_t * pxNewQueue )
/*@requires queue_init1(pxNewQueue) &*& /*@requires queue_init1(pxNewQueue) &*&
0 < uxQueueLength &*& 0 < uxItemSize &*& 0 < uxQueueLength &*& 0 < uxItemSize &*&
malloc_block(pucQueueStorage, uxQueueLength * uxItemSize) &*& malloc_block(pucQueueStorage, uxQueueLength * uxItemSize) &*&
pucQueueStorage + uxQueueLength * uxItemSize <= (uint8_t *)UINTPTR_MAX &*& pucQueueStorage + uxQueueLength * uxItemSize <= (uint8_t *)UINTPTR_MAX &*&
uchars(pucQueueStorage, uxQueueLength * uxItemSize,_);@*/ uchars(pucQueueStorage, uxQueueLength * uxItemSize,_);@*/
/*@ensures queue(pxNewQueue, ((int8_t *)(void *)pucQueueStorage), uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*& /*@ensures queue(pxNewQueue, ((int8_t *)(void *)pucQueueStorage), uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
queuelists(pxNewQueue);@*/ queuelists(pxNewQueue);@*/
{ {
#ifndef VERIFAST /*< void cast of unused var */ #ifndef VERIFAST /*< void cast of unused var */
/* Remove compiler warnings about unused parameters should /* Remove compiler warnings about unused parameters should
configUSE_TRACE_FACILITY not be set to 1. */ * configUSE_TRACE_FACILITY not be set to 1. */
( void ) ucQueueType; ( void ) ucQueueType;
#endif #endif
if( uxItemSize == ( UBaseType_t ) 0 ) if( uxItemSize == ( UBaseType_t ) 0 )
{ {
/* No RAM was allocated for the queue storage area, but PC head cannot /* No RAM was allocated for the queue storage area, but PC head cannot
be set to NULL because NULL is used as a key to say the queue is used as * be set to NULL because NULL is used as a key to say the queue is used as
a mutex. Therefore just set pcHead to point to the queue as a benign * a mutex. Therefore just set pcHead to point to the queue as a benign
value that is known to be within the memory map. */ * value that is known to be within the memory map. */
#ifdef VERIFAST /*< stricter casting */ #ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pxNewQueue; pxNewQueue->pcHead = ( int8_t * ) ( void * ) pxNewQueue;
#else #else
pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
#endif #endif
} }
else else
{ {
/* Set the head to the start of the queue storage area. */ /* Set the head to the start of the queue storage area. */
#ifdef VERIFAST /*< stricter casting */ #ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pucQueueStorage; pxNewQueue->pcHead = ( int8_t * ) ( void * ) pucQueueStorage;
#else #else
pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage; pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
#endif #endif
} }
/* Initialise the queue members as described where the queue type is /* Initialise the queue members as described where the queue type is
defined. */ * defined. */
pxNewQueue->uxLength = uxQueueLength; pxNewQueue->uxLength = uxQueueLength;
pxNewQueue->uxItemSize = uxItemSize; pxNewQueue->uxItemSize = uxItemSize;
/*@close queue_init2(pxNewQueue, _, uxQueueLength, uxItemSize);@*/ /*@close queue_init2(pxNewQueue, _, uxQueueLength, uxItemSize);@*/
#ifdef VERIFAST /*< void cast of unused return value */ #ifdef VERIFAST /*< void cast of unused return value */
xQueueGenericReset( pxNewQueue, pdTRUE ); xQueueGenericReset( pxNewQueue, pdTRUE );
#else #else
( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
#endif #endif
#if ( configUSE_TRACE_FACILITY == 1 ) #if ( configUSE_TRACE_FACILITY == 1 )
{ {
pxNewQueue->ucQueueType = ucQueueType; pxNewQueue->ucQueueType = ucQueueType;
} }
#endif /* configUSE_TRACE_FACILITY */ #endif /* configUSE_TRACE_FACILITY */
#if( configUSE_QUEUE_SETS == 1 ) #if ( configUSE_QUEUE_SETS == 1 )
{ {
pxNewQueue->pxQueueSetContainer = NULL; pxNewQueue->pxQueueSetContainer = NULL;
} }
#endif /* configUSE_QUEUE_SETS */ #endif /* configUSE_QUEUE_SETS */
traceQUEUE_CREATE( pxNewQueue ); traceQUEUE_CREATE( pxNewQueue );
} }
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
/*@requires 0 < uxQueueLength &*&
0 < uxItemSize &*&
0 < uxQueueLength * uxItemSize &*&
uxQueueLength * uxItemSize <= UINT_MAX;@*/
/*@ensures result == NULL
? true
: queue(result, _, uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
queuelists(result) &*&
result->irqMask |-> _ &*&
result->schedulerSuspend |-> _ &*&
result->locked |-> _;@*/
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
uint8_t *pucQueueStorage;
configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
const UBaseType_t uxItemSize,
const uint8_t ucQueueType )
/*@requires 0 < uxQueueLength &*&
0 < uxItemSize &*&
0 < uxQueueLength * uxItemSize &*&
uxQueueLength * uxItemSize <= UINT_MAX;@*/
/*@ensures result == NULL
? true
: queue(result, _, uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
queuelists(result) &*&
result->irqMask |-> _ &*&
result->schedulerSuspend |-> _ &*&
result->locked |-> _;@*/
{
Queue_t * pxNewQueue;
size_t xQueueSizeInBytes;
uint8_t * pucQueueStorage;
/* Allocate enough space to hold the maximum number of items that configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
can be in the queue at any time. It is valid for uxItemSize to be
zero in the case the queue is used as a semaphore. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
/* Check for multiplication overflow. */ /* Allocate enough space to hold the maximum number of items that
configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) ); * can be in the queue at any time. It is valid for uxItemSize to be
* zero in the case the queue is used as a semaphore. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
/* Check for multiplication overflow. */
configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */ #ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) ); pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
#else #else
/* Allocate the queue and storage area. Justification for MISRA /* Allocate the queue and storage area. Justification for MISRA
deviation as follows: pvPortMalloc() always ensures returned memory * deviation as follows: pvPortMalloc() always ensures returned memory
blocks are aligned per the requirements of the MCU stack. In this case * blocks are aligned per the requirements of the MCU stack. In this case
pvPortMalloc() must return a pointer that is guaranteed to meet the * pvPortMalloc() must return a pointer that is guaranteed to meet the
alignment requirements of the Queue_t structure - which in this case * alignment requirements of the Queue_t structure - which in this case
is an int8_t *. Therefore, whenever the stack alignment requirements * is an int8_t *. Therefore, whenever the stack alignment requirements
are greater than or equal to the pointer to char requirements the cast * are greater than or equal to the pointer to char requirements the cast
is safe. In other cases alignment requirements are not strict (one or * is safe. In other cases alignment requirements are not strict (one or
two bytes). */ * two bytes). */
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */ pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */
#endif #endif
if( pxNewQueue != NULL ) if( pxNewQueue != NULL )
{ {
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */ #ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
pucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes ); pucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes );
if ( pucQueueStorage == NULL ) {
vPortFree( pxNewQueue ); if( pucQueueStorage == NULL )
return NULL; {
} vPortFree( pxNewQueue );
/*@malloc_block_limits(pucQueueStorage);@*/ return NULL;
}
/*@malloc_block_limits(pucQueueStorage);@*/
#else #else
/* Jump past the queue structure to find the location of the queue /* Jump past the queue structure to find the location of the queue
storage area. */ * storage area. */
pucQueueStorage = ( uint8_t * ) pxNewQueue; pucQueueStorage = ( uint8_t * ) pxNewQueue;
pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
#endif #endif
#if( configSUPPORT_STATIC_ALLOCATION == 1 ) #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
{ {
/* Queues can be created either statically or dynamically, so /* Queues can be created either statically or dynamically, so
note this task was created dynamically in case it is later * note this task was created dynamically in case it is later
deleted. */ * deleted. */
pxNewQueue->ucStaticallyAllocated = pdFALSE; pxNewQueue->ucStaticallyAllocated = pdFALSE;
} }
#endif /* configSUPPORT_STATIC_ALLOCATION */ #endif /* configSUPPORT_STATIC_ALLOCATION */
prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
} }
else else
{ {
traceQUEUE_CREATE_FAILED( ucQueueType ); traceQUEUE_CREATE_FAILED( ucQueueType );
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
return pxNewQueue;
}
return pxNewQueue;
}

@@ -22,64 +22,68 @@
#include "proof/queue.h"

static void prvCopyDataFromQueue( Queue_t * const pxQueue,
                                  void * const pvBuffer )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
    chars(pvBuffer, M, head(abs));@*/
{
    if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
    {
        /*@assert buffer(Storage, N, M, ?contents);@*/
        /*@mul_mono_l(R, N-1, M);@*/
        pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */

        if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
        {
            /*@div_leq(N, R+1, M);@*/ /* now we know R == N-1 */
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
        }
        else
        {
            /*@{
                div_lt(R+1, N, M); // now we know R+1 < N
                mod_lt(R+1, N); // so, R+1 == (R+1)%N
                note(pxQueue->u.xQueue.pcReadFrom == Storage + ((R + 1) * M));
                note( Storage + ((R + 1) * M) == Storage + (((R + 1) % N) * M));
            }@*/
            mtCOVERAGE_TEST_MARKER();
        }

        /*@mod_plus(R+1, K, N);@*/
        /*@mod_mod(R+1, N);@*/
        /*@split_element(Storage, N, M, (R+1)%N);@*/
        /*@assert
            buffer(Storage, (R+1)%N, M, ?prefix) &*&
            chars(Storage + ((R+1)%N) * M, M, ?element) &*&
            buffer(Storage + ((R+1)%N + 1) * M, (N-1-(R+1)%N), M, ?suffix);@*/
        #ifdef VERIFAST /*< void cast of unused return value */
            memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
        #else
            ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
        #endif
        /*@{
            combine_list_no_change(prefix, element, suffix, (R+1)%N, contents);
            join_element(Storage, N, M, (R+1)%N);
            length_take(K, contents);
            take_length_eq(K, rotate_left((R+1)%N, contents), abs);
            deq_value_lemma(K, (R+1)%N, contents, abs);
        }@*/
    }
}

void caller_reinstates_queue_predicate( Queue_t * const pxQueue,
                                        void * const pvBuffer )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
    0 < K &*&
    chars(pvBuffer, M, _);@*/
/*@ensures
    queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs)) &*&
    chars(pvBuffer, M, head(abs));@*/
{
    prvCopyDataFromQueue( pxQueue, pvBuffer );
    /*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
    /*@assert buffer(Storage, N, M, ?contents);@*/
    pxQueue->uxMessagesWaiting = pxQueue->uxMessagesWaiting - 1;
    /*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/
}

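The pointer manipulation verified above is the concrete counterpart of the abstract index update R := (R+1)%N that the div/mod lemmas track: bumping the read cursor by one item and wrapping at the tail lands on exactly the slot the modular index names. A standalone sketch of that correspondence, with illustrative names rather than the kernel's types:

    #include <assert.h>
    #include <stddef.h>

    /* Advance a read cursor by one item of size m over a buffer of n items,
     * wrapping at the end - the pointer version of r = (r + 1) % n. */
    static unsigned char *advance(unsigned char *storage, unsigned char *cursor,
                                  size_t n, size_t m)
    {
        cursor += m;                      /* pcReadFrom += uxItemSize */
        if (cursor >= storage + n * m)    /* pcReadFrom >= pcTail     */
        {
            cursor = storage;             /* pcReadFrom = pcHead      */
        }
        return cursor;
    }

    int main(void)
    {
        unsigned char buf[4 * 8];
        size_t r = 3;                               /* abstract index  */
        unsigned char *p = buf + r * 8;             /* concrete cursor */
        assert(advance(buf, p, 4, 8) == buf + ((r + 1) % 4) * 8);
        return 0;
    }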
@@ -22,158 +22,165 @@
#include "proof/queue.h"

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
                                      const void * pvItemToQueue,
                                      const BaseType_t xPosition )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
    (K < N || xPosition == queueOVERWRITE) &*&
    chars(pvItemToQueue, M, ?x) &*&
    (xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
    (xPosition == queueSEND_TO_BACK
        ? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
        : (xPosition == queueSEND_TO_FRONT
            ? (R == 0
                ? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
                : queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
            : xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
    ) &*&
    chars(pvItemToQueue, M, x);@*/
{
    BaseType_t xReturn = pdFALSE;
    UBaseType_t uxMessagesWaiting;

    /* This function is called from a critical section. */
    uxMessagesWaiting = pxQueue->uxMessagesWaiting;

    /* The abstract list of list of chars of `Storage` is `contents` */
    /*@assert buffer(Storage, N, M, ?contents);@*/

    if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
    {
        /* This case is unreachable for queues */
        /*@assert false;@*/
        #if ( configUSE_MUTEXES == 1 )
            {
                if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                {
                    /* The mutex is no longer being held. */
                    xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
                    pxQueue->u.xSemaphore.xMutexHolder = NULL;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        #endif /* configUSE_MUTEXES */
    }
    else if( xPosition == queueSEND_TO_BACK )
    {
        #ifdef VERIFAST /*< void cast of unused return value */
            /* Now we focus the proof on the logical element of the buffer that
             * will be updated using the following lemma to split the buffer into 3
             * parts: a prefix, the element we want to update, and the suffix. This
             * enables the subsequent memcpy to verify. */
            /*@split_element(Storage, N, M, W);@*/
            /*@assert
                buffer(Storage, W, M, ?prefix) &*&
                chars(Storage + W * M, M, _) &*&
                buffer(Storage + (W + 1) * M, (N-1-W), M, ?suffix);@*/
            memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
            /* After the update we stitch the buffer back together */
            /*@join_element(Storage, N, M, W);@*/
            /*@combine_list_update(prefix, x, suffix, W, contents);@*/
        #else
            ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
        #endif
        /*@mul_mono_l(W, N-1, M);@*/
        pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */

        if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            /*@div_leq(N, W+1, M);@*/ /* now we know W == N-1 so (W+1)%N == 0 */
            pxQueue->pcWriteTo = pxQueue->pcHead;
        }
        else
        {
            /*@{
                div_lt(W+1, N, M); // now we know W+1 < N
                mod_lt(W+1, N); // so, W+1 == (W+1)%N
                note(pxQueue->pcWriteTo == Storage + ((W + 1) * M));
                note( Storage + ((W + 1) * M) == Storage + (((W + 1) % N) * M));
            }@*/
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        #ifdef VERIFAST /*< void cast of unused return value */
            /*@split_element(Storage, N, M, R);@*/
            /*@assert
                buffer(Storage, R, M, ?prefix) &*&
                chars(Storage + R * M, M, _) &*&
                buffer(Storage + (R + 1) * M, (N-1-R), M, ?suffix);@*/
            memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
            /*@join_element(Storage, N, M, R);@*/
            /*@combine_list_update(prefix, x, suffix, R, contents);@*/
        #else
            ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
        #endif
        pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;

        if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
            /*@{ div_leq(R-1, 0, M); leq_bound(R, 0); }@*/
            /*@assert R == 0;@*/
            /*@assert pxQueue->u.xQueue.pcReadFrom == Storage + (N-1) * M;@*/
        }
        else
        {
            /*@assert 0 < R;@*/
            /*@assert pxQueue->u.xQueue.pcReadFrom == Storage + (R-1) * M;@*/
            mtCOVERAGE_TEST_MARKER();
        }

        /*@
        if (R == 0)
        {
            mod_plus(N, (K+1), N); mod_same(N); mod_mod(K+1, N);
            assert W == ((N-1) + 1 + (K+1)) % N;
        }
        @*/

        if( xPosition == queueOVERWRITE )
        {
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* An item is not being added but overwritten, so subtract
                 * one from the recorded number of items in the queue so when
                 * one is added again below the number of recorded items remains
                 * correct. */
                --uxMessagesWaiting;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

    /*@
    if (xPosition == queueSEND_TO_BACK)
    {
        enq_lemma(K, (R+1)%N, contents, abs, x);
        mod_plus_one(W, R + 1 + K, N);
        mod_plus_distr(R+1, K, N);
    }
    else if (xPosition == queueSEND_TO_FRONT)
    {
        front_enq_lemma(K, R, contents, abs, x);
        if (0 < R)
        {
            mod_lt(R, N);
        }
    }
    @*/

    return xReturn;
}

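A compact abstract model of the three cases in the contract above - send to back writes at the write index and advances it, send to front writes at the read index and steps it back with wraparound (R == 0 wraps to N-1), and overwrite replaces the single element of a length-one queue - might look like this (illustrative representation, not the kernel's):

    #include <assert.h>
    #include <stddef.h>

    enum { N = 4 };

    typedef struct { size_t w, r, k; int slot[N]; } Model;

    /* Abstract effect of prvCopyDataToQueue on the ring indices. */
    static void send(Model *q, int x, int to_front, int overwrite)
    {
        if (overwrite)                 /* only legal when N == 1 in the proof */
        {
            q->slot[q->w] = x;
            q->k = 1;
        }
        else if (to_front)
        {
            q->slot[q->r] = x;         /* write at the read index...         */
            q->r = (q->r + N - 1) % N; /* ...then step it back, wrapping     */
            q->k += 1;
        }
        else                           /* send to back                       */
        {
            q->slot[q->w] = x;
            q->w = (q->w + 1) % N;
            q->k += 1;
        }
    }

    int main(void)
    {
        Model q = { 0, N - 1, 0, { 0 } }; /* empty: r one slot behind w */
        send(&q, 7, 0, 0);                /* back:  w advances to 1     */
        assert(q.w == 1 && q.k == 1);
        send(&q, 9, 1, 0);                /* front: r steps back to N-2 */
        assert(q.r == N - 2 && q.k == 2);
        return 0;
    }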
@@ -21,29 +21,29 @@
 */
#include "proof/queue.h"

#define taskENTER_CRITICAL()    setInterruptMask( pxQueue )
#define taskEXIT_CRITICAL()     clearInterruptMask( pxQueue )

static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
{
    BaseType_t xReturn;

    taskENTER_CRITICAL();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    taskEXIT_CRITICAL();

    return xReturn;
}

@@ -21,30 +21,29 @@
 */
#include "proof/queue.h"

#define taskENTER_CRITICAL()    setInterruptMask( pxQueue )
#define taskEXIT_CRITICAL()     clearInterruptMask( pxQueue )

static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
{
    BaseType_t xReturn;

    taskENTER_CRITICAL();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    taskEXIT_CRITICAL();

    return xReturn;
}

@@ -23,45 +23,47 @@
#include "proof/queue.h"

/* In this case we cannot wrap the macro in a function call to give a function
 * contract because we require annotations within the macro body, which is not
 * supported by VeriFast */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()

void wrapper_prvLockQueue( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]queuelock(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    [1/2]xQueue->locked |-> ?m &*&
    mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
    queue_locked_invariant(xQueue)();@*/
{
    taskENTER_CRITICAL();
    /*@open queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( ( xQueue )->cRxLock == queueUNLOCKED )
        {
            ( xQueue )->cRxLock = queueLOCKED_UNMODIFIED;
        }

        if( ( xQueue )->cTxLock == queueUNLOCKED )
        {
            ( xQueue )->cTxLock = queueLOCKED_UNMODIFIED;
        }
    }
    /*@close queue(xQueue, Storage, N, M, W, R, K, true, abs);@*/
    taskEXIT_CRITICAL();
    #ifdef VERIFAST /*< ghost action */
        mutex_acquire( xQueue->locked );
    #endif
}

@@ -21,142 +21,142 @@
 */
#include "proof/queue.h"

#define taskENTER_CRITICAL()    setInterruptMask( pxQueue )
#define taskEXIT_CRITICAL()     clearInterruptMask( pxQueue )

/* VeriFast: we make one major change. We merge the critical regions for
 * decrementing `cTxLock` and `cRxLock`. */
static void prvUnlockQueue( Queue_t * const pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]pxQueue->locked |-> ?m &*&
    mutex_held(m, queue_locked_invariant(pxQueue), currentThread, 1/2) &*&
    queue_locked_invariant(pxQueue)();@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr) &*&
    [1/2]queuelock(pxQueue);@*/
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

    /* The lock counts contains the number of extra data items placed or
     * removed from the queue while the queue was locked. When a queue is
     * locked items can be added or removed, but the event lists cannot be
     * updated. */
    taskENTER_CRITICAL();
    /*@open queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, _, ?abs);@*/
    {
        int8_t cTxLock = pxQueue->cTxLock;

        /* See if data was added to the queue while it was locked. */
        while( cTxLock > queueLOCKED_UNMODIFIED )
        /*@invariant queuelists(pxQueue);@*/
        {
            /* Data was posted while the queue was locked. Are any tasks
             * blocked waiting for data to become available? */
            #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting to
                             * the queue set caused a higher priority task to unblock.
                             * A context switch is required. */
                            vTaskMissedYield();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* Tasks that are removed from the event list will get
                         * added to the pending ready list as the scheduler is still
                         * suspended. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so record that a
                                 * context switch is required. */
                                vTaskMissedYield();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            break;
                        }
                    }
                }
            #else /* configUSE_QUEUE_SETS */
                {
                    /* Tasks that are removed from the event list will get added to
                     * the pending ready list as the scheduler is still suspended. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that
                             * a context switch is required. */
                            vTaskMissedYield();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            #endif /* configUSE_QUEUE_SETS */

            --cTxLock;
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    #ifndef VERIFAST /*< ***merge cTxLock and cRxLock critical regions*** */
        taskEXIT_CRITICAL();

        /* Do the same for the Rx lock. */
        taskENTER_CRITICAL();
    #endif
    {
        int8_t cRxLock = pxQueue->cRxLock;

        while( cRxLock > queueLOCKED_UNMODIFIED )
        /*@invariant queuelists(pxQueue);@*/
        {
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    vTaskMissedYield();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                --cRxLock;
            }
            else
            {
                break;
            }
        }

        pxQueue->cRxLock = queueUNLOCKED;
    }
    /*@close queue(pxQueue, Storage, N, M, W, R, K, false, abs);@*/
    taskEXIT_CRITICAL();
    #ifdef VERIFAST /*< ghost action */
        mutex_release( pxQueue->locked );
    #endif
}

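The two loops above drain deferred wake-ups: while the queue is locked, each post or receive bumps cTxLock or cRxLock instead of touching the event lists, and unlocking replays one wake-up per count before resetting the counter. A minimal sketch of that counter protocol (the constants follow the kernel's queueUNLOCKED/queueLOCKED_UNMODIFIED convention; the helper is illustrative):

    #include <assert.h>

    #define queueUNLOCKED             ( ( signed char ) -1 )
    #define queueLOCKED_UNMODIFIED    ( ( signed char ) 0 )

    /* While locked, an ISR records an event instead of waking tasks. */
    static void record_event(signed char *lock)
    {
        if (*lock == queueUNLOCKED) { /* caller may wake tasks directly */ }
        else { (*lock)++; }           /* deferred: replayed at unlock   */
    }

    int main(void)
    {
        signed char txLock = queueLOCKED_UNMODIFIED; /* set by prvLockQueue */
        record_event(&txLock);
        record_event(&txLock);

        /* prvUnlockQueue would now loop twice waking receivers, then reset. */
        int replays = 0;
        while (txLock > queueLOCKED_UNMODIFIED) { replays++; txLock--; }
        txLock = queueUNLOCKED;
        assert(replays == 2);
        return 0;
    }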
@@ -23,46 +23,47 @@
#include "proof/queue.h"

/* It may seem that the read of `pxQueue->uxMessagesWaiting` is required to be
 * contained in a critical region to be thread-safe. However, it is impossible for
 * this read to be involved in a data race due to the atomicity mechanism used by
 * tasks and ISRs: masking and enabling interrupts. If we assume (1) a
 * uniprocessor system and (2) that higher priority ISRs never call queue API
 * functions then masking interrupts ensures *strong isolation* meaning critical
 * regions protected by interrupt masking/enabling are isolated from other
 * critical regions and code outside of critical regions. */
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*& result == K;@*/
{
    UBaseType_t uxReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );
    uxReturn = pxQueue->uxMessagesWaiting;

    return uxReturn;
}

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr);@*/
{
    UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        /*@assert queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
        /*@close queue(xQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */

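The strong-isolation argument above can be made concrete: under assumptions (1) and (2), a task-level access is atomic only because it masks interrupts first, whereas an ISR-level access is already atomic on a uniprocessor, which is why uxQueueMessagesWaitingFromISR reads the counter bare. A minimal sketch of the two access paths (stand-ins, not the real port layer):

    #include <stdint.h>

    typedef uint32_t UBase;

    static volatile int interrupts_masked;
    static volatile UBase messages_waiting;

    /* Task-level read: bracketed by mask/unmask, like taskENTER/EXIT_CRITICAL. */
    static UBase read_from_task(void)
    {
        UBase k;
        interrupts_masked = 1;   /* stand-in for setInterruptMask()   */
        k = messages_waiting;
        interrupts_masked = 0;   /* stand-in for clearInterruptMask() */
        return k;
    }

    /* ISR-level read: already atomic on a uniprocessor, so no bracketing,
     * provided higher-priority ISRs never touch the queue (assumption 2). */
    static UBase read_from_isr(void)
    {
        return messages_waiting;
    }

    int main(void)
    {
        return (int) (read_from_task() + read_from_isr());
    }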
@@ -26,23 +26,24 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr);@*/
{
    UBaseType_t uxReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    /*@assert queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
        /*@assert uxReturn == N - K;@*/
    }
    /*@close queue(xQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */

@@ -21,60 +21,60 @@
 */
#include "proof/queue.h"

#define configSUPPORT_DYNAMIC_ALLOCATION    1
#define configSUPPORT_STATIC_ALLOCATION     0

void vQueueDelete( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
    queuelists(xQueue) &*&
    xQueue->irqMask |-> _ &*&
    xQueue->schedulerSuspend |-> _ &*&
    xQueue->locked |-> _;@*/
/*@ensures true;@*/
{
    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );
    traceQUEUE_DELETE( pxQueue );

    #if ( configQUEUE_REGISTRY_SIZE > 0 )
        {
            vQueueUnregisterQueue( pxQueue );
        }
    #endif

    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
        {
            /* The queue can only have been allocated dynamically - free it
             * again. */
            vPortFree( pxQueue );
            #ifdef VERIFAST /*< leak ghost state on deletion */
                /*@leak buffer(_, _, _, _);@*/
                /*@leak malloc_block(_, _);@*/
            #endif
        }
    #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
        {
            /* The queue could have been allocated statically or dynamically, so
             * check before attempting to free the memory. */
            if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
            {
                vPortFree( pxQueue );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
        {
            /* The queue must have been statically allocated, so is not going to be
             * deleted. Avoid compiler warnings about the unused parameter. */
            ( void ) pxQueue;
        }
    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}

@@ -23,256 +23,263 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]queuesuspend(xQueue) &*&
    chars(pvItemToQueue, M, ?x) &*&
    (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    [1/2]queuesuspend(xQueue) &*&
    chars(pvItemToQueue, M, x);@*/
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
        configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
            {
                configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
            }
        #endif
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself. This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    /*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
        [1/2]queuesuspend(xQueue) &*&
        chars(pvItemToQueue, M, x) &*&
        u_integer(&xTicksToWait, _) &*&
        (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1)) &*&
        xTIME_OUT(&xTimeOut);@*/
    {
        taskENTER_CRITICAL();
        {
            /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
            /* Is there room on the queue now? The running task must be the
             * highest priority task wanting to access the queue. If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                /* VeriFast: we do not verify this configuration option */
                #if ( configUSE_QUEUE_SETS == 1 )
                    {
                        const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                        xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                        if( pxQueue->pxQueueSetContainer != NULL )
                        {
                            if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                            {
                                /* Do not notify the queue set as an existing item
                                 * was overwritten in the queue so the number of items
                                 * in the queue has not changed. */
                                mtCOVERAGE_TEST_MARKER();
                            }
                            else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                            {
                                /* The queue is a member of a queue set, and posting
                                 * to the queue set caused a higher priority task to
                                 * unblock. A context switch is required. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            /* If there was a task waiting for data to arrive on the
                             * queue then unblock it now. */
                            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                            {
                                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                                {
                                    /* The unblocked task has a priority higher than
                                     * our own so yield immediately. Yes it is ok to
                                     * do this from within the critical section - the
                                     * kernel takes care of that. */
                                    queueYIELD_IF_USING_PREEMPTION();
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else if( xYieldRequired != pdFALSE )
                            {
                                /* This path is a special case that will only get
                                 * executed if the task was holding multiple mutexes
                                 * and the mutexes were given back in an order that is
                                 * different to that in which they were taken. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                #else /* configUSE_QUEUE_SETS */
                    {
                        /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                        xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                        /* If there was a task waiting for data to arrive on the
                         * queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                 * our own so yield immediately. Yes it is ok to do
                                 * this from within the critical section - the kernel
                                 * takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                             * executed if the task was holding multiple mutexes and
                             * the mutexes were given back in an order that is
                             * different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                #endif /* configUSE_QUEUE_SETS */

                /*@
                if (xCopyPosition == queueSEND_TO_BACK)
                {
                    close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
                }
                else if (xCopyPosition == queueSEND_TO_FRONT)
                {
                    close queue(pxQueue, Storage, N, M, W, (R == 0 ? (N-1) : (R-1)), (K+1), is_locked, cons(x, abs));
                }
                else if (xCopyPosition == queueOVERWRITE)
                {
                    close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
                }
                @*/
                taskEXIT_CRITICAL();

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        /*@close exists(pxQueue);@*/
        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                /*@open queue_locked_invariant(xQueue)();@*/
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can effect the
                 * event list. It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready last instead of the actual ready list. */
                /*@close queue_locked_invariant(xQueue)();@*/
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in a ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                /*@close exists(pxQueue);@*/
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                #ifdef VERIFAST /*< void cast of unused return value */
                    /*@close exists(pxQueue);@*/
                    xTaskResumeAll();
                #else
                    ( void ) xTaskResumeAll();
                #endif
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            #ifdef VERIFAST /*< void cast of unused return value */
                /*@close exists(pxQueue);@*/
                xTaskResumeAll();
            #else
                ( void ) xTaskResumeAll();
            #endif

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    } /*lint -restore */
}

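Stripped of the proof annotations, the function above is a bounded-retry loop: attempt the copy inside a critical section, arm the timeout on the first failure, block on the waiting-to-send list with the queue locked, and retry until the copy succeeds or the timeout expires. A condensed sketch of that skeleton, with hypothetical stand-ins for the kernel calls:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel primitives used above. */
    static unsigned spaces = 0;                 /* queue starts full */

    static bool try_copy_into_queue(void)       /* critical-section copy */
    {
        if (spaces == 0) return false;
        spaces--;
        return true;
    }

    static bool timed_out(unsigned *ticks)      /* xTaskCheckForTimeOut analogue */
    {
        if (*ticks == 0) return true;
        (*ticks)--;
        return false;
    }

    static void block_on_send_list(void)        /* vTaskPlaceOnEventList analogue */
    {
        spaces = 1;                             /* pretend a receiver ran */
    }

    static int generic_send(unsigned ticks_to_wait)
    {
        for( ; ; )
        {
            if( try_copy_into_queue() ) return 1;     /* pdPASS */
            if( ticks_to_wait == 0 ) return 0;        /* errQUEUE_FULL, no blocking */
            if( timed_out( &ticks_to_wait ) ) return 0; /* errQUEUE_FULL after waiting */
            block_on_send_list();                     /* woken when space appears */
        }
    }

    int main(void)
    {
        printf("%d\n", generic_send(2)); /* prints 1: succeeds after one block */
        return 0;
    }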
@@ -23,202 +23,213 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition )
/*@requires
    [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
    chars(pvItemToQueue, M, ?x) &*&
    integer(pxHigherPriorityTaskWoken, _) &*&
    (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
    [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    chars(pvItemToQueue, M, x) &*&
    integer(pxHigherPriorityTaskWoken, _);@*/
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
        configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #endif

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority. FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
     * in the queue. Also don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
             * semaphore or mutex. That means prvCopyDataToQueue() cannot result
             * in a task disinheriting a priority and prvCopyDataToQueue() can be
             * called here even though the disinherit function does not check if
             * the scheduler is suspended before accessing the ready lists. */
            #ifdef VERIFAST /*< void cast of unused return value */
                /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
            #else
                ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
            #endif
            /*@open queue(pxQueue, _, N, M, _, _, _, _, _);@*/

            /* The event list is not altered if the queue is locked. This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                /* VeriFast: we do not verify this configuration option */
                #if ( configUSE_QUEUE_SETS == 1 )
                    {
                        if( pxQueue->pxQueueSetContainer != NULL )
                        {
                            if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                            {
                                /* Do not notify the queue set as an existing item
                                 * was overwritten in the queue so the number of items
                                 * in the queue has not changed. */
                                mtCOVERAGE_TEST_MARKER();
                            }
                            else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                            {
                                /* The queue is a member of a queue set, and posting
                                 * to the queue set caused a higher priority task to
                                 * unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL ) if( pxHigherPriorityTaskWoken != NULL )
{ {
*pxHigherPriorityTaskWoken = pdTRUE; *pxHigherPriorityTaskWoken = pdTRUE;
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority so /* The task waiting has a higher priority so
record that a context switch is required. */ * record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL ) if( pxHigherPriorityTaskWoken != NULL )
{ {
*pxHigherPriorityTaskWoken = pdTRUE; *pxHigherPriorityTaskWoken = pdTRUE;
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
} }
#else /* configUSE_QUEUE_SETS */ #else /* configUSE_QUEUE_SETS */
{ {
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority so record that a /* The task waiting has a higher priority so record that a
context switch is required. */ * context switch is required. */
if( pxHigherPriorityTaskWoken != NULL ) if( pxHigherPriorityTaskWoken != NULL )
{ {
*pxHigherPriorityTaskWoken = pdTRUE; *pxHigherPriorityTaskWoken = pdTRUE;
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
/* Not used in this path. */ /* Not used in this path. */
#ifndef VERIFAST /*< void cast of unused var */ #ifndef VERIFAST /*< void cast of unused var */
( void ) uxPreviousMessagesWaiting; ( void ) uxPreviousMessagesWaiting;
#endif #endif
} }
#endif /* configUSE_QUEUE_SETS */ #endif /* configUSE_QUEUE_SETS */
} }
else else
{ {
/* Increment the lock count so the task that unlocks the queue /* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */ * knows that data was posted while it was locked. */
configASSERT( cTxLock != queueINT8_MAX); configASSERT( cTxLock != queueINT8_MAX );
pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
} }
xReturn = pdPASS; xReturn = pdPASS;
/*@ /*@
if (xCopyPosition == queueSEND_TO_BACK) { if (xCopyPosition == queueSEND_TO_BACK)
close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x))); {
} else if (xCopyPosition == queueSEND_TO_FRONT) { close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
if (R == 0) { }
close queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs)); else if (xCopyPosition == queueSEND_TO_FRONT)
} else { {
close queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)); if (R == 0)
} {
} else if (xCopyPosition == queueOVERWRITE) { close queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs));
close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)); }
} else
@*/ {
} close queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs));
else }
{ } else if (xCopyPosition == queueOVERWRITE)
traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); {
xReturn = errQUEUE_FULL; close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ }
} @*/
} }
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); else
{
traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn; return xReturn;
} }
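Note: the three close queue(...) cases above spell out the effect of each copy position on the abstract state: queueSEND_TO_BACK advances the write index to (W+1)%N and appends x; queueSEND_TO_FRONT steps the read index back (wrapping from 0 to N-1) and conses x onto the front; queueOVERWRITE leaves both indices alone and replaces the contents with the singleton list. As a reminder of the call pattern this contract verifies, here is a minimal usage sketch (hypothetical ISR and handle names; this is the standard FreeRTOS idiom, not part of the proof):

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

extern QueueHandle_t xRxQueue; /* assumed: created elsewhere with item size 1 */

void vExampleRxISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    char cByte = 0; /* stand-in for data read from a device register */

    /* xQueueSendFromISR() expands to xQueueGenericSendFromISR() with
     * queueSEND_TO_BACK; it never blocks and returns errQUEUE_FULL when
     * uxMessagesWaiting == uxLength. */
    if( xQueueSendFromISR( xRxQueue, &cByte, &xHigherPriorityTaskWoken ) == pdPASS )
    {
        /* The flag was set to pdTRUE if posting woke a higher priority task. */
    }

    /* Request a context switch if one is required. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}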
@@ -25,24 +25,26 @@
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
    result == ((K == 0) ? pdTRUE : pdFALSE);@*/
{
    BaseType_t xReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );

    if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
@@ -25,24 +25,26 @@
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
    result == ((K == N) ? pdTRUE : pdFALSE);@*/
{
    BaseType_t xReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );

    if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
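Taken together, the two predicates above give ISR code a verified, non-blocking way to test queue state: the contracts tie the result directly to the abstract item count K (pdTRUE exactly when K == 0, respectively K == N). A minimal caller sketch (hypothetical ISR and handle xQ, not part of these proofs):

#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xQ; /* assumed: created elsewhere */

void vExamplePollISR( void )
{
    /* Contract: returns pdTRUE exactly when the queue holds uxLength items. */
    if( xQueueIsQueueFullFromISR( xQ ) == pdFALSE )
    {
        /* There was room at the moment of the check. Another context may
         * still fill the queue before a later post, so a send from an ISR
         * must still handle errQUEUE_FULL. */
    }
}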
@@ -23,184 +23,186 @@
#include "proof/queue.h" #include "proof/queue.h"
#include "proof/queuecontracts.h" #include "proof/queuecontracts.h"
BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) BaseType_t xQueuePeek( QueueHandle_t xQueue,
void * const pvBuffer,
TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*& /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*& [1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, ?x);@*/ chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*& /*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*& [1/2]queuesuspend(xQueue) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/ (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{ {
BaseType_t xEntryTimeSet = pdFALSE; BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut; TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition; int8_t * pcOriginalReadPosition;
#ifdef VERIFAST /*< const pointer declaration */ #ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue; Queue_t * pxQueue = xQueue;
#else #else
Queue_t * const pxQueue = xQueue; Queue_t * const pxQueue = xQueue;
/* Check the pointer is not NULL. */ /* Check the pointer is not NULL. */
configASSERT( ( pxQueue ) ); configASSERT( ( pxQueue ) );
/* The buffer into which data is received can only be NULL if the data size /* The buffer into which data is received can only be NULL if the data size
is zero (so no data is copied into the buffer. */ * is zero (so no data is copied into the buffer. */
configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif #endif
/*lint -save -e904 This function relaxes the coding standard somewhat to /*lint -save -e904 This function relaxes the coding standard somewhat to
allow return statements within the function itself. This is done in the * allow return statements within the function itself. This is done in the
interest of execution time efficiency. */ * interest of execution time efficiency. */
for( ;; ) for( ; ; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*& /*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*& [1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, x) &*& chars(pvBuffer, M, x) &*&
u_integer(&xTicksToWait, _) &*& u_integer(&xTicksToWait, _) &*&
xTIME_OUT(&xTimeOut);@*/ xTIME_OUT(&xTimeOut);@*/
{ {
taskENTER_CRITICAL(); taskENTER_CRITICAL();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/ /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{ {
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Is there data in the queue now? To be running the calling task /* Is there data in the queue now? To be running the calling task
must be the highest priority task wanting to access the queue. */ * must be the highest priority task wanting to access the queue. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 ) if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{ {
/* Remember the read position so it can be reset after the data /* Remember the read position so it can be reset after the data
is read from the queue as this function is only peeking the * is read from the queue as this function is only peeking the
data, not removing it. */ * data, not removing it. */
pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataFromQueue( pxQueue, pvBuffer ); prvCopyDataFromQueue( pxQueue, pvBuffer );
traceQUEUE_PEEK( pxQueue ); traceQUEUE_PEEK( pxQueue );
/* The data is not being removed, so reset the read pointer. */ /* The data is not being removed, so reset the read pointer. */
pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
/* The data is being left in the queue, so see if there are /* The data is being left in the queue, so see if there are
any other tasks waiting for the data. */ * any other tasks waiting for the data. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{ {
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{ {
/* The task waiting has a higher priority than this task. */ /* The task waiting has a higher priority than this task. */
queueYIELD_IF_USING_PREEMPTION(); queueYIELD_IF_USING_PREEMPTION();
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL(); taskEXIT_CRITICAL();
return pdPASS; return pdPASS;
} }
else else
{ {
if( xTicksToWait == ( TickType_t ) 0 ) if( xTicksToWait == ( TickType_t ) 0 )
{ {
/* The queue was empty and no block time is specified (or /* The queue was empty and no block time is specified (or
the block time has expired) so leave now. */ * the block time has expired) so leave now. */
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL(); taskEXIT_CRITICAL();
traceQUEUE_PEEK_FAILED( pxQueue ); traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY; return errQUEUE_EMPTY;
} }
else if( xEntryTimeSet == pdFALSE ) else if( xEntryTimeSet == pdFALSE )
{ {
/* The queue was empty and a block time was specified so /* The queue was empty and a block time was specified so
configure the timeout structure ready to enter the blocked * configure the timeout structure ready to enter the blocked
state. */ * state. */
vTaskInternalSetTimeOutState( &xTimeOut ); vTaskInternalSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE; xEntryTimeSet = pdTRUE;
} }
else else
{ {
/* Entry time was already set. */ /* Entry time was already set. */
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
} }
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL(); taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue /* Interrupts and other tasks can send to and receive from the queue
now the critical section has been exited. */ * now the critical section has been exited. */
/*@close exists<QueueHandle_t>(pxQueue);@*/ /*@close exists<QueueHandle_t>(pxQueue);@*/
vTaskSuspendAll(); vTaskSuspendAll();
prvLockQueue( pxQueue ); prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{ {
/* Timeout has not expired yet, check to see if there is data in the /* Timeout has not expired yet, check to see if there is data in the
queue now, and if not enter the Blocked state to wait for data. */ * queue now, and if not enter the Blocked state to wait for data. */
if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{ {
traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
/*@open queue_locked_invariant(xQueue)();@*/ /*@open queue_locked_invariant(xQueue)();@*/
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
/*@close queue_locked_invariant(xQueue)();@*/ /*@close queue_locked_invariant(xQueue)();@*/
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
/*@close exists<QueueHandle_t>(pxQueue);@*/
if( xTaskResumeAll() == pdFALSE ) /*@close exists<QueueHandle_t>(pxQueue);@*/
{ if( xTaskResumeAll() == pdFALSE )
portYIELD_WITHIN_API(); {
} portYIELD_WITHIN_API();
else }
{ else
mtCOVERAGE_TEST_MARKER(); {
} mtCOVERAGE_TEST_MARKER();
} }
else }
{ else
/* There is data in the queue now, so don't enter the blocked {
state, instead return to try and obtain the data. */ /* There is data in the queue now, so don't enter the blocked
prvUnlockQueue( pxQueue ); * state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */ #ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/ /*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll(); xTaskResumeAll();
#else #else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif #endif
} }
} }
else else
{ {
/* The timeout has expired. If there is still no data in the queue /* The timeout has expired. If there is still no data in the queue
exit, otherwise go back and try to read the data again. */ * exit, otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */ #ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/ /*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll(); xTaskResumeAll();
#else #else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif #endif
if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{ {
traceQUEUE_PEEK_FAILED( pxQueue ); traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY; return errQUEUE_EMPTY;
} }
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
} /*lint -restore */ } /*lint -restore */
} }
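Because xQueuePeek() copies the head item out and then restores pcReadFrom, the item stays in the queue, which is why the postcondition can hand back a filled buffer while leaving the queue predicate (and abs) unchanged. A typical peek-then-receive pattern looks like this (hypothetical task, handle, message type and filter function; a sketch under those assumptions, not from the proofs):

#include "FreeRTOS.h"
#include "queue.h"

typedef struct { int iKind; int iPayload; } Message_t;        /* assumed item type */
extern QueueHandle_t xMsgQueue;                               /* assumed handle */
extern BaseType_t xMessageIsForUs( const Message_t * pxMsg ); /* hypothetical filter */

void vConsumerTask( void * pvParameters )
{
    Message_t xMsg;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Peek leaves the item at the head of the queue for other readers. */
        if( xQueuePeek( xMsgQueue, &xMsg, pdMS_TO_TICKS( 100 ) ) == pdPASS )
        {
            if( xMessageIsForUs( &xMsg ) != pdFALSE )
            {
                /* Consume it for real, with zero block time since we just
                 * saw it; another reader may still race us, so the result
                 * should be checked in production code. */
                ( void ) xQueueReceive( xMsgQueue, &xMsg, 0 );
            }
        }
    }
}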
@@ -23,67 +23,68 @@
#include "proof/queue.h" #include "proof/queue.h"
#include "proof/queuecontracts.h" #include "proof/queuecontracts.h"
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
void * const pvBuffer )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*& /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
chars(pvBuffer, M, ?x);@*/ chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*& /*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/ (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{ {
BaseType_t xReturn; BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus; UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition; int8_t * pcOriginalReadPosition;
#ifdef VERIFAST /*< const pointer declaration */ #ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue; Queue_t * pxQueue = xQueue;
#else #else
Queue_t * const pxQueue = xQueue; Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue ); configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */ configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
#endif #endif
/* RTOS ports that support interrupt nesting have the concept of a maximum /* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are * system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even * above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to * when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been * failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority. * assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts * Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum * that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt * system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible. * safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following * More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ * link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/ /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{ {
/* Cannot block in an ISR, so check there is data available. */ /* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
{ {
traceQUEUE_PEEK_FROM_ISR( pxQueue ); traceQUEUE_PEEK_FROM_ISR( pxQueue );
/* Remember the read position so it can be reset as nothing is /* Remember the read position so it can be reset as nothing is
actually being removed from the queue. */ * actually being removed from the queue. */
pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataFromQueue( pxQueue, pvBuffer ); prvCopyDataFromQueue( pxQueue, pvBuffer );
pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
xReturn = pdPASS; xReturn = pdPASS;
} }
else else
{ {
xReturn = pdFAIL; xReturn = pdFAIL;
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
} }
} }
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/ /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn; return xReturn;
} }
@@ -23,182 +23,184 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]queuesuspend(xQueue) &*&
    chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    [1/2]queuesuspend(xQueue) &*&
    (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        /* Check the pointer is not NULL. */
        configASSERT( ( pxQueue ) );

        /* The buffer into which data is received can only be NULL if the data size
         * is zero (so no data is copied into the buffer). */
        configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

        /* Cannot block if the scheduler is suspended. */
        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
            {
                configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
            }
        #endif
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself. This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    /*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
        [1/2]queuesuspend(xQueue) &*&
        chars(pvBuffer, M, x) &*&
        u_integer(&xTicksToWait, _) &*&
        xTIME_OUT(&xTimeOut);@*/
    {
        taskENTER_CRITICAL();
        /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now? To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                /*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

                /*@assert
                    pxQueue->pcHead |-> ?buffer &*&
                    buffer(buffer, N, M, ?contents);@*/
                /*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/

                /* There is now space in the queue, were any tasks waiting to
                 * post to the queue? If so, unblock the highest priority waiting
                 * task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /*@close queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs));@*/
                /*@assert chars(pvBuffer, M, head(abs));@*/
                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        /*@close exists(pxQueue);@*/
        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired. If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                /*@open queue_locked_invariant(xQueue)();@*/
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                /*@close queue_locked_invariant(xQueue)();@*/
                prvUnlockQueue( pxQueue );

                /*@close exists(pxQueue);@*/
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again. Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                #ifdef VERIFAST /*< void cast of unused return value */
                    /*@close exists(pxQueue);@*/
                    xTaskResumeAll();
                #else
                    ( void ) xTaskResumeAll();
                #endif
            }
        }
        else
        {
            /* Timed out. If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            #ifdef VERIFAST /*< void cast of unused return value */
                /*@close exists(pxQueue);@*/
                xTaskResumeAll();
            #else
                ( void ) xTaskResumeAll();
            #endif

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
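In the successful branch above, the deq_lemma step ties the concrete ring buffer contents to the abstract list, so the final assert shows pvBuffer holds exactly head(abs) and the queue is re-closed with tail(abs). The verified behaviour is the everyday blocking-consumer idiom (hypothetical task and handle names; a sketch under those assumptions):

#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xDataQueue; /* assumed: item size sizeof( uint32_t ) */

void vWorkerTask( void * pvParameters )
{
    uint32_t ulValue;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Block until an item arrives; on pdPASS the buffer holds the
         * oldest item, head(abs) in the proof's terms. */
        if( xQueueReceive( xDataQueue, &ulValue, portMAX_DELAY ) == pdPASS )
        {
            /* process ulValue */
        }
    }
}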
@@ -23,112 +23,114 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
                                 void * const pvBuffer,
                                 BaseType_t * const pxHigherPriorityTaskWoken )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
    chars(pvBuffer, M, ?x) &*&
    pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x)) &*&
    (pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _));@*/
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    #endif

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority. FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* Cannot block in an ISR, so check there is data available. */
        if( uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            const int8_t cRxLock = pxQueue->cRxLock;

            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            /*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
            pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
            /*@assert buffer(Storage, N, M, ?contents);@*/
            /*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/

            /* If the queue is locked the event list will not be modified.
             * Instead update the lock count so the task that unlocks the queue
             * will know that an ISR has removed data while the queue was
             * locked. */
            if( cRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                         * force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was removed while it was locked. */
                configASSERT( cRxLock != queueINT8_MAX );
                pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
            }

            /*@close queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs));@*/
            /*@assert chars(pvBuffer, M, head(abs));@*/
            xReturn = pdPASS;
        }
        else
        {
            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
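As on the send path, the lock count (cRxLock here) defers event-list maintenance to the task that later unlocks the queue. The call-site shape for the ISR-side receive is the usual drain loop (hypothetical ISR and handle names; a sketch, not from the proofs):

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

extern QueueHandle_t xEvtQueue; /* assumed: item size sizeof( uint8_t ) */

void vExampleDrainISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint8_t ucEvt;

    /* Each successful call removes the oldest item without blocking;
     * pdFAIL means the queue was empty. */
    while( xQueueReceiveFromISR( xEvtQueue, &ucEvt, &xHigherPriorityTaskWoken ) == pdPASS )
    {
        /* handle ucEvt */
    }

    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}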
@@ -1,3 +1,3 @@
#!/bin/bash -eu
NO_COVERAGE=1 EXTRA_VERIFAST_ARGS=-stats make queue | grep overhead: | sort | uniq
@@ -0,0 +1,73 @@
#!/usr/bin/env python3
from __future__ import print_function

import sys
from enum import Enum


class Extractor(object):
    @staticmethod
    def __parse_ctags(tags_filename):
        def convert_excmd(excmd):
            # The ex command must be a plain line number terminated by ';"',
            # i.e. the tags file was generated with numeric ex commands.
            assert excmd.endswith(';"')
            linenum = excmd[:-2]  # remove ';"'
            return int(linenum)

        result = {}
        with open(tags_filename) as f:
            for line in f:
                if line.startswith('!'):  # skip the ctags metadata header
                    continue
                parts = line.split('\t')
                funcname = parts[0]
                funcfile = parts[1]
                linenum = convert_excmd(parts[2])
                result[funcname] = (funcfile, linenum)
        return result

    def __init__(self, tags_filename):
        self.map = Extractor.__parse_ctags(tags_filename)

    class State(Enum):
        INIT = 0
        HEAD = 1
        BODY = 2

    def text_of_funcname(self, funcname):
        if funcname not in self.map:
            return []
        funcfile, linenum = self.map[funcname]
        result = []
        state, bracecount = Extractor.State.INIT, 0
        with open(funcfile) as f:
            for i, line in enumerate(f, start=1):  # ctags counts linenums from 1
                if state == Extractor.State.INIT and linenum <= i:
                    state = Extractor.State.HEAD
                if state == Extractor.State.HEAD:
                    result.append(line)
                    bracecount += line.count('{')
                    bracecount -= line.count('}')
                    if '{' in line:
                        state = Extractor.State.BODY
                    continue
                if state == Extractor.State.BODY:
                    result.append(line)
                    bracecount += line.count('{')
                    bracecount -= line.count('}')
                    if bracecount == 0:
                        break
        return result


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: %s <tagfile> <funcname>" % sys.argv[0])
        sys.exit(1)
    tag_filename = sys.argv[1]
    funcname = sys.argv[2]
    extractor = Extractor(tag_filename)  # use the tag file given on the command line
    result = extractor.text_of_funcname(funcname)
    print(''.join(result))
    sys.exit(0)
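The extractor starts at the ctags-reported line of a function and counts '{' against '}' until the braces balance, so it prints the full text of one C function for diff comparison against the annotated proof copy. It assumes the tags file stores numeric ex commands, e.g. one produced with something like `ctags --excmd=number -R .` (flag assumed; any mode emitting `<linenum>;"` entries works), after which `./extract.py tags xQueuePeek` prints the body of xQueuePeek.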