mirror of https://github.com/FreeRTOS/FreeRTOS.git (synced 2025-06-18 14:08:21 +08:00)
Prove buffer lemmas (#124)
* Prove buffer lemmas
* Update queue proofs to latest kernel source. All changes were syntactic, due to uncrustify code formatting.
* Strengthen prvCopyDataToQueue proof
* Add extract script for diff comparison

Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
@@ -18,35 +18,35 @@ all: queue
 
 .PHONY: queue
 queue:
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/create.c | $(call check_coverage,269)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvCopyDataFromQueue.c | $(call check_coverage,253)
-	@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/prvCopyDataToQueue.c | $(call check_coverage,280)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueEmpty.c | $(call check_coverage,234)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueFull.c | $(call check_coverage,234)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvLockQueue.c | $(call check_coverage,235)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvUnlockQueue.c | $(call check_coverage,249)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueMessagesWaiting.c | $(call check_coverage,237)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueSpacesAvailable.c | $(call check_coverage,235)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/vQueueDelete.c | $(call check_coverage,232)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueGenericSend.c | $(call check_coverage,280)
-	@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueGenericSendFromISR.c | $(call check_coverage,262)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueEmptyFromISR.c | $(call check_coverage,232)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueFullFromISR.c | $(call check_coverage,232)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeek.c | $(call check_coverage,280)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeekFromISR.c | $(call check_coverage,245)
-	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueReceive.c | $(call check_coverage,282)
-	@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueReceiveFromISR.c | $(call check_coverage,259)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/create.c | $(call check_coverage,317)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvCopyDataFromQueue.c | $(call check_coverage,301)
+	@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/prvCopyDataToQueue.c | $(call check_coverage,329)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueEmpty.c | $(call check_coverage,282)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueFull.c | $(call check_coverage,282)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvLockQueue.c | $(call check_coverage,283)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/prvUnlockQueue.c | $(call check_coverage,297)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueMessagesWaiting.c | $(call check_coverage,285)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueSpacesAvailable.c | $(call check_coverage,283)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/vQueueDelete.c | $(call check_coverage,280)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueGenericSend.c | $(call check_coverage,328)
+	@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueGenericSendFromISR.c | $(call check_coverage,310)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueEmptyFromISR.c | $(call check_coverage,280)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueFullFromISR.c | $(call check_coverage,280)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeek.c | $(call check_coverage,328)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeekFromISR.c | $(call check_coverage,293)
+	@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueReceive.c | $(call check_coverage,330)
+	@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueReceiveFromISR.c | $(call check_coverage,307)
 
 .PHONY: proof_changes
 proof_changes:
-	@git grep "if[n]*def VERIFAST" -- '*.c' | cut -f 3- -d ' ' | sort | uniq
+	@git grep "if[n]*def VERIFAST" | cut -f 3- -d ' ' | sort | uniq
 
 GIT?=git
 NO_CHANGE_CHECKOUT_DIR=no-change-check-freertos-kernel
-NO_CHANGE_EXPECTED_HASH=4a61f9ff7e2
+NO_CHANGE_EXPECTED_HASH=587a83d6476
 
 .PHONY: synced_with_source_check
 synced_with_source_check:
 	@rm -rf $(NO_CHANGE_CHECKOUT_DIR)
-	@$(GIT) clone --shallow-since="30-06-2020" https://github.com/FreeRTOS/FreeRTOS-Kernel.git $(NO_CHANGE_CHECKOUT_DIR)
+	@$(GIT) clone https://github.com/FreeRTOS/FreeRTOS-Kernel.git $(NO_CHANGE_CHECKOUT_DIR)
 	@cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) queue.c
 	@cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) include/queue.h
@@ -49,7 +49,7 @@ and uncheck `Check arithmetic overflow`).
 - `queue/xQueueReceiveFromISR.c`
 
 A successful proof results in the top banner turning green with a statement
-similar to: `0 errors found (286 statements verified)`.
+similar to: `0 errors found (328 statements verified)`.
 
 ## Proof checking a single proof at the command-line
 
@@ -65,7 +65,7 @@ A successful proof results in output similar to:
 
 ```
 queue/xQueueGenericSend.c
-0 errors found (286 statements verified)
+0 errors found (328 statements verified)
 ```
 
 ## Running proof regression
 
@@ -87,7 +87,7 @@ $ VERIFAST=/path/to/verifast NO_COVERAGE=1 make
 ## Annotation burden
 
 VeriFast can emit statistics about the number of source code lines and
-annotations.
+annotations. These range from 0.3x to 2x annotations per line of source code.
 
 ```
 $ VERIFAST=/path/to/verifast ./scripts/annotation_overhead.sh
@@ -328,6 +328,22 @@ lemma void remove_remove_nth<t>(list<t> xs, t x)
     }
 }
 
+/* Following lemma from `verifast/bin/rt/_list.java`. Renamed to
+avoid clash with listex.c's nth_drop lemma. */
+lemma void nth_drop2<t>(list<t> vs, int i)
+    requires 0 <= i && i < length(vs);
+    ensures nth(i, vs) == head(drop(i, vs));
+{
+    switch (vs) {
+        case nil:
+        case cons(v, vs0):
+            if (i == 0) {
+            } else {
+                nth_drop2(vs0, i - 1);
+            }
+    }
+}
+
 lemma void enq_lemma<t>(int k, int i, list<t> xs, list<t> ys, t z)
     requires 0 <= k && 0 <= i && 0 < length(xs) && k < length(xs) && i < length(xs) && take(k, rotate_left(i, xs)) == ys;
     ensures take(k+1, rotate_left(i, update((i+k)%length(xs), z, xs))) == append(ys, cons(z, nil));
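As a concrete illustration of `enq_lemma`, the following standalone C harness (illustrative only, not part of the proofs; `rotate_left` is re-implemented here over `int` arrays to mirror the VeriFast list function) checks the equation on one instance: writing `z` into slot `(i+k) % length(xs)` extends the abstract prefix `ys` by exactly `z`.

```
#include <assert.h>
#include <stdio.h>

#define LEN 5

/* rotate_left(i, xs) == append(drop(i, xs), take(i, xs)), as in listex.c */
static void rotate_left(int i, const int *xs, int *out, int n)
{
    for (int j = 0; j < n; j++)
        out[j] = xs[(i + j) % n];
}

int main(void)
{
    int xs[LEN] = { 10, 11, 12, 13, 14 };
    int i = 3, k = 2, z = 99;

    /* ys == take(k, rotate_left(i, xs)): the abstract queue before enqueue */
    int rot[LEN];
    rotate_left(i, xs, rot, LEN);

    /* update((i+k)%LEN, z, xs): write the new element into the back slot */
    int xs2[LEN];
    for (int j = 0; j < LEN; j++) xs2[j] = xs[j];
    xs2[(i + k) % LEN] = z;

    int rot2[LEN];
    rotate_left(i, xs2, rot2, LEN);

    /* take(k+1, rotate_left(i, xs2)) == append(ys, cons(z, nil)) */
    for (int j = 0; j < k; j++)
        assert(rot2[j] == rot[j]);
    assert(rot2[k] == z);

    printf("enq_lemma instance checked\n");
    return 0;
}
```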
@@ -64,79 +64,94 @@ typedef ssize_t BaseType_t;
 #define pvPortMalloc malloc
 #define vPortFree free
 
-#define queueSEND_TO_BACK 0
-#define queueSEND_TO_FRONT 1
-#define queueOVERWRITE 2
+#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
+#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 )
+#define queueOVERWRITE ( ( BaseType_t ) 2 )
 
 #define pdTRUE 1
 #define pdFALSE 0
 
 #define pdPASS pdTRUE
 #define pdFAIL pdFALSE
 #define errQUEUE_FULL 0
 #define errQUEUE_EMPTY 0
 
+/* Constants used with the cRxLock and cTxLock structure members. */
 #define queueUNLOCKED ( ( int8_t ) -1 )
 #define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 )
 #define queueINT8_MAX ( ( int8_t ) 127 )
 
 typedef struct QueuePointers
 {
-    int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
-    int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
+    int8_t * pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
+    int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
 } QueuePointers_t;
 
 typedef struct SemaphoreData
 {
 #ifdef VERIFAST /*< do not model xMutexHolder */
     void *xMutexHolder;
 #else
     TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
 #endif
-    UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
+    UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
 } SemaphoreData_t;
 
 /* VeriFast does not support unions so we replace with a struct */
 struct fake_union_t {
     QueuePointers_t xQueue;
     SemaphoreData_t xSemaphore;
 };
 
 typedef struct xLIST {
     UBaseType_t uxNumberOfItems;
 #ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
     struct xLIST_ITEM *pxIndex;
     MiniListItem_t xListEnd;
 #endif
 } List_t;
 
-typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ {
-    int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
-    int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */
+typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
+{
+    int8_t * pcHead; /*< Points to the beginning of the queue storage area. */
+    int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */
 
 #ifdef VERIFAST /*< VeriFast does not model unions */
     struct fake_union_t u;
 #else
     union
     {
         QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
         SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
     } u;
 #endif
 
     List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
     List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
 
-    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
+    volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */
     UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
     UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
 
     volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
     volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
 
-    /*@struct mutex *irqMask;@*/ /*< Ghost mutex simulates the effect of irq masking */
-    /*@struct mutex *schedulerSuspend;@*/ /*< Ghost mutex simulates the effect of scheduler suspension */
-    /*@struct mutex *locked;@*/ /*< Ghost mutex simulates the effect of queue locking */
 #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
     uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
 #endif
 
 #if ( configUSE_QUEUE_SETS == 1 )
     struct QueueDefinition * pxQueueSetContainer;
 #endif
 
 #if ( configUSE_TRACE_FACILITY == 1 )
     UBaseType_t uxQueueNumber;
     uint8_t ucQueueType;
 #endif
 
+    /*@struct mutex *irqMask;@*/ /*< Ghost mutex simulates the effect of irq masking */
+    /*@struct mutex *schedulerSuspend;@*/ /*< Ghost mutex simulates the effect of scheduler suspension */
+    /*@struct mutex *locked;@*/ /*< Ghost mutex simulates the effect of queue locking */
 } xQUEUE;
 
 typedef xQUEUE Queue_t;
@@ -144,80 +159,180 @@ typedef xQUEUE Queue_t;
 typedef struct QueueDefinition * QueueHandle_t;
 
 /*@
 #define QUEUE_SHAPE(q, Storage, N, M, K) \
     malloc_block_QueueDefinition(q) &*& \
     q->pcHead |-> Storage &*& \
     q->pcWriteTo |-> ?WPtr &*& \
     q->u.xQueue.pcTail |-> ?End &*& \
     q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
     q->uxItemSize |-> M &*& \
     q->uxLength |-> N &*& \
     q->uxMessagesWaiting |-> K &*& \
     q->cRxLock |-> ?rxLock &*& \
     q->cTxLock |-> ?txLock &*& \
     struct_QueuePointers_padding(&q->u.xQueue) &*& \
     struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
     struct_fake_union_t_padding(&q->u) &*& \
     struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
     struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
     q->u.xSemaphore.xMutexHolder |-> _ &*& \
     q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
     true
 
 predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
     QUEUE_SHAPE(q, Storage, N, M, K) &*&
     0 < N &*&
     0 < M &*&
     0 <= W &*& W < N &*&
     0 <= R &*& R < N &*&
     0 <= K &*& K <= N &*&
     W == (R + 1 + K) % N &*&
     (-1) <= rxLock &*&
     (-1) <= txLock &*&
     (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
     (is_locked ? 0 <= txLock : (-1) == txLock) &*&
     WPtr == Storage + (W*M) &*&
     RPtr == Storage + (R*M) &*&
     End == Storage + (N*M) &*&
     buffer(Storage, N, M, ?contents) &*&
     length(contents) == N &*&
     abs == take(K, rotate_left((R+1)%N, contents)) &*&
     malloc_block(Storage, N*M) &*&
     true
     ;
 @*/
 
 /* A buffer allows us to interpret a flat character array of `N*M` bytes as a
 list of `N` elements where each element is `M` bytes */
 /*@
 predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
     N == 0
     ? elements == nil
     : chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
 
-// TODO: buffer_from_chars proof
-lemma void buffer_from_chars(char *buffer, size_t N, size_t M);
-    requires chars(buffer, N*M, _);
+lemma void buffer_length(char *buffer, size_t N, size_t M)
+    requires buffer(buffer, N, M, ?elements);
+    ensures buffer(buffer, N, M, elements) &*& length(elements) == N;
+{
+    if (N == 0) {
+        open buffer(buffer, N, M, elements);
+        close buffer(buffer, N, M, elements);
+    } else {
+        open buffer(buffer, N, M, elements);
+        buffer_length(buffer+M, N-1, M);
+    }
+}
 @*/
 
+/*
+There is no need in the queue proofs to preserve a relationship between `cs`
+and `elements` (i.e., `flatten(elements) == cs`) because we only move in one
+direction from `cs` to `elements` during queue creation when the contents is
+fresh from `malloc` (i.e., uninitialized). If we needed to do a roundtrip from
+elements back to cs then this would require a stronger lemma.
+*/
+/*@
+lemma void buffer_from_chars(char *buffer, size_t N, size_t M)
+    requires chars(buffer, N*M, ?cs) &*& 0 <= N &*& 0 < M;
+    ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
+{
+    if (N == 0) {
+        close exists(nil);
+    } else {
+        int i = 0;
+        while (i < N)
+            invariant 0 <= i &*& i <= N &*&
+                chars(buffer, (N-i)*M, ?xs) &*& xs == take((N-i)*M, cs) &*&
+                buffer(buffer + (N-i)*M, i, M, ?ys);
+            decreases N-i;
+        {
+            mul_mono_l(0, N-i-1, M);
+            chars_split(buffer, (N-i-1)*M);
+            mul_mono_l(i, N, M);
+            mul_mono_l(N-i, N, M);
+            take_take((N-i-1)*M, (N-i)*M, cs);
+            i++;
+        }
+        close exists(ys);
+        buffer_length(buffer, N, M);
+    }
+}
 
-// TODO: split_element proof
-lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i);
-    requires buffer(buffer, N, M, ?elements) &*& i < N;
-    ensures
-        buffer(buffer, i, M, take(i, elements)) &*&
-        chars(buffer + i * M, M, nth(i, elements)) &*&
-        buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
-
-// TODO: join_element proof
-lemma void join_element(char *buffer, size_t N, size_t M, size_t i);
-    requires
-        buffer(buffer, i, M, ?prefix) &*&
-        chars(buffer + i * M, M, ?element) &*&
-        buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
+lemma void append_buffer(char *buffer, size_t N1, size_t N2, size_t M)
+    requires
+        buffer(buffer, N1, M, ?elements1) &*&
+        buffer(buffer + N1 * M, N2, M, ?elements2) &*&
+        0 <= N1 &*& 0 <= N2;
+    ensures buffer(buffer, N1+N2, M, append(elements1, elements2));
+{
+    if (N1 == 0) {
+        open buffer(buffer, 0, M, _);
+    } else if (N2 == 0) {
+        open buffer(buffer + N1 * M, 0, M, _);
+    } else {
+        open buffer(buffer, N1, M, elements1);
+        append_buffer(buffer + M, N1-1, N2, M);
+        close buffer(buffer, N1+N2, M, cons(?x, append(xs, elements2)));
+    }
+}
+
+lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i)
+    requires buffer(buffer, N, M, ?elements) &*& 0 <= i &*& i < N;
+    ensures
+        buffer(buffer, i, M, take(i, elements)) &*&
+        chars(buffer + i * M, M, nth(i, elements)) &*&
+        buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
+{
+    if (i == 0) {
+        // straightforward
+    } else {
+        buffer_length(buffer, N, M);
+        int j = 0;
+        while (j < i)
+            invariant 0 <= j &*& j <= i &*&
+                buffer(buffer, j, M, take(j, elements)) &*&
+                buffer(buffer + j * M, N-j, M, drop(j, elements));
+            decreases i-j;
+        {
+            drop_drop(1, j, elements);
+            nth_drop2(elements, j);
+            open buffer(buffer + j * M, N-j, M, drop(j, elements));
+            assert chars(buffer + j * M, M, ?x) &*& x == nth(j, elements);
+            close buffer(buffer + j * M, 1, M, singleton(x));
+            append_buffer(buffer, j, 1, M);
+            take_plus_one(j, elements);
+            j++;
+        }
+        drop_drop(1, j, elements);
+        nth_drop2(elements, i);
+        open buffer(buffer + (i+1) * M, (N-1-i), M, _);
+    }
+}
+
+lemma void join_element(char *buffer, size_t N, size_t M, size_t i)
+    requires
+        0 <= i &*& i < N &*&
+        buffer(buffer, i, M, ?prefix) &*&
+        chars(buffer + i * M, M, ?element) &*&
+        buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
+    ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
+{
+    if (i == 0) {
+        open buffer(buffer, i, M, prefix);
+        assert prefix == nil;
+        close buffer(buffer, N, M, cons(element, suffix));
+    } else {
+        close buffer(buffer + i * M, N-i, M, cons(element, suffix));
+        append_buffer(buffer, i, N-i, M);
+    }
+}
 
 predicate list(List_t *l;) =
     l->uxNumberOfItems |-> _;
 
 predicate queuelists(QueueHandle_t q;) =
     list(&q->xTasksWaitingToSend) &*&
     list(&q->xTasksWaitingToReceive);
 @*/
 
 /* Because prvCopyDataFromQueue does *not* decrement uxMessagesWaiting (K) the
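Operationally, `buffer`, `split_element`, and `join_element` describe nothing more exotic than strided indexing into a flat `N*M`-byte array: `split_element` narrows the footprint to element `i` at `buffer + i*M`, and `join_element` restores the whole-array view afterwards. A standalone C sketch of the same access pattern (hypothetical helper names, not the proof code):

```
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* View a flat N*M byte array as N elements of M bytes each: element i
 * lives at storage + i*M, exactly as the buffer predicate says. */
static unsigned char *element(unsigned char *storage, size_t M, size_t i)
{
    return storage + i * M;
}

int main(void)
{
    enum { N = 4, M = 8 };
    unsigned char *storage = malloc((size_t)N * M);
    assert(storage != NULL);
    memset(storage, 0, (size_t)N * M);

    /* "split_element" at i == 2: operate on that one element... */
    unsigned char item[M] = "abcdefg";
    memcpy(element(storage, M, 2), item, M);

    /* ..."join_element": the whole-buffer view is intact again and the
     * neighbouring elements are untouched. */
    assert(memcmp(element(storage, M, 2), item, M) == 0);
    assert(element(storage, M, 0)[0] == 0);
    assert(element(storage, M, 3)[0] == 0);

    free(storage);
    return 0;
}
```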
@@ -225,44 +340,44 @@ queue predicate above does not hold as a postcondition. If the caller
 subsequently decrements K then the queue predicate can be reinstated. */
 /*@
 predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
     QUEUE_SHAPE(q, Storage, N, M, K) &*&
     0 < N &*&
     0 < M &*&
     0 <= W &*& W < N &*&
     0 <= R &*& R < N &*&
     0 <= K &*& K <= N &*&
     W == (R + K) % N &*& //< Differs from queue predicate
     (-1) <= rxLock &*&
     (-1) <= txLock &*&
     (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
     (is_locked ? 0 <= txLock : (-1) == txLock) &*&
     WPtr == Storage + (W*M) &*&
     RPtr == Storage + (R*M) &*&
     End == Storage + (N*M) &*&
     buffer(Storage, N, M, ?contents) &*&
     length(contents) == N &*&
     abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
     malloc_block(Storage, N*M) &*&
     true
     ;
 @*/
 
 /* Can't be called `mutex` as this clashes with VeriFast's predicate */
 /*@
 predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
     QUEUE_SHAPE(q, Storage, N, 0, K) &*&
     queuelists(q) &*&
     0 < N &*&
     0 <= K &*& K <= N &*&
     (-1) <= rxLock &*&
     (-1) <= txLock &*&
     WPtr == Storage &*&
     RPtr == Storage &*&
     End == Storage &*&
     malloc_block(Storage, 0) &*&
     chars(Storage, 0, _) &*&
     true
     ;
 @*/
 
 /* A queuehandle can be shared between tasks and ISRs. Acquiring the ghost
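The two marked deviations from the `queue` predicate cancel against the read-pointer advance: `prvCopyDataFromQueue` moves `R` to `(R+1) % N` without touching `K`, so `W == (R + 1 + K) % N` before the call is exactly the fact `W == (R' + K) % N` afterwards. A throwaway C check of that index bookkeeping (illustrative only):

```
#include <assert.h>

int main(void)
{
    int N = 5;

    for (int R = 0; R < N; R++)
        for (int K = 1; K <= N; K++) {
            int W = (R + 1 + K) % N; /* queue invariant before the copy */
            int R2 = (R + 1) % N;    /* pcReadFrom advanced by one slot */
            /* queue_after_prvCopyDataFromQueue invariant: W == (R2 + K) % N */
            assert(W == (R2 + K) % N);
        }
    return 0;
}
```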
@@ -272,32 +387,32 @@ after masking interrupts depends on the caller:
 - An ISR has access to the queue and, if the queue is unlocked, the queuelists */
 /*@
 predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
     q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
 
 predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
     queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
     (is_isr && is_locked ? true : queuelists(queue));
 @*/
 
 /* A queuesuspend can be shared between tasks. Acquiring the ghost `schedulerSuspend` gives access to the `locked` mutex. */
 /*@
 predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
     queue->locked |-> ?m &*&
     mutex(m, queue_locked_invariant(queue));
 
 predicate queuesuspend(QueueHandle_t q;) =
     q->schedulerSuspend |-> ?m &*&
     mutex(m, scheduler_suspended_invariant(q));
 @*/
 
 /* A queuelock is exclusively acquired by a task. Acquiring the ghost `queuelock` gives access to the queue list resources. */
 /*@
 predicate queuelock(QueueHandle_t q;) =
     q->locked |-> ?m &*&
     mutex(m, queue_locked_invariant(q));
 
 predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =
     queuelists(queue);
 @*/
 
 BaseType_t vListInitialise(List_t *list);
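The ghost mutexes give these predicates the usual ownership reading: masking interrupts behaves like acquiring `irqMask`, suspending the scheduler like acquiring `schedulerSuspend`, and locking the queue like acquiring `locked`, with each acquisition handing the holder the resources named in the corresponding invariant. A pthread sketch of the same discipline (purely illustrative; the kernel does not use pthreads, and the names mirror the proof's, not a real API):

```
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the resources guarded by irqs_masked_invariant:
 * only the current holder of irqMask may touch them. */
static pthread_mutex_t irqMask = PTHREAD_MUTEX_INITIALIZER;
static unsigned uxMessagesWaiting = 0;

/* Mirrors setInterruptMask/clearInterruptMask: acquisition hands the
 * invariant's resources to the caller, release hands them back. */
static void taskENTER_CRITICAL(void) { pthread_mutex_lock(&irqMask); }
static void taskEXIT_CRITICAL(void)  { pthread_mutex_unlock(&irqMask); }

static void *sender(void *arg)
{
    (void)arg;
    for (int i = 0; i < 1000; i++) {
        taskENTER_CRITICAL();
        uxMessagesWaiting++; /* safe: we hold irqMask */
        taskEXIT_CRITICAL();
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, sender, NULL);
    pthread_create(&b, NULL, sender, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("%u\n", uxMessagesWaiting); /* always 2000 */
    return 0;
}
```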
@@ -310,15 +425,15 @@ BaseType_t listLIST_IS_EMPTY(List_t *list);
 
 typedef struct xTIME_OUT
 {
     BaseType_t xOverflowCount;
     TickType_t xTimeOnEntering;
 } TimeOut_t;
 
 /*@
 predicate xTIME_OUT(struct xTIME_OUT *to;) =
     to->xOverflowCount |-> _ &*&
     to->xTimeOnEntering |-> _ &*&
     struct_xTIME_OUT_padding(to);
 @*/
 
 void vTaskInternalSetTimeOutState( TimeOut_t * x);
@@ -343,60 +458,60 @@ void vTaskMissedYield();
 
 void vTaskSuspendAll();
 /*@requires exists<QueueHandle_t>(?xQueue) &*&
     [1/2]xQueue->schedulerSuspend |-> ?m &*&
     [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
 /*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
     mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
     xQueue->locked |-> ?m2 &*&
     mutex(m2, queue_locked_invariant(xQueue));@*/
 
 BaseType_t xTaskResumeAll( void );
 /*@requires exists<QueueHandle_t>(?xQueue) &*&
     [1/2]xQueue->schedulerSuspend |-> ?m &*&
     mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
     xQueue->locked |-> ?m2 &*&
     mutex(m2, queue_locked_invariant(xQueue));@*/
 /*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
     [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
 
 void prvLockQueue( QueueHandle_t xQueue );
 /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
     [1/2]queuelock(xQueue); @*/
 /*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
     [1/2]xQueue->locked |-> ?m &*&
     mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
     queue_locked_invariant(xQueue)();@*/
 
 void prvUnlockQueue( QueueHandle_t xQueue );
 /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
     [1/2]xQueue->locked |-> ?m &*&
     mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
     queue_locked_invariant(xQueue)();@*/
 /*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
     [1/2]queuelock(xQueue);@*/
 
 void setInterruptMask(QueueHandle_t xQueue)
 /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
 /*@ensures [1/2]xQueue->irqMask |-> ?m &*&
     mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
     queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
     queuelists(xQueue);@*/
 {
     /*@open queuehandle(xQueue, N, M, is_isr);@*/
     mutex_acquire(xQueue->irqMask);
     /*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
 }
 
 void clearInterruptMask(QueueHandle_t xQueue)
 /*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
     [1/2]xQueue->irqMask |-> ?m &*&
     mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
     queuelists(xQueue);@*/
 /*@ensures [1/2]queuehandle(xQueue, N, M, false);@*/
 {
     /*@close irqs_masked_invariant(xQueue, N, M, false)();@*/
     mutex_release(xQueue->irqMask);
     /*@close [1/2]queuehandle(xQueue, N, M, false);@*/
 }
 
 #define taskENTER_CRITICAL() setInterruptMask(xQueue)
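Read together, these contracts force callers to bracket their critical regions: `prvLockQueue` trades a half share of `queuelock` for `mutex_held` plus the queue-locked invariant, and `prvUnlockQueue` is the only way to trade them back. A sketch of a conforming caller follows (condensed from the shape of `xQueueGenericSend`; `example_locked_section` is a hypothetical function, and its contract comments paraphrase the ones above):

```
void example_locked_section( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, false) &*& [1/2]queuelock(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, false) &*& [1/2]queuelock(xQueue);@*/
{
    /* Consumes [1/2]queuelock; yields mutex_held + queue_locked_invariant. */
    prvLockQueue( xQueue );

    /* Here the caller owns queuelists(xQueue) and may inspect the
     * xTasksWaitingToSend/xTasksWaitingToReceive event lists. */

    /* Consumes mutex_held + the invariant; restores [1/2]queuelock. */
    prvUnlockQueue( xQueue );
}
```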
@@ -407,26 +522,26 @@ void clearInterruptMask(QueueHandle_t xQueue)
 UBaseType_t setInterruptMaskFromISR(QueueHandle_t xQueue)
 /*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true;@*/
 /*@ensures [1/2]xQueue->irqMask |-> ?m &*&
     mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
     queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
     (is_locked ? true : queuelists(xQueue));@*/
 {
     /*@open queuehandle(xQueue, N, M, is_isr);@*/
     mutex_acquire(xQueue->irqMask);
     /*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
     return 0;
 }
 
 void clearInterruptMaskFromISR(QueueHandle_t xQueue, UBaseType_t uxSavedInterruptStatus)
 /*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
     [1/2]xQueue->irqMask |-> ?m &*&
     mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
     (is_locked ? true : queuelists(xQueue));@*/
 /*@ensures [1/2]queuehandle(xQueue, N, M, true);@*/
 {
     /*@close irqs_masked_invariant(xQueue, N, M, true)();@*/
     mutex_release(xQueue->irqMask);
     /*@close [1/2]queuehandle(xQueue, N, M, true);@*/
 }
 
 #define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR(xQueue)
@@ -28,23 +28,23 @@
 void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer );
 /*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
 /*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
     chars(pvBuffer, M, head(abs));@*/
 
 BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition );
 /*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
     (K < N || xPosition == queueOVERWRITE) &*&
     chars(pvItemToQueue, M, ?x) &*&
     (xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
 /*@ensures
     (xPosition == queueSEND_TO_BACK
         ? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
         : (xPosition == queueSEND_TO_FRONT
             ? (R == 0
                 ? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
                 : queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
             : xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
     ) &*&
     chars(pvItemToQueue, M, x);@*/
 
 BaseType_t prvIsQueueEmpty( Queue_t * pxQueue );
 /*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
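The three cases of the `prvCopyDataToQueue` contract say exactly what happens to the abstract list: `queueSEND_TO_BACK` appends `x`, `queueSEND_TO_FRONT` prepends `x`, and `queueOVERWRITE` (legal only when `N == 1`) leaves a singleton. A small C model of just that abstract behaviour (illustrative only, not the kernel code):

```
#include <assert.h>
#include <string.h>

enum { MAX = 8 };

struct abs_queue { int items[MAX]; int k; };

/* queueSEND_TO_BACK: abs := append(abs, singleton(x)) */
static void send_to_back(struct abs_queue *q, int x) { q->items[q->k++] = x; }

/* queueSEND_TO_FRONT: abs := cons(x, abs) */
static void send_to_front(struct abs_queue *q, int x)
{
    memmove(&q->items[1], &q->items[0], (size_t)q->k * sizeof(int));
    q->items[0] = x;
    q->k++;
}

/* queueOVERWRITE (N == 1): abs := singleton(x) */
static void overwrite(struct abs_queue *q, int x) { q->items[0] = x; q->k = 1; }

int main(void)
{
    struct abs_queue q = { { 0 }, 0 };
    send_to_back(&q, 1);  /* abs == [1]     */
    send_to_back(&q, 2);  /* abs == [1,2]   */
    send_to_front(&q, 0); /* abs == [0,1,2] */
    assert(q.k == 3 && q.items[0] == 0 && q.items[2] == 2);

    struct abs_queue s = { { 0 }, 0 };
    overwrite(&s, 7);     /* abs == [7], regardless of previous contents */
    overwrite(&s, 9);     /* abs == [9] */
    assert(s.k == 1 && s.items[0] == 9);
    return 0;
}
```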
@@ -2,24 +2,27 @@
 
 In the queue predicates and proofs we use the following variable names.
 
-- `N` : the queue length (i.e., the maximum number of items the queue can
-  store)
-- `M` : the size in bytes of each element
-- `W` : the logical index of the write pointer, necessarily between `0..(N-1)`
-- `R` : the logical index of the read pointer, necessarily between `0..(N-1)`
-- `K` : the number of items currently in the queue
-
-Consequently, the size of the concrete queue storage is `N*M` bytes. The
-`buffer` predicate, defined in `include/proof/queue.h` allows us to treat the
-queue storage as a list `contents` of `N` items, each of which is `M` bytes.
+- `Storage` : The concrete queue storage of `N*M` bytes. The `buffer`
+  predicate, defined in `include/proof/queue.h` allows us to treat the
+  storage as a list `contents` of `N` items, each of which is `M` bytes.
+- `N` : queue length (i.e., the maximum number of items the queue can store)
+- `M` : size in bytes of each element
+- `W` : logical index of the write pointer, necessarily between
+  `0..(N-1)` such that the write pointer `pcWriteTo == Storage + W * M`.
+- `R` : logical index of the read pointer, necessarily between
+  `0..(N-1)` such that the read pointer `pcReadFrom == Storage + R * M`.
+- `K` : number of items currently in the queue corresponding to
+  `uxMessagesWaiting`
 
 The `queue` predicate, defined in `include/proof/queue.h`, relates the concrete
-representation to an abstract list `abs` of `K` items. More precisely, the main
+queue storage to an abstract list `abs` of `K` items. More precisely, the key
 queue invariant is:
 
 ```
-abs == take(K, rotate_left((R+1)%N, contents))
+abs == take(K, rotate_left((R+1)%N, contents)) &*&
+W == (R + 1 + K) % N
 ```
 
-where `(R+1)%N` is the front of the queue, `rotate_left` allows for the
-wraparound of queue storage, and `take` gives the first `K` elements.
+where `(R+1)%N` is the front of the queue, `W` is the back of the queue,
+`rotate_left` allows for the wraparound of queue storage, and `take` gives the
+first `K` elements.
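To see the invariant on concrete numbers: with `N == 4`, `R == 2`, and `K == 2`, the front of the queue is slot `(R+1)%N == 3`, the stored items are `contents[3]` followed by `contents[0]`, and the write index is pinned to `W == (R+1+K)%N == 1`. A standalone C check of that one instance (illustrative only):

```
#include <assert.h>

int main(void)
{
    /* N == 4 slots; R == 2, K == 2. Items were enqueued into slots 3 and 0,
     * so slots 1 and 2 hold junk (-1). */
    enum { N = 4 };
    int contents[N] = { 21, -1, -1, 20 };
    int R = 2, K = 2;

    int W = (R + 1 + K) % N;
    assert(W == 1); /* the back of the queue: next slot to be written */

    /* abs == take(K, rotate_left((R+1)%N, contents)) == [20, 21] */
    int abs_list[N];
    for (int j = 0; j < K; j++)
        abs_list[j] = contents[((R + 1) % N + j) % N];
    assert(abs_list[0] == 20 && abs_list[1] == 21);
    return 0;
}
```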
@ -23,236 +23,245 @@
|
||||
#include "proof/queue.h"
|
||||
|
||||
/* Simplifying assumption: we do not verify queue initialisation in a
|
||||
concurrent environment. We assume the queue initialization (including reset)
|
||||
happens-before all concurrent send/receives. */
|
||||
* concurrent environment. We assume the queue initialization (including reset)
|
||||
* happens-before all concurrent send/receives. */
|
||||
#ifdef VERIFAST /*< ***xQueueGenericReset happens-before concurrent behavior*** */
|
||||
#define taskENTER_CRITICAL()
|
||||
#define taskEXIT_CRITICAL()
|
||||
#define taskENTER_CRITICAL()
|
||||
#define taskEXIT_CRITICAL()
|
||||
#endif
|
||||
|
||||
/* The following intermediate queue predicates summarise states used by queue
|
||||
initialization but not used elsewhere so we confine them to these proofs
|
||||
locally. */
|
||||
* initialization but not used elsewhere so we confine them to these proofs
|
||||
* locally. */
|
||||
/*@
|
||||
predicate queue_init1(QueueHandle_t q;) =
|
||||
QUEUE_SHAPE(q, _, _, _, _) &*&
|
||||
queuelists(q)
|
||||
;
|
||||
QUEUE_SHAPE(q, _, _, _, _) &*&
|
||||
queuelists(q)
|
||||
;
|
||||
|
||||
predicate queue_init2(QueueHandle_t q, int8_t *Storage, size_t N, size_t M;) =
|
||||
QUEUE_SHAPE(q, Storage, N, M, _) &*&
|
||||
queuelists(q) &*&
|
||||
0 < N &*&
|
||||
chars(Storage, (N*M), _) &*&
|
||||
malloc_block(Storage, N*M) &*&
|
||||
Storage + N * M <= (int8_t *)UINTPTR_MAX &*&
|
||||
true
|
||||
;
|
||||
QUEUE_SHAPE(q, Storage, N, M, _) &*&
|
||||
queuelists(q) &*&
|
||||
0 < N &*&
|
||||
chars(Storage, (N*M), _) &*&
|
||||
malloc_block(Storage, N*M) &*&
|
||||
Storage + N * M <= (int8_t *)UINTPTR_MAX &*&
|
||||
true
|
||||
;
|
||||
@*/
|
||||
|
||||
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
|
||||
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
|
||||
BaseType_t xNewQueue )
|
||||
/*@requires queue_init2(xQueue, ?Storage, ?N, ?M);@*/
|
||||
/*@ensures 0 == M
|
||||
? freertos_mutex(xQueue, Storage, N, 0)
|
||||
: queue(xQueue, Storage, N, M, 0, (N-1), 0, false, nil) &*& queuelists(xQueue);@*/
|
||||
? freertos_mutex(xQueue, Storage, N, 0)
|
||||
: queue(xQueue, Storage, N, M, 0, (N-1), 0, false, nil) &*& queuelists(xQueue);@*/
|
||||
{
|
||||
#ifdef VERIFAST /*< const pointer declaration */
|
||||
Queue_t * pxQueue = xQueue;
|
||||
Queue_t * pxQueue = xQueue;
|
||||
#else
|
||||
Queue_t * const pxQueue = xQueue;
|
||||
Queue_t * const pxQueue = xQueue;
|
||||
#endif
|
||||
|
||||
configASSERT( pxQueue );
|
||||
configASSERT( pxQueue );
|
||||
|
||||
taskENTER_CRITICAL();
|
||||
{
|
||||
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
|
||||
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
|
||||
pxQueue->pcWriteTo = pxQueue->pcHead;
|
||||
/*@mul_mono_l(0, N-1, M);@*/
|
||||
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
|
||||
pxQueue->cRxLock = queueUNLOCKED;
|
||||
pxQueue->cTxLock = queueUNLOCKED;
|
||||
taskENTER_CRITICAL();
|
||||
{
|
||||
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
|
||||
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
|
||||
pxQueue->pcWriteTo = pxQueue->pcHead;
|
||||
/*@mul_mono_l(0, N-1, M);@*/
|
||||
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
|
||||
pxQueue->cRxLock = queueUNLOCKED;
|
||||
pxQueue->cTxLock = queueUNLOCKED;
|
||||
|
||||
if( xNewQueue == pdFALSE )
|
||||
{
|
||||
/* If there are tasks blocked waiting to read from the queue, then
|
||||
the tasks will remain blocked as after this function exits the queue
|
||||
will still be empty. If there are tasks blocked waiting to write to
|
||||
the queue, then one should be unblocked as after this function exits
|
||||
it will be possible to write to it. */
|
||||
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
|
||||
{
|
||||
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
|
||||
{
|
||||
queueYIELD_IF_USING_PREEMPTION();
|
||||
}
|
||||
else
|
||||
{
|
||||
mtCOVERAGE_TEST_MARKER();
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
mtCOVERAGE_TEST_MARKER();
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Ensure the event queues start in the correct state. */
|
||||
vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
|
||||
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
|
||||
}
|
||||
/* Logically, we move from a flat character array of `N*M` bytes (using
|
||||
the `chars` predicate) to an array of `N` elements where each element
|
||||
is `M` bytes (using the `buffer` predicate) */
|
||||
/*@if (M != 0) { buffer_from_chars(pxQueue->pcHead, N, M); }@*/
|
||||
}
|
||||
taskEXIT_CRITICAL();
|
||||
if( xNewQueue == pdFALSE )
|
||||
{
|
||||
/* If there are tasks blocked waiting to read from the queue, then
|
||||
* the tasks will remain blocked as after this function exits the queue
|
||||
* will still be empty. If there are tasks blocked waiting to write to
|
||||
* the queue, then one should be unblocked as after this function exits
|
||||
* it will be possible to write to it. */
|
||||
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
|
||||
{
|
||||
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
|
||||
{
|
||||
queueYIELD_IF_USING_PREEMPTION();
|
||||
}
|
||||
else
|
||||
{
|
||||
mtCOVERAGE_TEST_MARKER();
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
mtCOVERAGE_TEST_MARKER();
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Ensure the event queues start in the correct state. */
|
||||
vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
|
||||
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
|
||||
}
|
||||
|
||||
/* A value is returned for calling semantic consistency with previous
|
||||
versions. */
|
||||
return pdPASS;
|
||||
/*@if (M != 0) { buffer_from_chars(pxQueue->pcHead, N, M); }@*/
|
||||
}
|
||||
taskEXIT_CRITICAL();
|
||||
|
||||
/* A value is returned for calling semantic consistency with previous
|
||||
* versions. */
|
||||
return pdPASS;
|
||||
}
|
||||
|
||||
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
|
||||
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
|
||||
const UBaseType_t uxItemSize,
|
||||
uint8_t * pucQueueStorage,
|
||||
const uint8_t ucQueueType,
|
||||
Queue_t * pxNewQueue )
|
||||
|
||||
/*@requires queue_init1(pxNewQueue) &*&
|
||||
0 < uxQueueLength &*& 0 < uxItemSize &*&
|
||||
malloc_block(pucQueueStorage, uxQueueLength * uxItemSize) &*&
|
||||
pucQueueStorage + uxQueueLength * uxItemSize <= (uint8_t *)UINTPTR_MAX &*&
|
||||
uchars(pucQueueStorage, uxQueueLength * uxItemSize,_);@*/
|
||||
0 < uxQueueLength &*& 0 < uxItemSize &*&
|
||||
malloc_block(pucQueueStorage, uxQueueLength * uxItemSize) &*&
|
||||
pucQueueStorage + uxQueueLength * uxItemSize <= (uint8_t *)UINTPTR_MAX &*&
|
||||
uchars(pucQueueStorage, uxQueueLength * uxItemSize,_);@*/
|
||||
/*@ensures queue(pxNewQueue, ((int8_t *)(void *)pucQueueStorage), uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
|
||||
queuelists(pxNewQueue);@*/
|
||||
queuelists(pxNewQueue);@*/
|
||||
{
|
||||
#ifndef VERIFAST /*< void cast of unused var */
|
||||
/* Remove compiler warnings about unused parameters should
|
||||
configUSE_TRACE_FACILITY not be set to 1. */
|
||||
( void ) ucQueueType;
|
||||
/* Remove compiler warnings about unused parameters should
|
||||
* configUSE_TRACE_FACILITY not be set to 1. */
|
||||
( void ) ucQueueType;
|
||||
#endif
|
||||
|
||||
if( uxItemSize == ( UBaseType_t ) 0 )
|
||||
{
|
||||
/* No RAM was allocated for the queue storage area, but PC head cannot
|
||||
be set to NULL because NULL is used as a key to say the queue is used as
|
||||
a mutex. Therefore just set pcHead to point to the queue as a benign
|
||||
value that is known to be within the memory map. */
|
||||
if( uxItemSize == ( UBaseType_t ) 0 )
|
||||
{
|
||||
/* No RAM was allocated for the queue storage area, but PC head cannot
|
||||
* be set to NULL because NULL is used as a key to say the queue is used as
|
||||
* a mutex. Therefore just set pcHead to point to the queue as a benign
|
||||
* value that is known to be within the memory map. */
|
||||
#ifdef VERIFAST /*< stricter casting */
|
||||
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pxNewQueue;
|
||||
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pxNewQueue;
|
||||
#else
|
||||
pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
|
||||
pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set the head to the start of the queue storage area. */
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Set the head to the start of the queue storage area. */
|
||||
#ifdef VERIFAST /*< stricter casting */
|
||||
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pucQueueStorage;
|
||||
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pucQueueStorage;
|
||||
#else
|
||||
pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
|
||||
pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
/* Initialise the queue members as described where the queue type is
|
||||
defined. */
|
||||
pxNewQueue->uxLength = uxQueueLength;
|
||||
pxNewQueue->uxItemSize = uxItemSize;
|
||||
/*@close queue_init2(pxNewQueue, _, uxQueueLength, uxItemSize);@*/
|
||||
/* Initialise the queue members as described where the queue type is
|
||||
* defined. */
|
||||
pxNewQueue->uxLength = uxQueueLength;
|
||||
pxNewQueue->uxItemSize = uxItemSize;
|
||||
/*@close queue_init2(pxNewQueue, _, uxQueueLength, uxItemSize);@*/
|
||||
#ifdef VERIFAST /*< void cast of unused return value */
|
||||
xQueueGenericReset( pxNewQueue, pdTRUE );
|
||||
xQueueGenericReset( pxNewQueue, pdTRUE );
|
||||
#else
|
||||
( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
|
||||
( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
|
||||
#endif
|
||||
|
||||
#if ( configUSE_TRACE_FACILITY == 1 )
|
||||
{
|
||||
pxNewQueue->ucQueueType = ucQueueType;
|
||||
}
|
||||
#endif /* configUSE_TRACE_FACILITY */
|
||||
#if ( configUSE_TRACE_FACILITY == 1 )
|
||||
{
|
||||
pxNewQueue->ucQueueType = ucQueueType;
|
||||
}
|
||||
#endif /* configUSE_TRACE_FACILITY */
|
||||
|
||||
#if( configUSE_QUEUE_SETS == 1 )
|
||||
{
|
||||
pxNewQueue->pxQueueSetContainer = NULL;
|
||||
}
|
||||
#endif /* configUSE_QUEUE_SETS */
|
||||
#if ( configUSE_QUEUE_SETS == 1 )
|
||||
{
|
||||
pxNewQueue->pxQueueSetContainer = NULL;
|
||||
}
|
||||
#endif /* configUSE_QUEUE_SETS */
|
||||
|
||||
traceQUEUE_CREATE( pxNewQueue );
|
||||
traceQUEUE_CREATE( pxNewQueue );
|
||||
}
|
||||
|
||||
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
|
||||
/*@requires 0 < uxQueueLength &*&
|
||||
0 < uxItemSize &*&
|
||||
0 < uxQueueLength * uxItemSize &*&
|
||||
uxQueueLength * uxItemSize <= UINT_MAX;@*/
|
||||
/*@ensures result == NULL
|
||||
? true
|
||||
: queue(result, _, uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
|
||||
queuelists(result) &*&
|
||||
result->irqMask |-> _ &*&
|
||||
result->schedulerSuspend |-> _ &*&
|
||||
result->locked |-> _;@*/
|
||||
{
|
||||
Queue_t *pxNewQueue;
|
||||
size_t xQueueSizeInBytes;
|
||||
uint8_t *pucQueueStorage;
|
||||
|
||||
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
|
||||
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
|
||||
const UBaseType_t uxItemSize,
|
||||
const uint8_t ucQueueType )
|
||||
/*@requires 0 < uxQueueLength &*&
|
||||
0 < uxItemSize &*&
|
||||
0 < uxQueueLength * uxItemSize &*&
|
||||
uxQueueLength * uxItemSize <= UINT_MAX;@*/
|
||||
/*@ensures result == NULL
|
||||
? true
|
||||
: queue(result, _, uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
|
||||
queuelists(result) &*&
|
||||
result->irqMask |-> _ &*&
|
||||
result->schedulerSuspend |-> _ &*&
|
||||
result->locked |-> _;@*/
|
||||
{
|
||||
Queue_t * pxNewQueue;
|
||||
size_t xQueueSizeInBytes;
|
||||
uint8_t * pucQueueStorage;
|
||||
|
||||
/* Allocate enough space to hold the maximum number of items that
|
||||
can be in the queue at any time. It is valid for uxItemSize to be
|
||||
zero in the case the queue is used as a semaphore. */
|
||||
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
|
||||
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
|
||||
|
||||
/* Check for multiplication overflow. */
|
||||
configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
|
||||
/* Allocate enough space to hold the maximum number of items that
|
||||
* can be in the queue at any time. It is valid for uxItemSize to be
     * zero in the case the queue is used as a semaphore. */
    xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

    /* Check for multiplication overflow. */
    configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );

    #ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
    #else
        /* Allocate the queue and storage area.  Justification for MISRA
         * deviation as follows:  pvPortMalloc() always ensures returned memory
         * blocks are aligned per the requirements of the MCU stack.  In this case
         * pvPortMalloc() must return a pointer that is guaranteed to meet the
         * alignment requirements of the Queue_t structure - which in this case
         * is an int8_t *.  Therefore, whenever the stack alignment requirements
         * are greater than or equal to the pointer to char requirements the cast
         * is safe.  In other cases alignment requirements are not strict (one or
         * two bytes). */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */
    #endif

    if( pxNewQueue != NULL )
    {
        #ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
            pucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes );

            if( pucQueueStorage == NULL )
            {
                vPortFree( pxNewQueue );
                return NULL;
            }

            /*@malloc_block_limits(pucQueueStorage);@*/
        #else
            /* Jump past the queue structure to find the location of the queue
             * storage area. */
            pucQueueStorage = ( uint8_t * ) pxNewQueue;
            pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
        #endif

        #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Queues can be created either statically or dynamically, so
                 * note this task was created dynamically in case it is later
                 * deleted. */
                pxNewQueue->ucStaticallyAllocated = pdFALSE;
            }
        #endif /* configSUPPORT_STATIC_ALLOCATION */

        prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
    }
    else
    {
        traceQUEUE_CREATE_FAILED( ucQueueType );
        mtCOVERAGE_TEST_MARKER();
    }

    return pxNewQueue;
}
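A note on the multiplication-overflow assert above: for unsigned operands the product wraps rather than traps, so overflow is detected after the fact by checking that division recovers the original factor. A minimal stand-alone sketch of the same idiom (the helper name is ours, not part of the kernel):

#include <stddef.h>
#include <stdbool.h>

/* True when uxLength * uxSize would wrap around size_t, mirroring the
 * configASSERT() above: after wrapping, product / size != length. */
static bool xWouldOverflow( size_t uxLength, size_t uxSize )
{
    size_t uxProduct = uxLength * uxSize; /* wraps modulo SIZE_MAX + 1 */

    return ( uxSize != 0 ) && ( uxProduct / uxSize != uxLength );
}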
@ -22,64 +22,68 @@

#include "proof/queue.h"

static void prvCopyDataFromQueue( Queue_t * const pxQueue,
                                  void * const pvBuffer )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
    chars(pvBuffer, M, head(abs));@*/
{
    if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
    {
        /*@assert buffer(Storage, N, M, ?contents);@*/
        /*@mul_mono_l(R, N-1, M);@*/
        pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */

        if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
        {
            /*@div_leq(N, R+1, M);@*/ /* now we know R == N-1 */
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
        }
        else
        {
            /*@{
                div_lt(R+1, N, M); // now we know R+1 < N
                mod_lt(R+1, N);    // so, R+1 == (R+1)%N
                note(pxQueue->u.xQueue.pcReadFrom == Storage + ((R + 1) * M));
                note( Storage + ((R + 1) * M) == Storage + (((R + 1) % N) * M));
            }@*/
            mtCOVERAGE_TEST_MARKER();
        }

        /*@mod_plus(R+1, K, N);@*/
        /*@mod_mod(R+1, N);@*/
        /*@split_element(Storage, N, M, (R+1)%N);@*/
        /*@assert
            buffer(Storage, (R+1)%N, M, ?prefix) &*&
            chars(Storage + ((R+1)%N) * M, M, ?element) &*&
            buffer(Storage + ((R+1)%N + 1) * M, (N-1-(R+1)%N), M, ?suffix);@*/
        #ifdef VERIFAST /*< void cast of unused return value */
            memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
        #else
            ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
        #endif
        /*@{
            combine_list_no_change(prefix, element, suffix, (R+1)%N, contents);
            join_element(Storage, N, M, (R+1)%N);
            length_take(K, contents);
            take_length_eq(K, rotate_left((R+1)%N, contents), abs);
            deq_value_lemma(K, (R+1)%N, contents, abs);
        }@*/
    }
}
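The verified body above is the standard ring-buffer read step: bump the cursor one item and wrap to the head once it passes the tail. A stand-alone sketch of the same step using plain indices instead of the kernel's pointers (hypothetical helper, for illustration only), where R matches the proof's read index and (R+1)%N its post-state:

#include <stddef.h>
#include <string.h>

/* Dequeue one M-byte item from an N-slot ring buffer. R is the slot
 * *before* the next item, matching the kernel's pcReadFrom convention;
 * the returned value is the new read index, i.e. (R+1)%N. */
static size_t uxCopyItemOut( unsigned char * pucStorage, size_t N, size_t M,
                             size_t R, void * pvBuffer )
{
    R = ( R + 1 ) % N;                             /* advance and wrap */
    memcpy( pvBuffer, pucStorage + ( R * M ), M ); /* copy the item out */
    return R;
}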

void caller_reinstates_queue_predicate( Queue_t * const pxQueue,
                                        void * const pvBuffer )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
    0 < K &*&
    chars(pvBuffer, M, _);@*/
/*@ensures
    queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs)) &*&
    chars(pvBuffer, M, head(abs));@*/
{
    prvCopyDataFromQueue( pxQueue, pvBuffer );
    /*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
    /*@assert buffer(Storage, N, M, ?contents);@*/
    pxQueue->uxMessagesWaiting = pxQueue->uxMessagesWaiting - 1;
    /*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/
}
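Read as a specification, the contract pair above says a successful receive yields head(abs) and leaves tail(abs) behind. The abstract effect can be pictured with a small concrete test (hypothetical, modelling the abstract list directly as an array):

#include <assert.h>
#include <string.h>

int main( void )
{
    int abs[ 3 ] = { 10, 20, 30 }; /* abstract queue contents, head first */
    int received = abs[ 0 ];       /* head(abs): what the receiver gets */

    memmove( &abs[ 0 ], &abs[ 1 ], 2 * sizeof( int ) ); /* tail(abs) remains */

    assert( received == 10 );
    assert( abs[ 0 ] == 20 && abs[ 1 ] == 30 );
    return 0;
}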
@ -22,158 +22,165 @@

#include "proof/queue.h"

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
                                      const void * pvItemToQueue,
                                      const BaseType_t xPosition )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
    (K < N || xPosition == queueOVERWRITE) &*&
    chars(pvItemToQueue, M, ?x) &*&
    (xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
    (xPosition == queueSEND_TO_BACK
        ? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
        : (xPosition == queueSEND_TO_FRONT
            ? (R == 0
                ? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
                : queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
            : xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
    ) &*&
    chars(pvItemToQueue, M, x);@*/
{
    BaseType_t xReturn = pdFALSE;
    UBaseType_t uxMessagesWaiting;

    /* This function is called from a critical section. */

    uxMessagesWaiting = pxQueue->uxMessagesWaiting;

    /* The abstract list of list of chars of `Storage` is `contents` */
    /*@assert buffer(Storage, N, M, ?contents);@*/
    if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
    {
        /* This case is unreachable for queues */
        /*@assert false;@*/
        #if ( configUSE_MUTEXES == 1 )
            {
                if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                {
                    /* The mutex is no longer being held. */
                    xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
                    pxQueue->u.xSemaphore.xMutexHolder = NULL;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        #endif /* configUSE_MUTEXES */
    }
    else if( xPosition == queueSEND_TO_BACK )
    {
        #ifdef VERIFAST /*< void cast of unused return value */
            /* Now we focus the proof on the logical element of the buffer that
             * will be updated using the following lemma to split the buffer into 3
             * parts: a prefix, the element we want to update, and the suffix.  This
             * enables the subsequent memcpy to verify. */
            /*@split_element(Storage, N, M, W);@*/
            /*@assert
                buffer(Storage, W, M, ?prefix) &*&
                chars(Storage + W * M, M, _) &*&
                buffer(Storage + (W + 1) * M, (N-1-W), M, ?suffix);@*/
            memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
            /* After the update we stitch the buffer back together */
            /*@join_element(Storage, N, M, W);@*/
            /*@combine_list_update(prefix, x, suffix, W, contents);@*/
        #else
            ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
        #endif
        /*@mul_mono_l(W, N-1, M);@*/
        pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */

        if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            /*@div_leq(N, W+1, M);@*/ /* now we know W == N-1 so (W+1)%N == 0 */
            pxQueue->pcWriteTo = pxQueue->pcHead;
        }
        else
        {
            /*@{
                div_lt(W+1, N, M); // now we know W+1 < N
                mod_lt(W+1, N);    // so, W+1 == (W+1)%N
                note(pxQueue->pcWriteTo == Storage + ((W + 1) * M));
                note( Storage + ((W + 1) * M) == Storage + (((W + 1) % N) * M));
            }@*/
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        #ifdef VERIFAST /*< void cast of unused return value */
            /*@split_element(Storage, N, M, R);@*/
            /*@assert
                buffer(Storage, R, M, ?prefix) &*&
                chars(Storage + R * M, M, _) &*&
                buffer(Storage + (R + 1) * M, (N-1-R), M, ?suffix);@*/
            memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
            /*@join_element(Storage, N, M, R);@*/
            /*@combine_list_update(prefix, x, suffix, R, contents);@*/
        #else
            ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes.  Assert checks null pointer only used when length is 0. */
        #endif
        pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;

        if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
            /*@{ div_leq(R-1, 0, M); leq_bound(R, 0); }@*/
            /*@assert R == 0;@*/
            /*@assert pxQueue->u.xQueue.pcReadFrom == Storage + (N-1) * M;@*/
        }
        else
        {
            /*@assert 0 < R;@*/
            /*@assert pxQueue->u.xQueue.pcReadFrom == Storage + (R-1) * M;@*/
            mtCOVERAGE_TEST_MARKER();
        }

        /*@
        if (R == 0)
        {
            mod_plus(N, (K+1), N); mod_same(N); mod_mod(K+1, N);
            assert W == ((N-1) + 1 + (K+1)) % N;
        }
        @*/
        if( xPosition == queueOVERWRITE )
        {
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* An item is not being added but overwritten, so subtract
                 * one from the recorded number of items in the queue so when
                 * one is added again below the number of recorded items remains
                 * correct. */
                --uxMessagesWaiting;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

    /*@
    if (xPosition == queueSEND_TO_BACK)
    {
        enq_lemma(K, (R+1)%N, contents, abs, x);
        mod_plus_one(W, R + 1 + K, N);
        mod_plus_distr(R+1, K, N);
    }
    else if (xPosition == queueSEND_TO_FRONT)
    {
        front_enq_lemma(K, R, contents, abs, x);
        if (0 < R)
        {
            mod_lt(R, N);
        }
    }
    @*/
    return xReturn;
}
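The send-to-front path above runs the read cursor backwards: step back one item, wrapping from the first slot to the last. The ensures clause's (R == 0 ? N-1 : R-1) case split is exactly this arithmetic; a stand-alone sketch with plain indices (helper name ours, not the kernel's):

#include <stddef.h>

/* Step a ring-buffer index back by one slot, wrapping 0 -> N-1, as the
 * pcReadFrom adjustment above does with pointers. */
static size_t uxStepBack( size_t R, size_t N )
{
    return ( R == 0 ) ? ( N - 1 ) : ( R - 1 );
}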
@ -21,29 +21,29 @@
*/

#include "proof/queue.h"
#define taskENTER_CRITICAL()    setInterruptMask( pxQueue )
#define taskEXIT_CRITICAL()     clearInterruptMask( pxQueue )

static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
{
    BaseType_t xReturn;

    taskENTER_CRITICAL();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    taskEXIT_CRITICAL();

    return xReturn;
}
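The assert/close pair inside the critical section is the proof idiom used throughout these files: masking interrupts makes the queue predicate available, the assert binds its ghost arguments, and the close re-packages the predicate unchanged before interrupts are re-enabled. A hypothetical read-only query written in the same shape (not one of the verified kernel functions):

static UBaseType_t prvPeekMessagesWaiting( const Queue_t * pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
{
    UBaseType_t uxReturn;

    taskENTER_CRITICAL();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    uxReturn = pxQueue->uxMessagesWaiting;
    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    taskEXIT_CRITICAL();

    return uxReturn;
}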
@ -21,30 +21,29 @@
*/

#include "proof/queue.h"
#define taskENTER_CRITICAL()    setInterruptMask( pxQueue )
#define taskEXIT_CRITICAL()     clearInterruptMask( pxQueue )

static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
{
    BaseType_t xReturn;

    taskENTER_CRITICAL();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    taskEXIT_CRITICAL();

    return xReturn;
}
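prvIsQueueEmpty and prvIsQueueFull are the two ends of the same occupancy check: with K of N slots occupied, empty is K == 0 and full is K == N. Restated over plain counters (hypothetical helpers, for illustration only):

#include <stdbool.h>

static bool bIsEmpty( unsigned int K )                { return K == 0; }
static bool bIsFull( unsigned int K, unsigned int N ) { return K == N; }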
@ -23,45 +23,47 @@
#include "proof/queue.h"

/* In this case we cannot wrap the macro in a function call to give a function
 * contract because we require annotations within the macro body, which is not
 * supported by VeriFast */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()

void wrapper_prvLockQueue( QueueHandle_t xQueue )

/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]queuelock(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    [1/2]xQueue->locked |-> ?m &*&
    mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
    queue_locked_invariant(xQueue)();@*/
{
    taskENTER_CRITICAL();
    /*@open queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( ( xQueue )->cRxLock == queueUNLOCKED )
        {
            ( xQueue )->cRxLock = queueLOCKED_UNMODIFIED;
        }

        if( ( xQueue )->cTxLock == queueUNLOCKED )
        {
            ( xQueue )->cTxLock = queueLOCKED_UNMODIFIED;
        }
    }
    /*@close queue(xQueue, Storage, N, M, W, R, K, true, abs);@*/
    taskEXIT_CRITICAL();
    #ifdef VERIFAST /*< ghost action */
        mutex_acquire( xQueue->locked );
    #endif
}
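The wrapper above exists only because VeriFast cannot attach a contract directly to a macro: the macro body is replayed inside a function so the pre/postcondition can be stated once and reused at call sites. The same trick on a trivial unrelated macro (struct, names, and contract are ours, for illustration):

#include <limits.h>

struct counter { int value; };

#define COUNT_UP( pxCounter )    ( ( pxCounter )->value = ( pxCounter )->value + 1 )

/* Function wrapper giving the macro's effect a VeriFast contract. */
void wrapper_COUNT_UP( struct counter * pxCounter )
/*@requires pxCounter->value |-> ?v &*& v < INT_MAX;@*/
/*@ensures pxCounter->value |-> v + 1;@*/
{
    COUNT_UP( pxCounter );
}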
@ -21,142 +21,142 @@
*/

#include "proof/queue.h"
#define taskENTER_CRITICAL()    setInterruptMask( pxQueue )
#define taskEXIT_CRITICAL()     clearInterruptMask( pxQueue )

/* VeriFast: we make one major change. We merge the critical regions for
 * decrementing `cTxLock` and `cRxLock`. */

static void prvUnlockQueue( Queue_t * const pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]pxQueue->locked |-> ?m &*&
    mutex_held(m, queue_locked_invariant(pxQueue), currentThread, 1/2) &*&
    queue_locked_invariant(pxQueue)();@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr) &*&
    [1/2]queuelock(pxQueue);@*/
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

    /* The lock counts contain the number of extra data items placed or
     * removed from the queue while the queue was locked.  When a queue is
     * locked items can be added or removed, but the event lists cannot be
     * updated. */
    taskENTER_CRITICAL();
    /*@open queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, _, ?abs);@*/
    {
        int8_t cTxLock = pxQueue->cTxLock;

        /* See if data was added to the queue while it was locked. */
        while( cTxLock > queueLOCKED_UNMODIFIED )
        /*@invariant queuelists(pxQueue);@*/
        {
            /* Data was posted while the queue was locked.  Are any tasks
             * blocked waiting for data to become available? */
            #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting to
                             * the queue set caused a higher priority task to unblock.
                             * A context switch is required. */
                            vTaskMissedYield();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* Tasks that are removed from the event list will get
                         * added to the pending ready list as the scheduler is still
                         * suspended. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so record that a
                                 * context switch is required. */
                                vTaskMissedYield();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            break;
                        }
                    }
                }
            #else /* configUSE_QUEUE_SETS */
                {
                    /* Tasks that are removed from the event list will get added to
                     * the pending ready list as the scheduler is still suspended. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that
                             * a context switch is required. */
                            vTaskMissedYield();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            #endif /* configUSE_QUEUE_SETS */

            --cTxLock;
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    #ifndef VERIFAST /*< ***merge cTxLock and cRxLock critical regions*** */
        taskEXIT_CRITICAL();

        /* Do the same for the Rx lock. */
        taskENTER_CRITICAL();
    #endif
    {
        int8_t cRxLock = pxQueue->cRxLock;

        while( cRxLock > queueLOCKED_UNMODIFIED )
        /*@invariant queuelists(pxQueue);@*/
        {
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    vTaskMissedYield();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                --cRxLock;
            }
            else
            {
                break;
            }
        }

        pxQueue->cRxLock = queueUNLOCKED;
    }
    /*@close queue(pxQueue, Storage, N, M, W, R, K, false, abs);@*/
    taskEXIT_CRITICAL();
    #ifdef VERIFAST /*< ghost action */
        mutex_release( pxQueue->locked );
    #endif
}
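The loops above replay deferred wake-ups: cTxLock counts how many items were posted while the queue was locked, and each count unblocks at most one waiting receiver (symmetrically for cRxLock and senders). A small model of the counting half of that protocol (hypothetical helpers, not kernel code):

#include <stdint.h>

#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )

/* Model: an ISR posting to a locked queue bumps the lock count instead
 * of touching the event lists directly. */
static void vPostWhileLocked( int8_t * pcTxLock )
{
    if( *pcTxLock != queueUNLOCKED ) /* queue is locked */
    {
        *pcTxLock = ( int8_t ) ( *pcTxLock + 1 );
    }
}

/* Model: unlocking replays one deferred wake-up per count. */
static int iDrainTxLock( int8_t * pcTxLock )
{
    int iWakeUps = 0;

    while( *pcTxLock > queueLOCKED_UNMODIFIED )
    {
        ++iWakeUps; /* stands in for xTaskRemoveFromEventList() */
        --( *pcTxLock );
    }

    *pcTxLock = queueUNLOCKED;
    return iWakeUps;
}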
@ -23,46 +23,47 @@
#include "proof/queue.h"

/* It may seem that the read of `pxQueue->uxMessagesWaiting` is required to be
 * contained in a critical region to be thread-safe. However, it is impossible for
 * this read to be involved in a data race due to the atomicity mechanism used by
 * tasks and ISRs: masking and enabling interrupts. If we assume (1) a
 * uniprocessor system and (2) that higher priority ISRs never call queue API
 * functions, then masking interrupts ensures *strong isolation*, meaning critical
 * regions protected by interrupt masking/enabling are isolated from other
 * critical regions and code outside of critical regions. */

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*& result == K;@*/
{
    UBaseType_t uxReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );
    uxReturn = pxQueue->uxMessagesWaiting;

    return uxReturn;
}

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr);@*/
{
    UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        /*@assert queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
        /*@close queue(xQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
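The strong-isolation argument above is easiest to see against a model of the port primitives: on a uniprocessor, masking interrupts removes every possible interleaving, so a masked region cannot race with any other region or with code between regions. A deliberately simplified model (hypothetical, not any real FreeRTOS port):

#include <stdint.h>

/* Uniprocessor model: a single interrupt-enable flag. While interrupts
 * are masked nothing can preempt the running task; under assumptions
 * (1) and (2) above, that gives masked regions exclusive access to the
 * queue structure, and a lone word-sized read such as uxMessagesWaiting
 * cannot observe a partially written value. */
static volatile uint32_t ulInterruptsEnabled = 1;

static void setInterruptMask( void )   { ulInterruptsEnabled = 0; }
static void clearInterruptMask( void ) { ulInterruptsEnabled = 1; }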
@ -26,23 +26,24 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr);@*/
{
    UBaseType_t uxReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    /*@assert queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
        /*@assert uxReturn == N - K;@*/
    }
    /*@close queue(xQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
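The proof's uxReturn == N - K assertion is plain occupancy arithmetic; a worked instance with hypothetical numbers:

#include <assert.h>

int main( void )
{
    unsigned int uxLength = 5;          /* N: total slots */
    unsigned int uxMessagesWaiting = 2; /* K: occupied slots */

    assert( uxLength - uxMessagesWaiting == 3 ); /* N - K spaces free */
    return 0;
}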
@ -21,60 +21,60 @@
*/

#include "proof/queue.h"
#define configSUPPORT_DYNAMIC_ALLOCATION    1
#define configSUPPORT_STATIC_ALLOCATION     0

void vQueueDelete( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
    queuelists(xQueue) &*&
    xQueue->irqMask |-> _ &*&
    xQueue->schedulerSuspend |-> _ &*&
    xQueue->locked |-> _;@*/
/*@ensures true;@*/
{
    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );
    traceQUEUE_DELETE( pxQueue );

    #if ( configQUEUE_REGISTRY_SIZE > 0 )
        {
            vQueueUnregisterQueue( pxQueue );
        }
    #endif

    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
        {
            /* The queue can only have been allocated dynamically - free it
             * again. */
            vPortFree( pxQueue );
            #ifdef VERIFAST /*< leak ghost state on deletion */
                /*@leak buffer(_, _, _, _);@*/
                /*@leak malloc_block(_, _);@*/
            #endif
        }
    #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
        {
            /* The queue could have been allocated statically or dynamically, so
             * check before attempting to free the memory. */
            if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
            {
                vPortFree( pxQueue );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
        {
            /* The queue must have been statically allocated, so is not going to be
             * deleted.  Avoid compiler warnings about the unused parameter. */
            ( void ) pxQueue;
        }
    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
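Under this proof's configuration (dynamic allocation on, static off) the preprocessor ladder above collapses to an unconditional vPortFree. The decision it encodes, restated as runtime logic with hypothetical flags:

#include <stdbool.h>

/* Runtime restatement of the compile-time ladder above. */
static bool xShouldFreeQueue( bool xDynamicSupported,
                              bool xStaticSupported,
                              bool xStaticallyAllocated )
{
    if( xDynamicSupported && !xStaticSupported )
    {
        return true;                  /* can only have been pvPortMalloc'd */
    }

    if( xDynamicSupported && xStaticSupported )
    {
        return !xStaticallyAllocated; /* must check the marker byte */
    }

    return false;                     /* static only: nothing to free */
}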
@ -23,256 +23,263 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]queuesuspend(xQueue) &*&
    chars(pvItemToQueue, M, ?x) &*&
    (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    [1/2]queuesuspend(xQueue) &*&
    chars(pvItemToQueue, M, x);@*/
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;
    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
        configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
            {
                configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
            }
        #endif
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    /*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
        [1/2]queuesuspend(xQueue) &*&
        chars(pvItemToQueue, M, x) &*&
        u_integer(&xTicksToWait, _) &*&
        (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1)) &*&
        xTIME_OUT(&xTimeOut);@*/
    {
        taskENTER_CRITICAL();
        {
            /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
            /* Is there room on the queue now?  The running task must be the
             * highest priority task wanting to access the queue.  If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                /* VeriFast: we do not verify this configuration option */
                #if ( configUSE_QUEUE_SETS == 1 )
                    {
                        const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                        xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                        if( pxQueue->pxQueueSetContainer != NULL )
                        {
                            if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                            {
                                /* Do not notify the queue set as an existing item
                                 * was overwritten in the queue so the number of items
                                 * in the queue has not changed. */
                                mtCOVERAGE_TEST_MARKER();
                            }
                            else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                            {
                                /* The queue is a member of a queue set, and posting
                                 * to the queue set caused a higher priority task to
                                 * unblock.  A context switch is required. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            /* If there was a task waiting for data to arrive on the
                             * queue then unblock it now. */
                            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                            {
                                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                                {
                                    /* The unblocked task has a priority higher than
                                     * our own so yield immediately.  Yes it is ok to
                                     * do this from within the critical section - the
                                     * kernel takes care of that. */
                                    queueYIELD_IF_USING_PREEMPTION();
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else if( xYieldRequired != pdFALSE )
                            {
                                /* This path is a special case that will only get
                                 * executed if the task was holding multiple mutexes
                                 * and the mutexes were given back in an order that is
                                 * different to that in which they were taken. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                #else /* configUSE_QUEUE_SETS */
                    {
                        /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                        xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                        /* If there was a task waiting for data to arrive on the
                         * queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                 * our own so yield immediately.  Yes it is ok to do
                                 * this from within the critical section - the kernel
                                 * takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                             * executed if the task was holding multiple mutexes and
                             * the mutexes were given back in an order that is
                             * different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                #endif /* configUSE_QUEUE_SETS */

                /*@
                if (xCopyPosition == queueSEND_TO_BACK)
                {
                    close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
                }
                else if (xCopyPosition == queueSEND_TO_FRONT)
                {
                    close queue(pxQueue, Storage, N, M, W, (R == 0 ? (N-1) : (R-1)), (K+1), is_locked, cons(x, abs));
                }
                else if (xCopyPosition == queueOVERWRITE)
                {
                    close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
                }
                @*/
                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        /*@close exists(pxQueue);@*/
        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                /*@open queue_locked_invariant(xQueue)();@*/
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                 * event list.  It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list. */
                /*@close queue_locked_invariant(xQueue)();@*/
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in a ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                /*@close exists(pxQueue);@*/
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                #ifdef VERIFAST /*< void cast of unused return value */
                    /*@close exists(pxQueue);@*/
|
||||
xTaskResumeAll();
|
||||
/*@close exists(pxQueue);@*/
|
||||
xTaskResumeAll();
|
||||
#else
|
||||
( void ) xTaskResumeAll();
|
||||
( void ) xTaskResumeAll();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* The timeout has expired. */
|
||||
prvUnlockQueue( pxQueue );
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* The timeout has expired. */
|
||||
prvUnlockQueue( pxQueue );
|
||||
#ifdef VERIFAST /*< void cast of unused return value */
|
||||
/*@close exists(pxQueue);@*/
|
||||
xTaskResumeAll();
|
||||
/*@close exists(pxQueue);@*/
|
||||
xTaskResumeAll();
|
||||
#else
|
||||
( void ) xTaskResumeAll();
|
||||
( void ) xTaskResumeAll();
|
||||
#endif
|
||||
|
||||
traceQUEUE_SEND_FAILED( pxQueue );
|
||||
return errQUEUE_FULL;
|
||||
}
|
||||
} /*lint -restore */
|
||||
traceQUEUE_SEND_FAILED( pxQueue );
|
||||
return errQUEUE_FULL;
|
||||
}
|
||||
} /*lint -restore */
|
||||
}
|
||||
|
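
For context, a minimal caller sketch of the blocking send path verified above. This is illustrative only and not part of the verified sources; the handle xEventQueue and the task are assumptions, and xQueueSend is the standard FreeRTOS macro over xQueueGenericSend with queueSEND_TO_BACK.

/* Illustrative sketch only - assumes xEventQueue was created elsewhere,
 * e.g. with xQueueCreate( 8, sizeof( uint32_t ) ). */
static QueueHandle_t xEventQueue;

static void vProducerTask( void * pvParameters )
{
    uint32_t ulEvent = 0;

    ( void ) pvParameters;

    for( ; ; )
    {
        ulEvent++;

        /* The fast path above returns pdPASS immediately; taking the
         * timeout path at the end of xQueueGenericSend yields
         * errQUEUE_FULL. */
        if( xQueueSend( xEventQueue, &ulEvent, ( TickType_t ) 10 ) != pdPASS )
        {
            /* The queue stayed full for the whole 10-tick block time. */
        }
    }
}
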
@ -23,202 +23,213 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition )
/*@requires
    [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
    chars(pvItemToQueue, M, ?x) &*&
    integer(pxHigherPriorityTaskWoken, _) &*&
    (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
    [1/2]queuehandle(xQueue, N, M, is_isr) &*&
    chars(pvItemToQueue, M, x) &*&
    integer(pxHigherPriorityTaskWoken, _);@*/
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
        configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #endif

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority. FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
     * in the queue. Also don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
             * semaphore or mutex. That means prvCopyDataToQueue() cannot result
             * in a task disinheriting a priority and prvCopyDataToQueue() can be
             * called here even though the disinherit function does not check if
             * the scheduler is suspended before accessing the ready lists. */
            #ifdef VERIFAST /*< void cast of unused return value */
                /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
            #else
                ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
            #endif
            /*@open queue(pxQueue, _, N, M, _, _, _, _, _);@*/

            /* The event list is not altered if the queue is locked. This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                /* VeriFast: we do not verify this configuration option */
                #if ( configUSE_QUEUE_SETS == 1 )
                    {
                        if( pxQueue->pxQueueSetContainer != NULL )
                        {
                            if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                            {
                                /* Do not notify the queue set as an existing item
                                 * was overwritten in the queue so the number of items
                                 * in the queue has not changed. */
                                mtCOVERAGE_TEST_MARKER();
                            }
                            else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                            {
                                /* The queue is a member of a queue set, and posting
                                 * to the queue set caused a higher priority task to
                                 * unblock. A context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                            {
                                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                                {
                                    /* The task waiting has a higher priority so
                                     * record that a context switch is required. */
                                    if( pxHigherPriorityTaskWoken != NULL )
                                    {
                                        *pxHigherPriorityTaskWoken = pdTRUE;
                                    }
                                    else
                                    {
                                        mtCOVERAGE_TEST_MARKER();
                                    }
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                #else /* configUSE_QUEUE_SETS */
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so record that a
                                 * context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        /* Not used in this path. */
                        #ifndef VERIFAST /*< void cast of unused var */
                            ( void ) uxPreviousMessagesWaiting;
                        #endif
                    }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                configASSERT( cTxLock != queueINT8_MAX );

                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
            /*@
            if (xCopyPosition == queueSEND_TO_BACK)
            {
                close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
            }
            else if (xCopyPosition == queueSEND_TO_FRONT)
            {
                if (R == 0)
                {
                    close queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs));
                }
                else
                {
                    close queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs));
                }
            }
            else if (xCopyPosition == queueOVERWRITE)
            {
                close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
            }
            @*/
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
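
Likewise, a hypothetical ISR driving the path above (not from this diff; xEventQueue is the same assumed handle, and xQueueSendFromISR is the standard macro over xQueueGenericSendFromISR with queueSEND_TO_BACK):

/* Illustrative sketch only. */
void vExampleISR( void )
{
    uint32_t ulReading = 0; /* value captured from a hypothetical device */
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Never blocks: returns errQUEUE_FULL when no space is available,
     * matching the else branch above. */
    ( void ) xQueueSendFromISR( xEventQueue, &ulReading, &xHigherPriorityTaskWoken );

    /* Request a context switch on ISR exit if the send unblocked a
     * higher priority task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
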
@ -25,24 +25,26 @@
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
           result == ((K == 0) ? pdTRUE : pdFALSE);@*/
{
    BaseType_t xReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );

    if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
@ -25,24 +25,26 @@
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
           result == ((K == N) ? pdTRUE : pdFALSE);@*/
{
    BaseType_t xReturn;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif

    configASSERT( pxQueue );

    if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
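
The two predicates above are pure queries on the abstract queue state: their contracts pin the result to K == 0 (empty) and K == N (full) respectively. A hypothetical ISR exercising both (illustrative only; xEventQueue is an assumed handle):

/* Illustrative sketch only. */
void vPollingISR( void )
{
    if( xQueueIsQueueFullFromISR( xEventQueue ) == pdFALSE )
    {
        /* Space is available, so a send from this ISR cannot fail. */
    }

    if( xQueueIsQueueEmptyFromISR( xEventQueue ) != pdFALSE )
    {
        /* Nothing to drain on this interrupt. */
    }
}
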
@ -23,184 +23,186 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueuePeek( QueueHandle_t xQueue,
                       void * const pvBuffer,
                       TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
            [1/2]queuesuspend(xQueue) &*&
            chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
           [1/2]queuesuspend(xQueue) &*&
           (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t * pcOriginalReadPosition;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        /* Check the pointer is not NULL. */
        configASSERT( ( pxQueue ) );

        /* The buffer into which data is received can only be NULL if the data size
         * is zero (so no data is copied into the buffer). */
        configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

        /* Cannot block if the scheduler is suspended. */
        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
            {
                configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
            }
        #endif
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself. This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    /*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
                 [1/2]queuesuspend(xQueue) &*&
                 chars(pvBuffer, M, x) &*&
                 u_integer(&xTicksToWait, _) &*&
                 xTIME_OUT(&xTimeOut);@*/
    {
        taskENTER_CRITICAL();
        /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now? To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Remember the read position so it can be reset after the data
                 * is read from the queue as this function is only peeking the
                 * data, not removing it. */
                pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;

                /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_PEEK( pxQueue );

                /* The data is not being removed, so reset the read pointer. */
                pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;

                /* The data is being left in the queue, so see if there are
                 * any other tasks waiting for the data. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than this task. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                    taskEXIT_CRITICAL();
                    traceQUEUE_PEEK_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure ready to enter the blocked
                     * state. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        /*@close exists<QueueHandle_t>(pxQueue);@*/
        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* Timeout has not expired yet, check to see if there is data in the
             * queue now, and if not enter the Blocked state to wait for data. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
                /*@open queue_locked_invariant(xQueue)();@*/
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                /*@close queue_locked_invariant(xQueue)();@*/
                prvUnlockQueue( pxQueue );

                /*@close exists<QueueHandle_t>(pxQueue);@*/
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* There is data in the queue now, so don't enter the blocked
                 * state, instead return to try and obtain the data. */
                prvUnlockQueue( pxQueue );
                #ifdef VERIFAST /*< void cast of unused return value */
                    /*@close exists<QueueHandle_t>(pxQueue);@*/
                    xTaskResumeAll();
                #else
                    ( void ) xTaskResumeAll();
                #endif
            }
        }
        else
        {
            /* The timeout has expired. If there is still no data in the queue
             * exit, otherwise go back and try to read the data again. */
            prvUnlockQueue( pxQueue );
            #ifdef VERIFAST /*< void cast of unused return value */
                /*@close exists<QueueHandle_t>(pxQueue);@*/
                xTaskResumeAll();
            #else
                ( void ) xTaskResumeAll();
            #endif

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_PEEK_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
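
A task-level peek sketch (illustrative only; xEventQueue as before). The function saves and restores pcReadFrom, so the contract's chars(pvBuffer, M, _) is established without consuming the head item:

/* Illustrative sketch only. */
static void vInspectorTask( void * pvParameters )
{
    uint32_t ulNext;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Copies the head item into ulNext but leaves it queued; a later
         * xQueueReceive still returns the same item. */
        if( xQueuePeek( xEventQueue, &ulNext, pdMS_TO_TICKS( 100 ) ) == pdPASS )
        {
            /* ulNext mirrors the head of the queue. */
        }
    }
}
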
@ -23,67 +23,68 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
                              void * const pvBuffer )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
            chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
           (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    int8_t * pcOriginalReadPosition;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
        configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
    #endif

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority. FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
             * actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
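
The ISR variant is the non-blocking counterpart; per the assertions above it must not be used on a semaphore (uxItemSize != 0 is asserted). A hypothetical use (illustrative only; xCommandQueue is an assumed handle):

/* Illustrative sketch only. */
void vCommandISR( void )
{
    uint32_t ulHead;

    /* pdPASS only if at least one item is stored; the item is inspected
     * without being removed. */
    if( xQueuePeekFromISR( xCommandQueue, &ulHead ) == pdPASS )
    {
        /* Act on ulHead; the queue contents are unchanged. */
    }
}
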
@ -23,182 +23,184 @@
#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
            [1/2]queuesuspend(xQueue) &*&
            chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
           [1/2]queuesuspend(xQueue) &*&
           (result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        /* Check the pointer is not NULL. */
        configASSERT( ( pxQueue ) );

        /* The buffer into which data is received can only be NULL if the data size
         * is zero (so no data is copied into the buffer). */
        configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

        /* Cannot block if the scheduler is suspended. */
        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
            {
                configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
            }
        #endif
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself. This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    /*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
                 [1/2]queuesuspend(xQueue) &*&
                 chars(pvBuffer, M, x) &*&
                 u_integer(&xTicksToWait, _) &*&
                 xTIME_OUT(&xTimeOut);@*/
    {
        taskENTER_CRITICAL();
        /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now? To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                /*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
                /*@assert
                    pxQueue->pcHead |-> ?buffer &*&
                    buffer(buffer, N, M, ?contents);@*/
                /*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/

                /* There is now space in the queue, were any tasks waiting to
                 * post to the queue? If so, unblock the highest priority waiting
                 * task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /*@close queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs));@*/
                /*@assert chars(pvBuffer, M, head(abs));@*/
                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        /*@close exists(pxQueue);@*/
        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired. If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                /*@open queue_locked_invariant(xQueue)();@*/
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                /*@close queue_locked_invariant(xQueue)();@*/
                prvUnlockQueue( pxQueue );

                /*@close exists(pxQueue);@*/
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again. Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                #ifdef VERIFAST /*< void cast of unused return value */
                    /*@close exists(pxQueue);@*/
                    xTaskResumeAll();
                #else
                    ( void ) xTaskResumeAll();
                #endif
            }
        }
        else
        {
            /* Timed out. If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            #ifdef VERIFAST /*< void cast of unused return value */
                /*@close exists(pxQueue);@*/
                xTaskResumeAll();
            #else
                ( void ) xTaskResumeAll();
            #endif

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
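
A consumer sketch for the receive path above (illustrative only; xEventQueue is an assumed handle). On pdPASS the ensures clause guarantees the buffer holds the removed head element, head(abs) in the proof:

/* Illustrative sketch only. */
static void vConsumerTask( void * pvParameters )
{
    uint32_t ulReceived;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Block until an item arrives (portMAX_DELAY assumes
         * INCLUDE_vTaskSuspend == 1, the usual configuration). */
        if( xQueueReceive( xEventQueue, &ulReceived, portMAX_DELAY ) == pdPASS )
        {
            /* Process ulReceived. */
        }
    }
}
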
@ -23,112 +23,114 @@
|
||||
#include "proof/queue.h"
|
||||
#include "proof/queuecontracts.h"
|
||||
|
||||
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
|
||||
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
|
||||
void * const pvBuffer,
|
||||
BaseType_t * const pxHigherPriorityTaskWoken )
|
||||
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
|
||||
chars(pvBuffer, M, ?x) &*&
|
||||
pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _);@*/
|
||||
chars(pvBuffer, M, ?x) &*&
|
||||
pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _);@*/
|
||||
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
|
||||
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x)) &*&
|
||||
(pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _));@*/
|
||||
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x)) &*&
|
||||
(pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _));@*/
|
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    #ifdef VERIFAST /*< const pointer declaration */
    Queue_t * pxQueue = xQueue;
    #else
    Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    #endif

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority. FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
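    /* As a concrete illustration (the values are hypothetical, not taken from
     * this repository): on a Cortex-M port with configPRIO_BITS == 4, a
     * FreeRTOSConfig.h might define
     *     #define configKERNEL_INTERRUPT_PRIORITY        ( 15 << ( 8 - configPRIO_BITS ) )
     *     #define configMAX_SYSCALL_INTERRUPT_PRIORITY   (  5 << ( 8 - configPRIO_BITS ) )
     * so an interrupt at hardware priority 5..15 (numerically higher means
     * logically lower on Cortex-M) may call FromISR functions such as this
     * one, while an interrupt at priority 0..4 must not call into FreeRTOS at
     * all; portASSERT_IF_INTERRUPT_PRIORITY_INVALID() traps that misuse. */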

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* Cannot block in an ISR, so check there is data available. */
        if( uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            const int8_t cRxLock = pxQueue->cRxLock;

            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            /*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
            pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
            /*@assert buffer(Storage, N, M, ?contents);@*/
            /*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/
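            /* Ghost reasoning, informally: prvCopyDataFromQueue advances the
             * read index, so the queue predicate is reopened at (R+1)%N; the
             * deq_lemma then relates the concrete ring-buffer contents to the
             * abstract list abs, establishing that the bytes just copied into
             * pvBuffer are head(abs), which the later assert makes explicit. */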

            /* If the queue is locked the event list will not be modified.
             * Instead update the lock count so the task that unlocks the queue
             * will know that an ISR has removed data while the queue was
             * locked. */
            if( cRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                         * force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was removed while it was locked. */
                configASSERT( cRxLock != queueINT8_MAX );

                pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
            }
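            /* For context, a simplified sketch (not the verbatim kernel
             * source) of how prvUnlockQueue later drains this lock count,
             * waking one blocked sender per removal recorded above:
             *
             *     int8_t cRxLock = pxQueue->cRxLock;
             *     while( cRxLock > queueLOCKED_UNMODIFIED )
             *     {
             *         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
             *         {
             *             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
             *             {
             *                 vTaskMissedYield();
             *             }
             *             --cRxLock;
             *         }
             *         else
             *         {
             *             break;
             *         }
             *     }
             *     pxQueue->cRxLock = queueUNLOCKED;
             */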

            /*@close queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs));@*/
            /*@assert chars(pvBuffer, M, head(abs));@*/
            xReturn = pdPASS;
        }
        else
        {
            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}

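The contract above is what a caller relies on. As a minimal sketch of typical
use (illustrative, not part of the verified sources; the ISR name, the queue
handle xExampleQueue and the uint32_t item type are assumptions), an interrupt
handler drains one item and requests a context switch if a higher-priority
sender was woken:

    /* Hypothetical ISR using the verified function. */
    void vExampleISR( void )
    {
        uint32_t ulItem; /* assumes the queue was created with item size M == sizeof( uint32_t ) */
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        if( xQueueReceiveFromISR( xExampleQueue, &ulItem, &xHigherPriorityTaskWoken ) == pdPASS )
        {
            /* ulItem now holds the oldest item in the queue: head(abs). */
        }

        /* Yield at ISR exit if a waiting sender of higher priority was readied. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
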
@ -1,3 +1,3 @@
#!/bin/bash -eu

NO_COVERAGE=1 EXTRA_VERIFAST_ARGS=-stats make list queue | grep overhead: | sort | uniq
NO_COVERAGE=1 EXTRA_VERIFAST_ARGS=-stats make queue | grep overhead: | sort | uniq

FreeRTOS/Test/VeriFast/scripts/extract.py (executable file, 73 lines)
@ -0,0 +1,73 @@
#!/usr/bin/env python3
from __future__ import print_function
import sys
from enum import Enum


class Extractor(object):
    @staticmethod
    def __parse_ctags(tags_filename):
        # Build a map from function name to (source file, line number) out of
        # a ctags file whose ex commands are plain line numbers.
        def convert_excmd(excmd):
            assert excmd.endswith(';"')
            linenum = excmd[:-2]  # remove ';"'
            return int(linenum)
        result = {}
        with open(tags_filename) as f:
            for line in f:
                if line.startswith('!'):
                    continue  # skip the ctags metadata header
                parts = line.split('\t')
                funcname = parts[0]
                funcfile = parts[1]
                linenum = convert_excmd(parts[2])
                result[funcname] = (funcfile, linenum)
        return result

    def __init__(self, tags_filename):
        self.map = Extractor.__parse_ctags(tags_filename)

    class State(Enum):
        INIT = 0
        HEAD = 1
        BODY = 2

    def text_of_funcname(self, funcname):
        # Return the lines of funcname's definition: from the line ctags
        # points at, through the function head, until brace counting shows
        # the body has closed.
        if funcname not in self.map:
            return []
        funcfile, linenum = self.map[funcname]
        result = []
        state, bracecount = Extractor.State.INIT, 0
        with open(funcfile) as f:
            for i, line in enumerate(f, start=1):  # ctags counts linenums from 1
                if state == Extractor.State.INIT and linenum <= i:
                    state = Extractor.State.HEAD
                if state == Extractor.State.HEAD:
                    result.append(line)
                    bracecount += line.count('{')
                    bracecount -= line.count('}')
                    if '{' in line:
                        state = Extractor.State.BODY
                    continue
                if state == Extractor.State.BODY:
                    result.append(line)
                    bracecount += line.count('{')
                    bracecount -= line.count('}')
                    if bracecount == 0:
                        break
        return result


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: %s <tagfile> <funcname>" % sys.argv[0])
        sys.exit(1)
    tag_filename = sys.argv[1]
    funcname = sys.argv[2]
    extractor = Extractor(tag_filename)  # use the tag file named on the command line
    result = extractor.text_of_funcname(funcname)
    print(''.join(result))
    sys.exit(0)
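
Assuming a tags file generated with numeric ex commands (for example by
running "ctags -n" over the proof sources, so that each entry's third
tab-separated field is a line number ending in ';"'), an invocation such as
"extract.py tags xQueueReceiveFromISR" prints that function's full text,
which makes it straightforward to diff an extracted function body against
another copy of the same function.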