-// LTTng ltt-tracer.c atomic lockless buffering scheme Promela model v1
+// LTTng ltt-tracer.c atomic lockless buffering scheme Promela model v2
// Created for the Spin validator.
// Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
-// June 2008
+// October 2008
// TODO : create test cases that will generate an overflow on the offset and
// counter type. Counter types smaller than a byte should be used.
#define NUMPROCS 4
// NUMPROCS 3 : does not cause event loss because buffers are big enough.
-//#define NUMPROCS 3
+// #define NUMPROCS 3
// e.g. 3 events, 1 switch, read 1 subbuffer
#define NUMSWITCH 1
// Reader counters
byte read_off = 0;
-byte retrieve_count[NR_SUBBUFS];
byte events_lost = 0;
byte refcount = 0;
tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
if
- :: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1
- :: else -> skip
+ :: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
+ tmp_commit
+ -> deliver = 1
+ :: else
+ -> skip
fi;
refcount = refcount - 1;
}
tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
if
- :: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1;
- :: else -> skip
+ :: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
+ tmp_commit
+ -> deliver = 1
+ :: else
+ -> skip
fi;
}
atomic {
proctype reader()
{
byte i, j;
- byte tmp_retrieve;
- byte lwrite_off, lcommit_count;
do
:: (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) > 0
&& (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) < HALF_UCHAR
- && commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
- - retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
- == SUBBUF_SIZE ->
+ && (commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
+ - SUBBUF_SIZE - (((read_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS)
+ == 0) ->
atomic {
i = 0;
do
}
// reading from buffer...
- // Since there is only one reader per buffer at any given time,
- // we don't care about retrieve_count vs read_off ordering :
- // combined use of retrieve_count and read_off are made atomic by a
- // mutex.
atomic {
i = 0;
do
i++
:: i >= SUBBUF_SIZE -> break
od;
- tmp_retrieve = retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
- + SUBBUF_SIZE;
- retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE] = tmp_retrieve;
read_off = read_off + SUBBUF_SIZE;
}
:: read_off >= (NUMPROCS - events_lost) -> break;
do
:: i < NR_SUBBUFS ->
commit_count[i] = 0;
- retrieve_count[i] = 0;
i++
:: i >= NR_SUBBUFS -> break
od;
commit_sum = commit_sum + commit_count[j];
// Obsolete invariant from v1: commit_count[j] >= retrieve_count[j].
// retrieve_count was removed in v2, so the assertion below is disabled.
- assert(commit_count[j] - retrieve_count[j] >= 0 &&
- commit_count[j] - retrieve_count[j] < HALF_UCHAR);
+ // assert(commit_count[j] - retrieve_count[j] >= 0 &&
+ // commit_count[j] - retrieve_count[j] < HALF_UCHAR);
j++
:: j >= NR_SUBBUFS -> break
od;