// LTTng ltt-tracer.c atomic lockless buffering scheme Promela model v2
// Created for the Spin validator.
// Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
// October 2008

// TODO : create test cases that will generate an overflow on the offset and
// counter type. Counter types smaller than a byte should be used.

// Promela only has unsigned char, no signed char.
// Because detecting a difference < 0 requires a signed type, but we want
// compactness, we also check whether the values are higher than half of the
// unsigned char range (and consider them negative). The model, by design, does
// not use offsets or counts higher than 127 because we would then have to use
// a larger type (short or int).
#define HALF_UCHAR (255/2)
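// Worked example of this encoding (unsigned byte arithmetic, mod 256): a
// difference such as new_off - read_off = 254 is >= HALF_UCHAR, so it is
// interpreted as a negative value (the reader is ahead of the stale offset)
// rather than as a huge positive distance; a result of, say, 5 is below
// HALF_UCHAR and really means the writer is 5 slots ahead of the reader.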

// NUMPROCS 4 : causes event loss with some reader timings.
// e.g. 3 events, 1 switch, 1 event (lost, buffer full), read 1 subbuffer
#define NUMPROCS 4

// NUMPROCS 3 : does not cause event loss because buffers are big enough.
// #define NUMPROCS 3
// e.g. 3 events, 1 switch, read 1 subbuffer

#define NUMSWITCH 1
#define BUFSIZE 4
#define NR_SUBBUFS 2
#define SUBBUF_SIZE (BUFSIZE / NR_SUBBUFS)
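
// With these defaults the buffer has BUFSIZE = 4 one-byte slots split into
// NR_SUBBUFS = 2 sub-buffers of SUBBUF_SIZE = 2 slots. An offset maps to a
// slot through (off % BUFSIZE) and to a sub-buffer index through
// (off % BUFSIZE) / SUBBUF_SIZE: offsets 0,1,2,3,4,5 use slots 0,1,2,3,0,1
// and sub-buffers 0,0,1,1,0,0.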

// Writer counters
byte write_off = 0;
byte commit_count[NR_SUBBUFS];

// Reader counters
byte read_off = 0;

byte events_lost = 0;
byte refcount = 0;

bool deliver = 0;

// Buffer slot in-use bit. Detects racy use (more than a single process
// accessing a slot at any given step).
bool buffer_use[BUFSIZE];

// Proceed to a sub-buffer switch if needed.
// Used in a periodic timer interrupt to fill and ship the current subbuffer
// to the reader so we can guarantee a steady flow. If a subbuffer is
// completely empty, don't switch.
// Also used as a "finalize" operation to complete the last subbuffer after
// all writers have finished so the last subbuffer can be read by the reader.
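// Worked example with the defaults above: if prev_off = 3, the switch pads
// size = SUBBUF_SIZE - (3 % SUBBUF_SIZE) = 1 slot to reach the sub-buffer
// boundary at new_off = 4, and that padding is committed to sub-buffer
// (3 % BUFSIZE) / SUBBUF_SIZE = 1. If prev_off already sits on a boundary,
// size == SUBBUF_SIZE and the switch is skipped (the sub-buffer is empty).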
proctype switcher()
{
	byte prev_off, new_off, tmp_commit;
	byte size;

cmpxchg_loop:
	atomic {
		prev_off = write_off;
		size = SUBBUF_SIZE - (prev_off % SUBBUF_SIZE);
		new_off = prev_off + size;
		if
		:: (new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR)
				|| size == SUBBUF_SIZE ->
			refcount = refcount - 1;
			goto not_needed;
		:: else -> skip
		fi;
	}
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
	}

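	// Commit accounting: commit_count accumulates across buffer wraps, and
	// ((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS equals SUBBUF_SIZE times
	// the number of completed wraps, so a sub-buffer is fully committed
	// exactly when its commit count reaches that value plus SUBBUF_SIZE
	// (e.g. 0 + 2 on the first pass with the defaults above).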
	atomic {
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		:: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
				tmp_commit
			-> deliver = 1
		:: else
			-> skip
		fi;
		refcount = refcount - 1;
	}
not_needed:
	skip;
}

// tracer
// Writes 1 byte of information into the buffer at the current "write_off"
// position and then increments the commit_count of the sub-buffer the
// information has been written to.
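// The sequence below models the lockless write path: reserve space by moving
// write_off with a cmpxchg-like retry loop, write the payload outside of any
// atomic block, then add the reserved size to the sub-buffer's commit count.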
proctype tracer()
{
	byte size = 1;
	byte prev_off, new_off, tmp_commit;
	byte i, j;

cmpxchg_loop:
	atomic {
		prev_off = write_off;
		new_off = prev_off + size;
	}
	atomic {
		if
		:: new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR ->
			goto lost
		:: else -> skip
		fi;
	}
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
		i = 0;
		do
		:: i < size ->
			assert(buffer_use[(prev_off + i) % BUFSIZE] == 0);
			buffer_use[(prev_off + i) % BUFSIZE] = 1;
			i++
		:: i >= size -> break
		od;
	}

	// writing to buffer...
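	// The payload write is modeled outside of any atomic block so the reader
	// can interleave with it; the buffer_use flags and the assertions here
	// and in the reader catch any overlapping access to a slot.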

	atomic {
		i = 0;
		do
		:: i < size ->
			buffer_use[(prev_off + i) % BUFSIZE] = 0;
			i++
		:: i >= size -> break
		od;
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		:: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
				tmp_commit
			-> deliver = 1
		:: else
			-> skip
		fi;
	}
	atomic {
		goto end;
lost:
		events_lost++;
end:
		refcount = refcount - 1;
	}
}

// reader
// Reads the information sub-buffer per sub-buffer when it is available.
//
// Reads the information as soon as it is ready, or may be delayed by
// an asynchronous delivery. Modeling it as a process ensures that all cases
// (scheduled very quickly or very late, causing event loss) are covered.
// Only one reader per buffer (normally ensured by a mutex). This is modeled
// by using a single reader process.
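// With the defaults above and read_off = 0, the condition below reduces to
// write_off >= 2 and commit_count[0] == 2: the write head must be at least
// one full sub-buffer ahead and that sub-buffer must be completely committed.
// The reader stops once read_off covers every event that was successfully
// written, i.e. NUMPROCS minus events_lost bytes.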
proctype reader()
{
	byte i, j;

	do
	:: (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) > 0
		&& (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) < HALF_UCHAR
		&& (commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
			- SUBBUF_SIZE - (((read_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS)
			== 0) ->
		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				assert(buffer_use[(read_off + i) % BUFSIZE] == 0);
				buffer_use[(read_off + i) % BUFSIZE] = 1;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
		}
		// reading from buffer...

		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				buffer_use[(read_off + i) % BUFSIZE] = 0;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
			read_off = read_off + SUBBUF_SIZE;
		}
	:: read_off >= (NUMPROCS - events_lost) -> break;
	od;
}

// Waits for all tracer and switcher processes to finish before finalizing
// the buffer. Only after that will the reader be allowed to read the
// last subbuffer.
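// refcount is incremented once per tracer and per switcher spawned by init
// (and once more here for the finalize switcher); each of those processes
// decrements it on exit, so refcount == 0 means all writers are done.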
proctype cleaner()
{
	atomic {
		do
		:: refcount == 0 ->
			refcount = refcount + 1;
			run switcher();	// Finalize the last sub-buffer so it can be read.
			break;
		od;
	}
}

init {
	byte i = 0;
	byte j = 0;
	byte sum = 0;
	byte commit_sum = 0;

	atomic {
		i = 0;
		do
		:: i < NR_SUBBUFS ->
			commit_count[i] = 0;
			i++
		:: i >= NR_SUBBUFS -> break
		od;
		i = 0;
		do
		:: i < BUFSIZE ->
			buffer_use[i] = 0;
			i++
		:: i >= BUFSIZE -> break
		od;
		run reader();
		run cleaner();
		i = 0;
		do
		:: i < NUMPROCS ->
			refcount = refcount + 1;
			run tracer();
			i++
		:: i >= NUMPROCS -> break
		od;
		i = 0;
		do
		:: i < NUMSWITCH ->
			refcount = refcount + 1;
			run switcher();
			i++
		:: i >= NUMSWITCH -> break
		od;
	}
	// Assertions.
	atomic {
		// The writer head must always be greater than or equal to the
		// reader head.
		assert(write_off - read_off >= 0 && write_off - read_off < HALF_UCHAR);
		j = 0;
		commit_sum = 0;
		do
		:: j < NR_SUBBUFS ->
			commit_sum = commit_sum + commit_count[j];
			// The commit count of a particular subbuffer must always be
			// greater than or equal to the retrieve_count of this subbuffer.
			// assert(commit_count[j] - retrieve_count[j] >= 0 &&
			//	commit_count[j] - retrieve_count[j] < HALF_UCHAR);
			j++
		:: j >= NR_SUBBUFS -> break
		od;
		// The sum of all subbuffer commit counts must always be less than
		// or equal to the writer head, because space must be reserved before
		// it is written to and then committed.
		assert(write_off - commit_sum >= 0 && write_off - commit_sum < HALF_UCHAR);

		// If we have fewer writers than the available buffer space, we
		// should not lose events.
		assert(NUMPROCS + NUMSWITCH > BUFSIZE || events_lost == 0);
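		// With the defaults above, NUMPROCS + NUMSWITCH = 5 > BUFSIZE = 4,
		// so this assertion allows event loss; with NUMPROCS set to 3 the
		// left-hand side is false and the model must show events_lost == 0.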
	}
}