/* LTTng ltt-tracer.c atomic lockless buffering scheme Promela model v2
 * Created for the Spin validator.
 * Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * October 2008

 * TODO : create test cases that will generate an overflow on the offset and
 * counter type. Counter types smaller than a byte should be used.

 * Promela only has unsigned char, no signed char.
 * Because detecting a difference < 0 requires a signed type, but we want
 * compactness, also check whether values exceed half of the unsigned char
 * range (and treat them as negative). The model, by design, does not use
 * offsets or counts higher than 127 because we would then have to use a
 * larger type (short or int).
 */
#define HALF_UCHAR (255/2)
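
/* Worked example of the wraparound test used below (illustrative, not part
 * of the original model): with BUFSIZE 4, new_off = 6 and read_off = 0 give
 * new_off - read_off = 6, which is > BUFSIZE and < HALF_UCHAR, so the
 * buffer is full. If read_off were ahead (e.g. new_off = 2, read_off = 4,
 * a state the invariants forbid), the byte difference would wrap to
 * 254 >= HALF_UCHAR and be treated as negative, so the "full" branch would
 * not be taken.
 */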
/* NUMPROCS 4 : causes event loss with some reader timings.
 * e.g. 3 events, 1 switch, 1 event (lost, buffer full), read 1 subbuffer
 */
#define NUMPROCS 4

/* NUMPROCS 3 : does not cause event loss because buffers are big enough.
 * #define NUMPROCS 3
 * e.g. 3 events, 1 switch, read 1 subbuffer
 */

#define NUMSWITCH 1
#define BUFSIZE 4
#define NR_SUBBUFS 2
#define SUBBUF_SIZE (BUFSIZE / NR_SUBBUFS)
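
/* Resulting geometry (illustrative): BUFSIZE 4 and NR_SUBBUFS 2 give
 * SUBBUF_SIZE 2. Slots 0-1 form sub-buffer 0 and slots 2-3 form
 * sub-buffer 1; offsets index slots modulo BUFSIZE, so offset 5 maps to
 * slot 1 in sub-buffer 0 on the second lap around the buffer.
 */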
/* Writer counters
 */
byte write_off = 0;
byte commit_count[NR_SUBBUFS];

/* Reader counters
 */
byte read_off = 0;

byte events_lost = 0;
byte refcount = 0;

bool deliver = 0;

/* Buffer slot in-use bit. Detects racy use (more than a single process
 * accessing a slot at any given step).
 */
bool buffer_use[BUFSIZE];

/* Perform a sub-buffer switch when needed.
 * Used in a periodic timer interrupt to fill and ship the current subbuffer
 * to the reader so we can guarantee a steady flow. If a subbuffer is
 * completely empty, do not switch.
 * Also used as a "finalize" operation to complete the last subbuffer after
 * all writers have finished, so the last subbuffer can be read by the reader.
 */
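
/* The cmpxchg_loop labels below (in both switcher and tracer) model a
 * compare-and-exchange retry loop with two atomic blocks: the first reads
 * write_off and computes the new value, the second re-checks that write_off
 * is unchanged before storing it (the cmpxchg), retrying from the top on
 * failure. Splitting the read from the update lets Spin interleave other
 * processes in between, which is exactly the race the lockless scheme must
 * tolerate.
 */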
proctype switcher()
{
	byte prev_off, new_off, tmp_commit;
	byte size;

cmpxchg_loop:
	atomic {
		prev_off = write_off;
		size = SUBBUF_SIZE - (prev_off % SUBBUF_SIZE);
		new_off = prev_off + size;
		if
		:: (new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR)
			|| size == SUBBUF_SIZE ->
			refcount = refcount - 1;
			goto not_needed;
		:: else -> skip
		fi;
	}
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
	}

	atomic {
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if	/* Deliver when the sub-buffer is completely committed. */
		:: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
				tmp_commit == 0
			-> deliver = 1
		:: else
			-> skip
		fi;
		refcount = refcount - 1;
	}
not_needed:
	skip;
}

/* tracer
 * Writes 1 byte of information in the buffer at the current "write_off"
 * position, then increments the commit_count of the sub-buffer the
 * information has been written to.
 */
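
/* Worked example (illustrative): suppose one event has already been written
 * and committed (write_off = 1, commit_count[0] = 1, read_off = 0). A
 * second tracer reserves slot 1 (new_off = 2, still within BUFSIZE of
 * read_off), writes it, and commits: tmp_commit = 2, and
 * ((1 / 4) * 4) / 2 + SUBBUF_SIZE - 2 == 0, so sub-buffer 0 is complete
 * and deliver is set.
 */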
proctype tracer()
{
	byte size = 1;
	byte prev_off, new_off, tmp_commit;
	byte i, j;

cmpxchg_loop:
	atomic {
		prev_off = write_off;
		new_off = prev_off + size;
	}
	atomic {
		if
		:: new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR ->
			goto lost
		:: else -> skip
		fi;
	}
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
		i = 0;
		do
		:: i < size ->
			assert(buffer_use[(prev_off + i) % BUFSIZE] == 0);
			buffer_use[(prev_off + i) % BUFSIZE] = 1;
			i++
		:: i >= size -> break
		od;
	}

	/* writing to buffer...
	 */

	atomic {
		i = 0;
		do
		:: i < size ->
			buffer_use[(prev_off + i) % BUFSIZE] = 0;
			i++
		:: i >= size -> break
		od;
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if	/* Deliver when the sub-buffer is completely committed. */
		:: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
				tmp_commit == 0
			-> deliver = 1
		:: else
			-> skip
		fi;
	}
	atomic {
		goto end;
lost:
		events_lost++;
end:
		refcount = refcount - 1;
	}
}

/* reader
 * Reads the information sub-buffer per sub-buffer when available.
 *
 * Reads the information as soon as it is ready, or may be delayed by
 * an asynchronous delivery. Modeling it as a process ensures all cases
 * (scheduled very quickly or very late, causing event loss) are covered.
 * Only one reader per buffer (normally ensured by a mutex); this is modeled
 * by using a single reader process.
 */
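
/* Worked example (illustrative): with write_off = 2 and read_off = 0, the
 * heads differ by one sub-buffer (2/2 - 0/2 = 1), and sub-buffer 0 becomes
 * readable once commit_count[0] == 2, since
 * 2 - SUBBUF_SIZE - (((0 / 4) * 4) / 2) == 0. The reader then consumes
 * slots 0-1 and advances read_off to 2.
 */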
proctype reader()
{
	byte i, j;

	do
	:: (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) > 0
		&& (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) < HALF_UCHAR
		&& (commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
			- SUBBUF_SIZE - (((read_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS)
			== 0) ->
		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				assert(buffer_use[(read_off + i) % BUFSIZE] == 0);
				buffer_use[(read_off + i) % BUFSIZE] = 1;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
		}

		/* reading from buffer...
		 */

		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				buffer_use[(read_off + i) % BUFSIZE] = 0;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
			read_off = read_off + SUBBUF_SIZE;
		}
	:: read_off >= (NUMPROCS - events_lost) -> break;
	od;
}

/* Waits for all tracer and switcher processes to finish before finalizing
 * the buffer. Only after that will the reader be allowed to read the
 * last subbuffer.
 */
proctype cleaner()
{
	atomic {
		do
		:: refcount == 0 ->
			refcount = refcount + 1;
			run switcher();	/* Finalize the last sub-buffer so it can be read. */
			break;
		od;
	}
}

init {
	byte i = 0;
	byte j = 0;
	byte sum = 0;
	byte commit_sum = 0;

	atomic {
		i = 0;
		do
		:: i < NR_SUBBUFS ->
			commit_count[i] = 0;
			i++
		:: i >= NR_SUBBUFS -> break
		od;
		i = 0;
		do
		:: i < BUFSIZE ->
			buffer_use[i] = 0;
			i++
		:: i >= BUFSIZE -> break
		od;
		run reader();
		run cleaner();
		i = 0;
		do
		:: i < NUMPROCS ->
			refcount = refcount + 1;
			run tracer();
			i++
		:: i >= NUMPROCS -> break
		od;
		i = 0;
		do
		:: i < NUMSWITCH ->
			refcount = refcount + 1;
			run switcher();
			i++
		:: i >= NUMSWITCH -> break
		od;
	}

	/* Assertions.
	 */
	atomic {
		/* The writer head must always be greater than or equal to the
		 * reader head.
		 */
		assert(write_off - read_off >= 0 && write_off - read_off < HALF_UCHAR);
		j = 0;
		commit_sum = 0;
		do
		:: j < NR_SUBBUFS ->
			commit_sum = commit_sum + commit_count[j];
			/* The commit count of a particular subbuffer must always be
			 * greater than or equal to the retrieve_count of this subbuffer.
			 * assert(commit_count[j] - retrieve_count[j] >= 0 &&
			 *        commit_count[j] - retrieve_count[j] < HALF_UCHAR);
			 */
			j++
		:: j >= NR_SUBBUFS -> break
		od;
		/* The sum of all subbuffer commit counts must always be less than
		 * or equal to the writer head, because space must be reserved
		 * before it is written to and then committed.
		 */
		assert(write_off - commit_sum >= 0 && write_off - commit_sum < HALF_UCHAR);

		/* If we have fewer writers than available buffer slots, we should
		 * not lose events.
		 */
		assert(NUMPROCS + NUMSWITCH > BUFSIZE || events_lost == 0);
	}
}
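
/* A sketch of the head invariant as a global LTL property (an illustrative
 * addition, assuming Spin 6 or later for the ltl syntax): it is checked in
 * every reachable state rather than only at the end of init.
 */
ltl head_invariant { [] (write_off - read_off < HALF_UCHAR) }

/* Typical verification run, using the standard Spin workflow:
 *   spin -a model_01.spin
 *   gcc -O2 -o pan pan.c
 *   ./pan
 * pan reports any assertion violation and writes a trail file that
 * "spin -t -p model_01.spin" can replay step by step.
 */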