/* convert from svn repository: remove tags directory
 * [lttv.git] / trunk / verif / md / pan.oin
 */
1 /* LTTng ltt-tracer.c atomic lockless buffering scheme Promela model v2
2 * Created for the Spin validator.
3 * Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
4 * October 2008
5
6 * TODO : create test cases that will generate an overflow on the offset and
7 * counter type. Counter types smaller than a byte should be used.
8
9 * Promela only has unsigned char, no signed char.
10 * Because detection of difference < 0 depends on a signed type, but we want
11 * compactness, check also for the values being higher than half of the unsigned
12 * char range (and consider them negative). The model, by design, does not use
13 * offsets or counts higher than 127 because we would then have to use a larger
14 * type (short or int).
15 */
16 #define HALF_UCHAR (255/2)
17
18 /* NUMPROCS 4 : causes event loss with some reader timings.
19 * e.g. 3 events, 1 switch, 1 event (lost, buffer full), read 1 subbuffer
20 */
21 #define NUMPROCS 4
22
23 /* NUMPROCS 3 : does not cause event loss because buffers are big enough.
24 * #define NUMPROCS 3
25 * e.g. 3 events, 1 switch, read 1 subbuffer
26 */
27
28 #define NUMSWITCH 1
29 #define BUFSIZE 4
30 #define NR_SUBBUFS 2
31 #define SUBBUF_SIZE (BUFSIZE / NR_SUBBUFS)
32
33 /* Writer counters
34 */
35 byte write_off = 0;
36 byte commit_count[NR_SUBBUFS];
37
38 /* <formal_verif> */
39 byte _commit_sum;
40 /* </formal_verif> */
41
42 /* Reader counters
43 */
44 byte read_off = 0;
45
46 byte events_lost = 0;
47 byte refcount = 0;
48
49 bool deliver = 0;
50
51 /* buffer slot in-use bit. Detects racy use (more than a single process
52 * accessing a slot at any given step).
53 */
54 bool buffer_use[BUFSIZE];
55
/* Proceed to a sub-buffer switch if needed.
57 * Used in a periodical timer interrupt to fill and ship the current subbuffer
58 * to the reader so we can guarantee a steady flow. If a subbuffer is
59 * completely empty, do not switch.
60 * Also used as "finalize" operation to complete the last subbuffer after
61 * all writers have finished so the last subbuffer can be read by the reader.
62 */
/* Models the sub-buffer switch operation (periodical timer / finalize).
 * Uses a cmpxchg-style retry loop on write_off: read, compute, then commit
 * only if write_off is unchanged. Decrements refcount exactly once on every
 * exit path (not_needed or after the commit-count update).
 */
proctype switcher()
{
	byte prev_off, new_off, tmp_commit;
	byte size;

cmpxchg_loop:
	atomic {
		prev_off = write_off;
		/* Bytes remaining in the current sub-buffer. size == SUBBUF_SIZE
		 * means the sub-buffer is completely empty. */
		size = SUBBUF_SIZE - (prev_off % SUBBUF_SIZE);
		new_off = prev_off + size;
		if
		/* Switch not needed when the buffer would overflow past the
		 * reader (unsigned-wrap check via HALF_UCHAR, see header
		 * comment) or when the current sub-buffer is empty. */
		:: (new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR)
			|| size == SUBBUF_SIZE ->
			refcount = refcount - 1;
			goto not_needed;
		:: else -> skip
		fi;
	}
	atomic {
		/* cmpxchg: retry from the top if another process moved write_off
		 * between the read above and this commit. */
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
	}

	atomic {
		/* Account the padded tail of the sub-buffer as committed. */
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		/* <formal_verif> */
		/* _commit_sum mirrors the sum of all commit_count slots for the
		 * write_off/_commit_sum assertion in init. */
		_commit_sum = _commit_sum - commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE]
			+ tmp_commit;
		/* </formal_verif> */
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		/* NOTE(review): this guard is the bare expression, so it is true
		 * whenever the value is non-zero, i.e. when the sub-buffer is NOT
		 * yet fully committed. The reader's matching fullness check
		 * (in reader()) compares against 0 explicitly; an "== 0" may be
		 * missing here — confirm against the original model. "deliver" is
		 * written but never read anywhere in this model. */
		:: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
			tmp_commit
			-> deliver = 1
		:: else
			-> skip
		fi;
		refcount = refcount - 1;
	}
not_needed:
	skip;
}
107
108 /* tracer
109 * Writes 1 byte of information in the buffer at the current
110 * "write_off" position and then increment the commit_count of the sub-buffer
111 * the information has been written to.
112 */
/* tracer
 * Writes 1 byte of information in the buffer at the current
 * "write_off" position and then increments the commit_count of the
 * sub-buffer the information has been written to.
 * Reserve (cmpxchg loop) -> mark slots in use -> write -> release slots
 * and commit. Decrements refcount exactly once on every exit path
 * (lost or end).
 */
proctype tracer()
{
	byte size = 1;
	byte prev_off, new_off, tmp_commit;
	byte i, j;	/* NOTE(review): j is declared but never used here. */

cmpxchg_loop:
	atomic {
		prev_off = write_off;
		new_off = prev_off + size;
	}
	atomic {
		if
		/* Buffer full (unsigned-wrap check via HALF_UCHAR): the event
		 * is dropped rather than overwriting unread data. */
		:: new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR ->
			goto lost
		:: else -> skip
		fi;
	}
	atomic {
		/* cmpxchg: retry if another process moved write_off since the
		 * read above; otherwise commit the reservation. */
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
		i = 0;
		do
		:: i < size ->
			/* The assert detects racy concurrent use of a slot: no other
			 * process may hold this slot while we write. */
			assert(buffer_use[(prev_off + i) % BUFSIZE] == 0);
			buffer_use[(prev_off + i) % BUFSIZE] = 1;
			i++
		:: i >= size -> break
		od;
	}

	/* writing to buffer...
	 */

	atomic {
		/* Release the in-use marks, then publish the write by bumping
		 * the sub-buffer's commit count. */
		i = 0;
		do
		:: i < size ->
			buffer_use[(prev_off + i) % BUFSIZE] = 0;
			i++
		:: i >= size -> break
		od;
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		/* <formal_verif> */
		/* _commit_sum mirrors the sum of all commit_count slots for the
		 * write_off/_commit_sum assertion in init. */
		_commit_sum = _commit_sum - commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE]
			+ tmp_commit;
		/* </formal_verif> */
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		/* NOTE(review): bare-expression guard — true whenever non-zero,
		 * i.e. when the sub-buffer is NOT yet fully committed. The
		 * reader's matching fullness check compares against 0 explicitly;
		 * an "== 0" may be missing here — confirm against the original
		 * model. "deliver" is never read in this model. */
		:: (((prev_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS) + SUBBUF_SIZE -
			tmp_commit
			-> deliver = 1
		:: else
			-> skip
		fi;
	}
	atomic {
		goto end;
lost:
		events_lost++;
end:
		refcount = refcount - 1;
	}
}
179
180 /* reader
181 * Read the information sub-buffer per sub-buffer when available.
182 *
183 * Reads the information as soon as it is ready, or may be delayed by
 * an asynchronous delivery. Being modeled as a process ensures all cases
185 * (scheduled very quickly or very late, causing event loss) are covered.
186 * Only one reader per buffer (normally ensured by a mutex). This is modeled
187 * by using a single reader process.
188 */
/* reader
 * Consumes one full sub-buffer per iteration. A sub-buffer is readable when
 * (1) write_off is at least one sub-buffer ahead of read_off,
 * (2) that distance is not an unsigned-wrap artifact (HALF_UCHAR check), and
 * (3) the sub-buffer's commit count shows it is fully committed
 *     (explicit == 0 comparison against the expected full value).
 * Terminates once everything that can ever be produced has been read.
 */
proctype reader()
{
	byte i, j;	/* NOTE(review): j is declared but never used here. */

	do
	:: (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) > 0
		&& (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) < HALF_UCHAR
		&& (commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
			- SUBBUF_SIZE - (((read_off / BUFSIZE) * BUFSIZE) / NR_SUBBUFS)
			== 0) ->
		atomic {
			/* Mark the whole sub-buffer in use; the assert detects any
			 * racy concurrent access by a writer. */
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				assert(buffer_use[(read_off + i) % BUFSIZE] == 0);
				buffer_use[(read_off + i) % BUFSIZE] = 1;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
		}

		/* reading from buffer...
		 */

		atomic {
			/* Release the in-use marks and advance the reader head by
			 * one full sub-buffer. */
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				buffer_use[(read_off + i) % BUFSIZE] = 0;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
			read_off = read_off + SUBBUF_SIZE;
		}
	/* Exit once all events that were not lost have been read (each tracer
	 * writes 1 byte, so NUMPROCS - events_lost bytes total). */
	:: read_off >= (NUMPROCS - events_lost) -> break;
	od;
}
226
227 /* Waits for all tracer and switcher processes to finish before finalizing
228 * the buffer. Only after that will the reader be allowed to read the
229 * last subbuffer.
230 */
/* Waits for all tracer and switcher processes to finish (refcount == 0)
 * before finalizing the buffer. The check and the refcount increment are in
 * one atomic step so no new holder can slip in between. The spawned switcher
 * flushes the last, partially-filled sub-buffer so the reader can read it.
 */
proctype cleaner()
{
	atomic {
		do
		:: refcount == 0 ->
			refcount = refcount + 1;
			run switcher(); /* Finalize the last sub-buffer so it can be read. */
			break;
		od;
	}
}
242
/* Zeroes all shared state, spawns the reader, the cleaner, NUMPROCS tracers
 * and NUMSWITCH switchers (each tracer/switcher holds one refcount), then
 * states the model's safety assertions. Processes spawned with "run" inside
 * the atomic block do not execute until the atomic sequence ends.
 */
init {
	byte i = 0;
	byte j = 0;	/* NOTE(review): j and sum/commit_sum below are unused. */
	byte sum = 0;
	byte commit_sum = 0;

	atomic {
		i = 0;
		do
		:: i < NR_SUBBUFS ->
			commit_count[i] = 0;
			i++
		:: i >= NR_SUBBUFS -> break
		od;
		/* <formal_verif> */
		_commit_sum = 0;
		/* </formal_verif> */
		i = 0;
		do
		:: i < BUFSIZE ->
			buffer_use[i] = 0;
			i++
		:: i >= BUFSIZE -> break
		od;
		run reader();
		run cleaner();
		i = 0;
		do
		:: i < NUMPROCS ->
			refcount = refcount + 1;
			run tracer();
			i++
		:: i >= NUMPROCS -> break
		od;
		i = 0;
		do
		:: i < NUMSWITCH ->
			refcount = refcount + 1;
			run switcher();
			i++
		:: i >= NUMSWITCH -> break
		od;
		/* NOTE(review): redundant — _commit_sum was already zeroed above
		 * and no spawned process runs before this atomic block ends. */
		_commit_sum = 0;
	}

	/* Assertions. These execute once, at whatever point the scheduler
	 * resumes init; an "always" property would need an LTL/never claim.
	 */
	atomic {
		/* The writer head must always be superior or equal to the reader head.
		 */
		//assert(write_off - read_off >= 0 && write_off - read_off < HALF_UCHAR);
		/* The sum of all subbuffer commit counts must always be lower or equal
		 * to the writer head, because space must be reserved before it is
		 * written to and then committed.
		 */
		assert(write_off - _commit_sum >= 0 && write_off - _commit_sum < HALF_UCHAR);

		/* If we have less writers than the buffer space available, we should
		 * not lose events
		 */
		assert(NUMPROCS + NUMSWITCH > BUFSIZE || events_lost == 0);
	}
}
306
307
/* This page took 0.035322 seconds and 4 git commands to generate. */