Update formal model from local copy
formal-model/urcu-nosched-model/result-signal-over-writer/testmerge/urcu_free.spin.input

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2


#if (NR_READERS == 1)

#define read_free_race (read_generation[0] == last_free_gen)
#define read_free (free_done && data_access[0])

#elif (NR_READERS == 2)

#define read_free_race (read_generation[0] == last_free_gen || read_generation[1] == last_free_gen)
#define read_free (free_done && (data_access[0] || data_access[1]))

#else

#error "Too many readers"

#endif
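
/*
 * read_free_race and read_free are the properties the accompanying
 * claims check: read_free_race flags a reader whose generation
 * snapshot equals the last freed generation, and read_free flags a
 * reader still accessing data after the free completed.
 */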

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
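/*
 * Note: the low 7 bits of a reader's urcu_active_readers[] slot hold
 * its nesting count, while bit 7 mirrors the grace-period phase of
 * urcu_gp_ctr at registration time. For example, a value of 0x81
 * means "nested once, registered during phase 1"; 0x02 means "nested
 * twice, registered during phase 0".
 */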

#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif

#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define get_pid() (_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
 * both.
 */

typedef per_proc_byte {
        byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
        byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x) \
        type mem_##x; \
        per_proc_##type cached_##x; \
        per_proc_bit cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j) \
        mem_##x = v; \
        cache_dirty_##x.bitfield = 0; \
        j = 0; \
        do \
        :: j < NR_PROCS -> \
                cached_##x.val[j] = v; \
                j++ \
        :: j >= NR_PROCS -> break \
        od;

#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v) \
        atomic { \
                cached_##x.val[get_pid()] = v; \
                cache_dirty_##x.bitfield = \
                        cache_dirty_##x.bitfield | (1 << get_pid()); \
        }
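
/*
 * For illustration, DECLARE_CACHED_VAR(byte, urcu_gp_ctr) expands
 * roughly to:
 *
 *      byte mem_urcu_gp_ctr;
 *      per_proc_byte cached_urcu_gp_ctr;
 *      per_proc_bit cache_dirty_urcu_gp_ctr;
 *
 * Each process reads and writes only its own cached copy; the dirty
 * bitfield records which processes still have an unflushed write.
 */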

#define CACHE_WRITE_TO_MEM(x, id) \
        if \
        :: IS_CACHE_DIRTY(x, id) -> \
                mem_##x = cached_##x.val[id]; \
                cache_dirty_##x.bitfield = \
                        cache_dirty_##x.bitfield & (~(1 << id)); \
        :: else -> \
                skip \
        fi;

#define CACHE_READ_FROM_MEM(x, id) \
        if \
        :: !IS_CACHE_DIRTY(x, id) -> \
                cached_##x.val[id] = mem_##x;\
        :: else -> \
                skip \
        fi;
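
/*
 * Note that CACHE_READ_FROM_MEM() only refreshes a clean line, so a
 * process never loses a write it has not yet flushed to memory.
 */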

/*
 * May update other caches if the local cache is dirty, or not:
 * flushes and refreshes happen at random points between barriers.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
        if \
        :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
        :: 1 -> skip \
        fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
        if \
        :: 1 -> CACHE_READ_FROM_MEM(x, id); \
        :: 1 -> skip \
        fi;
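
/*
 * Both guards above are trivially true, the usual Promela idiom for a
 * nondeterministic choice: the verifier explores the "flush/refresh"
 * and "do nothing" branches as separate interleavings.
 */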

/*
 * REMOTE_BARRIERS tests the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to an smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, i);
        }
}

inline smp_wmb_pid(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, i);
        }
}

inline smp_mb_pid(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
                smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(i);
#endif
#endif
        }
}
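
/*
 * smp_mb_pid(i, j) gives process i a full barrier: it flushes i's
 * dirty lines (wmb) and then refreshes i's clean lines (rmb), unless
 * NO_WMB/NO_RMB remove one side to test the effect of a missing
 * barrier.
 */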

/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a
 * signal (or IPI) to make all readers execute an smp_mb().
 * We do not model the whole rendez-vous between readers and writers here;
 * we just let the writer update each reader's caches remotely.
 */
inline smp_mb(i, j)
{
        if
        :: get_pid() >= NR_READERS ->
                smp_mb_pid(get_pid(), j);
                i = 0;
                do
                :: i < NR_READERS ->
                        smp_mb_pid(i, j);
                        i++;
                :: i >= NR_READERS -> break
                od;
                smp_mb_pid(get_pid(), j);
        :: else -> skip;
        fi;
}
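
/*
 * Only the writer (pid >= NR_READERS) pays for a barrier here: it
 * orders itself, then executes a full barrier on behalf of each
 * reader, then orders itself again, mimicking mb(); send IPIs; wait
 * for completion; mb(). Readers fall through with skip.
 */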

#else

inline smp_rmb(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

inline smp_wmb(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
        }
}

inline smp_mb(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb(i, j);
#endif
#ifndef NO_RMB
                smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(i);
#endif
#endif
        }
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: the model currently supports at most two readers */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

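/*
 * Verification state: last_free_gen and free_done model the free-up
 * step on the writer side, while read_generation[] and data_access[]
 * record what each reader saw; they feed the read_free_race/read_free
 * properties defined at the top of this file.
 */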
byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

inline wait_init_done()
{
        do
        :: init_done == 0 -> skip;
        :: else -> break;
        od;
}

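/*
 * ooo_mem() models out-of-order memory: at any point between explicit
 * barriers, each cached variable may or may not be flushed to memory
 * and may or may not be refreshed from it, in any combination.
 */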
inline ooo_mem(i)
{
        atomic {
                RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)

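/*
 * A reader holds up the grace period only if its nesting count is
 * non-zero (it is inside a read-side critical section) _and_ its
 * snapshot of the phase bit differs from the current urcu_gp_ctr,
 * i.e. the critical section started before the writer flipped the
 * phase. Otherwise the writer may move on to the next reader.
 */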
inline wait_for_reader(tmp, tmp2, i, j)
{
        do
        :: 1 ->
                tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
                ooo_mem(i);
                if
                :: (tmp2 & RCU_GP_CTR_NEST_MASK)
                        && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
                                & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
                        smp_mb(i, j);
#else
                        ooo_mem(i);
#endif
                :: else ->
                        break;
                fi;
        od;
}

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
        tmp = 0;
        do
        :: tmp < NR_READERS ->
                wait_for_reader(tmp, tmp2, i, j);
                if
                :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
                        -> ooo_mem(i);
                :: else
                        -> skip;
                fi;
                tmp++
        :: tmp >= NR_READERS -> break
        od;
}

/* Model the RCU read-side critical section. */

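/*
 * urcu_one_read() performs READER_NEST_LEVEL nested rcu_read_lock()
 * equivalents, one read-side data access, then the matching unlocks.
 * The outermost lock copies the global urcu_gp_ctr into the reader's
 * slot; nested locks simply increment the nesting count.
 */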
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                if
                :: (!(tmp & RCU_GP_CTR_NEST_MASK))
                        ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                tmp + 1);
                fi;
                smp_mb(i, j);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;

        ooo_mem(i);
        read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
        ooo_mem(i);
        data_access[get_readerid()] = 1;
        ooo_mem(i);
        data_access[get_readerid()] = 0;

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                smp_mb(i, j);
                tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        ooo_mem(i);
        //smp_mc(i); /* added */
}

active [NR_READERS] proctype urcu_reader()
{
        byte i, j, nest_i;
        byte tmp, tmp2;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                /*
                 * We do not test the reader's progress here, because we are
                 * mainly interested in the writer's progress. The reader
                 * never blocks anyway. We have to test reader and writer
                 * progress separately; otherwise we could think the writer
                 * is making progress when it is in fact blocked by an
                 * always-progressing reader.
                 */
#ifdef READER_PROGRESS
                /* Only test progress of one random reader. They are all the
                 * same. */
                atomic {
                        if
                        :: get_readerid() == 0 ->
progress_reader:
                                skip;
                        :: else ->
                                skip;
                        fi;
                }
#endif
                urcu_one_read(i, j, nest_i, tmp, tmp2);
        od;
}

/* Model the RCU update process. */

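/*
 * The writer publishes a new generation, takes the write lock, flips
 * the grace-period phase bit and waits for all readers to quiesce.
 * Unless SINGLE_FLIP is defined, it flips the phase bit and waits a
 * second time, so a reader that concurrently snapshots the counter
 * around the first flip cannot be missed. Only then does it perform
 * the free-up step.
 */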
active proctype urcu_writer()
{
        byte i, j;
        byte tmp, tmp2;
        byte old_gen;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

        do
        :: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
                ooo_mem(i);
                atomic {
                        old_gen = READ_CACHED_VAR(generation_ptr);
                        WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
                }
                ooo_mem(i);

                do
                :: 1 ->
                        atomic {
                                if
                                :: write_lock == 0 ->
                                        write_lock = 1;
                                        break;
                                :: else ->
                                        skip;
                                fi;
                        }
                od;
                smp_mb(i, j);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                ooo_mem(i);
                //smp_mc(i);
                wait_for_quiescent_state(tmp, tmp2, i, j);
                //smp_mc(i);
#ifndef SINGLE_FLIP
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                //smp_mc(i);
                ooo_mem(i);
                wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
                smp_mb(i, j);
                write_lock = 0;
                /* free-up step, e.g., kfree(). */
                atomic {
                        last_free_gen = old_gen;
                        free_done = 1;
                }
        :: else -> break;
        od;
        /*
         * Since the reader loops forever, let the writer also busy-loop
         * with a progress label here, so that under weak fairness we can
         * still check the writer's progress.
         */
end_writer:
        do
        :: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
                skip;
        od;
}

/* Kept after the readers and writers so the pid count is correct. */
init {
        byte i, j;

        atomic {
                INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
                INIT_CACHED_VAR(generation_ptr, 0, j);

                i = 0;
                do
                :: i < NR_READERS ->
                        INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
                        read_generation[i] = 1;
                        data_access[i] = 0;
                        i++;
                :: i >= NR_READERS -> break
                od;
                init_done = 1;
        }
}
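
/*
 * For reference (an assumption about the surrounding harness, not part
 * of this file): a model like this is typically verified with
 *
 *      spin -a urcu_free.spin.input
 *      cc -O2 -o pan pan.c
 *      ./pan -a -f
 *
 * where -a checks acceptance cycles for the never claim and -f enables
 * weak fairness for the progress runs.
 */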