RCU model: signal-handler reader running nested over (interrupting) a regular reader.
Source: urcu.git / formal-model / urcu / urcu.spin
1 /*
2 * mem.spin: Promela code to validate memory barriers with OOO memory.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (c) 2009 Mathieu Desnoyers
19 */
20
/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

/*
 * All signal-handler readers share the same PID and use the same reader
 * variable, because a signal handler runs in the context of the thread it
 * interrupts. The (cond -> a : b) form is Promela's conditional expression.
 */
#ifdef TEST_SIGNAL_ON_WRITE
#define get_pid() ((_pid < 1) -> 0 : 1)	/* _pid 0 -> id 0, all others -> id 1 */
#elif defined(TEST_SIGNAL_ON_READ)
#define get_pid() ((_pid < 2) -> 0 : 1)	/* _pid 0 and 1 -> id 0, others -> id 1 */
#else
#define get_pid() (_pid)		/* no signal test: identity mapping */
#endif

/* Reader identifier is simply the (possibly collapsed) pid. */
#define get_readerid() (get_pid())
36
/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
 * both.
 */

/* One byte of state per process: models a per-CPU cached copy. */
typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
	byte bitfield;
};

/*
 * Declare a "cached" shared variable x: the memory copy (mem_x), each
 * process' cached copy (cached_x), and per-process dirty bits
 * (cache_dirty_x) tracking cached writes not yet flushed to memory.
 */
#define DECLARE_CACHED_VAR(type, x) \
	type mem_##x; \
	per_proc_##type cached_##x; \
	per_proc_bit cache_dirty_##x;
56
/*
 * Initialize cached variable x to v in memory and in every process' cache,
 * with all dirty bits cleared. j is a caller-supplied scratch counter
 * (Promela inlines/macros cannot declare locals).
 */
#define INIT_CACHED_VAR(x, v, j) \
	mem_##x = v; \
	cache_dirty_##x.bitfield = 0; \
	j = 0; \
	do \
	:: j < NR_PROCS -> \
		cached_##x.val[j] = v; \
		j++ \
	:: j >= NR_PROCS -> break \
	od;

/* True when process id holds a write to x not yet flushed to memory. */
#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))

/* A read always hits the current process' own cached copy. */
#define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])

/* A write updates the local cached copy and marks it dirty, atomically. */
#define WRITE_CACHED_VAR(x, v) \
	atomic { \
		cached_##x.val[get_pid()] = v; \
		cache_dirty_##x.bitfield = \
			cache_dirty_##x.bitfield | (1 << get_pid()); \
	}

/* Flush process id's dirty copy of x to memory and clear its dirty bit. */
#define CACHE_WRITE_TO_MEM(x, id) \
	if \
	:: IS_CACHE_DIRTY(x, id) -> \
		mem_##x = cached_##x.val[id]; \
		cache_dirty_##x.bitfield = \
			cache_dirty_##x.bitfield & (~(1 << id)); \
	:: else -> \
		skip \
	fi;

/* Refresh process id's cached copy of x from memory, unless it is dirty
 * (a dirty copy must not be overwritten before being written back). */
#define CACHE_READ_FROM_MEM(x, id) \
	if \
	:: !IS_CACHE_DIRTY(x, id) -> \
		cached_##x.val[id] = mem_##x;\
	:: else -> \
		skip \
	fi;

/*
 * May update other caches if cache is dirty, or not.
 * Both :: 1 guards are simultaneously executable, so SPIN explores both
 * the "flush happened" and "flush did not happen" interleavings.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
	if \
	:: 1 -> CACHE_WRITE_TO_MEM(x, id); \
	:: 1 -> skip \
	fi;

/* Nondeterministically refresh (or not) id's cached copy of x. */
#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
	if \
	:: 1 -> CACHE_READ_FROM_MEM(x, id); \
	:: 1 -> skip \
	fi;
111
112 /*
113 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
114 * reader threads to promote their compiler barrier to a smp_mb().
115 */
116 #ifdef REMOTE_BARRIERS
117
/*
 * Read barrier executed on behalf of process i: refresh i's cached copies
 * of every shared variable from memory. j is a scratch loop counter.
 */
inline smp_rmb_pid(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}
132
/*
 * Write barrier executed on behalf of process i: flush all of i's dirty
 * cached copies to memory. j is a scratch loop counter.
 */
inline smp_wmb_pid(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}
147
/*
 * Full barrier for process i: write-back then read-in. NO_WMB/NO_RMB are
 * error-injection knobs that compile out one half; when both halves are
 * removed, degrade to ooo_mem so memory activity can still occur.
 */
inline smp_mb_pid(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(j);
#endif
#endif
	}
}
164
/*
 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
 * signal or IPI to have all readers execute a smp_mb.
 * We are not modeling the whole rendez-vous between readers and writers here,
 * we just let the writer update each reader's caches remotely.
 *
 * Only the writer (pid >= NR_READERS) executes anything: its own barrier,
 * then one remote barrier per reader, then its own barrier again. For a
 * reader process this smp_mb is a no-op (compiler barrier only).
 */
inline smp_mb(i, j)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid(), j);
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i, j);	/* remote barrier on reader i */
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid(), j);
	:: else -> skip;
	fi;
}
187
188 #else
189
/*
 * Read barrier executed by the calling process itself: refresh all of its
 * cached copies from memory. i is a scratch loop counter; j is unused here
 * but kept for signature symmetry with the REMOTE_BARRIERS variant.
 */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
204
/*
 * Write barrier executed by the calling process itself: flush all of its
 * dirty cached copies to memory.
 */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}
219
/*
 * Full barrier for the calling process: write-back then read-in, with the
 * same NO_WMB/NO_RMB error-injection knobs as smp_mb_pid.
 */
inline smp_mb(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i, j);
#endif
#ifndef NO_RMB
		smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}
236
237 #endif
238
/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note ! currently only two readers */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;		/* generation most recently "freed" by writer */
bit free_done = 0;		/* set once the writer has performed a free step */
byte read_generation[NR_READERS];	/* generation observed by each reader */
bit data_access[NR_READERS];	/* reader i is currently accessing the data */

bit write_lock = 0;		/* serializes the writer's update section */

bit init_done = 0;		/* init{} finished setting up cached variables */

bit sighand_exec = 0;		/* handshake flag: signal handler should run */
256
/* Busy-wait until init{} has initialized all cached variables. */
inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}
264
265 #ifdef TEST_SIGNAL
266
/*
 * Signal-handler reader side of the handshake: acknowledge the previous
 * signal (clear the flag), wait for the dispatcher to raise it again, then
 * nondeterministically either run the handler body (break) or consume this
 * delivery and wait for another — modeling repeated signal delivery.
 */
inline wait_for_sighand_exec()
{
	sighand_exec = 0;
	do
	:: sighand_exec == 0 -> skip;
	:: else ->
		if
		:: 1 -> break;
		:: 1 -> sighand_exec = 0;
			skip;
		fi;
	od;
}
280
281 #else
282
/* No signal testing compiled in: the handshake is a no-op. */
inline wait_for_sighand_exec()
{
	skip;
}
287
288 #endif
289
290 #ifdef TEST_SIGNAL_ON_WRITE
/* Block on signal handler execution */
/*
 * Writer-side dispatcher: raise the flag and busy-wait until the
 * signal-handler reader has acknowledged (cleared) it.
 */
inline dispatch_sighand_write_exec()
{
	sighand_exec = 1;
	do
	:: sighand_exec == 1 ->
		skip;
	:: else ->
		break;
	od;
}
302
303 #else
304
/* TEST_SIGNAL_ON_WRITE not set: writer dispatch is a no-op. */
inline dispatch_sighand_write_exec()
{
	skip;
}
309
310 #endif
311
312 #ifdef TEST_SIGNAL_ON_READ
/* Block on signal handler execution */
/*
 * Reader-side dispatcher: raise the flag and busy-wait until the
 * signal-handler reader has acknowledged (cleared) it.
 */
inline dispatch_sighand_read_exec()
{
	sighand_exec = 1;
	do
	:: sighand_exec == 1 ->
		skip;
	:: else ->
		break;
	od;
}
324
325 #else
326
/* TEST_SIGNAL_ON_READ not set: reader dispatch is a no-op. */
inline dispatch_sighand_read_exec()
{
	skip;
}
331
332 #endif
333
334
/*
 * Model out-of-order memory accesses: nondeterministically write back
 * and/or refresh each of the current process' cached variables. Inserted
 * between memory accesses so SPIN explores every cache-coherence
 * interleaving a weakly-ordered machine could produce.
 */
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
360
/*
 * Writer-side wait for reader index tmp to pass a quiescent state: spin
 * while that reader is inside a read-side critical section (non-zero nest
 * count) started in the previous grace-period phase (its snapshot's
 * RCU_GP_CTR_BIT differs from the current urcu_gp_ctr).
 * tmp2, i, j are scratch variables supplied by the caller.
 */
inline wait_for_reader(tmp, tmp2, i, j)
{
	do
	:: 1 ->
		tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		if
		:: (tmp2 & RCU_GP_CTR_NEST_MASK)
			&& ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i, j);	/* pick up the reader's stores */
#else
			ooo_mem(i);	/* error injection: barrier removed */
#endif
			dispatch_sighand_write_exec();
		:: else ->
			break;
		fi;
	od;
}
383
/*
 * Wait, in turn, for every reader to pass through a quiescent state for
 * the current grace-period phase. Extra memory reordering / signal
 * dispatch is modeled between readers (but not after the last one).
 */
inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
	tmp = 0;
	do
	:: tmp < NR_READERS ->
		wait_for_reader(tmp, tmp2, i, j);
		if
		:: (NR_READERS > 1) && (tmp < NR_READERS - 1)
			-> ooo_mem(i);
			dispatch_sighand_write_exec();
		:: else
			-> skip;
		fi;
		tmp++
	:: tmp >= NR_READERS -> break
	od;
}
401
/* Model the RCU read-side critical section. */

/*
 * One full read-side critical section, nested READER_NEST_LEVEL deep:
 *  - lock loop: the outermost rcu_read_lock() copies the global
 *    urcu_gp_ctr snapshot into the reader's active_readers slot; nested
 *    locks just increment the nest count (tmp + 1);
 *  - the protected access records the generation it observed and toggles
 *    data_access, which the verifier checks against the free step;
 *  - unlock loop: decrement the nest count, with a barrier before each
 *    decrement.
 * ooo_mem/dispatch_sighand_read_exec between steps model reordering and
 * signal delivery points.
 */
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		dispatch_sighand_read_exec();
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		dispatch_sighand_read_exec();
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			/* Outermost lock: snapshot the grace-period counter. */
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			dispatch_sighand_read_exec();
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp2);
		:: else ->
			/* Nested lock: bump the nest count only. */
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp + 1);
		fi;
		smp_mb(i, j);
		dispatch_sighand_read_exec();
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	/* Protected read: record which generation this section observed. */
	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i, j);
		dispatch_sighand_read_exec();
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		dispatch_sighand_read_exec();
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	//ooo_mem(i);
	//dispatch_sighand_read_exec();
	//smp_mc(i); /* added */
}
452
/*
 * Reader process: once init completes, loop forever performing read-side
 * critical sections. i, j, nest_i, tmp, tmp2 are scratch storage passed
 * down to the inlines (Promela inlines have no locals of their own).
 */
active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test reader's progress here, because we are mainly
		 * interested in writer's progress. The reader never blocks
		 * anyway. We have to test for reader/writer's progress
		 * separately, otherwise we could think the writer is doing
		 * progress when it's blocked by an always progressing reader.
		 */
#ifdef READER_PROGRESS
		/* Only test progress of one random reader. They are all the
		 * same. */
		if
		:: get_readerid() == 0 ->
progress_reader:
			skip;
		fi;
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}
484
485 #ifdef TEST_SIGNAL
486 /* signal handler reader */
487
/*
 * Signal-handler variant of urcu_one_read: identical lock / access /
 * unlock sequence, but without dispatch_sighand_read_exec() calls — this
 * code IS the signal handler, so no further signal can be dispatched to it.
 */
inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			/* Outermost lock: snapshot the grace-period counter. */
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp2);
		:: else ->
			/* Nested lock: bump the nest count only. */
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp + 1);
		fi;
		smp_mb(i, j);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	/* Protected read: record which generation this section observed. */
	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i, j);
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	//ooo_mem(i);
	//smp_mc(i); /* added */
}
529
/*
 * Signal-handler reader process: waits for each signal delivery
 * (wait_for_sighand_exec), then runs one read-side critical section in
 * handler context.
 */
active proctype urcu_reader_sig()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		wait_for_sighand_exec();
		/*
		 * We do not test reader's progress here, because we are mainly
		 * interested in writer's progress. The reader never blocks
		 * anyway. We have to test for reader/writer's progress
		 * separately, otherwise we could think the writer is doing
		 * progress when it's blocked by an always progressing reader.
		 */
#ifdef READER_PROGRESS
		/* Only test progress of one random reader. They are all the
		 * same. */
		if
		:: get_readerid() == 0 ->
progress_reader:
			skip;
		fi;
#endif
		urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
	od;
}
562
563 #endif
564
565 /* Model the RCU update process. */
566
/*
 * Writer process: up to 5 updates. Each update publishes a new generation,
 * takes the write lock, runs synchronize_rcu() — flip the grace-period
 * phase bit and wait for all readers, twice unless SINGLE_FLIP — then
 * releases the lock and models freeing the old generation.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2;
	byte old_gen;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		dispatch_sighand_write_exec();
		/* Publish a new generation (models rcu_assign_pointer). */
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);
		dispatch_sighand_write_exec();

		/* Spin until the write lock is acquired (test-and-set). */
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i, j);
		dispatch_sighand_write_exec();
		/* First grace-period phase flip. */
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		//smp_mc(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
		//smp_mc(i);
#ifndef SINGLE_FLIP
		/* Second flip + wait (the two-phase grace period). */
		ooo_mem(i);
		dispatch_sighand_write_exec();
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
		smp_mb(i, j);
		dispatch_sighand_write_exec();
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here so, with weak fairness, we can test the
	 * writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		dispatch_sighand_write_exec();
	od;
}
650
/* Leave after the readers and writers so the pid count is ok. */
/*
 * Atomically initialize every cached variable and per-reader state, then
 * release the other processes by setting init_done.
 */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++;
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}
This page took 0.042161 seconds and 4 git commands to generate.