/*
 * Promela/Spin model input: urcu/result-signal-over-reader/urcu_progress_reader.spin.input
 * (userspace-RCU formal-model tree, urcu.git).
 */
1 #define READER_PROGRESS
2
3 #define read_free_race (read_generation[0] == last_free_gen)
4 #define read_free (free_done && data_access[0])
5
6 #define TEST_SIGNAL
7 #define TEST_SIGNAL_ON_READ
8 //#define TEST_SIGNAL_ON_WRITE
9
10 #define RCU_GP_CTR_BIT (1 << 7)
11 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
12
13 #ifndef READER_NEST_LEVEL
14 #define READER_NEST_LEVEL 1
15 //#define READER_NEST_LEVEL 2
16 #endif
17
18 #define REMOTE_BARRIERS
19 /*
20 * mem.spin: Promela code to validate memory barriers with OOO memory.
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 *
36 * Copyright (c) 2009 Mathieu Desnoyers
37 */
38
39 /* Promela validation variables. */
40
41 /* specific defines "included" here */
42 /* DEFINES file "included" here */
43
44 /* All signal readers have same PID and uses same reader variable */
45 #ifdef TEST_SIGNAL_ON_WRITE
46
47 #define NR_READERS 1 /* the writer is also a signal reader */
48 #define NR_WRITERS 1
49
50 #define NR_PROCS 1
51
52 #define get_pid() (0)
53
54 #elif defined(TEST_SIGNAL_ON_READ)
55
56 #define get_pid() ((_pid < 2) -> 0 : 1)
57
58 #define NR_READERS 1
59 #define NR_WRITERS 1
60
61 #define NR_PROCS 2
62
63 #else
64
65 #define get_pid() (_pid)
66
67 #define NR_READERS 1
68 #define NR_WRITERS 1
69
70 #define NR_PROCS 2
71
72 #endif
73
74 #define get_readerid() (get_pid())
75
76 /*
77 * Each process have its own data in cache. Caches are randomly updated.
78 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
79 * both.
80 */
81
82 typedef per_proc_byte {
83 byte val[NR_PROCS];
84 };
85
86 /* Bitfield has a maximum of 8 procs */
87 typedef per_proc_bit {
88 byte bitfield;
89 };
90
91 #define DECLARE_CACHED_VAR(type, x) \
92 type mem_##x; \
93 per_proc_##type cached_##x; \
94 per_proc_bit cache_dirty_##x;
95
96 #define INIT_CACHED_VAR(x, v, j) \
97 mem_##x = v; \
98 cache_dirty_##x.bitfield = 0; \
99 j = 0; \
100 do \
101 :: j < NR_PROCS -> \
102 cached_##x.val[j] = v; \
103 j++ \
104 :: j >= NR_PROCS -> break \
105 od;
106
107 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
108
109 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
110
111 #define WRITE_CACHED_VAR(x, v) \
112 atomic { \
113 cached_##x.val[get_pid()] = v; \
114 cache_dirty_##x.bitfield = \
115 cache_dirty_##x.bitfield | (1 << get_pid()); \
116 }
117
118 #define CACHE_WRITE_TO_MEM(x, id) \
119 if \
120 :: IS_CACHE_DIRTY(x, id) -> \
121 mem_##x = cached_##x.val[id]; \
122 cache_dirty_##x.bitfield = \
123 cache_dirty_##x.bitfield & (~(1 << id)); \
124 :: else -> \
125 skip \
126 fi;
127
128 #define CACHE_READ_FROM_MEM(x, id) \
129 if \
130 :: !IS_CACHE_DIRTY(x, id) -> \
131 cached_##x.val[id] = mem_##x;\
132 :: else -> \
133 skip \
134 fi;
135
136 /*
137 * May update other caches if cache is dirty, or not.
138 */
139 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
140 if \
141 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
142 :: 1 -> skip \
143 fi;
144
145 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
146 if \
147 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
148 :: 1 -> skip \
149 fi;
150
151 /*
152 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
153 * reader threads to promote their compiler barrier to a smp_mb().
154 */
155 #ifdef REMOTE_BARRIERS
156
157 inline smp_rmb_pid(i, j)
158 {
159 atomic {
160 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
161 j = 0;
162 do
163 :: j < NR_READERS ->
164 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
165 j++
166 :: j >= NR_READERS -> break
167 od;
168 CACHE_READ_FROM_MEM(generation_ptr, i);
169 }
170 }
171
172 inline smp_wmb_pid(i, j)
173 {
174 atomic {
175 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
176 j = 0;
177 do
178 :: j < NR_READERS ->
179 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
180 j++
181 :: j >= NR_READERS -> break
182 od;
183 CACHE_WRITE_TO_MEM(generation_ptr, i);
184 }
185 }
186
187 inline smp_mb_pid(i, j)
188 {
189 atomic {
190 #ifndef NO_WMB
191 smp_wmb_pid(i, j);
192 #endif
193 #ifndef NO_RMB
194 smp_rmb_pid(i, j);
195 #endif
196 #ifdef NO_WMB
197 #ifdef NO_RMB
198 ooo_mem(j);
199 #endif
200 #endif
201 }
202 }
203
204 /*
205 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
206 * signal or IPI to have all readers execute a smp_mb.
207 * We are not modeling the whole rendez-vous between readers and writers here,
208 * we just let the writer update each reader's caches remotely.
209 */
210 inline smp_mb_writer(i, j)
211 {
212 smp_mb_pid(get_pid(), j);
213 i = 0;
214 do
215 :: i < NR_READERS ->
216 smp_mb_pid(i, j);
217 i++;
218 :: i >= NR_READERS -> break
219 od;
220 smp_mb_pid(get_pid(), j);
221 }
222
223 inline smp_mb_reader(i, j)
224 {
225 skip;
226 }
227
228 #else
229
230 inline smp_rmb(i, j)
231 {
232 atomic {
233 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
234 i = 0;
235 do
236 :: i < NR_READERS ->
237 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
238 i++
239 :: i >= NR_READERS -> break
240 od;
241 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
242 }
243 }
244
245 inline smp_wmb(i, j)
246 {
247 atomic {
248 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
249 i = 0;
250 do
251 :: i < NR_READERS ->
252 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
253 i++
254 :: i >= NR_READERS -> break
255 od;
256 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
257 }
258 }
259
260 inline smp_mb(i, j)
261 {
262 atomic {
263 #ifndef NO_WMB
264 smp_wmb(i, j);
265 #endif
266 #ifndef NO_RMB
267 smp_rmb(i, j);
268 #endif
269 #ifdef NO_WMB
270 #ifdef NO_RMB
271 ooo_mem(i);
272 #endif
273 #endif
274 }
275 }
276
277 inline smp_mb_writer(i, j)
278 {
279 smp_mb(i, j);
280 }
281
282 inline smp_mb_reader(i, j)
283 {
284 smp_mb(i, j);
285 }
286
287 #endif
288
289 /* Keep in sync manually with smp_rmb, wmp_wmb, ooo_mem and init() */
290 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
291 /* Note ! currently only two readers */
292 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
293 /* pointer generation */
294 DECLARE_CACHED_VAR(byte, generation_ptr);
295
296 byte last_free_gen = 0;
297 bit free_done = 0;
298 byte read_generation[NR_READERS];
299 bit data_access[NR_READERS];
300
301 bit write_lock = 0;
302
303 bit init_done = 0;
304
305 bit sighand_exec = 0;
306
307 inline wait_init_done()
308 {
309 do
310 :: init_done == 0 -> skip;
311 :: else -> break;
312 od;
313 }
314
315 #ifdef TEST_SIGNAL
316
317 inline wait_for_sighand_exec()
318 {
319 sighand_exec = 0;
320 do
321 :: sighand_exec == 0 -> skip;
322 :: else -> break;
323 od;
324 }
325
326 #ifdef TOO_BIG_STATE_SPACE
327 inline wait_for_sighand_exec()
328 {
329 sighand_exec = 0;
330 do
331 :: sighand_exec == 0 -> skip;
332 :: else ->
333 if
334 :: 1 -> break;
335 :: 1 -> sighand_exec = 0;
336 skip;
337 fi;
338 od;
339 }
340 #endif
341
342 #else
343
344 inline wait_for_sighand_exec()
345 {
346 skip;
347 }
348
349 #endif
350
351 #ifdef TEST_SIGNAL_ON_WRITE
352 /* Block on signal handler execution */
353 inline dispatch_sighand_write_exec()
354 {
355 sighand_exec = 1;
356 do
357 :: sighand_exec == 1 ->
358 skip;
359 :: else ->
360 break;
361 od;
362 }
363
364 #else
365
366 inline dispatch_sighand_write_exec()
367 {
368 skip;
369 }
370
371 #endif
372
373 #ifdef TEST_SIGNAL_ON_READ
374 /* Block on signal handler execution */
375 inline dispatch_sighand_read_exec()
376 {
377 sighand_exec = 1;
378 do
379 :: sighand_exec == 1 ->
380 skip;
381 :: else ->
382 break;
383 od;
384 }
385
386 #else
387
388 inline dispatch_sighand_read_exec()
389 {
390 skip;
391 }
392
393 #endif
394
395
396 inline ooo_mem(i)
397 {
398 atomic {
399 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
400 i = 0;
401 do
402 :: i < NR_READERS ->
403 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
404 get_pid());
405 i++
406 :: i >= NR_READERS -> break
407 od;
408 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
409 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
410 i = 0;
411 do
412 :: i < NR_READERS ->
413 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
414 get_pid());
415 i++
416 :: i >= NR_READERS -> break
417 od;
418 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
419 }
420 }
421
422 inline wait_for_reader(tmp, tmp2, i, j)
423 {
424 do
425 :: 1 ->
426 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
427 ooo_mem(i);
428 dispatch_sighand_write_exec();
429 if
430 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
431 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
432 & RCU_GP_CTR_BIT) ->
433 #ifndef GEN_ERROR_WRITER_PROGRESS
434 smp_mb_writer(i, j);
435 #else
436 ooo_mem(i);
437 #endif
438 dispatch_sighand_write_exec();
439 :: else ->
440 break;
441 fi;
442 od;
443 }
444
445 inline wait_for_quiescent_state(tmp, tmp2, i, j)
446 {
447 tmp = 0;
448 do
449 :: tmp < NR_READERS ->
450 wait_for_reader(tmp, tmp2, i, j);
451 if
452 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
453 -> ooo_mem(i);
454 dispatch_sighand_write_exec();
455 :: else
456 -> skip;
457 fi;
458 tmp++
459 :: tmp >= NR_READERS -> break
460 od;
461 }
462
463 /* Model the RCU read-side critical section. */
464
465 #ifndef TEST_SIGNAL_ON_WRITE
466
467 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
468 {
469 nest_i = 0;
470 do
471 :: nest_i < READER_NEST_LEVEL ->
472 ooo_mem(i);
473 dispatch_sighand_read_exec();
474 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
475 ooo_mem(i);
476 dispatch_sighand_read_exec();
477 if
478 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
479 ->
480 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
481 ooo_mem(i);
482 dispatch_sighand_read_exec();
483 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
484 tmp2);
485 :: else ->
486 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
487 tmp + 1);
488 fi;
489 smp_mb_reader(i, j);
490 dispatch_sighand_read_exec();
491 nest_i++;
492 :: nest_i >= READER_NEST_LEVEL -> break;
493 od;
494
495 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
496 data_access[get_readerid()] = 1;
497 data_access[get_readerid()] = 0;
498
499 nest_i = 0;
500 do
501 :: nest_i < READER_NEST_LEVEL ->
502 smp_mb_reader(i, j);
503 dispatch_sighand_read_exec();
504 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
505 ooo_mem(i);
506 dispatch_sighand_read_exec();
507 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
508 nest_i++;
509 :: nest_i >= READER_NEST_LEVEL -> break;
510 od;
511 //ooo_mem(i);
512 //dispatch_sighand_read_exec();
513 //smp_mc(i); /* added */
514 }
515
516 active proctype urcu_reader()
517 {
518 byte i, j, nest_i;
519 byte tmp, tmp2;
520
521 wait_init_done();
522
523 assert(get_pid() < NR_PROCS);
524
525 end_reader:
526 do
527 :: 1 ->
528 /*
529 * We do not test reader's progress here, because we are mainly
530 * interested in writer's progress. The reader never blocks
531 * anyway. We have to test for reader/writer's progress
532 * separately, otherwise we could think the writer is doing
533 * progress when it's blocked by an always progressing reader.
534 */
535 #ifdef READER_PROGRESS
536 progress_reader:
537 #endif
538 urcu_one_read(i, j, nest_i, tmp, tmp2);
539 od;
540 }
541
542 #endif //!TEST_SIGNAL_ON_WRITE
543
544 #ifdef TEST_SIGNAL
545 /* signal handler reader */
546
547 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
548 {
549 nest_i = 0;
550 do
551 :: nest_i < READER_NEST_LEVEL ->
552 ooo_mem(i);
553 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
554 ooo_mem(i);
555 if
556 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
557 ->
558 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
559 ooo_mem(i);
560 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
561 tmp2);
562 :: else ->
563 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
564 tmp + 1);
565 fi;
566 smp_mb_reader(i, j);
567 nest_i++;
568 :: nest_i >= READER_NEST_LEVEL -> break;
569 od;
570
571 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
572 data_access[get_readerid()] = 1;
573 data_access[get_readerid()] = 0;
574
575 nest_i = 0;
576 do
577 :: nest_i < READER_NEST_LEVEL ->
578 smp_mb_reader(i, j);
579 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
580 ooo_mem(i);
581 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
582 nest_i++;
583 :: nest_i >= READER_NEST_LEVEL -> break;
584 od;
585 //ooo_mem(i);
586 //smp_mc(i); /* added */
587 }
588
589 active proctype urcu_reader_sig()
590 {
591 byte i, j, nest_i;
592 byte tmp, tmp2;
593
594 wait_init_done();
595
596 assert(get_pid() < NR_PROCS);
597
598 end_reader:
599 do
600 :: 1 ->
601 wait_for_sighand_exec();
602 /*
603 * We do not test reader's progress here, because we are mainly
604 * interested in writer's progress. The reader never blocks
605 * anyway. We have to test for reader/writer's progress
606 * separately, otherwise we could think the writer is doing
607 * progress when it's blocked by an always progressing reader.
608 */
609 #ifdef READER_PROGRESS
610 progress_reader:
611 #endif
612 urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
613 od;
614 }
615
616 #endif
617
618 /* Model the RCU update process. */
619
620 active proctype urcu_writer()
621 {
622 byte i, j;
623 byte tmp, tmp2;
624 byte old_gen;
625
626 wait_init_done();
627
628 assert(get_pid() < NR_PROCS);
629
630 do
631 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
632 #ifdef WRITER_PROGRESS
633 progress_writer1:
634 #endif
635 ooo_mem(i);
636 dispatch_sighand_write_exec();
637 atomic {
638 old_gen = READ_CACHED_VAR(generation_ptr);
639 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
640 }
641 ooo_mem(i);
642 dispatch_sighand_write_exec();
643
644 do
645 :: 1 ->
646 atomic {
647 if
648 :: write_lock == 0 ->
649 write_lock = 1;
650 break;
651 :: else ->
652 skip;
653 fi;
654 }
655 od;
656 smp_mb_writer(i, j);
657 dispatch_sighand_write_exec();
658 tmp = READ_CACHED_VAR(urcu_gp_ctr);
659 ooo_mem(i);
660 dispatch_sighand_write_exec();
661 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
662 ooo_mem(i);
663 dispatch_sighand_write_exec();
664 //smp_mc(i);
665 wait_for_quiescent_state(tmp, tmp2, i, j);
666 //smp_mc(i);
667 #ifndef SINGLE_FLIP
668 ooo_mem(i);
669 dispatch_sighand_write_exec();
670 tmp = READ_CACHED_VAR(urcu_gp_ctr);
671 ooo_mem(i);
672 dispatch_sighand_write_exec();
673 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
674 //smp_mc(i);
675 ooo_mem(i);
676 dispatch_sighand_write_exec();
677 wait_for_quiescent_state(tmp, tmp2, i, j);
678 #endif
679 smp_mb_writer(i, j);
680 dispatch_sighand_write_exec();
681 write_lock = 0;
682 /* free-up step, e.g., kfree(). */
683 atomic {
684 last_free_gen = old_gen;
685 free_done = 1;
686 }
687 :: else -> break;
688 od;
689 /*
690 * Given the reader loops infinitely, let the writer also busy-loop
691 * with progress here so, with weak fairness, we can test the
692 * writer's progress.
693 */
694 end_writer:
695 do
696 :: 1 ->
697 #ifdef WRITER_PROGRESS
698 progress_writer2:
699 #endif
700 dispatch_sighand_write_exec();
701 od;
702 }
703
704 /* Leave after the readers and writers so the pid count is ok. */
705 init {
706 byte i, j;
707
708 atomic {
709 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
710 INIT_CACHED_VAR(generation_ptr, 0, j);
711
712 i = 0;
713 do
714 :: i < NR_READERS ->
715 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
716 read_generation[i] = 1;
717 data_access[i] = 0;
718 i++;
719 :: i >= NR_READERS -> break
720 od;
721 init_done = 1;
722 }
723 }
/* End of urcu_progress_reader.spin.input. */