Update formal model from local copy
[urcu.git] / formal-model / urcu-nosched-model / result-signal-over-writer / urcu_progress_reader.spin.input
1 #define READER_PROGRESS
2
3 #define read_free_race (read_generation[0] == last_free_gen)
4 #define read_free (free_done && data_access[0])
5
6 #define TEST_SIGNAL
7 //#define TEST_SIGNAL_ON_READ
8 #define TEST_SIGNAL_ON_WRITE
9
10 #define RCU_GP_CTR_BIT (1 << 7)
11 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
12
13 #ifndef READER_NEST_LEVEL
14 #define READER_NEST_LEVEL 1
15 #endif
16
17 #define REMOTE_BARRIERS
18 /*
19 * mem.spin: Promela code to validate memory barriers with OOO memory.
20 *
21 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the GNU General Public License as published by
23 * the Free Software Foundation; either version 2 of the License, or
24 * (at your option) any later version.
25 *
26 * This program is distributed in the hope that it will be useful,
27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * GNU General Public License for more details.
30 *
31 * You should have received a copy of the GNU General Public License
32 * along with this program; if not, write to the Free Software
33 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
34 *
35 * Copyright (c) 2009 Mathieu Desnoyers
36 */
37
38 /* Promela validation variables. */
39
40 /* specific defines "included" here */
41 /* DEFINES file "included" here */
42
43 /* All signal readers have same PID and uses same reader variable */
44 #ifdef TEST_SIGNAL_ON_WRITE
45
46 #define NR_READERS 1 /* the writer is also a signal reader */
47 #define NR_WRITERS 1
48
49 #define NR_PROCS 1
50
51 #define get_pid() (0)
52
53 #elif defined(TEST_SIGNAL_ON_READ)
54
55 #define get_pid() ((_pid < 2) -> 0 : 1)
56
57 #define NR_READERS 1
58 #define NR_WRITERS 1
59
60 #define NR_PROCS 2
61
62 #else
63
64 #define get_pid() (_pid)
65
66 #define NR_READERS 1
67 #define NR_WRITERS 1
68
69 #define NR_PROCS 2
70
71 #endif
72
73 #define get_readerid() (get_pid())
74
75 /*
76 * Each process have its own data in cache. Caches are randomly updated.
77 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
78 * both.
79 */
80
/*
 * Each modeled shared variable has one cached copy per process; these
 * typedefs hold those per-process copies and their dirty state.
 */
81 typedef per_proc_byte {
/* cached value of the variable as seen by each process */
82 byte val[NR_PROCS];
83 };
84 
85 /* Bitfield has a maximum of 8 procs */
86 typedef per_proc_bit {
/* bit i set => process i's cached copy is dirty (not yet written back) */
87 byte bitfield;
88 };
89
/*
 * Declare the main-memory copy, the per-process cached copies and the
 * per-process dirty bits for variable x.
 */
90 #define DECLARE_CACHED_VAR(type, x) \
91 type mem_##x; \
92 per_proc_##type cached_##x; \
93 per_proc_bit cache_dirty_##x;
94 
/*
 * Initialize x to v in memory and in every process's cache, and clear
 * all dirty bits. "j" is caller-provided scratch loop storage.
 */
95 #define INIT_CACHED_VAR(x, v, j) \
96 mem_##x = v; \
97 cache_dirty_##x.bitfield = 0; \
98 j = 0; \
99 do \
100 :: j < NR_PROCS -> \
101 cached_##x.val[j] = v; \
102 j++ \
103 :: j >= NR_PROCS -> break \
104 od;
105 
/* True when process "id" has a not-yet-written-back store to x. */
106 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
107 
/* Reads/writes always go through the current process's cached copy. */
108 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
109 
/* A write updates the local cache and marks it dirty, atomically. */
110 #define WRITE_CACHED_VAR(x, v) \
111 atomic { \
112 cached_##x.val[get_pid()] = v; \
113 cache_dirty_##x.bitfield = \
114 cache_dirty_##x.bitfield | (1 << get_pid()); \
115 }
116 
/* Flush process id's cached x to memory iff it is dirty. */
117 #define CACHE_WRITE_TO_MEM(x, id) \
118 if \
119 :: IS_CACHE_DIRTY(x, id) -> \
120 mem_##x = cached_##x.val[id]; \
121 cache_dirty_##x.bitfield = \
122 cache_dirty_##x.bitfield & (~(1 << id)); \
123 :: else -> \
124 skip \
125 fi;
126 
/* Refresh process id's cached x from memory unless a dirty store is pending. */
127 #define CACHE_READ_FROM_MEM(x, id) \
128 if \
129 :: !IS_CACHE_DIRTY(x, id) -> \
130 cached_##x.val[id] = mem_##x;\
131 :: else -> \
132 skip \
133 fi;
134 
135 /*
136 * May update other caches if cache is dirty, or not.
137 */
138 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
139 if \
140 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
141 :: 1 -> skip \
142 fi;
143 
/* Non-deterministically refresh the cache from memory, or do nothing. */
144 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
145 if \
146 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
147 :: 1 -> skip \
148 fi;
149
150 /*
151 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
152 * reader threads to promote their compiler barrier to a smp_mb().
153 */
154 #ifdef REMOTE_BARRIERS
155
/*
 * Read barrier executed on behalf of process "i": atomically pull every
 * modeled shared variable (urcu_gp_ctr, each urcu_active_readers slot,
 * generation_ptr) from memory into i's cache. "j" is scratch storage.
 */
156 inline smp_rmb_pid(i, j)
157 {
158 atomic {
159 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
160 j = 0;
161 do
162 :: j < NR_READERS ->
163 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
164 j++
165 :: j >= NR_READERS -> break
166 od;
167 CACHE_READ_FROM_MEM(generation_ptr, i);
168 }
169 }
170
/*
 * Write barrier executed on behalf of process "i": atomically flush
 * every dirty cached shared variable of process i back to memory.
 * Mirror image of smp_rmb_pid; "j" is scratch storage.
 */
171 inline smp_wmb_pid(i, j)
172 {
173 atomic {
174 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
175 j = 0;
176 do
177 :: j < NR_READERS ->
178 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
179 j++
180 :: j >= NR_READERS -> break
181 od;
182 CACHE_WRITE_TO_MEM(generation_ptr, i);
183 }
184 }
185
/*
 * Full barrier for process "i": write barrier then read barrier.
 * NO_WMB / NO_RMB compile out one side (used to check that the model
 * detects missing barriers); when BOTH are compiled out, only a random
 * cache update (ooo_mem) remains, i.e. no ordering at all.
 */
186 inline smp_mb_pid(i, j)
187 {
188 atomic {
189 #ifndef NO_WMB
190 smp_wmb_pid(i, j);
191 #endif
192 #ifndef NO_RMB
193 smp_rmb_pid(i, j);
194 #endif
195 #ifdef NO_WMB
196 #ifdef NO_RMB
197 ooo_mem(j);
198 #endif
199 #endif
200 }
201 }
202
203 /*
204 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
205 * signal or IPI to have all readers execute a smp_mb.
206 * We are not modeling the whole rendez-vous between readers and writers here,
207 * we just let the writer update each reader's caches remotely.
208 */
/*
 * REMOTE_BARRIERS scheme: the writer runs a full barrier itself, then
 * remotely executes a full barrier for every reader pid (modeling the
 * signal/IPI handler), then another local full barrier.
 */
209 inline smp_mb_writer(i, j)
210 {
211 smp_mb_pid(get_pid(), j);
212 i = 0;
213 do
214 :: i < NR_READERS ->
215 smp_mb_pid(i, j);
216 i++;
217 :: i >= NR_READERS -> break
218 od;
219 smp_mb_pid(get_pid(), j);
220 }
221 
/*
 * Reader-side barrier is a no-op here: the writer promotes the reader's
 * compiler barrier to a smp_mb() remotely (see comment above).
 */
222 inline smp_mb_reader(i, j)
223 {
224 skip;
225 }
226
227 #else
228
/*
 * Non-REMOTE_BARRIERS implementations: each process executes its own
 * barriers locally. "i" is scratch loop storage; "j" is unused here but
 * kept so both configurations share one call signature.
 */
/* Pull all modeled shared variables from memory into this process's cache. */
229 inline smp_rmb(i, j)
230 {
231 atomic {
232 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
233 i = 0;
234 do
235 :: i < NR_READERS ->
236 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
237 i++
238 :: i >= NR_READERS -> break
239 od;
240 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
241 }
242 }
243 
/* Flush this process's dirty cached shared variables back to memory. */
244 inline smp_wmb(i, j)
245 {
246 atomic {
247 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
248 i = 0;
249 do
250 :: i < NR_READERS ->
251 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
252 i++
253 :: i >= NR_READERS -> break
254 od;
255 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
256 }
257 }
258 
/*
 * Full barrier: wmb then rmb; NO_WMB/NO_RMB compile one side out, and
 * with both absent only a random cache update remains (see smp_mb_pid).
 */
259 inline smp_mb(i, j)
260 {
261 atomic {
262 #ifndef NO_WMB
263 smp_wmb(i, j);
264 #endif
265 #ifndef NO_RMB
266 smp_rmb(i, j);
267 #endif
268 #ifdef NO_WMB
269 #ifdef NO_RMB
270 ooo_mem(i);
271 #endif
272 #endif
273 }
274 }
275 
/* Without remote barriers, writer and reader barriers are both real smp_mb(). */
276 inline smp_mb_writer(i, j)
277 {
278 smp_mb(i, j);
279 }
280 
281 inline smp_mb_reader(i, j)
282 {
283 smp_mb(i, j);
284 }
285
286 #endif
287
288 /* Keep in sync manually with smp_rmb, wmp_wmb, ooo_mem and init() */
289 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
290 /* Note ! currently only two readers */
291 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
292 /* pointer generation */
293 DECLARE_CACHED_VAR(byte, generation_ptr);
294 
/* Generation number of the last pointer freed by the writer. */
295 byte last_free_gen = 0;
/* Set once the writer has performed its free-up step. */
296 bit free_done = 0;
/* Generation observed by each reader inside its read-side C.S. */
297 byte read_generation[NR_READERS];
/* 1 while a reader is accessing the data (used by read_free* claims). */
298 bit data_access[NR_READERS];
299 
/* Mutex serializing writers (test-and-set in urcu_writer). */
300 bit write_lock = 0;
301 
/* Set by init{} when all cached variables are initialized. */
302 bit init_done = 0;
303 
/* Handshake flag between signal dispatcher and signal-handler reader. */
304 bit sighand_exec = 0;
305
/* Busy-wait until init{} has finished setting up the cached variables. */
306 inline wait_init_done()
307 {
308 do
309 :: init_done == 0 -> skip;
310 :: else -> break;
311 od;
312 }
313
314 #ifdef TEST_SIGNAL
315
/*
 * Signal-handler reader side of the handshake: clear sighand_exec and
 * busy-wait until the dispatcher raises it again, then return.
 *
 * Fix: the TOO_BIG_STATE_SPACE variant below defines the same inline a
 * second time inside this #ifdef TEST_SIGNAL branch; guard the default
 * definition so only one is ever visible to Spin.
 */
#ifndef TOO_BIG_STATE_SPACE
316 inline wait_for_sighand_exec()
317 {
318 sighand_exec = 0;
319 do
320 :: sighand_exec == 0 -> skip;
321 :: else -> break;
322 od;
323 }
#endif
324 
325 #ifdef TOO_BIG_STATE_SPACE
/*
 * Variant that may non-deterministically re-arm the flag and wait for
 * another dispatch before returning (explores more interleavings; named
 * for the state-space cost of enabling it).
 */
326 inline wait_for_sighand_exec()
327 {
328 sighand_exec = 0;
329 do
330 :: sighand_exec == 0 -> skip;
331 :: else ->
332 if
333 :: 1 -> break;
334 :: 1 -> sighand_exec = 0;
335 skip;
336 fi;
337 od;
338 }
339 #endif
340
341 #else
342
/* No signal testing configured: the handshake degenerates to a no-op. */
343 inline wait_for_sighand_exec()
344 {
345 skip;
346 }
347
348 #endif
349
350 #ifdef TEST_SIGNAL_ON_WRITE
351 /* Block on signal handler execution */
/*
 * Writer side of the signal handshake: raise sighand_exec and block
 * until the signal-handler reader consumes it (clears the flag).
 */
352 inline dispatch_sighand_write_exec()
353 {
354 sighand_exec = 1;
355 do
356 :: sighand_exec == 1 ->
357 skip;
358 :: else ->
359 break;
360 od;
361 }
362 
363 #else
364 
/* Signal-on-write not configured: dispatch is a no-op. */
365 inline dispatch_sighand_write_exec()
366 {
367 skip;
368 }
369
370 #endif
371
372 #ifdef TEST_SIGNAL_ON_READ
373 /* Block on signal handler execution */
/*
 * Reader side dispatcher (TEST_SIGNAL_ON_READ): raise sighand_exec and
 * block until the signal-handler reader consumes it.
 */
374 inline dispatch_sighand_read_exec()
375 {
376 sighand_exec = 1;
377 do
378 :: sighand_exec == 1 ->
379 skip;
380 :: else ->
381 break;
382 od;
383 }
384 
385 #else
386 
/* Signal-on-read not configured: dispatch is a no-op. */
387 inline dispatch_sighand_read_exec()
388 {
389 skip;
390 }
391
392 #endif
393
394
/*
 * Model out-of-order memory: non-deterministically write back and/or
 * refresh each shared variable's cache entry for the current process.
 * Inserted between statements to let Spin explore every reordering the
 * weak memory model permits. "i" is scratch loop storage.
 */
395 inline ooo_mem(i)
396 {
397 atomic {
/* random write-backs first ... */
398 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
399 i = 0;
400 do
401 :: i < NR_READERS ->
402 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
403 get_pid());
404 i++
405 :: i >= NR_READERS -> break
406 od;
407 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
/* ... then random refreshes from memory */
408 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
409 i = 0;
410 do
411 :: i < NR_READERS ->
412 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
413 get_pid());
414 i++
415 :: i >= NR_READERS -> break
416 od;
417 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
418 }
419 }
420
/*
 * Writer helper: spin until reader "tmp" is no longer in a read-side
 * critical section begun before the current grace-period phase, i.e.
 * until its nest count is zero or its snapshot of RCU_GP_CTR_BIT
 * matches the current urcu_gp_ctr. GEN_ERROR_WRITER_PROGRESS replaces
 * the barrier with ooo_mem to deliberately generate a progress error.
 */
421 inline wait_for_reader(tmp, tmp2, i, j)
422 {
423 do
424 :: 1 ->
425 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
426 ooo_mem(i);
427 dispatch_sighand_write_exec();
428 if
/* still nested AND in the previous GP phase -> keep waiting */
429 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
430 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
431 & RCU_GP_CTR_BIT) ->
432 #ifndef GEN_ERROR_WRITER_PROGRESS
433 smp_mb_writer(i, j);
434 #else
435 ooo_mem(i);
436 #endif
437 dispatch_sighand_write_exec();
438 :: else ->
439 break;
440 fi;
441 od;
442 }
443
/*
 * Writer helper: wait for every reader in turn to pass through a
 * quiescent state (see wait_for_reader). Between readers, allow memory
 * reordering and signal dispatch; with NR_READERS == 1 that branch is
 * dead and the else/skip is taken.
 */
444 inline wait_for_quiescent_state(tmp, tmp2, i, j)
445 {
446 tmp = 0;
447 do
448 :: tmp < NR_READERS ->
449 wait_for_reader(tmp, tmp2, i, j);
450 if
451 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
452 -> ooo_mem(i);
453 dispatch_sighand_write_exec();
454 :: else
455 -> skip;
456 fi;
457 tmp++
458 :: tmp >= NR_READERS -> break
459 od;
460 }
461
462 /* Model the RCU read-side critical section. */
463
464 #ifndef TEST_SIGNAL_ON_WRITE
465
/*
 * One full RCU read-side critical section, nested READER_NEST_LEVEL
 * deep: lock (copy gp counter or bump nest count), record the observed
 * generation, touch the data, then unlock (decrement) at each level.
 * ooo_mem/dispatch calls mark the points where reordering and signal
 * delivery may occur.
 */
466 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
467 {
/* rcu_read_lock(), once per nesting level */
468 nest_i = 0;
469 do
470 :: nest_i < READER_NEST_LEVEL ->
471 ooo_mem(i);
472 dispatch_sighand_read_exec();
473 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
474 ooo_mem(i);
475 dispatch_sighand_read_exec();
476 if
/* outermost lock: snapshot the global grace-period counter */
477 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
478 ->
479 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
480 ooo_mem(i);
481 dispatch_sighand_read_exec();
482 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
483 tmp2);
/* nested lock: just bump the nest count */
484 :: else ->
485 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
486 tmp + 1);
487 fi;
488 smp_mb_reader(i, j);
489 dispatch_sighand_read_exec();
490 nest_i++;
491 :: nest_i >= READER_NEST_LEVEL -> break;
492 od;
493 
/* the protected read: record generation, then flag the data access */
494 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
495 data_access[get_readerid()] = 1;
496 data_access[get_readerid()] = 0;
497 
/* rcu_read_unlock(), once per nesting level */
498 nest_i = 0;
499 do
500 :: nest_i < READER_NEST_LEVEL ->
501 smp_mb_reader(i, j);
502 dispatch_sighand_read_exec();
503 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
504 ooo_mem(i);
505 dispatch_sighand_read_exec();
506 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
507 nest_i++;
508 :: nest_i >= READER_NEST_LEVEL -> break;
509 od;
510 //ooo_mem(i);
511 //dispatch_sighand_read_exec();
512 //smp_mc(i); /* added */
513 }
514
/*
 * Reader process: after init completes, loop forever performing
 * read-side critical sections. The end_reader label marks the loop as a
 * valid end state; progress_reader is emitted only when checking reader
 * progress (READER_PROGRESS, defined at the top of this file).
 */
515 active proctype urcu_reader()
516 {
517 byte i, j, nest_i;
518 byte tmp, tmp2;
519 
520 wait_init_done();
521 
522 assert(get_pid() < NR_PROCS);
523 
524 end_reader:
525 do
526 :: 1 ->
527 /*
528 * We do not test reader's progress here, because we are mainly
529 * interested in writer's progress. The reader never blocks
530 * anyway. We have to test for reader/writer's progress
531 * separately, otherwise we could think the writer is doing
532 * progress when it's blocked by an always progressing reader.
533 */
534 #ifdef READER_PROGRESS
535 progress_reader:
536 #endif
537 urcu_one_read(i, j, nest_i, tmp, tmp2);
538 od;
539 }
540
541 #endif //!TEST_SIGNAL_ON_WRITE
542
543 #ifdef TEST_SIGNAL
544 /* signal handler reader */
545
/*
 * Signal-handler variant of urcu_one_read: identical lock / read /
 * unlock structure, but with no dispatch_sighand_read_exec() calls —
 * the handler itself is the signal target, so it cannot re-dispatch.
 */
546 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
547 {
/* rcu_read_lock(), once per nesting level */
548 nest_i = 0;
549 do
550 :: nest_i < READER_NEST_LEVEL ->
551 ooo_mem(i);
552 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
553 ooo_mem(i);
554 if
/* outermost lock: snapshot the global grace-period counter */
555 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
556 ->
557 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
558 ooo_mem(i);
559 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
560 tmp2);
/* nested lock: just bump the nest count */
561 :: else ->
562 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
563 tmp + 1);
564 fi;
565 smp_mb_reader(i, j);
566 nest_i++;
567 :: nest_i >= READER_NEST_LEVEL -> break;
568 od;
569 
/* the protected read: record generation, then flag the data access */
570 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
571 data_access[get_readerid()] = 1;
572 data_access[get_readerid()] = 0;
573 
/* rcu_read_unlock(), once per nesting level */
574 nest_i = 0;
575 do
576 :: nest_i < READER_NEST_LEVEL ->
577 smp_mb_reader(i, j);
578 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
579 ooo_mem(i);
580 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
581 nest_i++;
582 :: nest_i >= READER_NEST_LEVEL -> break;
583 od;
584 //ooo_mem(i);
585 //smp_mc(i); /* added */
586 }
587
/*
 * Signal-handler reader process: like urcu_reader, but each critical
 * section only runs after the dispatcher raises sighand_exec (see
 * wait_for_sighand_exec), modeling reads done from a signal handler.
 */
588 active proctype urcu_reader_sig()
589 {
590 byte i, j, nest_i;
591 byte tmp, tmp2;
592 
593 wait_init_done();
594 
595 assert(get_pid() < NR_PROCS);
596 
597 end_reader:
598 do
599 :: 1 ->
600 wait_for_sighand_exec();
601 /*
602 * We do not test reader's progress here, because we are mainly
603 * interested in writer's progress. The reader never blocks
604 * anyway. We have to test for reader/writer's progress
605 * separately, otherwise we could think the writer is doing
606 * progress when it's blocked by an always progressing reader.
607 */
608 #ifdef READER_PROGRESS
609 progress_reader:
610 #endif
611 urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
612 od;
613 }
614
615 #endif
616
617 /* Model the RCU update process. */
618
/*
 * Writer process: for the first 5 generations, publish a new pointer
 * generation, then run the grace-period algorithm (flip RCU_GP_CTR_BIT,
 * wait for readers — twice unless SINGLE_FLIP) under write_lock, and
 * finally record the freed generation for the read_free_race claim.
 */
619 active proctype urcu_writer()
620 {
621 byte i, j;
622 byte tmp, tmp2;
623 byte old_gen;
624 
625 wait_init_done();
626 
627 assert(get_pid() < NR_PROCS);
628 
629 do
630 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
631 #ifdef WRITER_PROGRESS
632 progress_writer1:
633 #endif
634 ooo_mem(i);
635 dispatch_sighand_write_exec();
/* publish the next pointer generation (models rcu_assign_pointer) */
636 atomic {
637 old_gen = READ_CACHED_VAR(generation_ptr);
638 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
639 }
640 ooo_mem(i);
641 dispatch_sighand_write_exec();
642 
/* acquire write_lock by busy-wait test-and-set */
643 do
644 :: 1 ->
645 atomic {
646 if
647 :: write_lock == 0 ->
648 write_lock = 1;
649 break;
650 :: else ->
651 skip;
652 fi;
653 }
654 od;
655 smp_mb_writer(i, j);
656 dispatch_sighand_write_exec();
/* first grace-period phase: flip parity bit and wait for readers */
657 tmp = READ_CACHED_VAR(urcu_gp_ctr);
658 ooo_mem(i);
659 dispatch_sighand_write_exec();
660 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
661 ooo_mem(i);
662 dispatch_sighand_write_exec();
663 //smp_mc(i);
664 wait_for_quiescent_state(tmp, tmp2, i, j);
665 //smp_mc(i);
/* second phase (skipped under SINGLE_FLIP): flip back and wait again */
666 #ifndef SINGLE_FLIP
667 ooo_mem(i);
668 dispatch_sighand_write_exec();
669 tmp = READ_CACHED_VAR(urcu_gp_ctr);
670 ooo_mem(i);
671 dispatch_sighand_write_exec();
672 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
673 //smp_mc(i);
674 ooo_mem(i);
675 dispatch_sighand_write_exec();
676 wait_for_quiescent_state(tmp, tmp2, i, j);
677 #endif
678 smp_mb_writer(i, j);
679 dispatch_sighand_write_exec();
680 write_lock = 0;
681 /* free-up step, e.g., kfree(). */
682 atomic {
683 last_free_gen = old_gen;
684 free_done = 1;
685 }
686 :: else -> break;
687 od;
688 /*
689 * Given the reader loops infinitely, let the writer also busy-loop
690 * with progress here so, with weak fairness, we can test the
691 * writer's progress.
692 */
693 end_writer:
694 do
695 :: 1 ->
696 #ifdef WRITER_PROGRESS
697 progress_writer2:
698 #endif
699 dispatch_sighand_write_exec();
700 od;
701 }
702
703 /* Leave after the readers and writers so the pid count is ok. */
/*
 * Initialize all cached shared variables and per-reader state in one
 * atomic step, then release the other processes via init_done.
 * Note: urcu_gp_ctr starts at 1 (one "in-use" nest count unit) and
 * read_generation starts at 1, ahead of generation_ptr's initial 0.
 */
704 init {
705 byte i, j;
706 
707 atomic {
708 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
709 INIT_CACHED_VAR(generation_ptr, 0, j);
710 
711 i = 0;
712 do
713 :: i < NR_READERS ->
714 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
715 read_generation[i] = 1;
716 data_access[i] = 0;
717 i++;
718 :: i >= NR_READERS -> break
719 od;
720 init_done = 1;
721 }
722 }
This page took 0.043999 seconds and 4 git commands to generate.