/*
 * urcu.spin: Promela code to validate memory barriers with out-of-order (OOO)
 * memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

/* All signal readers share the same PID and use the same reader variable. */
#ifdef TEST_SIGNAL_ON_WRITE

#define NR_READERS 1	/* the writer is also a signal reader */
#define NR_WRITERS 1

#define NR_PROCS 1

#define get_pid()	(0)

#elif defined(TEST_SIGNAL_ON_READ)

#define get_pid()	((_pid < 2) -> 0 : 1)

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#else

#define get_pid()	(_pid)

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#endif

#define get_readerid()	(get_pid())

/*
 * Each process has its own data in cache.  Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
 * smp_mb forces both.  (See the expansion sketch after the macros below.)
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bit cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)	\
	atomic {		\
		cached_##x.val[get_pid()] = v;	\
		cache_dirty_##x.bitfield =	\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)	\
	if				\
	:: IS_CACHE_DIRTY(x, id) ->	\
		mem_##x = cached_##x.val[id];	\
		cache_dirty_##x.bitfield =	\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->			\
		skip			\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else ->			\
		skip			\
	fi;

/*
 * Nondeterministically update memory/other caches from this cache line,
 * or not: this models random cache write-backs and refills.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;
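
/*
 * Expansion sketch (illustrative only, not part of the model): a
 * hypothetical DECLARE_CACHED_VAR(byte, foo) produces one memory copy plus
 * one cache line per process:
 *
 *	byte mem_foo;
 *	per_proc_byte cached_foo;
 *	per_proc_bit cache_dirty_foo;
 *
 * A WRITE_CACHED_VAR(foo, 1) by process 0 only touches cached_foo.val[0]
 * and sets bit 0 of cache_dirty_foo.bitfield.  The value reaches mem_foo
 * only once CACHE_WRITE_TO_MEM(foo, 0) runs, from a barrier or from a
 * random flush in ooo_mem(); other processes then pick it up through
 * CACHE_READ_FROM_MEM.
 */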

/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to an smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(j);
#endif
#endif
	}
}

/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a
 * signal or IPI to make all readers execute an smp_mb().
 * We do not model the whole rendezvous between readers and writers here;
 * we just let the writer update each reader's caches remotely.
 */
inline smp_mb_writer(i, j)
{
	smp_mb_pid(get_pid(), j);
	i = 0;
	do
	:: i < NR_READERS ->
		smp_mb_pid(i, j);
		i++;
	:: i >= NR_READERS -> break
	od;
	smp_mb_pid(get_pid(), j);
}
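
/*
 * For reference, a sketch of the rendezvous this abstracts away
 * (hypothetical pseudocode, not the actual liburcu implementation):
 *
 *	for each reader r:
 *		send_signal(r);		// promotes r's barrier() to smp_mb()
 *	wait until every reader has acknowledged the signal;
 *
 * Here the writer instead flushes/fetches every reader's cache lines
 * itself via smp_mb_pid(), which has the same ordering effect on this
 * cache model.
 */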

inline smp_mb_reader(i, j)
{
	skip;
}

#else

inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i, j);
#endif
#ifndef NO_RMB
		smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

inline smp_mb_writer(i, j)
{
	smp_mb(i, j);
}

inline smp_mb_reader(i, j)
{
	smp_mb(i, j);
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init(). */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note! The model currently supports at most NR_READERS readers. */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

bit sighand_exec = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}
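
/*
 * Promela aside: an expression statement blocks until it evaluates to
 * true, so the busy-wait above could also be written as the single
 * blocking statement
 *
 *	(init_done == 1);
 *
 * The do/od form makes each polling step an explicit transition instead.
 */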

#ifdef TEST_SIGNAL

#ifndef TOO_BIG_STATE_SPACE
inline wait_for_sighand_exec()
{
	sighand_exec = 0;
	do
	:: sighand_exec == 0 -> skip;
	:: else -> break;
	od;
}
#else
inline wait_for_sighand_exec()
{
	sighand_exec = 0;
	do
	:: sighand_exec == 0 -> skip;
	:: else ->
		if
		:: 1 -> break;
		:: 1 -> sighand_exec = 0;
			skip;
		fi;
	od;
}
#endif

#else

inline wait_for_sighand_exec()
{
	skip;
}

#endif

#ifdef TEST_SIGNAL_ON_WRITE
/* Block on signal handler execution */
inline dispatch_sighand_write_exec()
{
	sighand_exec = 1;
	do
	:: sighand_exec == 1 ->
		skip;
	:: else ->
		break;
	od;
}

#else

inline dispatch_sighand_write_exec()
{
	skip;
}

#endif

#ifdef TEST_SIGNAL_ON_READ
/* Block on signal handler execution */
inline dispatch_sighand_read_exec()
{
	sighand_exec = 1;
	do
	:: sighand_exec == 1 ->
		skip;
	:: else ->
		break;
	od;
}

#else

inline dispatch_sighand_read_exec()
{
	skip;
}

#endif
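
/*
 * dispatch_sighand_*_exec() and wait_for_sighand_exec() form a handshake
 * over the single sighand_exec bit: the dispatching side raises the bit
 * and spins until it drops back to 0, while the signal-handler reader
 * blocks in wait_for_sighand_exec() until the bit is raised, runs one
 * read-side critical section, and acknowledges by clearing the bit when
 * it re-enters wait_for_sighand_exec().  This models signal delivery at
 * each dispatch point without introducing channels.
 */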

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline wait_for_reader(tmp, tmp2, i, j)
{
	do
	:: 1 ->
		tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		if
		:: (tmp2 & RCU_GP_CTR_NEST_MASK)
			&& ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb_writer(i, j);
#else
			ooo_mem(i);
#endif
			dispatch_sighand_write_exec();
		:: else ->
			break;
		fi;
	od;
}
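
/*
 * Worked example of the wait condition above, assuming the usual encoding
 * where RCU_GP_CTR_BIT is the flip bit and RCU_GP_CTR_NEST_MASK covers the
 * low nesting bits (both come from the "included" DEFINES file; the values
 * here are illustrative): suppose RCU_GP_CTR_BIT == 0x80 and
 * RCU_GP_CTR_NEST_MASK == 0x7f.  A reader that snapshotted
 * urcu_gp_ctr == 0x01 has tmp2 == 0x01; after the writer flips the counter,
 * urcu_gp_ctr == 0x81, so (tmp2 ^ urcu_gp_ctr) & 0x80 != 0 and
 * tmp2 & 0x7f != 0: the reader is inside a pre-flip critical section and
 * the writer keeps waiting.  A reader with nesting 0 (tmp2 & 0x7f == 0), or
 * one that re-read the flipped counter, is quiescent, so the loop breaks.
 */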

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
	tmp = 0;
	do
	:: tmp < NR_READERS ->
		wait_for_reader(tmp, tmp2, i, j);
		if
		:: (NR_READERS > 1) && (tmp < NR_READERS - 1)
			-> ooo_mem(i);
			dispatch_sighand_write_exec();
		:: else
			-> skip;
		fi;
		tmp++
	:: tmp >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

#ifndef TEST_SIGNAL_ON_WRITE

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		dispatch_sighand_read_exec();
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		dispatch_sighand_read_exec();
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			dispatch_sighand_read_exec();
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
				tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
				tmp + 1);
		fi;
		smp_mb_reader(i, j);
		dispatch_sighand_read_exec();
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb_reader(i, j);
		dispatch_sighand_read_exec();
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		dispatch_sighand_read_exec();
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	//ooo_mem(i);
	//dispatch_sighand_read_exec();
	//smp_mc(i); /* added */
}
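
/*
 * Correspondence with the C read side (a sketch; the names follow the
 * liburcu API): the first loop models nested rcu_read_lock() calls --
 * copy the global urcu_gp_ctr on outermost entry, or increment the
 * nesting count otherwise -- the read_generation/data_access lines model
 * dereferencing and using an RCU-protected pointer, and the second loop
 * models the matching rcu_read_unlock() calls decrementing the per-reader
 * counter.
 */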

active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress.  The reader
		 * never blocks anyway.  We have to test reader and writer
		 * progress separately; otherwise we could think the writer
		 * is making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

#endif //!TEST_SIGNAL_ON_WRITE

#ifdef TEST_SIGNAL
/* signal handler reader */

inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
				tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
				tmp + 1);
		fi;
		smp_mb_reader(i, j);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb_reader(i, j);
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	//ooo_mem(i);
	//smp_mc(i); /* added */
}

active proctype urcu_reader_sig()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		wait_for_sighand_exec();
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress.  The reader
		 * never blocks anyway.  We have to test reader and writer
		 * progress separately; otherwise we could think the writer
		 * is making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
	od;
}

#endif

/* Model the RCU update process. */

active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2;
	byte old_gen;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		dispatch_sighand_write_exec();
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);
		dispatch_sighand_write_exec();

		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb_writer(i, j);
		dispatch_sighand_write_exec();
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		//smp_mc(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
		//smp_mc(i);
#ifndef SINGLE_FLIP
		ooo_mem(i);
		dispatch_sighand_write_exec();
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
		smp_mb_writer(i, j);
		dispatch_sighand_write_exec();
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Since the reader loops infinitely, let the writer also busy-loop
	 * with progress here, so that with weak fairness we can test the
	 * writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		dispatch_sighand_write_exec();
	od;
}
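
/*
 * The update loop above models synchronize_rcu(): publish the next
 * generation, take the write lock, flip RCU_GP_CTR_BIT and wait for all
 * readers to quiesce (twice, unless SINGLE_FLIP), then perform the free-up
 * step.  The actual safety property lives in the "included" DEFINES file;
 * a hypothetical sketch of the kind of claim it makes, written with this
 * file's globals in Spin 6 ltl syntax:
 *
 *	ltl no_use_after_free {
 *		[] !(data_access[0] && free_done
 *			&& read_generation[0] <= last_free_gen)
 *	}
 *
 * i.e. a reader must never be inside its data access while the generation
 * it read from has already been freed.
 */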

/* Declared after the readers and writers so the _pid numbering is correct. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++;
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}
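
/*
 * Typical way to run this model (a sketch, assuming the standard Spin
 * toolchain and a DEFINES file prepended as noted at the top):
 *
 *	spin -a -DTEST_SIGNAL_ON_READ urcu.spin   # generate the pan.c verifier
 *	cc -o pan pan.c                           # add -DSAFETY for safety-only
 *	./pan -a -f                               # acceptance cycles, weak fairness
 *
 * The TEST_SIGNAL*, *_PROGRESS and NO_[RW]MB defines seen above select
 * which scenario the verifier explores.
 */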