/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

/* All signal readers have the same PID and use the same reader variable */
#ifdef TEST_SIGNAL_ON_WRITE
#define get_pid()       ((_pid < 1) -> 0 : 1)
#elif defined(TEST_SIGNAL_ON_READ)
#define get_pid()       ((_pid < 2) -> 0 : 1)
#else
#define get_pid()       (_pid)
#endif

#define get_readerid()  (get_pid())

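/*
 * Illustration (a sketch, assuming proctypes are instantiated in
 * declaration order, which the init comment at the bottom of this file
 * also relies on):
 * - TEST_SIGNAL_ON_READ:  _pid 0 (reader) and _pid 1 (signal handler
 *   reader) both map to pid 0; the writer maps to pid 1.
 * - TEST_SIGNAL_ON_WRITE: the reader maps to pid 0; the signal handler
 *   reader and the writer both map to pid 1.
 * Processes sharing a pid also share the per-process cache slot and the
 * urcu_active_readers[] entry selected by get_readerid().
 */
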
/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
 * both.
 */

typedef per_proc_byte {
        byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
        byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)     \
        type mem_##x;                   \
        per_proc_##type cached_##x;     \
        per_proc_bit cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)        \
        mem_##x = v;                    \
        cache_dirty_##x.bitfield = 0;   \
        j = 0;                          \
        do                              \
        :: j < NR_PROCS ->              \
                cached_##x.val[j] = v;  \
                j++                     \
        :: j >= NR_PROCS -> break       \
        od;

#define IS_CACHE_DIRTY(x, id)   (cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)      (cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)                          \
        atomic {                                        \
                cached_##x.val[get_pid()] = v;          \
                cache_dirty_##x.bitfield =              \
                        cache_dirty_##x.bitfield | (1 << get_pid());    \
        }

#define CACHE_WRITE_TO_MEM(x, id)                       \
        if                                              \
        :: IS_CACHE_DIRTY(x, id) ->                     \
                mem_##x = cached_##x.val[id];           \
                cache_dirty_##x.bitfield =              \
                        cache_dirty_##x.bitfield & (~(1 << id));        \
        :: else ->                                      \
                skip                                    \
        fi;

#define CACHE_READ_FROM_MEM(x, id)      \
        if                              \
        :: !IS_CACHE_DIRTY(x, id) ->    \
                cached_##x.val[id] = mem_##x;   \
        :: else ->                      \
                skip                    \
        fi;

97/*
98 * May update other caches if cache is dirty, or not.
99 */
100#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
101 if \
102 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
103 :: 1 -> skip \
104 fi;
105
106#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
107 if \
108 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
109 :: 1 -> skip \
110 fi;
111
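/*
 * Usage sketch for the cache model above (descriptive only, not part of
 * the verified model): DECLARE_CACHED_VAR(byte, x) creates a memory copy
 * "mem_x", per-process cache copies "cached_x.val[NR_PROCS]" and a dirty
 * bitfield "cache_dirty_x". WRITE_CACHED_VAR(x, v) only updates the
 * writing process's cache entry and marks it dirty; the store becomes
 * visible to another process once CACHE_WRITE_TO_MEM(x, id) flushes it
 * to mem_x (via smp_wmb or a random ooo_mem flush) and that other
 * process refreshes its own clean cache entry with
 * CACHE_READ_FROM_MEM(x, id).
 */
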
/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to a smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, i);
        }
}

inline smp_wmb_pid(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, i);
        }
}

inline smp_mb_pid(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
                smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(j);
#endif
#endif
        }
}

/*
 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a
 * signal or IPI to have all readers execute a smp_mb().
 * We are not modeling the whole rendezvous between readers and writers
 * here; we just let the writer update each reader's caches remotely.
 */
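/*
 * In this model, only the writer (get_pid() >= NR_READERS) performs the
 * promotion: its smp_mb() below runs smp_mb_pid() for itself, then once
 * per reader, then for itself again, standing in for the signal/IPI
 * round-trip. A reader calling smp_mb() under REMOTE_BARRIERS is a no-op
 * (compiler barrier only).
 */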
inline smp_mb(i, j)
{
        if
        :: get_pid() >= NR_READERS ->
                smp_mb_pid(get_pid(), j);
                i = 0;
                do
                :: i < NR_READERS ->
                        smp_mb_pid(i, j);
                        i++;
                :: i >= NR_READERS -> break
                od;
                smp_mb_pid(get_pid(), j);
        :: else -> skip;
        fi;
}

#else

inline smp_rmb(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

inline smp_wmb(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
        }
}

inline smp_mb(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb(i, j);
#endif
#ifndef NO_RMB
                smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(i);
#endif
#endif
        }
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note! Currently only two readers */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

bit sighand_exec = 0;

inline wait_init_done()
{
        do
        :: init_done == 0 -> skip;
        :: else -> break;
        od;
}

#ifdef TEST_SIGNAL

inline wait_for_sighand_exec()
{
        sighand_exec = 0;
        do
        :: sighand_exec == 0 -> skip;
        :: else ->
                if
                :: 1 -> break;
                :: 1 -> sighand_exec = 0;
                        skip;
                fi;
        od;
}

#else

inline wait_for_sighand_exec()
{
        skip;
}

#endif

#ifdef TEST_SIGNAL_ON_WRITE
/* Block on signal handler execution */
inline dispatch_sighand_write_exec()
{
        sighand_exec = 1;
        do
        :: sighand_exec == 1 ->
                skip;
        :: else ->
                break;
        od;
}

#else

inline dispatch_sighand_write_exec()
{
        skip;
}

#endif

#ifdef TEST_SIGNAL_ON_READ
/* Block on signal handler execution */
inline dispatch_sighand_read_exec()
{
        sighand_exec = 1;
        do
        :: sighand_exec == 1 ->
                skip;
        :: else ->
                break;
        od;
}

#else

inline dispatch_sighand_read_exec()
{
        skip;
}

#endif

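/*
 * Handshake between the dispatch_sighand_*_exec() call sites above and
 * wait_for_sighand_exec() in the signal handler reader: the dispatching
 * process sets sighand_exec to 1 and blocks until it is cleared. The
 * signal handler reader, waiting in wait_for_sighand_exec(), then either
 * clears the flag right away (no read performed at this dispatch point)
 * or keeps it set, runs one urcu_one_read_sig(), and clears it on its
 * next pass through wait_for_sighand_exec(). This models a handler that
 * runs synchronously in the context of the interrupted thread.
 */
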
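/*
 * ooo_mem() models out-of-order memory: at every call site, each modeled
 * variable may or may not have the calling process's cache entry flushed
 * to memory and then refreshed from memory. Loads and stores separated
 * only by ooo_mem() calls can therefore become visible to other processes
 * in any order; only smp_rmb/smp_wmb/smp_mb force the flush or refresh.
 */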
inline ooo_mem(i)
{
        atomic {
                RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

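/*
 * Grace-period detection: wait_for_reader() spins as long as reader "tmp"
 * has a non-zero nesting count (RCU_GP_CTR_NEST_MASK bits) _and_ its
 * snapshot disagrees with the current urcu_gp_ctr in the RCU_GP_CTR_BIT
 * parity bit, i.e., as long as that reader may still be inside a
 * read-side critical section that began before the writer flipped the
 * parity. wait_for_quiescent_state() applies this to every reader in
 * turn.
 */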
inline wait_for_reader(tmp, tmp2, i, j)
{
        do
        :: 1 ->
                tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                if
                :: (tmp2 & RCU_GP_CTR_NEST_MASK)
                        && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
                                & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
                        smp_mb(i, j);
#else
                        ooo_mem(i);
#endif
                        dispatch_sighand_write_exec();
                :: else ->
                        break;
                fi;
        od;
}

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
        tmp = 0;
        do
        :: tmp < NR_READERS ->
                wait_for_reader(tmp, tmp2, i, j);
                if
                :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
                        -> ooo_mem(i);
                        dispatch_sighand_write_exec();
                :: else
                        -> skip;
                fi;
                tmp++
        :: tmp >= NR_READERS -> break
        od;
}

/* Model the RCU read-side critical section. */

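/*
 * urcu_one_read() models a nested rcu_read_lock()/rcu_read_unlock() pair,
 * READER_NEST_LEVEL deep: the outermost lock snapshots urcu_gp_ctr into
 * this reader's urcu_active_readers[] entry, nested locks just increment
 * the nesting count, and each unlock decrements it. Inside the critical
 * section the reader records the generation_ptr value it can observe in
 * read_generation[] and pulses data_access[]; these variables are
 * presumably checked by the properties supplied through the "DEFINES"
 * file included above.
 */
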
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                dispatch_sighand_read_exec();
                tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                dispatch_sighand_read_exec();
                if
                :: (!(tmp & RCU_GP_CTR_NEST_MASK))
                        ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        dispatch_sighand_read_exec();
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp + 1);
                fi;
                smp_mb(i, j);
                dispatch_sighand_read_exec();
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;

        read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
        data_access[get_readerid()] = 1;
        data_access[get_readerid()] = 0;

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                smp_mb(i, j);
                dispatch_sighand_read_exec();
                tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                dispatch_sighand_read_exec();
                WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        //ooo_mem(i);
        //dispatch_sighand_read_exec();
        //smp_mc(i);    /* added */
}

active proctype urcu_reader()
{
        byte i, j, nest_i;
        byte tmp, tmp2;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                /*
                 * We do not test the reader's progress here, because we are
                 * mainly interested in the writer's progress. The reader never
                 * blocks anyway. We have to test reader and writer progress
                 * separately; otherwise we could think the writer is making
                 * progress when it is in fact blocked by an always-progressing
                 * reader.
                 */
#ifdef READER_PROGRESS
                /* Only test progress of one random reader. They are all the
                 * same. */
                if
                :: get_readerid() == 0 ->
progress_reader:
                        skip;
                fi;
#endif
                urcu_one_read(i, j, nest_i, tmp, tmp2);
        od;
}

#ifdef TEST_SIGNAL
/* signal handler reader */

inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                if
                :: (!(tmp & RCU_GP_CTR_NEST_MASK))
                        ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp + 1);
                fi;
                smp_mb(i, j);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;

        read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
        data_access[get_readerid()] = 1;
        data_access[get_readerid()] = 0;

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                smp_mb(i, j);
                tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        //ooo_mem(i);
        //smp_mc(i);    /* added */
}

active proctype urcu_reader_sig()
{
        byte i, j, nest_i;
        byte tmp, tmp2;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                wait_for_sighand_exec();
                /*
                 * We do not test the reader's progress here, because we are
                 * mainly interested in the writer's progress. The reader never
                 * blocks anyway. We have to test reader and writer progress
                 * separately; otherwise we could think the writer is making
                 * progress when it is in fact blocked by an always-progressing
                 * reader.
                 */
#ifdef READER_PROGRESS
                /* Only test progress of one random reader. They are all the
                 * same. */
                if
                :: get_readerid() == 0 ->
progress_reader:
                        skip;
                fi;
#endif
                urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
        od;
}

#endif

/* Model the RCU update process. */

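/*
 * The writer loop models pointer publication plus synchronize_rcu():
 * generation_ptr is incremented in place of publishing a new pointer
 * (old_gen standing for the memory to be reclaimed), write_lock
 * serializes updaters, and the grace period flips the RCU_GP_CTR_BIT
 * parity in urcu_gp_ctr before waiting for all readers to become
 * quiescent. Unless SINGLE_FLIP is defined, the flip-and-wait sequence is
 * performed twice, matching the two-phase scheme of the userspace RCU
 * implementation. The final atomic block is the reclamation ("kfree")
 * step, recorded in last_free_gen/free_done for the properties to check.
 */
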
active proctype urcu_writer()
{
        byte i, j;
        byte tmp, tmp2;
        byte old_gen;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

        do
        :: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
                ooo_mem(i);
                dispatch_sighand_write_exec();
                atomic {
                        old_gen = READ_CACHED_VAR(generation_ptr);
                        WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
                }
                ooo_mem(i);
                dispatch_sighand_write_exec();

                do
                :: 1 ->
                        atomic {
                                if
                                :: write_lock == 0 ->
                                        write_lock = 1;
                                        break;
                                :: else ->
                                        skip;
                                fi;
                        }
                od;
                smp_mb(i, j);
                dispatch_sighand_write_exec();
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                //smp_mc(i);
                wait_for_quiescent_state(tmp, tmp2, i, j);
                //smp_mc(i);
#ifndef SINGLE_FLIP
                ooo_mem(i);
                dispatch_sighand_write_exec();
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                //smp_mc(i);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
                smp_mb(i, j);
                dispatch_sighand_write_exec();
                write_lock = 0;
                /* free-up step, e.g., kfree(). */
                atomic {
                        last_free_gen = old_gen;
                        free_done = 1;
                }
        :: else -> break;
        od;
        /*
         * Since the reader loops infinitely, let the writer also busy-loop
         * with progress here so that, with weak fairness, we can test the
         * writer's progress.
         */
end_writer:
        do
        :: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
                dispatch_sighand_write_exec();
        od;
}

/* Leave after the readers and writers so the pid count is ok. */
init {
        byte i, j;

        atomic {
                INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
                INIT_CACHED_VAR(generation_ptr, 0, j);

                i = 0;
                do
                :: i < NR_READERS ->
                        INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
                        read_generation[i] = 1;
                        data_access[i] = 0;
                        i++;
                :: i >= NR_READERS -> break
                od;
                init_done = 1;
        }
}
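
/*
 * Typical verification run (a sketch; it assumes this file has already
 * been assembled with the "specific defines" and "DEFINES" headers
 * mentioned at the top -- the exact file names and -D switches are not
 * shown here):
 *
 *   spin -a urcu.spin              generate the pan.c verifier
 *   cc -O2 -DSAFETY -o pan pan.c   safety-only build
 *   ./pan                          check assertions
 *
 * For the READER_PROGRESS / WRITER_PROGRESS variants, compile pan.c with
 * -DNP instead of -DSAFETY and run "./pan -l -f" to search for
 * non-progress cycles under weak fairness.
 */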