Update formal model from local copy
[urcu.git] / formal-model / urcu-nosched-model / result-signal-over-reader / urcu_free_nested.spin.input
1 #define READER_NEST_LEVEL 1
2
3 #define NR_READERS 1
4 #define NR_WRITERS 1
5
6 #define NR_PROCS 2
7
8 #define read_free_race (read_generation[0] == last_free_gen)
9 #define read_free (free_done && data_access[0])
10
11 #define TEST_SIGNAL
12 #define TEST_SIGNAL_ON_READ
13
14 #define RCU_GP_CTR_BIT (1 << 7)
15 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
16
17 #ifndef READER_NEST_LEVEL
18 #define READER_NEST_LEVEL 2
19 #endif
20
21 #define REMOTE_BARRIERS
22 /*
23 * mem.spin: Promela code to validate memory barriers with OOO memory.
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
38 *
39 * Copyright (c) 2009 Mathieu Desnoyers
40 */
41
42 /* Promela validation variables. */
43
44 /* specific defines "included" here */
45 /* DEFINES file "included" here */
46
47 /* All signal readers have same PID and uses same reader variable */
48 #ifdef TEST_SIGNAL_ON_WRITE
49 #define get_pid() ((_pid < 1) -> 0 : 1)
50 #elif defined(TEST_SIGNAL_ON_READ)
51 #define get_pid() ((_pid < 2) -> 0 : 1)
52 #else
53 #define get_pid() (_pid)
54 #endif
55
56 /*
57 * Each process have its own data in cache. Caches are randomly updated.
58 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
59 * both.
60 */
61
62 typedef per_proc_byte {
63 byte val[NR_PROCS];
64 };
65
66 /* Bitfield has a maximum of 8 procs */
67 typedef per_proc_bit {
68 byte bitfield;
69 };
70
71 #define DECLARE_CACHED_VAR(type, x) \
72 type mem_##x; \
73 per_proc_##type cached_##x; \
74 per_proc_bit cache_dirty_##x;
75
76 #define INIT_CACHED_VAR(x, v, j) \
77 mem_##x = v; \
78 cache_dirty_##x.bitfield = 0; \
79 j = 0; \
80 do \
81 :: j < NR_PROCS -> \
82 cached_##x.val[j] = v; \
83 j++ \
84 :: j >= NR_PROCS -> break \
85 od;
86
87 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
88
89 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
90
91 #define WRITE_CACHED_VAR(x, v) \
92 atomic { \
93 cached_##x.val[get_pid()] = v; \
94 cache_dirty_##x.bitfield = \
95 cache_dirty_##x.bitfield | (1 << get_pid()); \
96 }
97
98 #define CACHE_WRITE_TO_MEM(x, id) \
99 if \
100 :: IS_CACHE_DIRTY(x, id) -> \
101 mem_##x = cached_##x.val[id]; \
102 cache_dirty_##x.bitfield = \
103 cache_dirty_##x.bitfield & (~(1 << id)); \
104 :: else -> \
105 skip \
106 fi;
107
108 #define CACHE_READ_FROM_MEM(x, id) \
109 if \
110 :: !IS_CACHE_DIRTY(x, id) -> \
111 cached_##x.val[id] = mem_##x;\
112 :: else -> \
113 skip \
114 fi;
115
116 /*
117 * May update other caches if cache is dirty, or not.
118 */
119 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
120 if \
121 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
122 :: 1 -> skip \
123 fi;
124
125 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
126 if \
127 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
128 :: 1 -> skip \
129 fi;
130
131 /*
132 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
133 * reader threads to promote their compiler barrier to a smp_mb().
134 */
135 #ifdef REMOTE_BARRIERS
136
137 inline smp_rmb_pid(i, j)
138 {
139 atomic {
140 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
141 j = 0;
142 do
143 :: j < NR_READERS ->
144 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
145 j++
146 :: j >= NR_READERS -> break
147 od;
148 CACHE_READ_FROM_MEM(generation_ptr, i);
149 }
150 }
151
152 inline smp_wmb_pid(i, j)
153 {
154 atomic {
155 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
156 j = 0;
157 do
158 :: j < NR_READERS ->
159 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
160 j++
161 :: j >= NR_READERS -> break
162 od;
163 CACHE_WRITE_TO_MEM(generation_ptr, i);
164 }
165 }
166
167 inline smp_mb_pid(i, j)
168 {
169 atomic {
170 #ifndef NO_WMB
171 smp_wmb_pid(i, j);
172 #endif
173 #ifndef NO_RMB
174 smp_rmb_pid(i, j);
175 #endif
176 #ifdef NO_WMB
177 #ifdef NO_RMB
178 ooo_mem(j);
179 #endif
180 #endif
181 }
182 }
183
184 /*
185 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
186 * signal or IPI to have all readers execute a smp_mb.
187 * We are not modeling the whole rendez-vous between readers and writers here,
188 * we just let the writer update each reader's caches remotely.
189 */
190 inline smp_mb(i, j)
191 {
192 if
193 :: get_pid() >= NR_READERS ->
194 smp_mb_pid(get_pid(), j);
195 i = 0;
196 do
197 :: i < NR_READERS ->
198 smp_mb_pid(i, j);
199 i++;
200 :: i >= NR_READERS -> break
201 od;
202 smp_mb_pid(get_pid(), j);
203 :: else -> skip;
204 fi;
205 }
206
207 #else
208
209 inline smp_rmb(i, j)
210 {
211 atomic {
212 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
213 i = 0;
214 do
215 :: i < NR_READERS ->
216 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
217 i++
218 :: i >= NR_READERS -> break
219 od;
220 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
221 }
222 }
223
224 inline smp_wmb(i, j)
225 {
226 atomic {
227 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
228 i = 0;
229 do
230 :: i < NR_READERS ->
231 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
232 i++
233 :: i >= NR_READERS -> break
234 od;
235 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
236 }
237 }
238
239 inline smp_mb(i, j)
240 {
241 atomic {
242 #ifndef NO_WMB
243 smp_wmb(i, j);
244 #endif
245 #ifndef NO_RMB
246 smp_rmb(i, j);
247 #endif
248 #ifdef NO_WMB
249 #ifdef NO_RMB
250 ooo_mem(i);
251 #endif
252 #endif
253 }
254 }
255
256 #endif
257
258 /* Keep in sync manually with smp_rmb, wmp_wmb, ooo_mem and init() */
259 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
260 /* Note ! currently only two readers */
261 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
262 /* pointer generation */
263 DECLARE_CACHED_VAR(byte, generation_ptr);
264
265 byte last_free_gen = 0;
266 bit free_done = 0;
267 byte read_generation[NR_READERS];
268 bit data_access[NR_READERS];
269
270 bit write_lock = 0;
271
272 bit init_done = 0;
273
274 bit sighand_exec = 0;
275
276 inline wait_init_done()
277 {
278 do
279 :: init_done == 0 -> skip;
280 :: else -> break;
281 od;
282 }
283
284 #ifdef TEST_SIGNAL
285
286 inline wait_for_sighand_exec()
287 {
288 sighand_exec = 0;
289 do
290 :: sighand_exec == 0 -> skip;
291 :: else -> break;
292 od;
293 }
294
295 #else
296
297 inline wait_for_sighand_exec()
298 {
299 skip;
300 }
301
302 #endif
303
304 #ifdef TEST_SIGNAL_ON_WRITE
305 /* Block on signal handler execution */
306 inline dispatch_sighand_write_exec()
307 {
308 sighand_exec = 1;
309 do
310 :: sighand_exec == 1 ->
311 skip;
312 :: else ->
313 break;
314 od;
315 }
316
317 #else
318
319 inline dispatch_sighand_write_exec()
320 {
321 skip;
322 }
323
324 #endif
325
326 #ifdef TEST_SIGNAL_ON_READ
327 /* Block on signal handler execution */
328 inline dispatch_sighand_read_exec()
329 {
330 sighand_exec = 1;
331 do
332 :: sighand_exec == 1 ->
333 skip;
334 :: else ->
335 break;
336 od;
337 }
338
339 #else
340
341 inline dispatch_sighand_read_exec()
342 {
343 skip;
344 }
345
346 #endif
347
348
349 inline ooo_mem(i)
350 {
351 atomic {
352 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
353 i = 0;
354 do
355 :: i < NR_READERS ->
356 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
357 get_pid());
358 i++
359 :: i >= NR_READERS -> break
360 od;
361 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
362 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
363 i = 0;
364 do
365 :: i < NR_READERS ->
366 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
367 get_pid());
368 i++
369 :: i >= NR_READERS -> break
370 od;
371 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
372 }
373 }
374
375 #define get_readerid() (get_pid())
376 #define get_writerid() (get_readerid() + NR_READERS)
377
378 inline wait_for_reader(tmp, tmp2, i, j)
379 {
380 do
381 :: 1 ->
382 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
383 ooo_mem(i);
384 dispatch_sighand_write_exec();
385 if
386 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
387 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
388 & RCU_GP_CTR_BIT) ->
389 #ifndef GEN_ERROR_WRITER_PROGRESS
390 smp_mb(i, j);
391 #else
392 ooo_mem(i);
393 #endif
394 dispatch_sighand_write_exec();
395 :: else ->
396 break;
397 fi;
398 od;
399 }
400
401 inline wait_for_quiescent_state(tmp, tmp2, i, j)
402 {
403 tmp = 0;
404 do
405 :: tmp < NR_READERS ->
406 wait_for_reader(tmp, tmp2, i, j);
407 if
408 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
409 -> ooo_mem(i);
410 dispatch_sighand_write_exec();
411 :: else
412 -> skip;
413 fi;
414 tmp++
415 :: tmp >= NR_READERS -> break
416 od;
417 }
418
419 /* Model the RCU read-side critical section. */
420
421 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
422 {
423 nest_i = 0;
424 do
425 :: nest_i < READER_NEST_LEVEL ->
426 ooo_mem(i);
427 dispatch_sighand_read_exec();
428 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
429 ooo_mem(i);
430 dispatch_sighand_read_exec();
431 if
432 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
433 ->
434 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
435 ooo_mem(i);
436 dispatch_sighand_read_exec();
437 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
438 tmp2);
439 :: else ->
440 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
441 tmp + 1);
442 fi;
443 smp_mb(i, j);
444 dispatch_sighand_read_exec();
445 nest_i++;
446 :: nest_i >= READER_NEST_LEVEL -> break;
447 od;
448
449 ooo_mem(i);
450 dispatch_sighand_read_exec();
451 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
452 ooo_mem(i);
453 dispatch_sighand_read_exec();
454 data_access[get_readerid()] = 1;
455 ooo_mem(i);
456 dispatch_sighand_read_exec();
457 data_access[get_readerid()] = 0;
458
459 nest_i = 0;
460 do
461 :: nest_i < READER_NEST_LEVEL ->
462 smp_mb(i, j);
463 dispatch_sighand_read_exec();
464 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
465 ooo_mem(i);
466 dispatch_sighand_read_exec();
467 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
468 nest_i++;
469 :: nest_i >= READER_NEST_LEVEL -> break;
470 od;
471 ooo_mem(i);
472 dispatch_sighand_read_exec();
473 //smp_mc(i); /* added */
474 }
475
476 active proctype urcu_reader()
477 {
478 byte i, j, nest_i;
479 byte tmp, tmp2;
480
481 wait_init_done();
482
483 assert(get_pid() < NR_PROCS);
484
485 end_reader:
486 do
487 :: 1 ->
488 /*
489 * We do not test reader's progress here, because we are mainly
490 * interested in writer's progress. The reader never blocks
491 * anyway. We have to test for reader/writer's progress
492 * separately, otherwise we could think the writer is doing
493 * progress when it's blocked by an always progressing reader.
494 */
495 #ifdef READER_PROGRESS
496 /* Only test progress of one random reader. They are all the
497 * same. */
498 if
499 :: get_readerid() == 0 ->
500 progress_reader:
501 skip;
502 fi;
503 #endif
504 urcu_one_read(i, j, nest_i, tmp, tmp2);
505 od;
506 }
507
508 #ifdef TEST_SIGNAL
509 /* signal handler reader */
510
511 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
512 {
513 nest_i = 0;
514 do
515 :: nest_i < READER_NEST_LEVEL ->
516 ooo_mem(i);
517 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
518 ooo_mem(i);
519 if
520 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
521 ->
522 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
523 ooo_mem(i);
524 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
525 tmp2);
526 :: else ->
527 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
528 tmp + 1);
529 fi;
530 smp_mb(i, j);
531 nest_i++;
532 :: nest_i >= READER_NEST_LEVEL -> break;
533 od;
534
535 ooo_mem(i);
536 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
537 ooo_mem(i);
538 data_access[get_readerid()] = 1;
539 ooo_mem(i);
540 data_access[get_readerid()] = 0;
541
542 nest_i = 0;
543 do
544 :: nest_i < READER_NEST_LEVEL ->
545 smp_mb(i, j);
546 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
547 ooo_mem(i);
548 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
549 nest_i++;
550 :: nest_i >= READER_NEST_LEVEL -> break;
551 od;
552 ooo_mem(i);
553 //smp_mc(i); /* added */
554 }
555
556 active proctype urcu_reader_sig()
557 {
558 byte i, j, nest_i;
559 byte tmp, tmp2;
560
561 wait_init_done();
562
563 assert(get_pid() < NR_PROCS);
564
565 end_reader:
566 do
567 :: 1 ->
568 wait_for_sighand_exec();
569 /*
570 * We do not test reader's progress here, because we are mainly
571 * interested in writer's progress. The reader never blocks
572 * anyway. We have to test for reader/writer's progress
573 * separately, otherwise we could think the writer is doing
574 * progress when it's blocked by an always progressing reader.
575 */
576 #ifdef READER_PROGRESS
577 /* Only test progress of one random reader. They are all the
578 * same. */
579 if
580 :: get_readerid() == 0 ->
581 progress_reader:
582 skip;
583 fi;
584 #endif
585 urcu_one_read(i, j, nest_i, tmp, tmp2);
586 od;
587 }
588
589 #endif
590
591 /* Model the RCU update process. */
592
593 active proctype urcu_writer()
594 {
595 byte i, j;
596 byte tmp, tmp2;
597 byte old_gen;
598
599 wait_init_done();
600
601 assert(get_pid() < NR_PROCS);
602
603 do
604 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
605 #ifdef WRITER_PROGRESS
606 progress_writer1:
607 #endif
608 ooo_mem(i);
609 dispatch_sighand_write_exec();
610 atomic {
611 old_gen = READ_CACHED_VAR(generation_ptr);
612 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
613 }
614 ooo_mem(i);
615 dispatch_sighand_write_exec();
616
617 do
618 :: 1 ->
619 atomic {
620 if
621 :: write_lock == 0 ->
622 write_lock = 1;
623 break;
624 :: else ->
625 skip;
626 fi;
627 }
628 od;
629 smp_mb(i, j);
630 dispatch_sighand_write_exec();
631 tmp = READ_CACHED_VAR(urcu_gp_ctr);
632 ooo_mem(i);
633 dispatch_sighand_write_exec();
634 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
635 ooo_mem(i);
636 dispatch_sighand_write_exec();
637 //smp_mc(i);
638 wait_for_quiescent_state(tmp, tmp2, i, j);
639 //smp_mc(i);
640 #ifndef SINGLE_FLIP
641 ooo_mem(i);
642 dispatch_sighand_write_exec();
643 tmp = READ_CACHED_VAR(urcu_gp_ctr);
644 ooo_mem(i);
645 dispatch_sighand_write_exec();
646 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
647 //smp_mc(i);
648 ooo_mem(i);
649 dispatch_sighand_write_exec();
650 wait_for_quiescent_state(tmp, tmp2, i, j);
651 #endif
652 smp_mb(i, j);
653 dispatch_sighand_write_exec();
654 write_lock = 0;
655 /* free-up step, e.g., kfree(). */
656 atomic {
657 last_free_gen = old_gen;
658 free_done = 1;
659 }
660 :: else -> break;
661 od;
662 /*
663 * Given the reader loops infinitely, let the writer also busy-loop
664 * with progress here so, with weak fairness, we can test the
665 * writer's progress.
666 */
667 end_writer:
668 do
669 :: 1 ->
670 #ifdef WRITER_PROGRESS
671 progress_writer2:
672 #endif
673 dispatch_sighand_write_exec();
674 od;
675 }
676
677 /* Leave after the readers and writers so the pid count is ok. */
678 init {
679 byte i, j;
680
681 atomic {
682 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
683 INIT_CACHED_VAR(generation_ptr, 0, j);
684
685 i = 0;
686 do
687 :: i < NR_READERS ->
688 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
689 read_generation[i] = 1;
690 data_access[i] = 0;
691 i++;
692 :: i >= NR_READERS -> break
693 od;
694 init_done = 1;
695 }
696 }
This page took 0.043803 seconds and 4 git commands to generate.