/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */
/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */
/* All signal readers have the same PID and use the same reader variable */
#ifdef TEST_SIGNAL_ON_WRITE

#define NR_READERS 1	/* the writer is also a signal reader */
#define NR_PROCS 2	/* two logical pids: signal reader, writer */

#define get_pid()	(_pid)

#elif defined(TEST_SIGNAL_ON_READ)

#define NR_READERS 1
#define NR_PROCS 2	/* the reader and the signal reader share a pid */

#define get_pid()	((_pid < 2) -> 0 : 1)

#else

#define NR_READERS 1
#define NR_PROCS 2	/* two logical pids: reader, writer */

#define get_pid()	(_pid)

#endif

#define get_readerid()	(get_pid())
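/*
 * Mapping example for TEST_SIGNAL_ON_READ (following the declaration order
 * of the active proctypes below): _pid 0 (urcu_reader) and _pid 1
 * (urcu_reader_sig) both evaluate get_pid() to 0, so the reader and its
 * signal handler share one urcu_active_readers slot and one cache; the
 * writer (_pid 2) maps to logical pid 1.
 */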
/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
 * both.
 */
typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
	byte bitfield;
};
#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bit cache_dirty_##x;
#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;
#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])
#define WRITE_CACHED_VAR(x, v)			\
	atomic {				\
		cached_##x.val[get_pid()] = v;	\
		cache_dirty_##x.bitfield =	\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}
#define CACHE_WRITE_TO_MEM(x, id)		\
	if					\
	:: IS_CACHE_DIRTY(x, id) ->		\
		mem_##x = cached_##x.val[id];	\
		cache_dirty_##x.bitfield =	\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->				\
		skip				\
	fi;
#define CACHE_READ_FROM_MEM(x, id)		\
	if					\
	:: !IS_CACHE_DIRTY(x, id) ->		\
		cached_##x.val[id] = mem_##x;	\
	:: else ->				\
		skip				\
	fi;
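/*
 * Usage sketch (illustration only, not part of the model): for a variable
 * "foo", DECLARE_CACHED_VAR(byte, foo) expands to the memory copy "mem_foo",
 * the per-process cache "cached_foo" and the dirty bitmask "cache_dirty_foo".
 * A WRITE_CACHED_VAR(foo, 1) by logical pid 0 only touches cached_foo.val[0]
 * and sets bit 0 of the dirty mask; pid 1 keeps reading its own stale
 * cached_foo.val[1] until CACHE_WRITE_TO_MEM(foo, 0) flushes the write to
 * mem_foo and CACHE_READ_FROM_MEM(foo, 1) pulls it in.
 */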
/*
 * May flush a dirty cache entry to memory (or refresh a clean one from
 * memory), or not: the nondeterministic ":: 1" choices let the verifier
 * explore stores and loads reaching memory at any later point, in any order.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;
/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to a smp_mb().
 */
#ifdef REMOTE_BARRIERS
inline smp_rmb_pid(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}
inline smp_wmb_pid(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}
inline smp_mb_pid(i, j)
{
	atomic {
		/* A full barrier flushes the write cache, then refreshes
		 * the read cache, for logical pid "i". */
		smp_wmb_pid(i, j);
		smp_rmb_pid(i, j);
	}
}
/*
 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a
 * signal or IPI to have all readers execute a smp_mb.
 * We are not modeling the whole rendez-vous between readers and writers here,
 * we just let the writer update each reader's caches remotely.
 */
inline smp_mb_writer(i, j)
{
	smp_mb_pid(get_pid(), j);
	i = 0;
	do
	:: i < NR_READERS ->
		smp_mb_pid(i, j);	/* remote barrier on each reader */
		i++
	:: i >= NR_READERS -> break
	od;
	smp_mb_pid(get_pid(), j);
}
inline smp_mb_reader(i, j)
{
	/* The reader side is only a compiler barrier(); the writer promotes
	 * it to a full smp_mb() remotely, in smp_mb_writer() above. */
	skip;
}
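/*
 * Sequence sketch under REMOTE_BARRIERS (assumed from the comment above;
 * the rendez-vous itself is deliberately not modeled): the writer runs
 * smp_mb_pid() on its own pid, then once per reader pid, then on its own
 * pid again, which stands in for "signal every reader and wait until each
 * one has executed a full barrier".
 */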
#else

inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}
/* Without REMOTE_BARRIERS, both sides execute a full barrier. */
inline smp_mb_writer(i, j)
{
	smp_wmb(i, j);
	smp_rmb(i, j);
}

inline smp_mb_reader(i, j)
{
	smp_wmb(i, j);
	smp_rmb(i, j);
}

#endif
/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note ! currently only two readers */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

bit sighand_exec = 0;
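/*
 * Counter layout note (the actual constants come from the DEFINES file
 * "included" above; assuming RCU_GP_CTR_BIT is a single high bit and
 * RCU_GP_CTR_NEST_MASK covers the low-order bits): the low bits of
 * urcu_active_readers hold the read-side nesting count, the high bit holds
 * the grace-period phase snapshotted from urcu_gp_ctr on the outermost
 * rcu_read_lock().
 */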
inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break
	od;
}
#if defined(TEST_SIGNAL_ON_WRITE) || defined(TEST_SIGNAL_ON_READ)

#ifndef TOO_BIG_STATE_SPACE
/* The signal reader busy-waits for a signal to be dispatched. */
inline wait_for_sighand_exec()
{
	do
	:: sighand_exec == 0 -> skip;
	:: else -> break
	od;
}
#else
/* Variant where the pending signal may also be dropped and waited for
 * again, enlarging the explored state space. */
inline wait_for_sighand_exec()
{
	do
	:: sighand_exec == 0 -> skip;
	:: else ->
		if
		:: 1 -> break
		:: 1 -> sighand_exec = 0;
			skip
		fi
	od;
}
#endif

#else

inline wait_for_sighand_exec()
{
	skip;
}

#endif
#ifdef TEST_SIGNAL_ON_WRITE
/* Block on signal handler execution */
inline dispatch_sighand_write_exec()
{
	/* Deliver the signal, then wait until the handler (the signal
	 * reader) has run and cleared the flag. */
	sighand_exec = 1;
	do
	:: sighand_exec == 1 -> skip;
	:: else -> break
	od;
}

#else

inline dispatch_sighand_write_exec()
{
	skip;
}

#endif

#ifdef TEST_SIGNAL_ON_READ
/* Block on signal handler execution */
inline dispatch_sighand_read_exec()
{
	sighand_exec = 1;
	do
	:: sighand_exec == 1 -> skip;
	:: else -> break
	od;
}

#else

inline dispatch_sighand_read_exec()
{
	skip;
}

#endif
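/*
 * Handshake sketch (assuming the reconstruction above): the dispatching
 * side raises sighand_exec and spins until it drops back to 0; the signal
 * reader spins in wait_for_sighand_exec() until it sees 1, runs its handler
 * body, then clears the flag, releasing the dispatcher.
 */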
/*
 * Simulate an out-of-order memory hierarchy: maybe flush, then maybe
 * refresh, every shared variable for the calling process.
 */
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
inline wait_for_reader(tmp, tmp2, i, j)
{
	do
	:: 1 ->
		tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		if
		:: (tmp2 & RCU_GP_CTR_NEST_MASK)
			&& ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
			/* Reader still active in the previous phase. */
#ifndef GEN_ERROR_WRITER_PROGRESS
			ooo_mem(i);
#endif
			dispatch_sighand_write_exec();
		:: else ->
			break
		fi
	od;
}
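/*
 * Worked example of the check above (constants assumed for illustration;
 * the real values come from the DEFINES file): take RCU_GP_CTR_BIT = 0x80
 * and RCU_GP_CTR_NEST_MASK = 0x7f.  urcu_gp_ctr starts at 0x01, so an
 * outermost reader snapshots tmp2 = 0x01 (nest count 1, phase 0).  After
 * the writer flips the counter to 0x81, tmp2 ^ urcu_gp_ctr = 0x80, and
 * masking with RCU_GP_CTR_BIT is non-zero: the reader entered before the
 * flip and must be waited for.  A snapshot taken after the flip (0x81), or
 * a reader outside any critical section (nest bits 0), fails the condition.
 */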
inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
	tmp = 0;
	do
	:: tmp < NR_READERS ->
		wait_for_reader(tmp, tmp2, i, j);
		if
		:: (NR_READERS > 1) && (tmp < NR_READERS - 1) ->
			ooo_mem(i);
			dispatch_sighand_write_exec();
		:: else ->
			skip
		fi;
		tmp++
	:: tmp >= NR_READERS -> break
	od;
}
/* Model the RCU read-side critical section. */

#ifndef TEST_SIGNAL_ON_WRITE

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		dispatch_sighand_read_exec();
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		dispatch_sighand_read_exec();
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
			/* Outermost nesting level: snapshot the global
			 * counter (its low bits already count as depth 1). */
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			dispatch_sighand_read_exec();
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp2);
		:: else ->
			/* Nested: just increment the nesting count. */
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp + 1);
		fi;
		smp_mb_reader(i, j);
		dispatch_sighand_read_exec();
		nest_i++
	:: nest_i >= READER_NEST_LEVEL -> break
	od;

	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb_reader(i, j);
		dispatch_sighand_read_exec();
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		dispatch_sighand_read_exec();
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++
	:: nest_i >= READER_NEST_LEVEL -> break
	od;
	//dispatch_sighand_read_exec();
	//smp_mc(i);	/* added */
}
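/*
 * Why init() below sets urcu_gp_ctr to 1: an outermost rcu_read_lock()
 * copies the whole counter, so the low bits of the snapshot double as
 * nesting depth 1; nested locks then just add 1, and each unlock subtracts
 * 1, leaving the nest bits at 0 once the outermost section exits.
 */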
active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test reader's progress here, because we are mainly
		 * interested in writer's progress. The reader never blocks
		 * anyway. We have to test for reader/writer's progress
		 * separately, otherwise we could think the writer is doing
		 * progress when it's blocked by an always progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

#endif //!TEST_SIGNAL_ON_WRITE
/* signal handler reader */

#if defined(TEST_SIGNAL_ON_WRITE) || defined(TEST_SIGNAL_ON_READ)

inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp + 1);
		fi;
		smp_mb_reader(i, j);
		nest_i++
	:: nest_i >= READER_NEST_LEVEL -> break
	od;

	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb_reader(i, j);
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++
	:: nest_i >= READER_NEST_LEVEL -> break
	od;
	//smp_mc(i);	/* added */
}
active proctype urcu_reader_sig()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/* Wait for a signal to be delivered, run the handler body,
		 * then release the dispatching side. */
		wait_for_sighand_exec();
		/*
		 * We do not test reader's progress here, because we are mainly
		 * interested in writer's progress. The reader never blocks
		 * anyway. We have to test for reader/writer's progress
		 * separately, otherwise we could think the writer is doing
		 * progress when it's blocked by an always progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
		sighand_exec = 0;
	od;
}

#endif
/* Model the RCU update process. */

active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2;
	byte old_gen;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		dispatch_sighand_write_exec();
		old_gen = READ_CACHED_VAR(generation_ptr);
		WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		ooo_mem(i);
		dispatch_sighand_write_exec();

		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break
				:: else ->
					skip
				fi
			}
		od;
		smp_mb_writer(i, j);
		dispatch_sighand_write_exec();
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		wait_for_quiescent_state(tmp, tmp2, i, j);
		/* Second counter flip and wait. */
		ooo_mem(i);
		dispatch_sighand_write_exec();
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		dispatch_sighand_write_exec();
		wait_for_quiescent_state(tmp, tmp2, i, j);
		smp_mb_writer(i, j);
		dispatch_sighand_write_exec();
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		last_free_gen = old_gen;
	:: else -> break
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here so, with weak fairness, we can test the
	 * writer's progress as well.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		dispatch_sighand_write_exec();
	od;
}
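/*
 * Design note on the double flip above: with a single flip, a reader could
 * sample urcu_gp_ctr just before the flip but publish its snapshot only
 * after the writer has scanned it, slipping through one grace period
 * unnoticed.  Flipping RCU_GP_CTR_BIT twice, with a quiescent-state wait
 * after each flip, pins every pre-existing reader in one of the two phases,
 * so last_free_gen can only name a generation no reader still accesses.
 */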
/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}