// Verification switches and safety predicates for this model.
// READER_PROGRESS: enable the reader-side progress label (liveness check).
// read_free_race: true when reader 0's snapshot of the generation equals the
//   generation last retired by the writer's free-up step, i.e. a
//   use-after-free race on the freed data.
// read_free: true when the free step has completed while reader 0 is inside
//   its data-access window (free_done is presumably set by the writer's
//   free-up step -- setter not visible in this view, confirm).
1 #define READER_PROGRESS
3 #define read_free_race (read_generation[0] == last_free_gen)
4 #define read_free (free_done && data_access[0])
7 #define TEST_SIGNAL_ON_READ
8 //#define TEST_SIGNAL_ON_WRITE
// Layout of urcu_gp_ctr / urcu_active_readers counters: bit 7 is the
// grace-period phase (parity) bit toggled by the writer; bits 0-6
// (RCU_GP_CTR_NEST_MASK) hold the read-side critical-section nesting count.
10 #define RCU_GP_CTR_BIT (1 << 7)
11 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
// Nesting depth of rcu_read_lock()/rcu_read_unlock() exercised by each
// modeled reader (default 1; use 2 to also validate nested read-side
// critical sections).
13 #ifndef READER_NEST_LEVEL
14 #define READER_NEST_LEVEL 1
15 //#define READER_NEST_LEVEL 2
18 #define REMOTE_BARRIERS
20 * mem.spin: Promela code to validate memory barriers with OOO memory.
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 * Copyright (c) 2009 Mathieu Desnoyers
39 /* Promela validation variables. */
41 /* specific defines "included" here */
42 /* DEFINES file "included" here */
44 /* All signal readers have the same PID and use the same reader variable */
45 #ifdef TEST_SIGNAL_ON_WRITE
47 #define NR_READERS 1 /* the writer is also a signal reader */
54 #elif defined(TEST_SIGNAL_ON_READ)
56 #define get_pid() ((_pid < 2) -> 0 : 1)
65 #define get_pid() (_pid)
74 #define get_readerid() (get_pid())
77 * Each process has its own data in cache. Caches are randomly updated.
78 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
82 typedef per_proc_byte {
86 /* Bitfield has a maximum of 8 procs */
87 typedef per_proc_bit {
// Declare one modeled shared variable: a per-process cached copy plus a
// per-process dirty bit. The backing main-memory copy (mem_##x) referenced
// by the flush/refresh macros below is declared on a line elided from this
// view -- TODO confirm.
91 #define DECLARE_CACHED_VAR(type, x) \
93 per_proc_##type cached_##x; \
94 per_proc_bit cache_dirty_##x;
// Initialize a cached variable: clear all per-process dirty bits and store
// value v into every process's cached copy. j is a caller-supplied loop
// counter iterated up to NR_PROCS.
96 #define INIT_CACHED_VAR(x, v, j) \
98 cache_dirty_##x.bitfield = 0; \
102 cached_##x.val[j] = v; \
104 :: j >= NR_PROCS -> break \
// True when process id holds a not-yet-flushed (dirty) write to x.
107 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
// Read the current process's cached copy of x (may be stale w.r.t. memory).
109 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
// Store v into the current process's cached copy of x and mark it dirty;
// the write only becomes visible to other processes once flushed to memory
// by CACHE_WRITE_TO_MEM.
111 #define WRITE_CACHED_VAR(x, v) \
113 cached_##x.val[get_pid()] = v; \
114 cache_dirty_##x.bitfield = \
115 cache_dirty_##x.bitfield | (1 << get_pid()); \
// Flush process id's cached value of x out to main memory when dirty, then
// clear the dirty bit (the clean-cache alternative of this if/fi is elided
// from view).
118 #define CACHE_WRITE_TO_MEM(x, id) \
120 :: IS_CACHE_DIRTY(x, id) -> \
121 mem_##x = cached_##x.val[id]; \
122 cache_dirty_##x.bitfield = \
123 cache_dirty_##x.bitfield & (~(1 << id)); \
// Refresh process id's cached copy of x from main memory, but only when the
// cache is clean: a pending dirty local store must not be overwritten.
128 #define CACHE_READ_FROM_MEM(x, id) \
130 :: !IS_CACHE_DIRTY(x, id) -> \
131 cached_##x.val[id] = mem_##x;\
137 * May update other caches if cache is dirty, or not.
// Nondeterministically flush x to memory (or do nothing), modeling a store
// that reaches memory at an arbitrary later time (out-of-order write).
139 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
141 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
// Nondeterministically refresh x from memory (or not), modeling an
// out-of-order read.
145 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
147 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
152 * Remote barriers test the scheme where a signal (or IPI) is sent to all
153 * reader threads to promote their compiler barrier to a smp_mb().
155 #ifdef REMOTE_BARRIERS
// Model smp_rmb() executed by process i: pull the memory values of every
// tracked shared variable (urcu_gp_ctr, each urcu_active_readers[j],
// generation_ptr) into i's cache. j is a scratch loop counter.
157 inline smp_rmb_pid(i, j)
160 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
164 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
166 :: j >= NR_READERS -> break
168 CACHE_READ_FROM_MEM(generation_ptr, i);
// Model smp_wmb() executed by process i: flush i's dirty cached values of
// every tracked shared variable out to main memory. j is a scratch counter.
172 inline smp_wmb_pid(i, j)
175 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
179 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
181 :: j >= NR_READERS -> break
183 CACHE_WRITE_TO_MEM(generation_ptr, i);
187 inline smp_mb_pid(i, j)
205 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
206 * signal or IPI to have all readers execute a smp_mb.
207 * We are not modeling the whole rendez-vous between readers and writers here,
208 * we just let the writer update each reader's caches remotely.
210 inline smp_mb_writer(i, j)
212 smp_mb_pid(get_pid(), j);
218 :: i >= NR_READERS -> break
220 smp_mb_pid(get_pid(), j);
223 inline smp_mb_reader(i, j)
233 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
237 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
239 :: i >= NR_READERS -> break
241 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
248 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
252 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
254 :: i >= NR_READERS -> break
256 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
277 inline smp_mb_writer(i, j)
282 inline smp_mb_reader(i, j)
289 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
290 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
291 /* Note ! currently only two readers */
292 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
293 /* pointer generation */
294 DECLARE_CACHED_VAR(byte, generation_ptr);
// Generation last retired by the writer's free-up step (see read_free_race).
296 byte last_free_gen = 0;
// Per-reader snapshot of generation_ptr taken inside the critical section.
298 byte read_generation[NR_READERS];
// Per-reader flag raised while the reader is in its data-access window.
299 bit data_access[NR_READERS];
// Request flag: a signal-handler execution is pending.
305 bit sighand_exec = 0;
// Busy-wait until the init process signals that shared state is set up.
307 inline wait_init_done()
310 :: init_done == 0 -> skip;
// Block until a signal-handler execution is requested (sighand_exec raised
// by the peer process -- NOTE(review): the setter is not visible here).
317 inline wait_for_sighand_exec()
321 :: sighand_exec == 0 -> skip;
326 #ifdef TOO_BIG_STATE_SPACE
327 inline wait_for_sighand_exec()
331 :: sighand_exec == 0 -> skip;
335 :: 1 -> sighand_exec = 0;
344 inline wait_for_sighand_exec()
351 #ifdef TEST_SIGNAL_ON_WRITE
352 /* Block on signal handler execution */
353 inline dispatch_sighand_write_exec()
357 :: sighand_exec == 1 ->
366 inline dispatch_sighand_write_exec()
373 #ifdef TEST_SIGNAL_ON_READ
374 /* Block on signal handler execution */
375 inline dispatch_sighand_read_exec()
379 :: sighand_exec == 1 ->
388 inline dispatch_sighand_read_exec()
399 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
403 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
406 :: i >= NR_READERS -> break
408 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
409 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
413 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
416 :: i >= NR_READERS -> break
418 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
// Writer side: spin until reader `tmp` is no longer inside a read-side
// critical section that began before the current grace-period phase. A
// reader is still "old" when its counter has a nonzero nest count and its
// counter XOR urcu_gp_ctr differs -- the comparison continues on lines
// elided from this view (TODO confirm it masks RCU_GP_CTR_BIT).
422 inline wait_for_reader(tmp, tmp2, i, j)
426 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
428 dispatch_sighand_write_exec();
430 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
431 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
433 #ifndef GEN_ERROR_WRITER_PROGRESS
438 dispatch_sighand_write_exec();
// Writer side: iterate over every reader, waiting each one out of any
// pre-existing read-side critical section (one grace-period phase).
445 inline wait_for_quiescent_state(tmp, tmp2, i, j)
449 :: tmp < NR_READERS ->
450 wait_for_reader(tmp, tmp2, i, j);
452 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
454 dispatch_sighand_write_exec();
459 :: tmp >= NR_READERS -> break
463 /* Model the RCU read-side critical section. */
465 #ifndef TEST_SIGNAL_ON_WRITE
// Model one full reader pass:
//  1. rcu_read_lock() nested READER_NEST_LEVEL deep: at the outermost level
//     (nest count zero) store a snapshot of urcu_gp_ctr into
//     urcu_active_readers, registering this reader in the current
//     grace-period phase; at inner levels just bump the nesting count.
//  2. Read-side critical section: snapshot generation_ptr into
//     read_generation and pulse data_access (inputs to the race predicates).
//  3. rcu_read_unlock() the same number of times (decrement the counter).
// dispatch_sighand_read_exec() calls interleave signal-handler execution
// points between the individual steps.
467 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
471 :: nest_i < READER_NEST_LEVEL ->
473 dispatch_sighand_read_exec();
474 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
476 dispatch_sighand_read_exec();
478 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
480 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
482 dispatch_sighand_read_exec();
483 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
486 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
490 dispatch_sighand_read_exec();
492 :: nest_i >= READER_NEST_LEVEL -> break;
495 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
496 data_access[get_readerid()] = 1;
497 data_access[get_readerid()] = 0;
501 :: nest_i < READER_NEST_LEVEL ->
503 dispatch_sighand_read_exec();
504 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
506 dispatch_sighand_read_exec();
507 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
509 :: nest_i >= READER_NEST_LEVEL -> break;
512 //dispatch_sighand_read_exec();
513 //smp_mc(i); /* added */
// Reader process: loops performing read-side passes via urcu_one_read().
// Progress is only instrumented when READER_PROGRESS is defined (rationale
// in the comment inside the body).
516 active proctype urcu_reader()
523 assert(get_pid() < NR_PROCS);
529 * We do not test reader's progress here, because we are mainly
530 * interested in writer's progress. The reader never blocks
531 * anyway. We have to test for reader/writer's progress
532 * separately, otherwise we could think the writer is doing
533 * progress when it's blocked by an always progressing reader.
535 #ifdef READER_PROGRESS
538 urcu_one_read(i, j, nest_i, tmp, tmp2);
542 #endif //!TEST_SIGNAL_ON_WRITE
545 /* signal handler reader */
// Signal-handler variant of urcu_one_read(): the same nested
// rcu_read_lock() / critical section / rcu_read_unlock() sequence, but
// without dispatch_sighand_read_exec() interleaving points, since this
// pass itself runs in signal-handler context.
547 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
551 :: nest_i < READER_NEST_LEVEL ->
553 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
556 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
558 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
560 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
563 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
568 :: nest_i >= READER_NEST_LEVEL -> break;
571 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
572 data_access[get_readerid()] = 1;
573 data_access[get_readerid()] = 0;
577 :: nest_i < READER_NEST_LEVEL ->
579 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
581 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
583 :: nest_i >= READER_NEST_LEVEL -> break;
586 //smp_mc(i); /* added */
// Signal-handler reader process: waits for a signal dispatch request
// (wait_for_sighand_exec) and then performs a read-side pass via
// urcu_one_read_sig().
589 active proctype urcu_reader_sig()
596 assert(get_pid() < NR_PROCS);
601 wait_for_sighand_exec();
603 * We do not test reader's progress here, because we are mainly
604 * interested in writer's progress. The reader never blocks
605 * anyway. We have to test for reader/writer's progress
606 * separately, otherwise we could think the writer is doing
607 * progress when it's blocked by an always progressing reader.
609 #ifdef READER_PROGRESS
612 urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
618 /* Model the RCU update process. */
// Writer (updater) process, bounded to 5 generations:
//  1. Publish a new generation (increment generation_ptr).
//  2. Under write_lock, run a two-phase grace period: toggle the
//     urcu_gp_ctr phase bit (RCU_GP_CTR_BIT) and wait_for_quiescent_state(),
//     performed twice.
//  3. Free-up step (models kfree()): record the retired generation in
//     last_free_gen so the read_free_race predicate can catch a reader
//     still holding a snapshot of it.
// dispatch_sighand_write_exec() points let the modeled signal handler run
// between every step.
620 active proctype urcu_writer()
628 assert(get_pid() < NR_PROCS);
631 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
632 #ifdef WRITER_PROGRESS
636 dispatch_sighand_write_exec();
638 old_gen = READ_CACHED_VAR(generation_ptr);
639 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
642 dispatch_sighand_write_exec();
648 :: write_lock == 0 ->
657 dispatch_sighand_write_exec();
658 tmp = READ_CACHED_VAR(urcu_gp_ctr);
660 dispatch_sighand_write_exec();
661 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
663 dispatch_sighand_write_exec();
665 wait_for_quiescent_state(tmp, tmp2, i, j);
669 dispatch_sighand_write_exec();
670 tmp = READ_CACHED_VAR(urcu_gp_ctr);
672 dispatch_sighand_write_exec();
673 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
676 dispatch_sighand_write_exec();
677 wait_for_quiescent_state(tmp, tmp2, i, j);
680 dispatch_sighand_write_exec();
682 /* free-up step, e.g., kfree(). */
684 last_free_gen = old_gen;
690 * Given the reader loops infinitely, let the writer also busy-loop
691 * with progress here so, with weak fairness, we can test the
697 #ifdef WRITER_PROGRESS
700 dispatch_sighand_write_exec();
704 /* Leave after the readers and writers so the pid count is ok. */
709 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
710 INIT_CACHED_VAR(generation_ptr, 0, j);
715 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
716 read_generation[i] = 1;
719 :: i >= NR_READERS -> break