2 * mem.spin: Promela code to validate memory barriers with OOO memory.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (c) 2009 Mathieu Desnoyers
21 /* Promela validation variables. */
23 /* specific defines "included" here */
24 /* DEFINES file "included" here */
/* Identity of the running Promela process; used to index per-process cache slots. */
26 #define get_pid() (_pid)
29 * Each process has its own data in cache. Caches are randomly updated.
30 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
/* Per-process cached copy of one modeled byte variable, one slot per process.
 * NOTE(review): the struct body is elided in this view (original lines 35-37) —
 * presumably "byte val[NR_PROCS];"; confirm against the full file. */
34 typedef per_proc_byte {
38 /* Bitfield has a maximum of 8 procs */
/* One dirty bit per process, packed into a single byte-wide bitfield
 * (body elided in this view — original lines 40-42). */
39 typedef per_proc_bit {
/* Declare the memory-model state for variable "x": a per-process cached copy
 * plus a per-process dirty bitfield. A process's dirty bit is set by
 * WRITE_CACHED_VAR and cleared when CACHE_WRITE_TO_MEM flushes to mem_x. */
43 #define DECLARE_CACHED_VAR(type, x) \
45 per_proc_##type cached_##x; \
46 per_proc_bit cache_dirty_##x;
/* Initialize variable "x" to value v: mark every cache clean, then loop with
 * scratch counter j setting each process's cached copy to v. (Some loop
 * scaffolding lines are elided in this view — see the original-numbering gaps.) */
48 #define INIT_CACHED_VAR(x, v, j) \
50 cache_dirty_##x.bitfield = 0; \
54 cached_##x.val[j] = v; \
56 :: j >= NR_PROCS -> break \
/* True when process "id" has a store to x that has not yet reached mem_x. */
59 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
/* Read x from the calling process's own cache — never directly from mem_x. */
61 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
/* Write v to the calling process's cached copy of x and set its dirty bit;
 * the store becomes visible in mem_x only on a later (possibly random)
 * write-back via CACHE_WRITE_TO_MEM. */
63 #define WRITE_CACHED_VAR(x, v) \
65 cached_##x.val[get_pid()] = v; \
66 cache_dirty_##x.bitfield = \
67 cache_dirty_##x.bitfield | (1 << get_pid()); \
/* Flush process "id"'s cached copy of x to main memory (mem_x) and clear its
 * dirty bit, but only when the cache is dirty. The clean-cache alternative of
 * this if..fi is elided in this view. */
70 #define CACHE_WRITE_TO_MEM(x, id) \
72 :: IS_CACHE_DIRTY(x, id) -> \
73 mem_##x = cached_##x.val[id]; \
74 cache_dirty_##x.bitfield = \
75 cache_dirty_##x.bitfield & (~(1 << id)); \
/* Refresh process "id"'s cached copy of x from mem_x, but only when the cache
 * is clean — a dirty cache must not have its pending store overwritten. */
80 #define CACHE_READ_FROM_MEM(x, id) \
82 :: !IS_CACHE_DIRTY(x, id) -> \
83 cached_##x.val[id] = mem_##x;\
89 * May update other caches if cache is dirty, or not.
/* Nondeterministically flush x for process "id", or do nothing — this models
 * out-of-order write-back: the model checker explores both choices. */
91 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
93 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
/* Nondeterministically refresh x for process "id", or do nothing — models
 * out-of-order (speculative) reads. */
97 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
99 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
104 * Remote barriers test the scheme where a signal (or IPI) is sent to all
105 * reader threads to promote their compiler barrier to a smp_mb().
107 #ifdef REMOTE_BARRIERS
/* Read barrier on behalf of process "i": force-refresh every modeled variable
 * from main memory — the global grace-period counter, each reader's active
 * counter (loop over j), and the pointer generation. j is a scratch counter. */
109 inline smp_rmb_pid(i, j)
112 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
116 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
118 :: j >= NR_READERS -> break
120 CACHE_READ_FROM_MEM(generation_ptr, i);
/* Write barrier on behalf of process "i": force-flush every modeled variable
 * to main memory, mirroring smp_rmb_pid. j is a scratch counter. */
124 inline smp_wmb_pid(i, j)
127 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
131 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
133 :: j >= NR_READERS -> break
135 CACHE_WRITE_TO_MEM(generation_ptr, i);
/* Full barrier for process "i" — body elided in this view (original lines
 * 140-156); presumably a write barrier followed by a read barrier. */
139 inline smp_mb_pid(i, j)
157 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
158 * signal or IPI to have all readers execute a smp_mb.
159 * We are not modeling the whole rendez-vous between readers and writers here,
160 * we just let the writer update each reader's caches remotely.
/* REMOTE_BARRIERS smp_mb: the writer (pid >= NR_READERS) first executes its own
 * full barrier... */
165 :: get_pid() >= NR_READERS ->
166 smp_mb_pid(get_pid(), j);
/* ...then, looping i over every reader (loop head elided in this view),
 * executes the full barrier remotely on each reader's behalf, modeling the
 * IPI/signal promotion of the readers' compiler barriers. */
172 :: i >= NR_READERS -> break
174 smp_mb_pid(get_pid(), j);
/* Non-REMOTE_BARRIERS smp_rmb body fragment: the calling process refreshes its
 * own cached copies of all modeled variables (surrounding inline header and
 * loop scaffolding elided in this view). */
184 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
188 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
190 :: i >= NR_READERS -> break
192 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* Non-REMOTE_BARRIERS smp_wmb body fragment: flush the calling process's own
 * cached copies of all modeled variables to memory. */
199 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
203 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
205 :: i >= NR_READERS -> break
207 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
230 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
/* Global modeled state. urcu_gp_ctr: grace-period counter whose phase bit
 * (RCU_GP_CTR_BIT) the writer flips; urcu_active_readers: per-reader nesting
 * counter snapshots; generation_ptr: models the pointer being published. */
231 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
232 /* Note ! currently only two readers */
233 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
234 /* pointer generation */
235 DECLARE_CACHED_VAR(byte, generation_ptr);
/* Generation most recently freed by the writer (set after both GP flips). */
237 byte last_free_gen = 0;
/* Per-reader: generation observed inside the read-side critical section, and a
 * flag set while the reader is accessing the data — presumably used by the
 * safety property (not visible in this view; confirm against the full file). */
239 byte read_generation[NR_READERS];
240 bit data_access[NR_READERS];
/* Busy-wait until init() has finished setting up the modeled variables
 * (spins while init_done == 0; enclosing do..od elided in this view). */
246 inline wait_init_done()
249 :: init_done == 0 -> skip;
/* Body fragment of the out-of-order memory step (inline header elided in this
 * view — presumably ooo_mem): nondeterministically flush and/or refresh each
 * modeled variable, letting the checker explore every write-back/read ordering. */
257 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
261 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
264 :: i >= NR_READERS -> break
266 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
267 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
271 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
274 :: i >= NR_READERS -> break
276 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* Reader pids occupy 0 .. NR_READERS-1; the writer's pid comes right after. */
280 #define get_readerid() (get_pid())
281 #define get_writerid() (get_readerid() + NR_READERS)
/* Spin until reader "tmp" is no longer inside a read-side critical section
 * belonging to the old grace period: its snapshot tmp2 has nesting bits set
 * (RCU_GP_CTR_NEST_MASK) AND its phase differs from the current urcu_gp_ctr
 * (condition continues on lines elided in this view). i, j are scratch. */
283 inline wait_for_reader(tmp, tmp2, i, j)
287 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
290 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
291 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
/* Without GEN_ERROR_WRITER_PROGRESS the busy-wait body differs — presumably to
 * exercise a writer-progress error case when the flag is defined; confirm. */
293 #ifndef GEN_ERROR_WRITER_PROGRESS
/* Wait for every reader in turn to pass through a quiescent state: loop tmp
 * over all readers, calling wait_for_reader on each. The (NR_READERS > 1)
 * branch's body is elided in this view — presumably an smp_mb between readers. */
304 inline wait_for_quiescent_state(tmp, tmp2, i, j)
308 :: tmp < NR_READERS ->
309 wait_for_reader(tmp, tmp2, i, j);
311 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
317 :: tmp >= NR_READERS -> break
321 /* Model the RCU read-side critical section. */
/* One full read-side pass: nest rcu_read_lock up to READER_NEST_LEVEL times,
 * read the published generation, toggle the data_access flag, then unnest.
 * (Several lines — ooo_mem calls, barriers, stored values — are elided in this
 * view; the visible statements show the skeleton only.) */
323 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
327 :: nest_i < READER_NEST_LEVEL ->
329 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
/* Outermost nesting (no nest bits set): snapshot the global grace-period
 * counter and store it into this reader's active-readers slot... */
332 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
334 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
336 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
/* ...otherwise (nested) just bump the nesting count (value elided). */
339 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
344 :: nest_i >= READER_NEST_LEVEL -> break;
/* Inside the critical section: record the generation this reader sees and mark
 * the data as being accessed; the flag is cleared before unlocking. */
348 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
350 data_access[get_readerid()] = 1;
352 data_access[get_readerid()] = 0;
/* Unlock path: decrement the nesting count once per nesting level. */
356 :: nest_i < READER_NEST_LEVEL ->
358 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
360 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
362 :: nest_i >= READER_NEST_LEVEL -> break;
365 //smp_mc(i); /* added */
/* Reader process, NR_READERS instances: each repeatedly performs a full
 * read-side critical section via urcu_one_read (loop scaffolding and local
 * declarations elided in this view). */
368 active [NR_READERS] proctype urcu_reader()
375 assert(get_pid() < NR_PROCS);
381 * We do not test reader's progress here, because we are mainly
382 * interested in writer's progress. The reader never blocks
383 * anyway. We have to test for reader/writer's progress
384 * separately, otherwise we could think the writer is doing
385 * progress when it's blocked by an always progressing reader.
387 #ifdef READER_PROGRESS
388 /* Only test progress of one random reader. They are all the
/* Reader 0 carries the progress label when READER_PROGRESS is defined. */
392 :: get_readerid() == 0 ->
400 urcu_one_read(i, j, nest_i, tmp, tmp2);
404 /* Model the RCU update process. */
/* Single writer: bounded loop publishing new generations (up to 5), then for
 * each update runs synchronize_rcu — flip the grace-period phase bit and wait
 * for all readers, TWICE — before performing the free step. Lock handling,
 * barriers, and ooo_mem calls are partially elided in this view. */
406 active proctype urcu_writer()
414 assert(get_pid() < NR_PROCS);
417 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
418 #ifdef WRITER_PROGRESS
/* Publish a new generation (models assigning a new pointer). */
423 old_gen = READ_CACHED_VAR(generation_ptr);
424 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
/* Take the write-side mutex (acquisition body elided in this view). */
432 :: write_lock == 0 ->
/* First grace-period phase: toggle RCU_GP_CTR_BIT and wait for readers. */
441 tmp = READ_CACHED_VAR(urcu_gp_ctr);
443 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
446 wait_for_quiescent_state(tmp, tmp2, i, j);
/* Second phase: toggle back and wait again, so readers straddling either
 * phase are guaranteed to have left their critical sections. */
450 tmp = READ_CACHED_VAR(urcu_gp_ctr);
452 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
455 wait_for_quiescent_state(tmp, tmp2, i, j);
459 /* free-up step, e.g., kfree(). */
461 last_free_gen = old_gen;
467 * Given the reader loops infinitely, let the writer also busy-loop
468 * with progress here so, with weak fairness, we can test the
474 #ifdef WRITER_PROGRESS
481 /* Leave after the readers and writers so the pid count is ok. */
486 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
487 INIT_CACHED_VAR(generation_ptr, 0, j);
492 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
493 read_generation[i] = 1;
496 :: i >= NR_READERS -> break