/* Enable the reader-progress check for this verification run. */
1 #define READER_PROGRESS
/* Bit 7 of urcu_gp_ctr is the grace-period phase bit the writer flips;
 * the bits below it (NEST_MASK) count read-side nesting. */
2 #define RCU_GP_CTR_BIT (1 << 7)
3 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
/* Error condition: the generation the reader is using equals the one
 * the writer last freed. */
5 #define read_free_race (read_generation == last_free_gen)
6 #define read_free (free_done && data_access)
/* Depth of nested read-side critical sections the modeled reader takes. */
8 #ifndef READER_NEST_LEVEL
9 #define READER_NEST_LEVEL 2
/* Model writer-driven promotion of reader barriers to full smp_mb()
 * (signal/IPI scheme) — see the remote-barriers comment below. */
12 #define REMOTE_BARRIERS
14 * mem.spin: Promela code to validate memory barriers with OOO memory.
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
30 * Copyright (c) 2009 Mathieu Desnoyers
33 /* Promela validation variables. */
/* Use Promela's built-in process id as the model's process identifier. */
40 #define get_pid() (_pid)
43 * Each process has its own data in cache. Caches are randomly updated.
44 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
/* Declare variable x with one cached copy and one dirty bit per process.
 * NOTE(review): CACHE_WRITE_TO_MEM / CACHE_READ_FROM_MEM also reference a
 * backing mem_##x — presumably declared by this macro as well; confirm
 * against the full model. */
48 #define DECLARE_CACHED_VAR(type, x, v) \
50 type cached_##x[NR_PROCS] = v; \
51 bit cache_dirty_##x[NR_PROCS] = 0
/* Does process "id" hold a store to x not yet flushed to memory? */
53 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])
/* Read x from the current process's cached copy (may be stale). */
55 #define READ_CACHED_VAR(x) (cached_##x[get_pid()])
/* Store v into the current process's cached copy of x and mark it dirty;
 * the store becomes globally visible only when the cache is flushed. */
57 #define WRITE_CACHED_VAR(x, v) \
59 cached_##x[get_pid()] = v; \
60 cache_dirty_##x[get_pid()] = 1; \
63 #define CACHE_WRITE_TO_MEM(x, id) \
65 :: IS_CACHE_DIRTY(x, id) -> \
66 mem_##x = cached_##x[id]; \
67 cache_dirty_##x[id] = 0; \
72 #define CACHE_READ_FROM_MEM(x, id) \
74 :: !IS_CACHE_DIRTY(x, id) -> \
75 cached_##x[id] = mem_##x;\
81 * May update other caches if cache is dirty, or not.
83 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
85 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
89 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
91 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
96 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
97 * reader threads to promote their compiler barrier to a smp_mb().
99 #ifdef REMOTE_BARRIERS
/* Read barrier executed on behalf of process i: refresh i's cached copy
 * of every modeled variable from memory. Keep this variable list in sync
 * with the DECLARE_CACHED_VAR declarations below. */
101 inline smp_rmb_pid(i)
104 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
105 CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
106 CACHE_READ_FROM_MEM(generation_ptr, i);
/* Write barrier executed on behalf of process i: flush i's dirty cached
 * stores of every modeled variable out to memory. */
110 inline smp_wmb_pid(i)
113 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
114 CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
115 CACHE_WRITE_TO_MEM(generation_ptr, i);
137 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
138 * signal or IPI to have all readers execute a smp_mb.
139 * We are not modeling the whole rendez-vous between readers and writers here,
140 * we just let the writer update each reader's caches remotely.
145 :: get_pid() >= NR_READERS ->
146 smp_mb_pid(get_pid());
152 :: i >= NR_READERS -> break
154 smp_mb_pid(get_pid());
164 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
165 CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
166 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
173 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
174 CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
175 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
198 /* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
/* Global grace-period counter: phase bit (RCU_GP_CTR_BIT) plus the
 * nesting count snapshotted by readers. */
199 DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
200 /* Note ! currently only one reader */
201 DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
202 /* pointer generation */
203 DECLARE_CACHED_VAR(byte, generation_ptr, 0);
/* Generation number whose data the writer last freed. */
205 byte last_free_gen = 0;
/* Generation number the reader observed inside its critical section. */
207 byte read_generation = 1;
215 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
216 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
218 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
219 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
220 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
222 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* Reader processes occupy pids [0, NR_READERS); writer pids follow. */
226 #define get_readerid() (get_pid())
227 #define get_writerid() (get_readerid() + NR_READERS)
/* Writer-side helper: spin until the reader is either outside any
 * read-side critical section (nest count zero) or has observed the
 * current urcu_gp_ctr phase — presumably looping on the snapshot below;
 * confirm the loop structure against the full model. */
229 inline wait_for_reader(tmp, id, i)
233 tmp = READ_CACHED_VAR(urcu_active_readers_one);
236 :: (tmp & RCU_GP_CTR_NEST_MASK)
237 && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
239 #ifndef GEN_ERROR_WRITER_PROGRESS
/* Writer-side helper: wait for each reader in turn (via wait_for_reader)
 * to pass through a quiescent state for the current grace-period phase. */
250 inline wait_for_quiescent_state(tmp, i, j)
255 wait_for_reader(tmp, i, j);
257 :: (NR_READERS > 1) && (i < NR_READERS - 1)
263 :: i >= NR_READERS -> break
267 /* Model the RCU read-side critical section. */
/* One reader pass: take READER_NEST_LEVEL nested read locks (snapshot
 * urcu_gp_ctr into urcu_active_readers_one on the outermost), read
 * generation_ptr, then drop the nested locks. */
269 inline urcu_one_read(i, nest_i, tmp, tmp2)
273 :: nest_i < READER_NEST_LEVEL ->
275 tmp = READ_CACHED_VAR(urcu_active_readers_one);
278 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
280 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
282 WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
284 WRITE_CACHED_VAR(urcu_active_readers_one,
289 :: nest_i >= READER_NEST_LEVEL -> break;
293 read_generation = READ_CACHED_VAR(generation_ptr);
301 :: nest_i < READER_NEST_LEVEL ->
303 tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
305 WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
307 :: nest_i >= READER_NEST_LEVEL -> break;
310 //smp_mc(i); /* added */
/* Reader process: repeatedly executes urcu_one_read. */
313 active [NR_READERS] proctype urcu_reader()
318 assert(get_pid() < NR_PROCS);
324 * We do not test reader's progress here, because we are mainly
325 * interested in writer's progress. The reader never blocks
326 * anyway. We have to test for reader/writer's progress
327 * separately, otherwise we could think the writer is doing
328 * progress when it's blocked by an always progressing reader.
330 #ifdef READER_PROGRESS
333 urcu_one_read(i, nest_i, tmp, tmp2);
337 /* Model the RCU update process. */
/* Writer process: publishes new generations of generation_ptr, runs the
 * two-phase grace-period detection (flip urcu_gp_ctr phase bit, wait for
 * quiescence, twice), then models the deferred free via last_free_gen. */
339 active [NR_WRITERS] proctype urcu_writer()
345 assert(get_pid() < NR_PROCS);
348 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
349 #ifdef WRITER_PROGRESS
354 old_gen = READ_CACHED_VAR(generation_ptr);
355 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
363 :: write_lock == 0 ->
372 tmp = READ_CACHED_VAR(urcu_gp_ctr);
374 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
377 wait_for_quiescent_state(tmp, i, j);
381 tmp = READ_CACHED_VAR(urcu_gp_ctr);
383 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
386 wait_for_quiescent_state(tmp, i, j);
390 /* free-up step, e.g., kfree(). */
392 last_free_gen = old_gen;
398 * Given the reader loops infinitely, let the writer also busy-loop
399 * with progress here so, with weak fairness, we can test the
405 #ifdef WRITER_PROGRESS