/* Commit: "Fix makefile, set default nesting to 2" — urcu.git, formal-model/urcu/urcu.spin */
/*
 * urcu.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */
20
/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

/*
 * Default reader nesting depth.  Normally overridden from the makefile
 * (-DREADER_NEST_LEVEL=...); the guarded default keeps the model
 * runnable stand-alone (commit subject: "set default nesting to 2").
 */
#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif

#define get_pid()	(_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb
 * forces both.
 */

/* Backing memory, one cache line per process, and per-process dirty bits. */
#define DECLARE_CACHED_VAR(type, x, v)		\
	type mem_##x = v;			\
	type cached_##x[NR_PROCS] = v;		\
	bit cache_dirty_##x[NR_PROCS] = 0

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x[id])

/* Reads always hit the calling process's local cache. */
#define READ_CACHED_VAR(x)	(cached_##x[get_pid()])

/* Writes go to the local cache and mark the line dirty. */
#define WRITE_CACHED_VAR(x, v)			\
	atomic {				\
		cached_##x[get_pid()] = v;	\
		cache_dirty_##x[get_pid()] = 1;	\
	}

/* Commit the cache line of process `id` to memory iff it is dirty. */
#define CACHE_WRITE_TO_MEM(x, id)		\
	if					\
	:: IS_CACHE_DIRTY(x, id) ->		\
		mem_##x = cached_##x[id];	\
		cache_dirty_##x[id] = 0;	\
	:: else ->				\
		skip				\
	fi;

/* Refresh the cache line of process `id` from memory iff it is clean. */
#define CACHE_READ_FROM_MEM(x, id)		\
	if					\
	:: !IS_CACHE_DIRTY(x, id) ->		\
		cached_##x[id] = mem_##x;	\
	:: else ->				\
		skip				\
	fi;

/*
 * May update other caches if cache is dirty, or not: the
 * non-deterministic choice models arbitrary write-back timing.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;
/*
 * Read memory barrier: refresh every modeled variable from memory into
 * this process's cache.  Keep the variable list in sync manually with
 * smp_wmb and ooo_mem.
 */
inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
91
/*
 * Write memory barrier: commit every dirty modeled variable from this
 * process's cache to memory.  Keep the variable list in sync manually
 * with smp_rmb and ooo_mem.
 */
inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}
100
/*
 * Full memory barrier: write-back followed by refresh.  Defining
 * NO_WMB / NO_RMB (e.g. from the makefile) weakens the barrier so the
 * model can demonstrate the resulting failure.
 */
inline smp_mb(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i);
#endif
#ifndef NO_RMB
		smp_rmb(i);
#endif
		skip;	/* keep the atomic block non-empty when both are disabled */
	}
}
113
/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem. */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note! currently only one reader. */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* Pointer generation. */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;		/* generation most recently freed by the writer */
bit free_done = 0;		/* set once the writer has performed its free step */
byte read_generation = 1;	/* generation observed by the reader */
bit data_access = 0;		/* reader is inside its data-access window */

bit write_lock = 0;		/* serializes writers (busy-wait mutex) */
/*
 * Model out-of-order memory: at each call site, every modeled variable
 * may (or may not) be committed to and/or refreshed from memory.
 * Keep the variable list in sync manually with smp_rmb / smp_wmb.
 */
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
					  get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
					   get_pid());
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

/* Readers occupy pids [0, NR_READERS); writers follow them. */
#define get_readerid()	(get_pid())
#define get_writerid()	(get_readerid() + NR_READERS)
144
/*
 * Busy-wait until the reader is no longer inside a read-side critical
 * section begun before the current grace-period phase: loop while the
 * reader's nesting count is non-zero AND its snapshot of the gp counter
 * phase bit differs from the writer's current one.
 *
 * NOTE(review): parameter `id` is currently unused — READ_CACHED_VAR
 * implicitly indexes by get_pid(), which is only sound while there is a
 * single urcu_active_readers_one variable (NR_READERS == 1).  Confirm
 * before raising NR_READERS.
 */
inline wait_for_reader(tmp, id, i)
{
	do
	:: 1 ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		if
		:: (tmp & RCU_GP_CTR_NEST_MASK)
		   && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
		       & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i);
#else
			skip;
#endif
		:: else ->
			break;
		fi;
	od;
}
165
/* Wait, in turn, for each of the NR_READERS readers to pass through a
 * quiescent state. */
inline wait_for_quiescent_state(tmp, i, j)
{
	i = 0;
	do
	:: i < NR_READERS ->
		wait_for_reader(tmp, i, j);
		i++
	:: i >= NR_READERS -> break
	od;
}
176
177/* Model the RCU read-side critical section. */
178
active [NR_READERS] proctype urcu_reader()
{
	byte i, nest_i;
	byte tmp, tmp2;

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test reader's progress here, because we are mainly
		 * interested in writer's progress. The reader never blocks
		 * anyway. We have to test for reader/writer's progress
		 * separately, otherwise we could think the writer is doing
		 * progress when it's blocked by an always progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		/* rcu_read_lock(), taken READER_NEST_LEVEL times. */
		nest_i = 0;
		do
		:: nest_i < READER_NEST_LEVEL ->
			ooo_mem(i);
			tmp = READ_CACHED_VAR(urcu_active_readers_one);
			ooo_mem(i);
			if
			:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
				/* Outermost nesting: snapshot the gp counter. */
				tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
				ooo_mem(i);
				WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
			:: else ->
				/* Already nested: bump the nesting count only. */
				WRITE_CACHED_VAR(urcu_active_readers_one,
						 tmp + 1);
			fi;
			ooo_mem(i);
			smp_mb(i);
			nest_i++;
		:: nest_i >= READER_NEST_LEVEL -> break;
		od;

		/* Critical section: sample the protected generation. */
		ooo_mem(i);
		read_generation = READ_CACHED_VAR(generation_ptr);
		ooo_mem(i);
		data_access = 1;
		ooo_mem(i);
		data_access = 0;

		/* rcu_read_unlock(), unwinding the nesting. */
		nest_i = 0;
		do
		:: nest_i < READER_NEST_LEVEL ->
			ooo_mem(i);
			smp_mb(i);
			ooo_mem(i);
			tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
			nest_i++;
		:: nest_i >= READER_NEST_LEVEL -> break;
		od;
		ooo_mem(i);
		//smp_mc(i); /* added */
	od;
}
244
245
246/* Model the RCU update process. */
247
active [NR_WRITERS] proctype urcu_writer()
{
	byte i, j;
	byte tmp;
	byte old_gen;

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		/* Publish a new generation (models rcu_assign_pointer()). */
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

		/* Acquire the write lock (busy-wait mutex). */
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i);
		ooo_mem(i);
		/* First grace-period phase: flip the gp counter phase bit. */
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, i, j);
		//smp_mc(i);
#ifndef SINGLE_FLIP
		/* Second phase: flip the bit back and wait again. */
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, i, j);
#endif
		ooo_mem(i);
		smp_mb(i);
		ooo_mem(i);
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here so, with weak fairness, we can test the
	 * writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}
This page took 0.034128 seconds and 4 git commands to generate.