Update formal model from local copy
urcu.git: formal-model/urcu-nosched-model/result-signal-over-writer/testmerge/asserts.spin.input
#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
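
/*
 * Layout of the global/per-reader counter: the low 7 bits
 * (RCU_GP_CTR_NEST_MASK) hold the read-side nesting count, and bit 7
 * (RCU_GP_CTR_BIT) is the grace-period phase bit that the writer flips
 * between grace periods.
 */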

#define read_free_race (read_generation == last_free_gen)
#define read_free (free_done && data_access)
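
/*
 * Safety predicates for the verifier: read_free_race flags a reader
 * whose observed generation is the one most recently freed, and
 * read_free flags a data access occurring after the free step has
 * completed.  Presumably these feed the assertions/never claim added
 * when the model is generated (hence the name asserts.spin.input).
 */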

#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif

#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid() (_pid)

/*
 * Each process has its own view of the data in its cache.  Caches are
 * updated at random.  smp_wmb and smp_rmb force cache updates (write
 * and read, respectively); smp_mb forces both.
 */

#define DECLARE_CACHED_VAR(type, x, v)	\
	type mem_##x = v;		\
	type cached_##x[NR_PROCS] = v;	\
	bit cache_dirty_##x[NR_PROCS] = 0

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x[id])

#define READ_CACHED_VAR(x)	(cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v)			\
	atomic {				\
		cached_##x[get_pid()] = v;	\
		cache_dirty_##x[get_pid()] = 1;	\
	}

#define CACHE_WRITE_TO_MEM(x, id)		\
	if					\
	:: IS_CACHE_DIRTY(x, id) ->		\
		mem_##x = cached_##x[id];	\
		cache_dirty_##x[id] = 0;	\
	:: else ->				\
		skip				\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x[id] = mem_##x;\
	:: else ->			\
		skip			\
	fi;
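
/*
 * Note the asymmetry: a cache line is written back to memory only
 * while dirty, and refreshed from memory only while clean, so a
 * locally pending store can never be overwritten by a stale value
 * fetched from memory.
 */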

/*
 * The RANDOM_* variants nondeterministically choose whether to
 * propagate the cache line to or from memory.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

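/*
 * Because both guards are 1, Spin explores both branches of each if:
 * every possible interleaving of cache write-backs and refreshes is
 * covered by the state search, which is what models out-of-order
 * memory.
 */
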
/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to
 * all reader threads to promote their compiler barrier to an smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}
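
/*
 * NO_WMB and NO_RMB presumably serve as fault injection: defining one
 * removes the corresponding barrier so the verifier can confirm that
 * the model then reaches the error states the assertions guard
 * against.  With both defined, smp_mb_pid degenerates to ooo_mem.
 */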

/*
 * Readers issue a simple barrier(); writers issue an smp_mb() _and_
 * send a signal or IPI to make all readers execute an smp_mb().
 * We do not model the whole rendezvous between readers and writers
 * here; we just let the writer update each reader's caches remotely.
 */
inline smp_mb(i)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid());
	:: else -> skip;
	fi;
}
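
/*
 * Only the writer (get_pid() >= NR_READERS) pays for barriers here: it
 * fences itself, remotely fences every reader, then fences itself
 * again.  On the reader side smp_mb is a no-op (skip), matching the
 * compiler-only barrier of the signal-based URCU flavour.
 */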

#else

inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i);
#endif
#ifndef NO_RMB
		smp_rmb(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem. */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note: currently only one reader. */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* Pointer generation. */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

/* Instrumentation for the safety predicates defined at the top. */
byte last_free_gen = 0;		/* generation last passed to the free step */
bit free_done = 0;		/* set once the writer has freed a generation */
byte read_generation = 1;	/* generation observed by the reader */
bit data_access = 0;		/* reader is inside its data-access window */

/* Writer mutual exclusion. */
bit write_lock = 0;

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
					  get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
					   get_pid());
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
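
/*
 * ooo_mem is placed between every pair of cached-variable accesses in
 * the processes below: each call may commit or refresh any subset of
 * the shared variables, so the verifier sees all the reorderings a
 * weakly ordered machine could produce.
 */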

#define get_readerid()	(get_pid())
#define get_writerid()	(get_readerid() + NR_READERS)

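/*
 * Spin until the reader is either not in a read-side critical section
 * (nest count zero) or has observed the writer's latest phase flip:
 * the reader is still "old" while the RCU_GP_CTR_BIT of its snapshot
 * differs from the current urcu_gp_ctr.
 */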
inline wait_for_reader(tmp, id, i)
{
	do
	:: 1 ->
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (tmp & RCU_GP_CTR_NEST_MASK)
		   && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
		       & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i);
#else
			ooo_mem(i);
#endif
		:: else ->
			break;
		fi;
	od;
}

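/*
 * Wait until every reader has passed through a quiescent state for the
 * current phase of the grace period.
 */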
inline wait_for_quiescent_state(tmp, i, j)
{
	i = 0;
	do
	:: i < NR_READERS ->
		wait_for_reader(tmp, i, j);
		if
		:: (NR_READERS > 1) && (i < NR_READERS - 1)
			-> ooo_mem(j);
		:: else
			-> skip;
		fi;
		i++
	:: i >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

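/*
 * For orientation, a sketch of the C-level equivalent of one pass of
 * urcu_one_read with READER_NEST_LEVEL == 2 (names are illustrative):
 *
 *	rcu_read_lock();	-- outermost: snapshot urcu_gp_ctr
 *	rcu_read_lock();	-- nested: just increment the nest count
 *	... access shared data, record its generation ...
 *	rcu_read_unlock();	-- decrement
 *	rcu_read_unlock();	-- decrement back to zero
 */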
inline urcu_one_read(i, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers_one,
					 tmp + 1);
		fi;
		smp_mb(i);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access = 1;
	ooo_mem(i);
	data_access = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i);
		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	ooo_mem(i);
	//smp_mc(i);	/* added */
}

active [NR_READERS] proctype urcu_reader()
{
	byte i, nest_i;
	byte tmp, tmp2;

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we
		 * are mainly interested in the writer's progress.  The
		 * reader never blocks anyway.  We have to test reader
		 * and writer progress separately; otherwise we could
		 * think the writer is making progress when it is in
		 * fact blocked by an always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, nest_i, tmp, tmp2);
	od;
}

/* Model the RCU update process. */

active [NR_WRITERS] proctype urcu_writer()
{
	byte i, j;
	byte tmp;
	byte old_gen;

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
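		/*
		 * Incrementing generation_ptr presumably stands in for
		 * publishing a new version of the data structure (the
		 * pointer update of rcu_assign_pointer()); old_gen is
		 * the version reclaimed at the free-up step below.
		 */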
		ooo_mem(i);

		/* Busy-wait loop: acquire the writer mutex. */
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, i, j);
		//smp_mc(i);
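		/*
		 * The grace period normally flips RCU_GP_CTR_BIT and
		 * waits a second time below; SINGLE_FLIP presumably
		 * models the insufficient single flip-and-wait, where a
		 * reader concurrently sampling urcu_gp_ctr around the
		 * flip can be missed.
		 */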
#ifndef SINGLE_FLIP
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, i, j);
#endif
		smp_mb(i);
		write_lock = 0;
		/*
		 * Free-up step, e.g., kfree(): record the freed
		 * generation for the race assertions.
		 */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Since the reader loops forever, let the writer also busy-loop
	 * with a progress label here so that, under weak fairness, we
	 * can check the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}
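
/*
 * A typical verification run (a sketch; the exact invocation is
 * presumably driven by an accompanying Makefile, not shown here):
 *
 *	spin -a asserts.spin.input	generate the pan.c verifier
 *	cc -o pan pan.c			compile it
 *	./pan				run the exhaustive search
 *
 * Preprocessor switches such as -DNO_WMB, -DNO_RMB, -DSINGLE_FLIP or
 * -DGEN_ERROR_WRITER_PROGRESS select the error-injection variants
 * exercised above.
 */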