/*
 * urcu.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define get_pid()	(_pid)

/*
 * Each process has its own data in its cache. Caches are updated at random
 * points in time. smp_wmb and smp_rmb force cache updates (write-back and
 * read, respectively); smp_mb forces both.
 */

typedef per_proc_byte {
        byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
        byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x) \
        type mem_##x; \
        per_proc_##type cached_##x; \
        per_proc_bit cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j) \
        mem_##x = v; \
        cache_dirty_##x.bitfield = 0; \
        j = 0; \
        do \
        :: j < NR_PROCS -> \
                cached_##x.val[j] = v; \
                j++ \
        :: j >= NR_PROCS -> break \
        od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v) \
        atomic { \
                cached_##x.val[get_pid()] = v; \
                cache_dirty_##x.bitfield = \
                        cache_dirty_##x.bitfield | (1 << get_pid()); \
        }

#define CACHE_WRITE_TO_MEM(x, id) \
        if \
        :: IS_CACHE_DIRTY(x, id) -> \
                mem_##x = cached_##x.val[id]; \
                cache_dirty_##x.bitfield = \
                        cache_dirty_##x.bitfield & (~(1 << id)); \
        :: else -> \
                skip \
        fi;

#define CACHE_READ_FROM_MEM(x, id) \
        if \
        :: !IS_CACHE_DIRTY(x, id) -> \
                cached_##x.val[id] = mem_##x; \
        :: else -> \
                skip \
        fi;

/*
 * Randomly write a dirty cache entry back to memory (or not), and randomly
 * refresh a clean cache entry from memory (or not).
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id) \
        if \
        :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
        :: 1 -> skip \
        fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id) \
        if \
        :: 1 -> CACHE_READ_FROM_MEM(x, id); \
        :: 1 -> skip \
        fi;

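/*
 * Sketch of how the two macros above model out-of-order memory (added
 * commentary, not part of the original model): suppose a process writes two
 * hypothetical cached variables with WRITE_CACHED_VAR(x, 1) and then
 * WRITE_CACHED_VAR(y, 1). Both writes land only in that process's cache and
 * set the corresponding dirty bits. Because ooo_mem() flushes and refreshes
 * each variable independently and nondeterministically, y may reach memory
 * (and other caches) before x does, so another process can observe the new y
 * while still reading the old x, unless an explicit smp_wmb/smp_rmb/smp_mb
 * forces the flush or refresh.
 */
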
/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier() to an smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, i);
        }
}

inline smp_wmb_pid(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, i);
        }
}

inline smp_mb_pid(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
                smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(j);
#endif
#endif
        }
}

/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a signal
 * (or IPI) to make all readers execute an smp_mb().
 * We do not model the whole rendez-vous between readers and writers here;
 * we simply let the writer update each reader's cache remotely.
 */
inline smp_mb(i, j)
{
        if
        :: get_pid() >= NR_READERS ->
                smp_mb_pid(get_pid(), j);
                i = 0;
                do
                :: i < NR_READERS ->
                        smp_mb_pid(i, j);
                        i++;
                :: i >= NR_READERS -> break
                od;
                smp_mb_pid(get_pid(), j);
        :: else -> skip;
        fi;
}
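
/*
 * Added commentary: only the writer (get_pid() >= NR_READERS) does anything
 * in this smp_mb() variant. It issues a full barrier for itself, then one on
 * behalf of every reader pid, then another one for itself, mirroring the
 * "send signal, wait for all readers to acknowledge" sequence of the
 * signal-based scheme. Reader calls to smp_mb() fall through to skip, i.e.
 * they act only as a compiler barrier.
 */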

#else

inline smp_rmb(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

inline smp_wmb(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
        }
}

inline smp_mb(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb(i, j);
#endif
#ifndef NO_RMB
                smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(i);
#endif
#endif
        }
}

#endif
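
/*
 * Added summary (derived from the #ifdef logic above): depending on the
 * defines used for a given run, smp_mb(i, j) reduces to:
 *
 *   (neither NO_WMB nor NO_RMB)   smp_wmb followed by smp_rmb (full barrier)
 *   NO_WMB only                   smp_rmb only
 *   NO_RMB only                   smp_wmb only
 *   NO_WMB and NO_RMB             ooo_mem only (no ordering at all)
 *
 * The weakened variants are presumably meant for error-injection runs that
 * check the model actually catches missing barriers.
 */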

/* Keep manually in sync with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note! Currently only two readers. */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

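/*
 * Added commentary: the variables below are not part of the modeled
 * algorithm itself; they are instrumentation for verification. The writer
 * records in last_free_gen the generation it has "freed" and raises
 * free_done; each reader records in read_generation[] the generation it
 * observed and marks its data access window with data_access[]. These are
 * presumably the variables referenced by the properties in the "included"
 * DEFINES file. write_lock serializes the update side, and init_done
 * releases the processes once init has set up the caches.
 */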
byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

inline wait_init_done()
{
        do
        :: init_done == 0 -> skip;
        :: else -> break;
        od;
}

inline ooo_mem(i)
{
        atomic {
                RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}
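
/*
 * Added commentary: ooo_mem() is inserted between almost every pair of
 * memory accesses in the processes below. For each cached variable it
 * nondeterministically writes the local dirty value back to memory and/or
 * refreshes the local copy from memory, so the model explores every order in
 * which this process's stores can become visible and in which it can pick up
 * other processes' stores, i.e. an out-of-order memory system.
 */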

#define get_readerid()	(get_pid())
#define get_writerid()	(get_readerid() + NR_READERS)

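/*
 * Added commentary: wait_for_reader() spins until reader "tmp" can no longer
 * be in a read-side critical section started before the current grace-period
 * phase: it exits as soon as the reader's nesting count is zero
 * (RCU_GP_CTR_NEST_MASK) or the reader's snapshot of the counter carries the
 * same phase bit (RCU_GP_CTR_BIT) as the global urcu_gp_ctr.
 */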
inline wait_for_reader(tmp, tmp2, i, j)
{
        do
        :: 1 ->
                tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
                ooo_mem(i);
                if
                :: (tmp2 & RCU_GP_CTR_NEST_MASK)
                                && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
                                        & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
                        smp_mb(i, j);
#else
                        ooo_mem(i);
#endif
                :: else ->
                        break;
                fi;
        od;
}

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
        tmp = 0;
        do
        :: tmp < NR_READERS ->
                wait_for_reader(tmp, tmp2, i, j);
                if
                :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
                        -> ooo_mem(i);
                :: else
                        -> skip;
                fi;
                tmp++
        :: tmp >= NR_READERS -> break
        od;
}

/* Model the RCU read-side critical section. */

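/*
 * Added commentary: urcu_one_read() models one complete read-side critical
 * section. The first loop corresponds to nested rcu_read_lock() calls: an
 * outermost lock copies the global urcu_gp_ctr into the reader's
 * urcu_active_readers entry, while a nested one just increments the nesting
 * count. The reader then samples generation_ptr and toggles data_access[] to
 * mark the window during which it dereferences the protected data. The
 * second loop corresponds to the matching rcu_read_unlock() calls, which
 * decrement the per-reader counter under an smp_mb().
 */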
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                if
                :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                tmp + 1);
                fi;
                smp_mb(i, j);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;

        ooo_mem(i);
        read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
        ooo_mem(i);
        data_access[get_readerid()] = 1;
        ooo_mem(i);
        data_access[get_readerid()] = 0;

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                smp_mb(i, j);
                tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        ooo_mem(i);
        //smp_mc(i);	/* added */
}

active [NR_READERS] proctype urcu_reader()
{
        byte i, j, nest_i;
        byte tmp, tmp2;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                /*
                 * We do not test the reader's progress here, because we are
                 * mainly interested in the writer's progress. The reader
                 * never blocks anyway. We have to test reader and writer
                 * progress separately; otherwise we could believe the writer
                 * is making progress when it is in fact blocked by an
                 * always-progressing reader.
                 */
#ifdef READER_PROGRESS
                /*
                 * Only test progress of one arbitrary reader; they are all
                 * the same.
                 */
                atomic {
                        if
                        :: get_readerid() == 0 ->
progress_reader:
                                skip;
                        :: else ->
                                skip;
                        fi;
                }
#endif
                urcu_one_read(i, j, nest_i, tmp, tmp2);
        od;
}

/* Model the RCU update process. */

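/*
 * Added commentary: each iteration of the writer loop models one update
 * followed by synchronize_rcu(): publish a new generation (generation_ptr is
 * incremented), take write_lock, flip the RCU_GP_CTR_BIT phase of urcu_gp_ctr
 * and wait for all readers to leave the previous phase (twice, unless
 * SINGLE_FLIP is defined), then release the lock and perform the "free" step
 * recorded in last_free_gen/free_done. The loop stops once generation_ptr
 * reaches 5, bounding the number of modeled updates.
 */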
active proctype urcu_writer()
{
        byte i, j;
        byte tmp, tmp2;
        byte old_gen;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

        do
        :: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
                ooo_mem(i);
                atomic {
                        old_gen = READ_CACHED_VAR(generation_ptr);
                        WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
                }
                ooo_mem(i);

                do
                :: 1 ->
                        atomic {
                                if
                                :: write_lock == 0 ->
                                        write_lock = 1;
                                        break;
                                :: else ->
                                        skip;
                                fi;
                        }
                od;
                smp_mb(i, j);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                ooo_mem(i);
                //smp_mc(i);
                wait_for_quiescent_state(tmp, tmp2, i, j);
                //smp_mc(i);
#ifndef SINGLE_FLIP
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                //smp_mc(i);
                ooo_mem(i);
                wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
                smp_mb(i, j);
                write_lock = 0;
                /* free-up step, e.g., kfree(). */
                atomic {
                        last_free_gen = old_gen;
                        free_done = 1;
                }
        :: else -> break;
        od;
        /*
         * Since the reader loops forever, let the writer also busy-loop with
         * a progress label here, so that under weak fairness we can still
         * check the writer's progress.
         */
end_writer:
        do
        :: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
                skip;
        od;
}

/* Declared after the readers and writers so the pid numbering works out. */
init {
        byte i, j;

        atomic {
                INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
                INIT_CACHED_VAR(generation_ptr, 0, j);

                i = 0;
                do
                :: i < NR_READERS ->
                        INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
                        read_generation[i] = 1;
                        data_access[i] = 0;
                        i++;
                :: i >= NR_READERS -> break
                od;
                init_done = 1;
        }
}