[urcu.git] / formal-model / urcu-controldataflow-intel-no-ipi / asserts.spin.input
1
2 // Poison value for freed memory
3 #define POISON 1
4 // Memory with correct data
5 #define WINE 0
6 #define SLAB_SIZE 2
7
8 #define read_poison (data_read_first[0] == POISON || data_read_second[0] == POISON)
9
10 #define RCU_GP_CTR_BIT (1 << 7)
11 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
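/*
 * Illustrative note (added): RCU_GP_CTR_BIT = 1 << 7 = 128 and
 * RCU_GP_CTR_NEST_MASK = 0x7f, so the low 7 bits of a reader's counter
 * hold its read-side nesting depth while bit 7 carries the grace-period
 * parity: (tmp & RCU_GP_CTR_NEST_MASK) == 0 means the reader is outside
 * any read-side critical section, and the writer below compares bit 7
 * against its local parity to detect readers still in the old period.
 */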
12
13 // REMOTE_BARRIERS disabled in this model variant
14 //#define REMOTE_BARRIERS
15
16 //#define ARCH_ALPHA
17 #define ARCH_INTEL
18 //#define ARCH_POWERPC
19 /*
20 * mem.spin: Promela code to validate memory barriers with OOO memory
21 * and out-of-order instruction scheduling.
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
27 *
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
32 *
33 * You should have received a copy of the GNU General Public License
34 * along with this program; if not, write to the Free Software
35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 *
37 * Copyright (c) 2009 Mathieu Desnoyers
38 */
39
40 /* Promela validation variables. */
41
42 /* specific defines "included" here */
43 /* DEFINES file "included" here */
44
45 #define NR_READERS 1
46 #define NR_WRITERS 1
47
48 #define NR_PROCS 2
49
50 #define get_pid() (_pid)
51
52 #define get_readerid() (get_pid())
53
54 /*
55 * Produced process control and data flow. Updated after each instruction to
56 * show which variables are ready. One-hot bit encoding per variable is used
57 * to save state space. The bits act as triggers to execute the instructions
58 * taking those variables as input; leaving bits active inhibits instruction
59 * execution. This scheme makes instruction disabling and dependency
60 * fall-back automatic.
61 */
62
63 #define CONSUME_TOKENS(state, bits, notbits) \
64 ((!(state & (notbits))) && (state & (bits)) == (bits))
65
66 #define PRODUCE_TOKENS(state, bits) \
67 state = state | (bits);
68
69 #define CLEAR_TOKENS(state, bits) \
70 state = state & ~(bits)
71
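/*
 * Worked example (added, with hypothetical tokens 0 and 1): starting from
 * state = 0, PRODUCE_TOKENS(state, 1 << 0) yields state = 0x1. Then
 * CONSUME_TOKENS(state, 1 << 0, 1 << 1) is true (token 0 present, token 1
 * absent), enabling the instruction gated on token 0, while
 * CONSUME_TOKENS(state, 1 << 1, 0) remains false until token 1 is
 * produced. CLEAR_TOKENS(state, 0x3) finally resets both tokens.
 */
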
72 /*
73 * Types of dependency :
74 *
75 * Data dependency
76 *
77 * - True dependency, Read-after-Write (RAW)
78 *
79 * This type of dependency happens when a statement depends on the result of a
80 * previous statement. This applies to any statement which needs to read a
81 * variable written by a preceding statement.
82 *
83 * - False dependency, Write-after-Read (WAR)
84 *
85 * Typically, variable renaming can ensure that this dependency goes away.
86 * However, if the statements must read and then write from/to the same variable
87 * in the OOO memory model, renaming may be impossible, and therefore this
88 * causes a WAR dependency.
89 *
90 * - Output dependency, Write-after-Write (WAW)
91 *
92 * Two writes to the same variable in subsequent statements. Variable renaming
93 * can usually eliminate this dependency, but it remains when multiple writes
94 * must target the same OOO mem model variable.
95 *
96 * Control dependency
97 *
98 * Execution of a given instruction depends on a previous instruction evaluating
99 * in a way that allows its execution. E.g. : branches.
100 *
101 * Useful considerations for joining dependencies after branch
102 *
103 * - Pre-dominance
104 *
105 * "We say box i dominates box j if every path (leading from input to output
106 * through the diagram) which passes through box j must also pass through box
107 * i. Thus box i dominates box j if box j is subordinate to box i in the
108 * program."
109 *
110 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
111 * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
112 *
113 * - Post-dominance
114 *
115 * Just as pre-dominance, but with the arcs of the data flow inverted, and
116 * input vs output exchanged. Therefore, i post-dominating j ensures that every
117 * path passing through j will pass through i before reaching the output.
118 *
119 * Prefetch and speculative execution
120 *
121 * If an instruction depends on the result of a previous branch, but it does not
122 * have side-effects, it can be executed before the branch result is known.
123 * However, it must be restarted if a core-synchronizing instruction is issued.
124 * Note that instructions which depend on the speculative instruction result
125 * but that have side-effects must depend on the branch completion in addition
126 * to the speculatively executed instruction.
127 *
128 * Other considerations
129 *
130 * Note about "volatile" keyword dependency : the compiler will order volatile
131 * accesses so they appear in the right order on a given CPU. They can still be
132 * reordered by CPU instruction scheduling. This therefore cannot be
133 * considered a dependency.
134 *
135 * References :
136 *
137 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
138 * Kaufmann. ISBN 1-55860-698-X.
139 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
140 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
141 * 1-55860-286-0.
142 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
143 * Morgan Kaufmann. ISBN 1-55860-320-4.
144 */
145
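/*
 * Illustrative example (added, hypothetical cached variable x):
 *   tmp = READ_CACHED_VAR(x);        (S1)
 *   WRITE_CACHED_VAR(x, tmp + 1);    (S2)
 *   WRITE_CACHED_VAR(x, 0);          (S3)
 * S2 has a RAW dependency on S1 (it reads tmp), S2 has a WAR dependency on
 * S1 (S1 reads x before S2 writes it), and S3 has a WAW dependency on S2
 * (both write x).
 */
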
146 /*
147 * Note about loops and nested calls
148 *
149 * To keep this model simple, loops expressed in the framework behave as if
150 * there were a core-synchronizing instruction between iterations. To see the
151 * effect of loop unrolling, loops must be unrolled manually. Note that if loops
152 * end or start with a core-synchronizing instruction, the model is appropriate.
153 * Nested calls are not supported.
154 */
155
156 /*
157 * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
158 * PowerPC, ARM) ensure that dependent reads won't be reordered. Cf.
159 * http://www.linuxjournal.com/article/8212
160 */
161 #ifdef ARCH_ALPHA
162 #define HAVE_OOO_CACHE_READ
163 #endif
164
165 /*
166 * Each process has its own data in its cache. Caches are randomly updated.
167 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
168 * smp_mb forces both.
169 */
170
171 typedef per_proc_byte {
172 byte val[NR_PROCS];
173 };
174
175 typedef per_proc_bit {
176 bit val[NR_PROCS];
177 };
178
179 /* Bitfield has a maximum of 8 procs */
180 typedef per_proc_bitfield {
181 byte bitfield;
182 };
183
184 #define DECLARE_CACHED_VAR(type, x) \
185 type mem_##x; \
186 per_proc_##type cached_##x; \
187 per_proc_bitfield cache_dirty_##x;
188
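/*
 * Expansion sketch (added, hypothetical variable "foo"):
 * DECLARE_CACHED_VAR(byte, foo) declares
 *   byte mem_foo;                       - the memory copy
 *   per_proc_byte cached_foo;           - one cached copy per process
 *   per_proc_bitfield cache_dirty_foo;  - per-process dirty bits
 */
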
189 #define INIT_CACHED_VAR(x, v, j) \
190 mem_##x = v; \
191 cache_dirty_##x.bitfield = 0; \
192 j = 0; \
193 do \
194 :: j < NR_PROCS -> \
195 cached_##x.val[j] = v; \
196 j++ \
197 :: j >= NR_PROCS -> break \
198 od;
199
200 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
201
202 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
203
204 #define WRITE_CACHED_VAR(x, v) \
205 atomic { \
206 cached_##x.val[get_pid()] = v; \
207 cache_dirty_##x.bitfield = \
208 cache_dirty_##x.bitfield | (1 << get_pid()); \
209 }
210
211 #define CACHE_WRITE_TO_MEM(x, id) \
212 if \
213 :: IS_CACHE_DIRTY(x, id) -> \
214 mem_##x = cached_##x.val[id]; \
215 cache_dirty_##x.bitfield = \
216 cache_dirty_##x.bitfield & (~(1 << id)); \
217 :: else -> \
218 skip \
219 fi;
220
221 #define CACHE_READ_FROM_MEM(x, id) \
222 if \
223 :: !IS_CACHE_DIRTY(x, id) -> \
224 cached_##x.val[id] = mem_##x;\
225 :: else -> \
226 skip \
227 fi;
228
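/*
 * Protocol sketch (added, hypothetical variable "foo"): after
 * WRITE_CACHED_VAR(foo, 1) by process 0, bit 0 of cache_dirty_foo is set.
 * CACHE_WRITE_TO_MEM(foo, 0) then flushes cached_foo.val[0] to mem_foo and
 * clears the bit, while CACHE_READ_FROM_MEM(foo, 0) refuses to overwrite a
 * dirty line, preserving the pending local write.
 */
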
229 /*
230 * Randomly write a dirty cache line back to memory, or do nothing.
231 */
232 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
233 if \
234 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
235 :: 1 -> skip \
236 fi;
237
238 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
239 if \
240 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
241 :: 1 -> skip \
242 fi;
243
244 /* Must consume all prior read tokens. All subsequent reads depend on it. */
245 inline smp_rmb(i)
246 {
247 atomic {
248 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
249 i = 0;
250 do
251 :: i < NR_READERS ->
252 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
253 i++
254 :: i >= NR_READERS -> break
255 od;
256 CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
257 i = 0;
258 do
259 :: i < SLAB_SIZE ->
260 CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
261 i++
262 :: i >= SLAB_SIZE -> break
263 od;
264 }
265 }
266
267 /* Must consume all prior write tokens. All subsequent writes depend on it. */
268 inline smp_wmb(i)
269 {
270 atomic {
271 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
272 i = 0;
273 do
274 :: i < NR_READERS ->
275 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
276 i++
277 :: i >= NR_READERS -> break
278 od;
279 CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
280 i = 0;
281 do
282 :: i < SLAB_SIZE ->
283 CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
284 i++
285 :: i >= SLAB_SIZE -> break
286 od;
287 }
288 }
289
290 /* Synchronization point. Must consume all prior read and write tokens. All
291 * subsequent reads and writes depend on it. */
292 inline smp_mb(i)
293 {
294 atomic {
295 smp_wmb(i);
296 smp_rmb(i);
297 }
298 }
299
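/*
 * Net effect (added, informal): smp_mb(i) atomically flushes all of this
 * process's dirty cached lines to memory (smp_wmb) and then refreshes its
 * clean cached lines from memory (smp_rmb), so the process both publishes
 * its writes and observes previously published writes at this point.
 */
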
300 #ifdef REMOTE_BARRIERS
301
302 bit reader_barrier[NR_READERS];
303
304 /*
305 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
306 * because they would add non-existent core synchronization and would
307 * therefore create an incomplete model.
308 * Therefore, we model the read-side memory barriers by completely disabling the
309 * memory barriers and their dependencies from the read-side. One at a time
310 * (different verification runs), we make a different instruction listen for
311 * signals.
312 */
313
314 #define smp_mb_reader(i, j)
315
316 /*
317 * Service 0, 1 or many barrier requests.
318 */
319 inline smp_mb_recv(i, j)
320 {
321 do
322 :: (reader_barrier[get_readerid()] == 1) ->
323 /*
324 * We choose to ignore cycles where the writer busy-loops
325 * waiting for the reader while sending barrier requests, and
326 * the reader keeps servicing them without continuing execution.
327 */
328 progress_ignoring_mb1:
329 smp_mb(i);
330 reader_barrier[get_readerid()] = 0;
331 :: 1 ->
332 /*
333 * We choose to ignore the writer's non-progress caused by the
334 * reader ignoring the writer's mb() requests.
335 */
336 progress_ignoring_mb2:
337 break;
338 od;
339 }
340
341 #define PROGRESS_LABEL(progressid) progress_writer_progid_##progressid:
342
343 #define smp_mb_send(i, j, progressid) \
344 { \
345 smp_mb(i); \
346 i = 0; \
347 do \
348 :: i < NR_READERS -> \
349 reader_barrier[i] = 1; \
350 /* \
351 * Busy-looping waiting for reader barrier handling is of little\
352 * interest, given the reader has the ability to totally ignore \
353 * barrier requests. \
354 */ \
355 do \
356 :: (reader_barrier[i] == 1) -> \
357 PROGRESS_LABEL(progressid) \
358 skip; \
359 :: (reader_barrier[i] == 0) -> break; \
360 od; \
361 i++; \
362 :: i >= NR_READERS -> \
363 break \
364 od; \
365 smp_mb(i); \
366 }
367
368 #else
369
370 #define smp_mb_send(i, j, progressid) smp_mb(i)
371 #define smp_mb_reader(i, j) smp_mb(i)
372 #define smp_mb_recv(i, j)
373
374 #endif
375
376 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
377 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
378 /* Note: currently only one reader */
379 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
380 /* RCU data */
381 DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
382
383 /* RCU pointer */
384 #if (SLAB_SIZE == 2)
385 DECLARE_CACHED_VAR(bit, rcu_ptr);
386 bit ptr_read_first[NR_READERS];
387 bit ptr_read_second[NR_READERS];
388 #else
389 DECLARE_CACHED_VAR(byte, rcu_ptr);
390 byte ptr_read_first[NR_READERS];
391 byte ptr_read_second[NR_READERS];
392 #endif
393
394 bit data_read_first[NR_READERS];
395 bit data_read_second[NR_READERS];
396
397 bit init_done = 0;
398
399 inline wait_init_done()
400 {
401 do
402 :: init_done == 0 -> skip;
403 :: else -> break;
404 od;
405 }
406
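/*
 * Descriptive note (added): ooo_mem models out-of-order memory by randomly
 * flushing this process's dirty cache lines to memory and, on Alpha
 * (HAVE_OOO_CACHE_READ), randomly refreshing clean lines from memory;
 * other architectures instead perform an implied smp_rmb, since they do
 * not reorder dependent reads.
 */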
407 inline ooo_mem(i)
408 {
409 atomic {
410 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
411 i = 0;
412 do
413 :: i < NR_READERS ->
414 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
415 get_pid());
416 i++
417 :: i >= NR_READERS -> break
418 od;
419 RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
420 i = 0;
421 do
422 :: i < SLAB_SIZE ->
423 RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
424 i++
425 :: i >= SLAB_SIZE -> break
426 od;
427 #ifdef HAVE_OOO_CACHE_READ
428 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
429 i = 0;
430 do
431 :: i < NR_READERS ->
432 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
433 get_pid());
434 i++
435 :: i >= NR_READERS -> break
436 od;
437 RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
438 i = 0;
439 do
440 :: i < SLAB_SIZE ->
441 RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
442 i++
443 :: i >= SLAB_SIZE -> break
444 od;
445 #else
446 smp_rmb(i);
447 #endif /* HAVE_OOO_CACHE_READ */
448 }
449 }
450
451 /*
452 * Bit encoding, urcu_reader :
453 */
454
455 int _proc_urcu_reader;
456 #define proc_urcu_reader _proc_urcu_reader
457
458 /* Body of PROCEDURE_READ_LOCK */
459 #define READ_PROD_A_READ (1 << 0)
460 #define READ_PROD_B_IF_TRUE (1 << 1)
461 #define READ_PROD_B_IF_FALSE (1 << 2)
462 #define READ_PROD_C_IF_TRUE_READ (1 << 3)
463
464 #define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken) \
465 :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) -> \
466 ooo_mem(i); \
467 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
468 PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
469 :: CONSUME_TOKENS(proc_urcu_reader, \
470 READ_PROD_A_READ << base, /* RAW, pre-dominant */ \
471 (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
472 if \
473 :: (!(tmp & RCU_GP_CTR_NEST_MASK)) -> \
474 PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
475 :: else -> \
476 PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
477 fi; \
478 /* IF TRUE */ \
479 :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */ \
480 READ_PROD_C_IF_TRUE_READ << base) -> \
481 ooo_mem(i); \
482 tmp2 = READ_CACHED_VAR(urcu_gp_ctr); \
483 PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
484 :: CONSUME_TOKENS(proc_urcu_reader, \
485 (READ_PROD_B_IF_TRUE \
486 | READ_PROD_C_IF_TRUE_READ /* pre-dominant */ \
487 | READ_PROD_A_READ) << base, /* WAR */ \
488 producetoken) -> \
489 ooo_mem(i); \
490 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
491 PRODUCE_TOKENS(proc_urcu_reader, producetoken); \
492 /* IF_MERGE implies \
493 * post-dominance */ \
494 /* ELSE */ \
495 :: CONSUME_TOKENS(proc_urcu_reader, \
496 (READ_PROD_B_IF_FALSE /* pre-dominant */ \
497 | READ_PROD_A_READ) << base, /* WAR */ \
498 producetoken) -> \
499 ooo_mem(i); \
500 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], \
501 tmp + 1); \
502 PRODUCE_TOKENS(proc_urcu_reader, producetoken); \
503 /* IF_MERGE implies \
504 * post-dominance */ \
505 /* ENDIF */ \
506 skip
507
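/*
 * Token-flow sketch (added): instantiated with base = READ_LOCK_BASE (1),
 * the lock body above uses bits 1 to 4 (READ_PROD_A_READ << 1 through
 * READ_PROD_C_IF_TRUE_READ << 1), and both the IF and ELSE arms converge
 * by producing the caller-supplied producetoken (READ_LOCK_OUT, bit 5),
 * which is how post-dominance joins the two branches.
 */
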
508 /* Body of PROCEDURE_READ_UNLOCK */
509 #define READ_PROC_READ_UNLOCK (1 << 0)
510
511 #define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken) \
512 :: CONSUME_TOKENS(proc_urcu_reader, \
513 consumetoken, \
514 READ_PROC_READ_UNLOCK << base) -> \
515 ooo_mem(i); \
516 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
517 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
518 :: CONSUME_TOKENS(proc_urcu_reader, \
519 consumetoken \
520 | (READ_PROC_READ_UNLOCK << base), /* WAR */ \
521 producetoken) -> \
522 ooo_mem(i); \
523 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1); \
524 PRODUCE_TOKENS(proc_urcu_reader, producetoken); \
525 skip
526
527
528 #define READ_PROD_NONE (1 << 0)
529
530 /* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
531 #define READ_LOCK_BASE 1
532 #define READ_LOCK_OUT (1 << 5)
533
534 #define READ_PROC_FIRST_MB (1 << 6)
535
536 /* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
537 #define READ_LOCK_NESTED_BASE 7
538 #define READ_LOCK_NESTED_OUT (1 << 11)
539
540 #define READ_PROC_READ_GEN (1 << 12)
541 #define READ_PROC_ACCESS_GEN (1 << 13)
542
543 /* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
544 #define READ_UNLOCK_NESTED_BASE 14
545 #define READ_UNLOCK_NESTED_OUT (1 << 15)
546
547 #define READ_PROC_SECOND_MB (1 << 16)
548
549 /* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
550 #define READ_UNLOCK_BASE 17
551 #define READ_UNLOCK_OUT (1 << 18)
552
553 /* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
554 #define READ_LOCK_UNROLL_BASE 19
555 #define READ_LOCK_OUT_UNROLL (1 << 23)
556
557 #define READ_PROC_THIRD_MB (1 << 24)
558
559 #define READ_PROC_READ_GEN_UNROLL (1 << 25)
560 #define READ_PROC_ACCESS_GEN_UNROLL (1 << 26)
561
562 #define READ_PROC_FOURTH_MB (1 << 27)
563
564 /* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
565 #define READ_UNLOCK_UNROLL_BASE 28
566 #define READ_UNLOCK_OUT_UNROLL (1 << 29)
567
568
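/*
 * Resulting reader token layout (added summary of the defines above):
 * bit 0 = READ_PROD_NONE, bits 1-5 = first lock, bit 6 = first mb,
 * bits 7-11 = nested lock, bits 12-13 = pointer and data reads,
 * bits 14-15 = nested unlock, bit 16 = second mb, bits 17-18 = unlock,
 * bits 19-23 = unrolled lock, bit 24 = third mb, bits 25-26 = unrolled
 * reads, bit 27 = fourth mb, bits 28-29 = unrolled unlock; hence the
 * (1 << 30) - 1 clear mask below.
 */
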
569 /* Should not include branches */
570 #define READ_PROC_ALL_TOKENS (READ_PROD_NONE \
571 | READ_LOCK_OUT \
572 | READ_PROC_FIRST_MB \
573 | READ_LOCK_NESTED_OUT \
574 | READ_PROC_READ_GEN \
575 | READ_PROC_ACCESS_GEN \
576 | READ_UNLOCK_NESTED_OUT \
577 | READ_PROC_SECOND_MB \
578 | READ_UNLOCK_OUT \
579 | READ_LOCK_OUT_UNROLL \
580 | READ_PROC_THIRD_MB \
581 | READ_PROC_READ_GEN_UNROLL \
582 | READ_PROC_ACCESS_GEN_UNROLL \
583 | READ_PROC_FOURTH_MB \
584 | READ_UNLOCK_OUT_UNROLL)
585
586 /* Must clear all tokens, including branches */
587 #define READ_PROC_ALL_TOKENS_CLEAR ((1 << 30) - 1)
588
589 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
590 {
591 PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
592
593 #ifdef NO_MB
594 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
595 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
596 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
597 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
598 #endif
599
600 #ifdef REMOTE_BARRIERS
601 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
602 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
603 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
604 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
605 #endif
606
607 do
608 :: 1 ->
609
610 #ifdef REMOTE_BARRIERS
611 /*
612 * The signal-based memory barrier will only execute at points where the
613 * instructions executed so far appear in program order.
614 */
615 if
616 :: 1 ->
617 atomic {
618 if
619 :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
620 READ_LOCK_OUT | READ_LOCK_NESTED_OUT
621 | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
622 | READ_UNLOCK_OUT
623 | READ_LOCK_OUT_UNROLL
624 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
625 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
626 READ_LOCK_NESTED_OUT
627 | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
628 | READ_UNLOCK_OUT
629 | READ_LOCK_OUT_UNROLL
630 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
631 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
632 READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
633 | READ_UNLOCK_OUT
634 | READ_LOCK_OUT_UNROLL
635 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
636 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
637 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
638 READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
639 | READ_UNLOCK_OUT
640 | READ_LOCK_OUT_UNROLL
641 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
642 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
643 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
644 READ_UNLOCK_NESTED_OUT
645 | READ_UNLOCK_OUT
646 | READ_LOCK_OUT_UNROLL
647 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
648 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
649 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
650 | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
651 READ_UNLOCK_OUT
652 | READ_LOCK_OUT_UNROLL
653 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
654 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
655 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
656 | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
657 | READ_UNLOCK_OUT,
658 READ_LOCK_OUT_UNROLL
659 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
660 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
661 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
662 | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
663 | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
664 READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
665 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
666 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
667 | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
668 | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
669 | READ_PROC_READ_GEN_UNROLL,
670 READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
671 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
672 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
673 | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
674 | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
675 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
676 READ_UNLOCK_OUT_UNROLL)
677 || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
678 | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
679 | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
680 | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
681 0) ->
682 goto non_atomic3;
683 non_atomic3_end:
684 skip;
685 fi;
686 }
687 fi;
688
689 goto non_atomic3_skip;
690 non_atomic3:
691 smp_mb_recv(i, j);
692 goto non_atomic3_end;
693 non_atomic3_skip:
694
695 #endif /* REMOTE_BARRIERS */
696
697 atomic {
698 if
699 PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
700
701 :: CONSUME_TOKENS(proc_urcu_reader,
702 READ_LOCK_OUT, /* post-dominant */
703 READ_PROC_FIRST_MB) ->
704 smp_mb_reader(i, j);
705 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
706
707 PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
708 READ_LOCK_NESTED_OUT);
709
710 :: CONSUME_TOKENS(proc_urcu_reader,
711 READ_PROC_FIRST_MB, /* mb() orders reads */
712 READ_PROC_READ_GEN) ->
713 ooo_mem(i);
714 ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
715 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
716
717 :: CONSUME_TOKENS(proc_urcu_reader,
718 READ_PROC_FIRST_MB /* mb() orders reads */
719 | READ_PROC_READ_GEN,
720 READ_PROC_ACCESS_GEN) ->
721 /* smp_read_barrier_depends */
722 goto rmb1;
723 rmb1_end:
724 data_read_first[get_readerid()] =
725 READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
726 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
727
728
729 /* Note : we remove the nested memory barrier from the read unlock
730 * model, given it is not usually needed. The implementation keeps the barrier
731 * because the performance impact of adding a branch in the common case does
732 * not justify removing it.
733 */
734
735 PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
736 READ_PROC_FIRST_MB
737 | READ_LOCK_OUT
738 | READ_LOCK_NESTED_OUT,
739 READ_UNLOCK_NESTED_OUT);
740
741
742 :: CONSUME_TOKENS(proc_urcu_reader,
743 READ_PROC_ACCESS_GEN /* mb() orders reads */
744 | READ_PROC_READ_GEN /* mb() orders reads */
745 | READ_PROC_FIRST_MB /* mb() ordered */
746 | READ_LOCK_OUT /* post-dominant */
747 | READ_LOCK_NESTED_OUT /* post-dominant */
748 | READ_UNLOCK_NESTED_OUT,
749 READ_PROC_SECOND_MB) ->
750 smp_mb_reader(i, j);
751 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
752
753 PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
754 READ_PROC_SECOND_MB /* mb() orders reads */
755 | READ_PROC_FIRST_MB /* mb() orders reads */
756 | READ_LOCK_NESTED_OUT /* RAW */
757 | READ_LOCK_OUT /* RAW */
758 | READ_UNLOCK_NESTED_OUT, /* RAW */
759 READ_UNLOCK_OUT);
760
761 /* Unrolling loop : second consecutive lock */
762 /* reading urcu_active_readers, which have been written by
763 * READ_UNLOCK_OUT : RAW */
764 PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
765 READ_PROC_SECOND_MB /* mb() orders reads */
766 | READ_PROC_FIRST_MB, /* mb() orders reads */
767 READ_LOCK_NESTED_OUT /* RAW */
768 | READ_LOCK_OUT /* RAW */
769 | READ_UNLOCK_NESTED_OUT /* RAW */
770 | READ_UNLOCK_OUT, /* RAW */
771 READ_LOCK_OUT_UNROLL);
772
773
774 :: CONSUME_TOKENS(proc_urcu_reader,
775 READ_PROC_FIRST_MB /* mb() ordered */
776 | READ_PROC_SECOND_MB /* mb() ordered */
777 | READ_LOCK_OUT_UNROLL /* post-dominant */
778 | READ_LOCK_NESTED_OUT
779 | READ_LOCK_OUT
780 | READ_UNLOCK_NESTED_OUT
781 | READ_UNLOCK_OUT,
782 READ_PROC_THIRD_MB) ->
783 smp_mb_reader(i, j);
784 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
785
786 :: CONSUME_TOKENS(proc_urcu_reader,
787 READ_PROC_FIRST_MB /* mb() orders reads */
788 | READ_PROC_SECOND_MB /* mb() orders reads */
789 | READ_PROC_THIRD_MB, /* mb() orders reads */
790 READ_PROC_READ_GEN_UNROLL) ->
791 ooo_mem(i);
792 ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
793 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
794
795 :: CONSUME_TOKENS(proc_urcu_reader,
796 READ_PROC_READ_GEN_UNROLL
797 | READ_PROC_FIRST_MB /* mb() orders reads */
798 | READ_PROC_SECOND_MB /* mb() orders reads */
799 | READ_PROC_THIRD_MB, /* mb() orders reads */
800 READ_PROC_ACCESS_GEN_UNROLL) ->
801 /* smp_read_barrier_depends */
802 goto rmb2;
803 rmb2_end:
804 data_read_second[get_readerid()] =
805 READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
806 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
807
808 :: CONSUME_TOKENS(proc_urcu_reader,
809 READ_PROC_READ_GEN_UNROLL /* mb() orders reads */
810 | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
811 | READ_PROC_FIRST_MB /* mb() ordered */
812 | READ_PROC_SECOND_MB /* mb() ordered */
813 | READ_PROC_THIRD_MB /* mb() ordered */
814 | READ_LOCK_OUT_UNROLL /* post-dominant */
815 | READ_LOCK_NESTED_OUT
816 | READ_LOCK_OUT
817 | READ_UNLOCK_NESTED_OUT
818 | READ_UNLOCK_OUT,
819 READ_PROC_FOURTH_MB) ->
820 smp_mb_reader(i, j);
821 PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
822
823 PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
824 READ_PROC_FOURTH_MB /* mb() orders reads */
825 | READ_PROC_THIRD_MB /* mb() orders reads */
826 | READ_LOCK_OUT_UNROLL /* RAW */
827 | READ_PROC_SECOND_MB /* mb() orders reads */
828 | READ_PROC_FIRST_MB /* mb() orders reads */
829 | READ_LOCK_NESTED_OUT /* RAW */
830 | READ_LOCK_OUT /* RAW */
831 | READ_UNLOCK_NESTED_OUT, /* RAW */
832 READ_UNLOCK_OUT_UNROLL);
833 :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
834 CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
835 break;
836 fi;
837 }
838 od;
839 /*
840 * Dependency between consecutive loops :
841 * RAW dependency on
842 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1)
843 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
844 * between loops.
845 * _WHEN THE MB()s are in place_, they add full ordering of the
846 * generation pointer read wrt active reader count read, which ensures
847 * execution will not spill across loop execution.
848 * However, in the event mb()s are removed (execution using a signal
849 * handler to promote barrier() -> smp_mb()), nothing prevents one loop
850 * iteration from spilling its execution into the other's.
851 */
852 goto end;
853 rmb1:
854 #ifndef NO_RMB
855 smp_rmb(i);
856 #else
857 ooo_mem(i);
858 #endif
859 goto rmb1_end;
860 rmb2:
861 #ifndef NO_RMB
862 smp_rmb(i);
863 #else
864 ooo_mem(i);
865 #endif
866 goto rmb2_end;
867 end:
868 skip;
869 }
870
871
872
873 active proctype urcu_reader()
874 {
875 byte i, j, nest_i;
876 byte tmp, tmp2;
877
878 wait_init_done();
879
880 assert(get_pid() < NR_PROCS);
881
882 end_reader:
883 do
884 :: 1 ->
885 /*
886 * We do not test reader's progress here, because we are mainly
887 * interested in writer's progress. The reader never blocks
888 * anyway. We have to test reader and writer progress
889 * separately, otherwise we could think the writer is making
890 * progress when it is blocked by an always-progressing reader.
891 */
892 #ifdef READER_PROGRESS
893 progress_reader:
894 #endif
895 urcu_one_read(i, j, nest_i, tmp, tmp2);
896 od;
897 }
898
899 /* no name clash please */
900 #undef proc_urcu_reader
901
902
903 /* Model the RCU update process. */
904
905 /*
906 * Bit encoding, urcu_writer :
907 * Currently only supports one reader.
908 */
909
910 int _proc_urcu_writer;
911 #define proc_urcu_writer _proc_urcu_writer
912
913 #define WRITE_PROD_NONE (1 << 0)
914
915 #define WRITE_DATA (1 << 1)
916 #define WRITE_PROC_WMB (1 << 2)
917 #define WRITE_XCHG_PTR (1 << 3)
918
919 #define WRITE_PROC_FIRST_MB (1 << 4)
920
921 /* first flip */
922 #define WRITE_PROC_FIRST_READ_GP (1 << 5)
923 #define WRITE_PROC_FIRST_WRITE_GP (1 << 6)
924 #define WRITE_PROC_FIRST_WAIT (1 << 7)
925 #define WRITE_PROC_FIRST_WAIT_LOOP (1 << 8)
926
927 /* second flip */
928 #define WRITE_PROC_SECOND_READ_GP (1 << 9)
929 #define WRITE_PROC_SECOND_WRITE_GP (1 << 10)
930 #define WRITE_PROC_SECOND_WAIT (1 << 11)
931 #define WRITE_PROC_SECOND_WAIT_LOOP (1 << 12)
932
933 #define WRITE_PROC_SECOND_MB (1 << 13)
934
935 #define WRITE_FREE (1 << 14)
936
937 #define WRITE_PROC_ALL_TOKENS (WRITE_PROD_NONE \
938 | WRITE_DATA \
939 | WRITE_PROC_WMB \
940 | WRITE_XCHG_PTR \
941 | WRITE_PROC_FIRST_MB \
942 | WRITE_PROC_FIRST_READ_GP \
943 | WRITE_PROC_FIRST_WRITE_GP \
944 | WRITE_PROC_FIRST_WAIT \
945 | WRITE_PROC_SECOND_READ_GP \
946 | WRITE_PROC_SECOND_WRITE_GP \
947 | WRITE_PROC_SECOND_WAIT \
948 | WRITE_PROC_SECOND_MB \
949 | WRITE_FREE)
950
951 #define WRITE_PROC_ALL_TOKENS_CLEAR ((1 << 15) - 1)
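
/*
 * Resulting writer token layout (added summary of the defines above):
 * bit 0 = WRITE_PROD_NONE, bit 1 = data write, bit 2 = wmb, bit 3 =
 * pointer exchange, bit 4 = first mb, bits 5-8 = first flip and wait,
 * bits 9-12 = second flip and wait, bit 13 = second mb, bit 14 = free;
 * hence the (1 << 15) - 1 clear mask above.
 */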
952
953 /*
954 * Mutexes are implied around writer execution. A single writer at a time.
955 */
956 active proctype urcu_writer()
957 {
958 byte i, j;
959 byte tmp, tmp2, tmpa;
960 byte cur_data = 0, old_data, loop_nr = 0;
961 byte cur_gp_val = 0; /*
962 * Keep a local trace of the current parity so
963 * we don't add non-existent dependencies on the global
964 * GP update. Needed to test single flip case.
965 */
966
967 wait_init_done();
968
969 assert(get_pid() < NR_PROCS);
970
971 do
972 :: (loop_nr < 3) ->
973 #ifdef WRITER_PROGRESS
974 progress_writer1:
975 #endif
976 loop_nr = loop_nr + 1;
977
978 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
979
980 #ifdef NO_WMB
981 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
982 #endif
983
984 #ifdef NO_MB
985 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
986 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
987 #endif
988
989 #ifdef SINGLE_FLIP
990 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
991 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
992 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
993 /* For single flip, we need to know the current parity */
994 cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
995 #endif
996
997 do :: 1 ->
998 atomic {
999 if
1000
1001 :: CONSUME_TOKENS(proc_urcu_writer,
1002 WRITE_PROD_NONE,
1003 WRITE_DATA) ->
1004 ooo_mem(i);
1005 cur_data = (cur_data + 1) % SLAB_SIZE;
1006 WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
1007 PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
1008
1009
1010 :: CONSUME_TOKENS(proc_urcu_writer,
1011 WRITE_DATA,
1012 WRITE_PROC_WMB) ->
1013 smp_wmb(i);
1014 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
1015
1016 :: CONSUME_TOKENS(proc_urcu_writer,
1017 WRITE_PROC_WMB,
1018 WRITE_XCHG_PTR) ->
1019 /* rcu_xchg_pointer() */
1020 atomic {
1021 old_data = READ_CACHED_VAR(rcu_ptr);
1022 WRITE_CACHED_VAR(rcu_ptr, cur_data);
1023 }
1024 PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
1025
1026 :: CONSUME_TOKENS(proc_urcu_writer,
1027 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
1028 WRITE_PROC_FIRST_MB) ->
1029 goto smp_mb_send1;
1030 smp_mb_send1_end:
1031 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
1032
1033 /* first flip */
1034 :: CONSUME_TOKENS(proc_urcu_writer,
1035 WRITE_PROC_FIRST_MB,
1036 WRITE_PROC_FIRST_READ_GP) ->
1037 tmpa = READ_CACHED_VAR(urcu_gp_ctr);
1038 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
1039 :: CONSUME_TOKENS(proc_urcu_writer,
1040 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
1041 | WRITE_PROC_FIRST_READ_GP,
1042 WRITE_PROC_FIRST_WRITE_GP) ->
1043 ooo_mem(i);
1044 WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
1045 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
1046
1047 :: CONSUME_TOKENS(proc_urcu_writer,
1048 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
1049 WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
1050 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
1051 ooo_mem(i);
1052 //smp_mb(i); /* TEST */
1053 /* ONLY WAITING FOR READER 0 */
1054 tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
1055 #ifndef SINGLE_FLIP
1056 /* In normal execution, we always start by
1057 * waiting for the even parity.
1058 */
1059 cur_gp_val = RCU_GP_CTR_BIT;
1060 #endif
1061 if
1062 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
1063 && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
1064 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
1065 :: else ->
1066 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
1067 fi;
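			/*
			 * Numeric sketch (added): with cur_gp_val = 0x80, a
			 * reader snapshot tmp2 = 0x01 (nested, old parity)
			 * keeps the writer waiting, while tmp2 = 0x81 (nested,
			 * current parity) or tmp2 = 0x00 (not nested) lets it
			 * proceed.
			 */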
1068
1069 :: CONSUME_TOKENS(proc_urcu_writer,
1070 //WRITE_PROC_FIRST_WRITE_GP /* TEST ADDING SYNC CORE */
1071 WRITE_PROC_FIRST_WRITE_GP
1072 | WRITE_PROC_FIRST_READ_GP
1073 | WRITE_PROC_FIRST_WAIT_LOOP
1074 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
1075 | WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
1076 0) ->
1077 #ifndef GEN_ERROR_WRITER_PROGRESS
1078 goto smp_mb_send2;
1079 smp_mb_send2_end:
1080 /* The memory barrier will invalidate the
1081 * second read, done as a prefetch. Note that all
1082 * instructions with side-effects depending on
1083 * WRITE_PROC_SECOND_READ_GP should also depend on
1084 * completion of this busy-waiting loop. */
1085 CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
1086 #else
1087 ooo_mem(i);
1088 #endif
1089 /* This instruction loops to WRITE_PROC_FIRST_WAIT */
1090 CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
1091
1092 /* second flip */
1093 :: CONSUME_TOKENS(proc_urcu_writer,
1094 //WRITE_PROC_FIRST_WAIT | //test /* no dependency. Could pre-fetch, no side-effect. */
1095 WRITE_PROC_FIRST_WRITE_GP
1096 | WRITE_PROC_FIRST_READ_GP
1097 | WRITE_PROC_FIRST_MB,
1098 WRITE_PROC_SECOND_READ_GP) ->
1099 ooo_mem(i);
1100 //smp_mb(i); /* TEST */
1101 tmpa = READ_CACHED_VAR(urcu_gp_ctr);
1102 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
1103 :: CONSUME_TOKENS(proc_urcu_writer,
1104 WRITE_PROC_FIRST_WAIT /* dependency on first wait, because this
1105 * instruction has globally observable
1106 * side-effects.
1107 */
1108 | WRITE_PROC_FIRST_MB
1109 | WRITE_PROC_WMB
1110 | WRITE_PROC_FIRST_READ_GP
1111 | WRITE_PROC_FIRST_WRITE_GP
1112 | WRITE_PROC_SECOND_READ_GP,
1113 WRITE_PROC_SECOND_WRITE_GP) ->
1114 ooo_mem(i);
1115 WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
1116 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
1117
1118 :: CONSUME_TOKENS(proc_urcu_writer,
1119 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
1120 WRITE_PROC_FIRST_WAIT
1121 | WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
1122 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
1123 ooo_mem(i);
1124 //smp_mb(i); /* TEST */
1125 /* ONLY WAITING FOR READER 0 */
1126 tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
1127 if
1128 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
1129 && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
1130 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
1131 :: else ->
1132 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
1133 fi;
1134
1135 :: CONSUME_TOKENS(proc_urcu_writer,
1136 //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
1137 WRITE_PROC_SECOND_WRITE_GP
1138 | WRITE_PROC_FIRST_WRITE_GP
1139 | WRITE_PROC_SECOND_READ_GP
1140 | WRITE_PROC_FIRST_READ_GP
1141 | WRITE_PROC_SECOND_WAIT_LOOP
1142 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
1143 | WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
1144 0) ->
1145 #ifndef GEN_ERROR_WRITER_PROGRESS
1146 goto smp_mb_send3;
1147 smp_mb_send3_end:
1148 #else
1149 ooo_mem(i);
1150 #endif
1151 /* This instruction loops to WRITE_PROC_SECOND_WAIT */
1152 CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
1153
1154
1155 :: CONSUME_TOKENS(proc_urcu_writer,
1156 WRITE_PROC_FIRST_WAIT
1157 | WRITE_PROC_SECOND_WAIT
1158 | WRITE_PROC_FIRST_READ_GP
1159 | WRITE_PROC_SECOND_READ_GP
1160 | WRITE_PROC_FIRST_WRITE_GP
1161 | WRITE_PROC_SECOND_WRITE_GP
1162 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
1163 | WRITE_PROC_FIRST_MB,
1164 WRITE_PROC_SECOND_MB) ->
1165 goto smp_mb_send4;
1166 smp_mb_send4_end:
1167 PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
1168
1169 :: CONSUME_TOKENS(proc_urcu_writer,
1170 WRITE_XCHG_PTR
1171 | WRITE_PROC_FIRST_WAIT
1172 | WRITE_PROC_SECOND_WAIT
1173 | WRITE_PROC_WMB /* No dependency on
1174 * WRITE_DATA because we
1175 * write to a
1176 * different location. */
1177 | WRITE_PROC_SECOND_MB
1178 | WRITE_PROC_FIRST_MB,
1179 WRITE_FREE) ->
1180 WRITE_CACHED_VAR(rcu_data[old_data], POISON);
1181 PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
1182
1183 :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
1184 CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
1185 break;
1186 fi;
1187 }
1188 od;
1189 /*
1190 * Note : the Promela model adds implicit serialization of the
1191 * WRITE_FREE instruction. Normally, it would be permitted to
1192 * spill into the next loop execution. Given that our validation
1193 * checks that the data entry read is poisoned, it is OK if
1194 * we do not check "late arriving" memory poisoning.
1195 */
1196 :: else -> break;
1197 od;
1198 /*
1199 * Given that the reader loops infinitely, let the writer also busy-loop
1200 * with progress here so that, with weak fairness, we can test the
1201 * writer's progress.
1202 */
1203 end_writer:
1204 do
1205 :: 1 ->
1206 #ifdef WRITER_PROGRESS
1207 progress_writer2:
1208 #endif
1209 #ifdef READER_PROGRESS
1210 /*
1211 * Make sure we don't block the reader's progress.
1212 */
1213 smp_mb_send(i, j, 5);
1214 #endif
1215 skip;
1216 od;
1217
1218 /* Non-atomic parts of the loop */
1219 goto end;
1220 smp_mb_send1:
1221 smp_mb_send(i, j, 1);
1222 goto smp_mb_send1_end;
1223 #ifndef GEN_ERROR_WRITER_PROGRESS
1224 smp_mb_send2:
1225 smp_mb_send(i, j, 2);
1226 goto smp_mb_send2_end;
1227 smp_mb_send3:
1228 smp_mb_send(i, j, 3);
1229 goto smp_mb_send3_end;
1230 #endif
1231 smp_mb_send4:
1232 smp_mb_send(i, j, 4);
1233 goto smp_mb_send4_end;
1234 end:
1235 skip;
1236 }
1237
1238 /* no name clash please */
1239 #undef proc_urcu_writer
1240
1241
1242 /* Leave after the readers and writers so the pid count is ok. */
1243 init {
1244 byte i, j;
1245
1246 atomic {
1247 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
1248 INIT_CACHED_VAR(rcu_ptr, 0, j);
1249
1250 i = 0;
1251 do
1252 :: i < NR_READERS ->
1253 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
1254 ptr_read_first[i] = 1;
1255 ptr_read_second[i] = 1;
1256 data_read_first[i] = WINE;
1257 data_read_second[i] = WINE;
1258 i++;
1259 :: i >= NR_READERS -> break
1260 od;
1261 INIT_CACHED_VAR(rcu_data[0], WINE, j);
1262 i = 1;
1263 do
1264 :: i < SLAB_SIZE ->
1265 INIT_CACHED_VAR(rcu_data[i], POISON, j);
1266 i++
1267 :: i >= SLAB_SIZE -> break
1268 od;
1269
1270 init_done = 1;
1271 }
1272 }