/* formal-model/urcu-controldataflow-intel-no-ipi/urcu_progress_writer_error.spin.input */
#define WRITER_PROGRESS
#define GEN_ERROR_WRITER_PROGRESS

// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison	(data_read_first[0] == POISON || data_read_second[0] == POISON)

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

//disabled
//#define REMOTE_BARRIERS

//#define ARCH_ALPHA
#define ARCH_INTEL
//#define ARCH_POWERPC
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Using one-hot bit encoding per variable to
 * save state space. Used as triggers to execute the instructions having those
 * variables as input. Leaving bits active inhibits instruction execution.
 * This scheme makes instruction disabling and dependency fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)	\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)	\
	state = state & ~(bits)

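/*
 * Illustrative sketch (not part of the model): a hypothetical two-step
 * sequence in which step B has a RAW dependency on step A. Each step
 * fires at most once, because it inhibits itself through its own token:
 *
 *	#define STEP_A	(1 << 0)
 *	#define STEP_B	(1 << 1)
 *
 *	if
 *	:: CONSUME_TOKENS(state, 0, STEP_A) ->		// A: no input tokens
 *		... execute A ...
 *		PRODUCE_TOKENS(state, STEP_A);
 *	:: CONSUME_TOKENS(state, STEP_A, STEP_B) ->	// B: waits for A's token
 *		... execute B ...
 *		PRODUCE_TOKENS(state, STEP_B);
 *	fi;
 */
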
/*
 * Types of dependency:
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same variable
 * in the OOO memory model, renaming may be impossible, and therefore this
 * causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction evaluating
 * in a way that allows its execution. E.g.: branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Other classic algorithm to calculate dominance: Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
 * output exchanged. Therefore, i post-dominating j ensures that every path
 * passing by j will pass by i before reaching the output.
 *
 * Prefetch and speculative execution
 *
 * If an instruction depends on the result of a previous branch, but it does not
 * have side-effects, it can be executed before the branch result is known.
 * However, it must be restarted if a core-synchronizing instruction is issued.
 * Note that instructions which depend on the speculative instruction result
 * but that have side-effects must depend on the branch completion in addition
 * to the speculatively executed instruction.
 *
 * Other considerations
 *
 * Note about the "volatile" keyword dependency: the compiler will order volatile
 * accesses so they appear in the right order on a given CPU. They can be
 * reordered by the CPU instruction scheduling. This therefore cannot be
 * considered a dependency.
 *
 * References:
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */
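
/*
 * Illustrative sketch (not part of the model): minimal instances of each
 * data dependency type, using hypothetical variables a, b and x:
 *
 *	RAW:	a = x;	b = a;	(second statement reads the first's result)
 *	WAR:	a = x;	x = 1;	(x must be read before being overwritten)
 *	WAW:	x = 1;	x = 2;	(final value depends on the write ordering)
 */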

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as if
 * there was a core synchronizing instruction between loops. To see the effect
 * of loop unrolling, manually unrolling loops is required. Note that if loops
 * end or start with a core synchronizing instruction, the model is appropriate.
 * Nested calls are not supported.
 */

/*
 * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
 * PowerPC, ARM) ensure that dependent reads won't be reordered. Cf.
 * http://www.linuxjournal.com/article/8212
 */
#ifdef ARCH_ALPHA
#define HAVE_OOO_CACHE_READ
#endif

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
 * smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else ->			\
		skip			\
	fi;

/*
 * May update other caches if cache is dirty, or not.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;
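
/*
 * Illustrative sketch (not part of the model): lifecycle of a cached
 * variable, using a hypothetical variable "foo":
 *
 *	DECLARE_CACHED_VAR(byte, foo);	// mem_foo, cached_foo, cache_dirty_foo
 *	WRITE_CACHED_VAR(foo, 1);	// update this proc's cache, mark dirty
 *	tmp = READ_CACHED_VAR(foo);	// always reads this proc's cache
 *	CACHE_WRITE_TO_MEM(foo, get_pid());	// flush dirty line to mem_foo
 *	CACHE_READ_FROM_MEM(foo, get_pid());	// refetch from mem_foo if clean
 */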

/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it. */
inline smp_mb(i)
{
	atomic {
		smp_wmb(i);
		smp_rmb(i);
	}
}

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies from the read-side. One at a time
 * (in different verification runs), we make a different instruction listen for
 * signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		/*
		 * We choose to ignore cycles caused by the writer
		 * busy-looping, waiting for the reader, sending barrier
		 * requests, while the reader always services them without
		 * continuing execution.
		 */
progress_ignoring_mb1:
		smp_mb(i);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests.
		 */
progress_ignoring_mb2:
		break;
	od;
}

#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is of little\
		 * interest, given the reader has the ability to totally ignore \
		 * barrier requests.					\
		 */							\
		do							\
		:: (reader_barrier[i] == 1) ->				\
PROGRESS_LABEL(progressid)						\
			skip;						\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i);							\
}

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i)
#define smp_mb_reader(i, j)	smp_mb(i)
#define smp_mb_recv(i, j)

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

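/*
 * ooo_mem models out-of-order memory: it nondeterministically flushes dirty
 * cache lines to memory and, when HAVE_OOO_CACHE_READ is defined (Alpha),
 * nondeterministically refetches clean lines. Otherwise it ends with an
 * smp_rmb() so cache reads stay ordered.
 */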
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#ifdef HAVE_OOO_CACHE_READ
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#else
		smp_rmb(i);
#endif /* HAVE_OOO_CACHE_READ */
	}
}

/*
 * Bit encoding, urcu_reader:
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) -> \
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */ \
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken,	/* prefetch */ \
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_TRUE				\
			  | READ_PROD_C_IF_TRUE_READ	/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ENDIF */							\
	skip

/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */ \
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)
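
/*
 * Worked example of the bit layout above: instantiating PROCEDURE_READ_LOCK
 * with base = READ_LOCK_BASE (1) places its internal tokens
 * (READ_PROD_A_READ << 1 ... READ_PROD_C_IF_TRUE_READ << 1) in bits 1 to 4,
 * and its output token READ_LOCK_OUT (1 << 5) in bit 5, hence the
 * "1 to 5" ranges noted in the comments above.
 */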

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * Signal-based memory barrier will only execute when the
		 * execution order appears in program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/* Note : we remove the nested memory barrier from the read unlock
			 * model, given it is not usually needed. The implementation has the barrier
			 * because the performance impact added by a branch in the common case does not
			 * justify it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_PROC_SECOND_MB		/* mb() orders reads */
					    | READ_PROC_FIRST_MB,	/* mb() orders reads */
					    READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT	/* RAW */
					    | READ_UNLOCK_OUT,		/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt active reader count read, which ensures
	 * execution will not spill across loop execution.
	 * However, in the event mb()s are removed (execution using signal
	 * handler to promote barrier() -> smp_mb()), nothing prevents one loop
	 * from spilling its execution into the other loop's execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}



active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader never
		 * blocks anyway. We have to test for reader/writer progress
		 * separately; otherwise, we could think the writer is making
		 * progress when it is in fact blocked by an always-progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer:
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)
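
/*
 * Note that, as on the reader side, WRITE_PROC_ALL_TOKENS omits the branch
 * tokens (WRITE_PROC_FIRST_WAIT_LOOP and WRITE_PROC_SECOND_WAIT_LOOP, bits 8
 * and 12), while the clear mask covers every bit, branches included.
 */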

/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we don't add non-existing dependencies on the global
				 * GP update. Needed to test single flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA,
				  WRITE_PROC_WMB) ->
			smp_wmb(i);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_WMB,
				  WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP |	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			//smp_mb(i);	/* TEST */
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/* In normal execution, we are always starting by
			 * waiting for the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
			/* The memory barrier will invalidate the
			 * second read done as prefetching. Note that all
			 * instructions with side-effects depending on
			 * WRITE_PROC_SECOND_READ_GP should also depend on
			 * completion of this busy-waiting loop. */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WAIT |	//test /* no dependency. Could pre-fetch, no side-effect. */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			//smp_mb(i);	/* TEST */
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/* dependency on first wait, because this
								 * instruction has globally observable
								 * side-effects.
								 */
				  | WRITE_PROC_FIRST_MB
				  | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP |	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			//smp_mb(i);	/* TEST */
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP |	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a
							 * different location. */
				  | WRITE_PROC_SECOND_MB
				  | WRITE_PROC_FIRST_MB,
				  WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note: the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill on the next loop execution. Given that the validation
		 * checks whether the data entry read is poisoned, it is OK if
		 * we do not check for "late arriving" memory poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here so, with weak fairness, we can test the
	 * writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
#ifdef READER_PROGRESS
		/*
		 * Make sure we don't block the reader's progress.
		 */
		smp_mb_send(i, j, 5);
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}