/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Uses one-hot bit encoding per variable to
 * save state space. The bits are used as triggers to execute the instructions
 * having those variables as input. Bits are left active to inhibit further
 * execution of the instructions that produced them. This scheme makes
 * instruction disabling and dependency fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)				\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)				\
	state = state & ~(bits)

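/*
 * Illustration only (not part of the verified model): the token word "flow"
 * and the token bits TOK_START, TOK_A and TOK_B below are hypothetical names.
 * A two-instruction dependency chain would typically be written as:
 *
 *	PRODUCE_TOKENS(flow, TOK_START);
 *	...
 *	:: CONSUME_TOKENS(flow, TOK_START, TOK_A) ->
 *		... execute instruction A ...
 *		PRODUCE_TOKENS(flow, TOK_A);
 *	:: CONSUME_TOKENS(flow, TOK_A, TOK_B) ->	(B reads A's output: RAW)
 *		... execute instruction B ...
 *		PRODUCE_TOKENS(flow, TOK_B);
 *
 * Each guard fires at most once because the token it produces also appears in
 * its own "notbits" argument; CLEAR_TOKENS resets the word so the sequence can
 * be re-enabled for the next iteration.
 */
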
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same
 * variable in the OOO memory model, renaming may be impossible, and therefore
 * this causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this dependency is not needed, but it can be required when
 * writing multiple times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction
 * evaluating in a way that allows its execution. E.g. : branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to compute dominance: Lengauer-Tarjan (used in GCC).
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with the arcs of the data flow inverted, and
 * input vs output exchanged. Therefore, i post-dominating j ensures that every
 * path passing through j will pass through i before reaching the output.
 *
 * Other considerations
 *
 * Note about the "volatile" keyword dependency : the compiler will order
 * volatile accesses so they appear in the right order on a given CPU. They can
 * still be reordered by the CPU instruction scheduling. This therefore cannot
 * be considered a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */
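
/*
 * Quick illustration of the dependency types above (informational note only;
 * the variables a, b, c, x and y are hypothetical and not part of the model):
 *
 *	a = READ_CACHED_VAR(x);		S1
 *	b = a + 1;			S2: RAW on a (S2 reads what S1 wrote)
 *	a = READ_CACHED_VAR(y);		S3: WAR on a (S3 overwrites what S2 read)
 *	a = b;				S4: WAW on a with S3 (two writes to a)
 *	if
 *	:: b -> c = 1;			S5: control-dependent on S2's result
 *	:: else -> skip;
 *	fi;
 */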

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework behave as if
 * there were a core-synchronizing instruction between loops. To see the effect
 * of loop unrolling, the loops must be unrolled manually. Note that if loops
 * end or start with a core-synchronizing instruction, the model is
 * appropriate. Nested calls are not supported.
 */

/*
 * Each process has its own copy of the data in its cache. Caches are updated
 * at random points in time. smp_wmb forces cache write-back, smp_rmb forces
 * cache update (read), and smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bit cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else ->			\
		skip			\
	fi;

/*
 * Nondeterministically write a dirty cache entry back to memory (or refresh a
 * clean one from memory), or do nothing. This models cache updates happening
 * at arbitrary points in time.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

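/*
 * Example (illustration only; "foo" is a hypothetical variable name):
 * DECLARE_CACHED_VAR(byte, foo) expands to the backing store plus per-process
 * cache state:
 *
 *	byte mem_foo;
 *	per_proc_byte cached_foo;
 *	per_proc_bit cache_dirty_foo;
 *
 * A process then only ever touches its own cache entry:
 *
 *	WRITE_CACHED_VAR(foo, 1);	writes cached_foo.val[get_pid()]
 *					and marks the entry dirty
 *	tmp = READ_CACHED_VAR(foo);	reads cached_foo.val[get_pid()]
 *
 * The dirty entry reaches mem_foo, and hence becomes visible to the other
 * process, only through CACHE_WRITE_TO_MEM, issued either by the barrier
 * primitives below or at a random point by ooo_mem().
 */
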
/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

/*
 * Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it.
 */
inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}


#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
 * because they would add core synchronization that does not exist, and would
 * therefore make the model incomplete.
 * Therefore, we model the read-side memory barriers by completely removing
 * the memory barriers and their dependencies from the read-side. One
 * instruction at a time (in different verification runs), we make a different
 * instruction listen for barrier signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		smp_mb(i, j);
		reader_barrier[get_readerid()] = 0;
	:: 1 -> skip;
	:: 1 -> break;
	od;
}

inline smp_mb_send(i, j)
{
	smp_mb(i, j);
	i = 0;
	do
	:: i < NR_READERS ->
		reader_barrier[i] = 1;
		do
		:: (reader_barrier[i] == 1) -> skip;
		:: (reader_barrier[i] == 0) -> break;
		od;
		i++;
	:: i >= NR_READERS ->
		break
	od;
	smp_mb(i, j);
}

#else

#define smp_mb_send	smp_mb
#define smp_mb_reader	smp_mb
#define smp_mb_recv(i, j)

#endif
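
/*
 * Sketch of the REMOTE_BARRIERS protocol modeled above (explanatory note):
 * the writer's smp_mb_send() executes a full barrier, raises
 * reader_barrier[i] for each reader and busy-waits until the reader clears
 * it; smp_mb_recv(), polled from one chosen point in the reader, executes the
 * barrier on the reader's behalf and acknowledges by clearing the flag. This
 * mimics the signal-based barrier promotion used by the userspace RCU
 * implementation, where the reader normally issues only a compiler barrier
 * and a signal handler upgrades it to a real smp_mb() on demand.
 */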

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: one entry per reader (NR_READERS) */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

bit sighand_exec = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
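
/*
 * Usage note (illustration only): ooo_mem() is inserted between modeled
 * instructions to let the "memory system" reorder them, e.g.:
 *
 *	ooo_mem(i);
 *	tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
 *	ooo_mem(i);
 *	WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp + 1);
 *
 * Because each ooo_mem() may or may not flush or refresh any cached variable,
 * Spin explores every interleaving of cache write-backs and updates, which is
 * how out-of-order memory effects are produced without modeling an explicit
 * store buffer.
 */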

/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) -> \
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */ \
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base, \
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ENDIF */							\
	skip

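/*
 * Sketch (not part of the model): with base = READ_LOCK_BASE and
 * producetoken = READ_LOCK_OUT, the macro above models rcu_read_lock(),
 * roughly equivalent to this C-like pseudocode:
 *
 *	tmp = urcu_active_readers[get_readerid()];
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		urcu_active_readers[get_readerid()] = urcu_gp_ctr;
 *	else
 *		urcu_active_readers[get_readerid()] = tmp + 1;
 *
 * but with each statement guarded by tokens so that Spin may execute the
 * statements in any order allowed by their data and control dependencies,
 * while ooo_mem() lets the memory system reorder the accesses further.
 */
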
/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */ \
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)

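/*
 * Layout recap (informational note): bits 0 through 29 of proc_urcu_reader
 * are used, so READ_PROC_ALL_TOKENS_CLEAR == (1 << 30) - 1 wipes every token,
 * including the per-instruction tokens produced inside PROCEDURE_READ_LOCK /
 * PROCEDURE_READ_UNLOCK (READ_PROD_A_READ, the IF/ELSE branch tokens, etc.,
 * shifted by each base). The completion test only waits for the "output"
 * tokens listed in READ_PROC_ALL_TOKENS; the clear must also erase whichever
 * branch tokens were produced along the way before the next read-side
 * critical section starts.
 */
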
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * Signal-based memory barrier will only execute when the
		 * execution order appears in program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		:: 1 -> skip;
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				read_generation[get_readerid()] =
					READ_CACHED_VAR(generation_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				ooo_mem(i);
				goto non_atomic;
non_atomic_end:
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/* Note : we remove the nested memory barrier from the read unlock
			 * model, given it is not usually needed. The implementation has the barrier
			 * because the performance impact added by a branch in the common case does not
			 * justify it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				read_generation[get_readerid()] =
					READ_CACHED_VAR(generation_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				ooo_mem(i);
				goto non_atomic2;
non_atomic2_end:
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt active reader count read, which ensures
	 * execution will not spill across loop execution.
	 * However, in the event mb()s are removed (execution using signal
	 * handler to promote barrier() -> smp_mb()), nothing prevents one loop
	 * from spilling its execution onto another loop's execution.
	 */
	goto end;
non_atomic:
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;
	goto non_atomic_end;
non_atomic2:
	data_access[get_readerid()] = 1;
	data_access[get_readerid()] = 0;
	goto non_atomic2_end;
end:
	skip;
}
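
/*
 * Explanatory note (not from the original file): the non_atomic / non_atomic2
 * targets pulse data_access[get_readerid()] to 1 and back to 0 outside the
 * atomic token dispatcher. This models the reader actually dereferencing the
 * data it obtained for read_generation[]; the correctness properties supplied
 * by the "DEFINES" include are expected to relate this access window to
 * last_free_gen and free_done, which the writer updates in its free-up step.
 */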


active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test reader's progress here, because we are mainly
		 * interested in writer's progress. The reader never blocks
		 * anyway. We have to test reader and writer progress
		 * separately, otherwise we could think the writer is making
		 * progress when it is in fact blocked by an always-progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}
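
/*
 * Verification note (sketch, not from the original file): the progress_reader
 * and progress_writer* labels only exist when READER_PROGRESS or
 * WRITER_PROGRESS is defined. Spin treats labels whose name starts with
 * "progress" as progress states, so a typical liveness run would generate the
 * verifier with the wanted define (e.g. spin -a -DWRITER_PROGRESS urcu.spin),
 * compile pan.c with -DNP, and run the non-progress-cycle search (pan -l),
 * usually together with weak fairness (-f). Exact options depend on the Spin
 * version and on the property being checked.
 */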

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_PROC_FIRST_MB		(1 << 1)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 2)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 3)
#define WRITE_PROC_FIRST_WAIT		(1 << 4)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 5)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 6)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 7)
#define WRITE_PROC_SECOND_WAIT		(1 << 8)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 9)

#define WRITE_PROC_SECOND_MB		(1 << 10)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 11) - 1)

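/*
 * Sketch (not part of the model): the writer tokens above encode the
 * two-phase grace period of synchronize_rcu(), roughly equivalent to:
 *
 *	smp_mb();				first MB
 *	tmpa = urcu_gp_ctr;			first flip: read GP counter
 *	urcu_gp_ctr = tmpa ^ RCU_GP_CTR_BIT;	and toggle its parity bit
 *	wait for reader 0 to be quiescent	FIRST_WAIT / FIRST_WAIT_LOOP
 *	repeat the flip and the wait		SECOND_* tokens
 *	smp_mb();				second MB
 *
 * With SINGLE_FLIP defined, the SECOND_READ_GP / SECOND_WRITE_GP / SECOND_WAIT
 * tokens are pre-produced, which inhibits those instructions and effectively
 * removes the second flip so the single-flip variant can be verified.
 */
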
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte old_gen;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we don't add non-existing dependencies on the
				 * global GP update. Needed to test single flip
				 * case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
#endif

		do :: 1 ->
		atomic {
		if
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/* Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK) */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;

		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here so, with weak fairness, we can test the
	 * writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer
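
/*
 * Summary of the preprocessor switches used by this model (informational
 * note): NO_MB removes all memory barriers, REMOTE_BARRIERS replaces the
 * read-side barriers with the signal-based smp_mb_send/smp_mb_recv protocol,
 * SINGLE_FLIP disables the writer's second counter flip,
 * GEN_ERROR_WRITER_PROGRESS drops the barriers inside the writer's wait
 * loops, and READER_PROGRESS / WRITER_PROGRESS insert Spin progress labels.
 * These switches are expected to be selected at verifier-generation time
 * (together with the DEFINES include) rather than edited here.
 */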

/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++;
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}
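
/*
 * Verification sketch (not part of the original model; exact commands depend
 * on the local setup and on how the DEFINES file is spliced in):
 *
 *	spin -a urcu.spin	generate the verifier source pan.c
 *	cc -O2 -o pan pan.c	compile it (add -DNP for non-progress
 *				cycle detection)
 *	./pan			exhaustive safety verification
 *	./pan -l -f		non-progress cycle search with weak fairness,
 *				for the *_PROGRESS variants
 *
 * Model-specific options such as -DNO_MB, -DSINGLE_FLIP or -DREMOTE_BARRIERS
 * would be passed to the "spin -a" step.
 */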