/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

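/*
 * Sketch of a typical verification run (the -D values here are only
 * illustrative; the Makefile in this directory drives the real runs, and
 * the RCU_GP_CTR_* constants must also be supplied, see the note below):
 *
 *      spin -a -DREADER_NEST_LEVEL=1 urcu.spin.bkp5
 *      gcc -o pan pan.c
 *      ./pan -a -f
 */
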
/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

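/*
 * Note: RCU_GP_CTR_BIT, RCU_GP_CTR_NEST_MASK and READER_NEST_LEVEL are
 * used below but not defined in this file; they are expected to be
 * supplied externally (e.g. through spin -D command-line defines).
 */
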
#define get_pid() (_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read respectively);
 * smp_mb forces both.
 */

#define DECLARE_CACHED_VAR(type, x, v)          \
        type mem_##x = v;                       \
        type cached_##x[NR_PROCS] = v;          \
        bit cache_dirty_##x[NR_PROCS] = 0

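/*
 * For example, DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1) expands to:
 *
 *      byte mem_urcu_gp_ctr = 1;
 *      byte cached_urcu_gp_ctr[NR_PROCS] = 1;
 *      bit cache_dirty_urcu_gp_ctr[NR_PROCS] = 0;
 *
 * i.e. one memory copy of the variable, plus a per-process cached copy
 * and a per-process dirty bit.
 */
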
#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])

#define READ_CACHED_VAR(x) (cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v)                  \
        atomic {                                \
                cached_##x[get_pid()] = v;      \
                cache_dirty_##x[get_pid()] = 1; \
        }

#define CACHE_WRITE_TO_MEM(x, id)               \
        if                                      \
        :: IS_CACHE_DIRTY(x, id) ->             \
                mem_##x = cached_##x[id];       \
                cache_dirty_##x[id] = 0;        \
        :: else ->                              \
                skip                            \
        fi;

#define CACHE_READ_FROM_MEM(x, id)              \
        if                                      \
        :: !IS_CACHE_DIRTY(x, id) ->            \
                cached_##x[id] = mem_##x;       \
        :: else ->                              \
                skip                            \
        fi;

/*
 * May or may not update memory from the cache (or the cache from memory),
 * modeling out-of-order commit of writes and stale reads.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)        \
        if                                      \
        :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
        :: 1 -> skip                            \
        fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)       \
        if                                      \
        :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
        :: 1 -> skip                            \
        fi;

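/*
 * In Promela, when several guards of an "if" are simultaneously
 * executable (here both ":: 1" branches always are), the choice between
 * them is nondeterministic and the verifier explores every alternative.
 * Each RANDOM_CACHE_* use therefore covers both the "flushed" and
 * "not flushed" orderings.
 */
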
/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to a smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
                CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
                CACHE_READ_FROM_MEM(generation_ptr, i);
        }
}

inline smp_wmb_pid(i)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
                CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
                CACHE_WRITE_TO_MEM(generation_ptr, i);
        }
}

inline smp_mb_pid(i)
{
        atomic {
#ifndef NO_WMB
                smp_wmb_pid(i);
#endif
#ifndef NO_RMB
                smp_rmb_pid(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(i);
#endif
#endif
        }
}

/*
 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a
 * signal or IPI to have all readers execute a smp_mb.
 * We do not model the whole rendezvous between readers and writers here;
 * we just let the writer update each reader's caches remotely.
 */
inline smp_mb(i)
{
        if
        :: get_pid() >= NR_READERS ->
                smp_mb_pid(get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        smp_mb_pid(i);
                        i++;
                :: i >= NR_READERS -> break
                od;
                smp_mb_pid(get_pid());
        :: else -> skip;
        fi;
}

#else

inline smp_rmb(i)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
                CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

inline smp_wmb(i)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
                CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
        }
}

inline smp_mb(i)
{
        atomic {
#ifndef NO_WMB
                smp_wmb(i);
#endif
#ifndef NO_RMB
                smp_rmb(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(i);
#endif
#endif
        }
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem. */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note: currently only one reader. */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* Pointer generation. */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation = 1;
bit data_access = 0;

bit write_lock = 0;

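/*
 * ooo_mem() models out-of-order memory: each call nondeterministically
 * flushes each dirty cached variable to memory and/or refreshes clean
 * ones from memory, letting the verifier explore every ordering allowed
 * between two barriers.
 */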
inline ooo_mem(i)
{
        atomic {
                RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
                                          get_pid());
                RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
                                           get_pid());
                RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)

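/*
 * Busy-wait until the reader is outside any read-side critical section
 * started before the last urcu_gp_ctr phase flip (nesting count non-zero
 * and phase bit differing). Note: with NR_READERS == 1, the "id"
 * parameter is currently unused and urcu_active_readers_one is read
 * directly.
 */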
inline wait_for_reader(tmp, id, i)
{
        do
        :: 1 ->
                tmp = READ_CACHED_VAR(urcu_active_readers_one);
                ooo_mem(i);
                if
                :: (tmp & RCU_GP_CTR_NEST_MASK)
                   && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
                       & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
                        smp_mb(i);
#else
                        ooo_mem(i);
#endif
                :: else ->
                        break;
                fi;
        od;
}

inline wait_for_quiescent_state(tmp, i, j)
{
        i = 0;
        do
        :: i < NR_READERS ->
                wait_for_reader(tmp, i, j);
                if
                :: (NR_READERS > 1) && (i < NR_READERS - 1)
                        -> ooo_mem(j);
                :: else
                        -> skip;
                fi;
                i++
        :: i >= NR_READERS -> break
        od;
}

/* Model the RCU read-side critical section. */

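/*
 * The first loop below models (possibly nested) rcu_read_lock() calls:
 * the outermost lock snapshots the global urcu_gp_ctr into the
 * per-reader count, while nested locks only increment the nesting count.
 * The middle section models the data access (reading the pointer
 * generation), and the second loop models the matching rcu_read_unlock()
 * calls.
 */
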
inline urcu_one_read(i, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_active_readers_one);
                ooo_mem(i);
                if
                :: !(tmp & RCU_GP_CTR_NEST_MASK) ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers_one,
                                         tmp + 1);
                fi;
                smp_mb(i);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;

        ooo_mem(i);
        read_generation = READ_CACHED_VAR(generation_ptr);
        ooo_mem(i);
        data_access = 1;
        ooo_mem(i);
        data_access = 0;

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                smp_mb(i);
                tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        ooo_mem(i);
        //smp_mc(i); /* added */
}

active [NR_READERS] proctype urcu_reader()
{
        byte i, nest_i;
        byte tmp, tmp2;

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                /*
                 * We do not test the reader's progress here, because we
                 * are mainly interested in the writer's progress. The
                 * reader never blocks anyway. We have to test reader and
                 * writer progress separately; otherwise we could believe
                 * the writer is making progress when it is in fact
                 * blocked by an always-progressing reader.
                 */
#ifdef READER_PROGRESS
progress_reader:
#endif
                urcu_one_read(i, nest_i, tmp, tmp2);
        od;
}

/* Model the RCU update process. */

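/*
 * Writer flow: publish a new pointer generation (atomic increment of
 * generation_ptr), acquire the write lock, flip the urcu_gp_ctr phase
 * bit and wait for readers to reach a quiescent state (twice, unless
 * SINGLE_FLIP is defined), then perform the deferred free of the old
 * generation.
 */
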
active [NR_WRITERS] proctype urcu_writer()
{
        byte i, j;
        byte tmp;
        byte old_gen;

        assert(get_pid() < NR_PROCS);

        do
        :: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
                ooo_mem(i);
                atomic {
                        old_gen = READ_CACHED_VAR(generation_ptr);
                        WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
                }
                ooo_mem(i);

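                /*
                 * Acquire the write lock: a test-and-set spinlock built
                 * from an atomic check-and-set of write_lock.
                 */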
                do
                :: 1 ->
                        atomic {
                                if
                                :: write_lock == 0 ->
                                        write_lock = 1;
                                        break;
                                :: else ->
                                        skip;
                                fi;
                        }
                od;
                smp_mb(i);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                ooo_mem(i);
                //smp_mc(i);
                wait_for_quiescent_state(tmp, i, j);
                //smp_mc(i);
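                /*
                 * Second phase: flip the counter again and wait once
                 * more. Compiling with -DSINGLE_FLIP removes this pass,
                 * so the single-flip variant of the grace period can be
                 * checked as well.
                 */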
#ifndef SINGLE_FLIP
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                //smp_mc(i);
                ooo_mem(i);
                wait_for_quiescent_state(tmp, i, j);
#endif
                smp_mb(i);
                write_lock = 0;
                /* Free-up step, e.g., kfree(). */
                atomic {
                        last_free_gen = old_gen;
                        free_done = 1;
                }
        :: else -> break;
        od;
        /*
         * Since the reader loops infinitely, let the writer also
         * busy-loop with a progress label here so that, under weak
         * fairness, we can check the writer's progress.
         */
end_writer:
        do
        :: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
                skip;
        od;
}