Commit | Line | Data |
---|---|---|
8a953620 MD |
1 | /* |
2 | * rcutorture.h: simple user-level performance/stress test of RCU. | |
3 | * | |
4 | * Usage: | |
5 | * ./rcu <nreaders> rperf [ <cpustride> ] | |
6 | * Run a read-side performance test with the specified | |
7 | * number of readers spaced by <cpustride>. | |
8 | * Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered | |
9 | * CPUs from 0 to 30. | |
10 | * ./rcu <nupdaters> uperf [ <cpustride> ] | |
11 | * Run an update-side performance test with the specified | |
12 | * number of updaters and specified CPU spacing. | |
13 | * ./rcu <nreaders> perf [ <cpustride> ] | |
14 | * Run a combined read/update performance test with the specified | |
15 | * number of readers and one updater and specified CPU spacing. | |
16 | * The readers run on the low-numbered CPUs and the updater | |
17 | * of the highest-numbered CPU. | |
18 | * | |
19 | * The above tests produce output as follows: | |
20 | * | |
21 | * n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1 | |
22 | * ns/read: 43.4707 ns/update: 6848.1 | |
23 | * | |
24 | * The first line lists the total number of RCU reads and updates executed | |
25 | * during the test, the number of reader threads, the number of updater | |
26 | * threads, and the duration of the test in seconds. The second line | |
27 | * lists the average duration of each type of operation in nanoseconds, | |
28 | * or "nan" if the corresponding type of operation was not performed. | |
29 | * | |
30 | * ./rcu <nreaders> stress | |
31 | * Run a stress test with the specified number of readers and | |
32 | * one updater. None of the threads are affinitied to any | |
33 | * particular CPU. | |
34 | * | |
35 | * This test produces output as follows: | |
36 | * | |
37 | * n_reads: 114633217 n_updates: 3903415 n_mberror: 0 | |
38 | * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0 | |
39 | * | |
40 | * The first line lists the number of RCU read and update operations | |
41 | * executed, followed by the number of memory-ordering violations | |
42 | * (which will be zero in a correct RCU implementation). The second | |
43 | * line lists the number of readers observing progressively more stale | |
44 | * data. A correct RCU implementation will have all but the first two | |
45 | * numbers non-zero. | |
46 | * | |
47 | * This program is free software; you can redistribute it and/or modify | |
48 | * it under the terms of the GNU General Public License as published by | |
49 | * the Free Software Foundation; either version 2 of the License, or | |
50 | * (at your option) any later version. | |
51 | * | |
52 | * This program is distributed in the hope that it will be useful, | |
53 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
54 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
55 | * GNU General Public License for more details. | |
56 | * | |
57 | * You should have received a copy of the GNU General Public License | |
58 | * along with this program; if not, write to the Free Software | |
3282a76b | 59 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
8a953620 MD |
60 | * |
61 | * Copyright (c) 2008 Paul E. McKenney, IBM Corporation. | |
62 | */ | |
63 | ||
64 | /* | |
65 | * Test variables. | |
66 | */ | |
67 | ||
b57aee66 | 68 | #include <stdlib.h> |
ad460058 MD |
69 | #include "tap.h" |
70 | ||
71 | #define NR_TESTS 1 | |
b57aee66 | 72 | |
8a953620 MD |
/* Per-thread operation counters, summed after the test threads are joined. */
DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);

/*
 * Which call_rcu() worker configuration to exercise, selectable from
 * the command line (see main()).
 */
enum callrcu_type {
	CALLRCU_GLOBAL,		/* Single per-process call_rcu worker. */
	CALLRCU_PERCPU,		/* One call_rcu worker per CPU. */
	CALLRCU_PERTHREAD,	/* Each updater thread creates its own worker. */
};

/*
 * Grace-period wait methods the stress-test updater cycles through:
 * blocking synchronize_rcu(), call_rcu() callback, and grace-period
 * polling (see rcu_update_stress_test() and advance_writer_state()).
 */
enum writer_state {
	WRITER_STATE_SYNC_RCU,
	WRITER_STATE_CALL_RCU,
	WRITER_STATE_POLL_RCU,
};
87 | ||
26b5a74b MD |
/* Selected call_rcu() worker configuration; set from argv in main(). */
static enum callrcu_type callrcu_type = CALLRCU_GLOBAL;

/* Aggregate totals, filled in after all test threads are joined. */
long long n_reads = 0LL;
long n_updates = 0L;
int nthreadsrunning;	/* Incremented by each test thread once it is ready. */
char argsbuf[64];

/* Values for goflag: start gate, run phase, stop request. */
#define GOFLAG_INIT 0
#define GOFLAG_RUN 1
#define GOFLAG_STOP 2

/*
 * Start/stop flag polled by all test threads.  Cache-line aligned to
 * avoid false sharing with adjacent globals.
 */
volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
		= GOFLAG_INIT;

/* Read-side critical sections executed per reader loop iteration. */
#define RCU_READ_RUN 1000

//MD
#define RCU_READ_NESTABLE

/*
 * When RCU_READ_NESTABLE is defined, the stress readers take a nested
 * read-side critical section; otherwise the nested ops are no-ops.
 */
#ifdef RCU_READ_NESTABLE
#define rcu_read_lock_nest() rcu_read_lock()
#define rcu_read_unlock_nest() rcu_read_unlock()
#else /* #ifdef RCU_READ_NESTABLE */
#define rcu_read_lock_nest()
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */

/*
 * For the QSBR flavor, map the torture hooks onto the explicit
 * quiescent-state / offline / online primitives.
 */
#ifdef TORTURE_QSBR
#define mark_rcu_quiescent_state rcu_quiescent_state
#define put_thread_offline rcu_thread_offline
#define put_thread_online rcu_thread_online
#endif

/* Other flavors need no explicit quiescent states: make these no-ops. */
#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do {} while (0)
#endif /* #ifdef mark_rcu_quiescent_state */

#ifndef put_thread_offline
#define put_thread_offline() do {} while (0)
#define put_thread_online() do {} while (0)
#define put_thread_online_delay() do {} while (0)
#else /* #ifndef put_thread_offline */
/* While a thread is offline, delay long enough for a grace period. */
#define put_thread_online_delay() synchronize_rcu()
#endif /* #else #ifndef put_thread_offline */
132 | ||
133 | /* | |
134 | * Performance test. | |
135 | */ | |
136 | ||
/*
 * Reader thread for the performance tests.  Pins itself to the CPU
 * encoded in arg, signals readiness via nthreadsrunning, then performs
 * empty RCU read-side critical sections in batches of RCU_READ_RUN
 * until goflag leaves GOFLAG_RUN.  The per-thread read count is
 * published to n_reads_pt for perftestrun() to aggregate.
 */
static
void *rcu_read_perf_test(void *arg)
{
	int i;
	int me = (long)arg;	/* CPU number this reader is affined to. */
	long long n_reads_local = 0;

	rcu_register_thread();
	run_on(me);
	uatomic_inc(&nthreadsrunning);
	/* Stay offline (QSBR) while waiting for the start gate. */
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		for (i = 0; i < RCU_READ_RUN; i++) {
			rcu_read_lock();
			/* rcu_read_lock_nest(); */
			/* rcu_read_unlock_nest(); */
			rcu_read_unlock();
		}
		n_reads_local += RCU_READ_RUN;
		/* QSBR flavor: announce a quiescent state between batches. */
		mark_rcu_quiescent_state();
	}
	__get_thread_var(n_reads_pt) += n_reads_local;
	put_thread_offline();
	rcu_unregister_thread();

	return (NULL);
}
167 | ||
/*
 * Updater thread for the performance tests: repeatedly invokes
 * synchronize_rcu() until told to stop, counting grace periods into
 * n_updates_pt.  When a per-thread call_rcu() worker was requested,
 * one is created on entry and torn down on exit.
 */
static
void *rcu_update_perf_test(void *arg __attribute__((unused)))
{
	long long n_updates_local = 0;

	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		/* (0, -1): default flags, presumably no CPU affinity —
		 * see liburcu call_rcu API. */
		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			diag("Successfully using per-thread call_rcu() worker.");
			set_thread_call_rcu_data(crdp);
		}
	}
	uatomic_inc(&nthreadsrunning);
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		n_updates_local++;
	}
	/* Publish this thread's count for perftestrun() to aggregate. */
	__get_thread_var(n_updates_pt) += n_updates_local;
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = get_thread_call_rcu_data();
		set_thread_call_rcu_data(NULL);
		call_rcu_data_free(crdp);
	}
	return NULL;
}
199 | ||
61c3fb60 | 200 | static |
8a953620 MD |
201 | void perftestinit(void) |
202 | { | |
203 | init_per_thread(n_reads_pt, 0LL); | |
204 | init_per_thread(n_updates_pt, 0LL); | |
ec4e58a3 | 205 | uatomic_set(&nthreadsrunning, 0); |
8a953620 MD |
206 | } |
207 | ||
61c3fb60 | 208 | static |
ad460058 | 209 | int perftestrun(int nthreads, int nreaders, int nupdaters) |
8a953620 MD |
210 | { |
211 | int t; | |
212 | int duration = 1; | |
213 | ||
5481ddb3 | 214 | cmm_smp_mb(); |
ec4e58a3 | 215 | while (uatomic_read(&nthreadsrunning) < nthreads) |
775aff2e | 216 | (void) poll(NULL, 0, 1); |
8a953620 | 217 | goflag = GOFLAG_RUN; |
5481ddb3 | 218 | cmm_smp_mb(); |
8a953620 | 219 | sleep(duration); |
5481ddb3 | 220 | cmm_smp_mb(); |
8a953620 | 221 | goflag = GOFLAG_STOP; |
5481ddb3 | 222 | cmm_smp_mb(); |
8a953620 MD |
223 | wait_all_threads(); |
224 | for_each_thread(t) { | |
225 | n_reads += per_thread(n_reads_pt, t); | |
226 | n_updates += per_thread(n_updates_pt, t); | |
227 | } | |
ad460058 | 228 | diag("n_reads: %lld n_updates: %ld nreaders: %d nupdaters: %d duration: %d", |
8a953620 | 229 | n_reads, n_updates, nreaders, nupdaters, duration); |
ad460058 | 230 | diag("ns/read: %g ns/update: %g", |
8a953620 MD |
231 | ((duration * 1000*1000*1000.*(double)nreaders) / |
232 | (double)n_reads), | |
233 | ((duration * 1000*1000*1000.*(double)nupdaters) / | |
234 | (double)n_updates)); | |
7106ddf8 | 235 | if (get_cpu_call_rcu_data(0)) { |
ad460058 | 236 | diag("Deallocating per-CPU call_rcu threads.\n"); |
7106ddf8 PM |
237 | free_all_cpu_call_rcu_data(); |
238 | } | |
ad460058 | 239 | return 0; |
8a953620 MD |
240 | } |
241 | ||
61c3fb60 | 242 | static |
ad460058 | 243 | int perftest(int nreaders, int cpustride) |
8a953620 MD |
244 | { |
245 | int i; | |
246 | long arg; | |
247 | ||
248 | perftestinit(); | |
249 | for (i = 0; i < nreaders; i++) { | |
250 | arg = (long)(i * cpustride); | |
251 | create_thread(rcu_read_perf_test, (void *)arg); | |
252 | } | |
253 | arg = (long)(i * cpustride); | |
254 | create_thread(rcu_update_perf_test, (void *)arg); | |
ad460058 | 255 | return perftestrun(i + 1, nreaders, 1); |
8a953620 MD |
256 | } |
257 | ||
61c3fb60 | 258 | static |
ad460058 | 259 | int rperftest(int nreaders, int cpustride) |
8a953620 MD |
260 | { |
261 | int i; | |
262 | long arg; | |
263 | ||
264 | perftestinit(); | |
265 | init_per_thread(n_reads_pt, 0LL); | |
266 | for (i = 0; i < nreaders; i++) { | |
267 | arg = (long)(i * cpustride); | |
268 | create_thread(rcu_read_perf_test, (void *)arg); | |
269 | } | |
ad460058 | 270 | return perftestrun(i, nreaders, 0); |
8a953620 MD |
271 | } |
272 | ||
61c3fb60 | 273 | static |
ad460058 | 274 | int uperftest(int nupdaters, int cpustride) |
8a953620 MD |
275 | { |
276 | int i; | |
277 | long arg; | |
278 | ||
279 | perftestinit(); | |
280 | init_per_thread(n_reads_pt, 0LL); | |
281 | for (i = 0; i < nupdaters; i++) { | |
282 | arg = (long)(i * cpustride); | |
283 | create_thread(rcu_update_perf_test, (void *)arg); | |
284 | } | |
ad460058 | 285 | return perftestrun(i, 0, nupdaters); |
8a953620 MD |
286 | } |
287 | ||
288 | /* | |
289 | * Stress test. | |
290 | */ | |
291 | ||
/* Number of aging stages an entry passes through before being recycled. */
#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
	int pipe_count;	/* Grace periods elapsed since entry was current. */
	int mbtest;	/* 1 while live; readers seeing 0 flag an ordering bug. */
};

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0, 0 } };
struct rcu_stress *rcu_stress_current;	/* RCU-protected current entry. */
int rcu_stress_idx = 0;			/* Index of rcu_stress_current. */

int n_mberror = 0;	/* Memory-ordering violations observed by readers. */
/* Per-thread staleness histogram, indexed by observed pipe_count. */
DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);

int garbage = 0;	/* Busy-work sink to lengthen read-side sections. */
307 | ||
/*
 * Reader thread for the stress test.  Under rcu_read_lock(), samples
 * rcu_stress_current, checks its mbtest flag (0 means the updater's
 * initialization was not ordered before publication — a memory-ordering
 * violation), does some busy work, and records the entry's staleness
 * (pipe_count) in a per-thread histogram.
 */
static
void *rcu_read_stress_test(void *arg __attribute__((unused)))
{
	int i;
	int itercnt = 0;
	struct rcu_stress *p;
	int pc;

	rcu_register_thread();
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		rcu_read_lock();
		p = rcu_dereference(rcu_stress_current);
		if (p->mbtest == 0)
			n_mberror++;
		rcu_read_lock_nest();
		/* Busy work to widen the read-side critical section. */
		for (i = 0; i < 100; i++)
			garbage++;
		rcu_read_unlock_nest();
		pc = p->pipe_count;
		rcu_read_unlock();
		/* Clamp out-of-range staleness into the overflow bucket. */
		if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
			pc = RCU_STRESS_PIPE_LEN;
		__get_thread_var(rcu_stress_count)[pc]++;
		__get_thread_var(n_reads_pt)++;
		mark_rcu_quiescent_state();
		/* Periodically exercise the offline/online transitions. */
		if ((++itercnt % 0x1000) == 0) {
			put_thread_offline();
			put_thread_online_delay();
			put_thread_online();
		}
	}
	put_thread_offline();
	rcu_unregister_thread();

	return (NULL);
}
348 | ||
b57aee66 PM |
/* Handshake letting the updater wait until its call_rcu callback ran. */
static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;
static bool call_rcu_wait;	/* Protected by call_rcu_test_mutex. */

/*
 * call_rcu() callback for the stress-test updater: clears
 * call_rcu_wait and signals the condition variable so the updater can
 * safely reuse its on-stack rcu_head.  Any pthread failure aborts.
 */
static
void rcu_update_stress_test_rcu(struct rcu_head *head __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_lock(&call_rcu_test_mutex);
	if (ret) {
		errno = ret;
		diag("pthread_mutex_lock: %s",
			strerror(errno));
		abort();
	}
	ret = pthread_cond_signal(&call_rcu_test_cond);
	if (ret) {
		errno = ret;
		diag("pthread_cond_signal: %s",
			strerror(errno));
		abort();
	}
	/* Cleared under the mutex: waiter's predicate for its cond loop. */
	call_rcu_wait = false;
	ret = pthread_mutex_unlock(&call_rcu_test_mutex);
	if (ret) {
		errno = ret;
		diag("pthread_mutex_unlock: %s",
			strerror(errno));
		abort();
	}
}
381 | ||
eb218d4f MD |
382 | static |
383 | void advance_writer_state(enum writer_state *state) | |
384 | { | |
385 | switch (*state) { | |
386 | case WRITER_STATE_SYNC_RCU: | |
387 | *state = WRITER_STATE_CALL_RCU; | |
388 | break; | |
389 | case WRITER_STATE_CALL_RCU: | |
390 | *state = WRITER_STATE_POLL_RCU; | |
391 | break; | |
392 | case WRITER_STATE_POLL_RCU: | |
393 | *state = WRITER_STATE_SYNC_RCU; | |
394 | break; | |
395 | } | |
396 | } | |
397 | ||
61c3fb60 | 398 | static |
70469b43 | 399 | void *rcu_update_stress_test(void *arg __attribute__((unused))) |
8a953620 MD |
400 | { |
401 | int i; | |
402 | struct rcu_stress *p; | |
b57aee66 | 403 | struct rcu_head rh; |
eb218d4f | 404 | enum writer_state writer_state = WRITER_STATE_SYNC_RCU; |
8a953620 MD |
405 | |
406 | while (goflag == GOFLAG_INIT) | |
775aff2e | 407 | (void) poll(NULL, 0, 1); |
8a953620 MD |
408 | while (goflag == GOFLAG_RUN) { |
409 | i = rcu_stress_idx + 1; | |
410 | if (i >= RCU_STRESS_PIPE_LEN) | |
411 | i = 0; | |
412 | p = &rcu_stress_array[i]; | |
413 | p->mbtest = 0; | |
5481ddb3 | 414 | cmm_smp_mb(); |
8a953620 MD |
415 | p->pipe_count = 0; |
416 | p->mbtest = 1; | |
417 | rcu_assign_pointer(rcu_stress_current, p); | |
418 | rcu_stress_idx = i; | |
419 | for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) | |
420 | if (i != rcu_stress_idx) | |
421 | rcu_stress_array[i].pipe_count++; | |
eb218d4f MD |
422 | switch (writer_state) { |
423 | case WRITER_STATE_SYNC_RCU: | |
b57aee66 | 424 | synchronize_rcu(); |
eb218d4f MD |
425 | break; |
426 | case WRITER_STATE_CALL_RCU: | |
427 | { | |
ad460058 MD |
428 | int ret; |
429 | ||
430 | ret = pthread_mutex_lock(&call_rcu_test_mutex); | |
431 | if (ret) { | |
432 | errno = ret; | |
433 | diag("pthread_mutex_lock: %s", | |
434 | strerror(errno)); | |
435 | abort(); | |
b57aee66 | 436 | } |
0b9c513b | 437 | rcu_register_thread(); |
b57aee66 | 438 | call_rcu(&rh, rcu_update_stress_test_rcu); |
0b9c513b MD |
439 | rcu_unregister_thread(); |
440 | /* | |
441 | * Our MacOS X test machine with the following | |
442 | * config: | |
443 | * 15.6.0 Darwin Kernel Version 15.6.0 | |
444 | * root:xnu-3248.60.10~1/RELEASE_X86_64 | |
445 | * appears to have issues with liburcu-signal | |
446 | * signal being delivered on top of | |
447 | * pthread_cond_wait. It seems to make the | |
448 | * thread continue, and therefore corrupt the | |
449 | * rcu_head. Work around this issue by | |
450 | * unregistering the RCU read-side thread | |
451 | * immediately after call_rcu (call_rcu needs | |
452 | * us to be registered RCU readers). | |
453 | */ | |
ad79eaca OD |
454 | call_rcu_wait = true; |
455 | do { | |
456 | ret = pthread_cond_wait(&call_rcu_test_cond, | |
457 | &call_rcu_test_mutex); | |
458 | } while (call_rcu_wait); | |
ad460058 MD |
459 | if (ret) { |
460 | errno = ret; | |
461 | diag("pthread_cond_signal: %s", | |
462 | strerror(errno)); | |
463 | abort(); | |
b57aee66 | 464 | } |
ad460058 MD |
465 | ret = pthread_mutex_unlock(&call_rcu_test_mutex); |
466 | if (ret) { | |
467 | errno = ret; | |
468 | diag("pthread_mutex_unlock: %s", | |
469 | strerror(errno)); | |
470 | abort(); | |
b57aee66 | 471 | } |
eb218d4f MD |
472 | break; |
473 | } | |
474 | case WRITER_STATE_POLL_RCU: | |
475 | { | |
476 | struct urcu_gp_poll_state poll_state; | |
477 | ||
478 | rcu_register_thread(); | |
479 | poll_state = start_poll_synchronize_rcu(); | |
480 | rcu_unregister_thread(); | |
481 | while (!poll_state_synchronize_rcu(poll_state)) | |
482 | (void) poll(NULL, 0, 1); /* Wait for 1ms */ | |
483 | break; | |
484 | } | |
b57aee66 | 485 | } |
8a953620 | 486 | n_updates++; |
eb218d4f | 487 | advance_writer_state(&writer_state); |
8a953620 | 488 | } |
a13ef613 | 489 | |
b0b31506 | 490 | return NULL; |
8a953620 MD |
491 | } |
492 | ||
/*
 * Auxiliary updater for the stress test: loops calling
 * synchronize_rcu() with a 1ms pause, adding grace-period contention
 * without touching the rcu_stress pipeline.  Manages a per-thread
 * call_rcu() worker when that configuration was requested.
 */
static
void *rcu_fake_update_stress_test(void *arg __attribute__((unused)))
{
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		/* (0, -1): default flags, presumably no CPU affinity —
		 * see liburcu call_rcu API. */
		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			diag("Successfully using per-thread call_rcu() worker.");
			set_thread_call_rcu_data(crdp);
		}
	}
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		(void) poll(NULL, 0, 1);
	}
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = get_thread_call_rcu_data();
		set_thread_call_rcu_data(NULL);
		call_rcu_data_free(crdp);
	}
	return NULL;
}
520 | ||
/*
 * Run the RCU stress test: nreaders reader threads, one pipeline
 * updater, and five fake updaters, for a fixed 10 seconds.  Afterwards
 * prints total reads/updates, the memory-ordering error count, and the
 * summed staleness histogram.  Returns 0 on success, -1 if any
 * memory-ordering violation was observed.
 */
static
int stresstest(int nreaders)
{
	int i;
	int t;
	long long *p;
	long long sum;

	init_per_thread(n_reads_pt, 0LL);
	/* Zero every thread's staleness histogram. */
	for_each_thread(t) {
		p = &per_thread(rcu_stress_count,t)[0];
		for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
			p[i] = 0LL;
	}
	/* Publish an initial live entry before starting any reader. */
	rcu_stress_current = &rcu_stress_array[0];
	rcu_stress_current->pipe_count = 0;
	rcu_stress_current->mbtest = 1;
	for (i = 0; i < nreaders; i++)
		create_thread(rcu_read_stress_test, NULL);
	create_thread(rcu_update_stress_test, NULL);
	for (i = 0; i < 5; i++)
		create_thread(rcu_fake_update_stress_test, NULL);
	cmm_smp_mb();
	goflag = GOFLAG_RUN;
	cmm_smp_mb();
	sleep(10);
	cmm_smp_mb();
	goflag = GOFLAG_STOP;
	cmm_smp_mb();
	wait_all_threads();
	for_each_thread(t)
		n_reads += per_thread(n_reads_pt, t);
	diag("n_reads: %lld n_updates: %ld n_mberror: %d",
		n_reads, n_updates, n_mberror);
	/* Staleness histogram: bucket i = reads that saw pipe_count == i. */
	rdiag_start();
	rdiag("rcu_stress_count:");
	for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
		sum = 0LL;
		for_each_thread(t) {
			sum += per_thread(rcu_stress_count, t)[i];
		}
		rdiag(" %lld", sum);
	}
	rdiag_end();
	if (get_cpu_call_rcu_data(0)) {
		diag("Deallocating per-CPU call_rcu threads.");
		free_all_cpu_call_rcu_data();
	}
	if (!n_mberror)
		return 0;
	else
		return -1;
}
574 | ||
575 | /* | |
576 | * Mainprogram. | |
577 | */ | |
578 | ||
static
void usage(char *argv[]) __attribute__((__noreturn__));

/*
 * Print the command-line synopsis via TAP diag() and exit with
 * failure.  Never returns.
 */
static
void usage(char *argv[])
{
	/*
	 * Fix: dropped the stray trailing "\n" — no other diag() call
	 * in this file embeds one.
	 */
	diag("Usage: %s nreaders [ perf | rperf | uperf | stress ] [ stride ] [ callrcu_global | callrcu_percpu | callrcu_perthread ]", argv[0]);
	exit(-1);
}
588 | ||
589 | int main(int argc, char *argv[]) | |
590 | { | |
591 | int nreaders = 1; | |
592 | int cpustride = 1; | |
593 | ||
ad460058 MD |
594 | plan_tests(NR_TESTS); |
595 | ||
8a953620 MD |
596 | smp_init(); |
597 | //rcu_init(); | |
26b5a74b MD |
598 | if (argc > 4) { |
599 | const char *callrcu_str = argv[4];; | |
600 | ||
601 | if (strcmp(callrcu_str, "callrcu_global") == 0) { | |
602 | callrcu_type = CALLRCU_GLOBAL; | |
603 | } else if (strcmp(callrcu_str, "callrcu_percpu") == 0) { | |
604 | callrcu_type = CALLRCU_PERCPU; | |
605 | } else if (strcmp(callrcu_str, "callrcu_perthread") == 0) { | |
606 | callrcu_type = CALLRCU_PERTHREAD; | |
607 | } else { | |
70469b43 | 608 | usage(argv); |
26b5a74b MD |
609 | goto end; |
610 | } | |
611 | } | |
929cfaff | 612 | |
26b5a74b MD |
613 | switch (callrcu_type) { |
614 | case CALLRCU_GLOBAL: | |
615 | diag("Using global per-process call_rcu thread."); | |
616 | break; | |
617 | case CALLRCU_PERCPU: | |
618 | diag("Using per-CPU call_rcu threads."); | |
b57aee66 | 619 | if (create_all_cpu_call_rcu_data(0)) |
ad460058 MD |
620 | diag("create_all_cpu_call_rcu_data: %s", |
621 | strerror(errno)); | |
26b5a74b MD |
622 | break; |
623 | case CALLRCU_PERTHREAD: | |
624 | diag("Using per-thread call_rcu() worker."); | |
625 | break; | |
626 | default: | |
627 | abort(); | |
b57aee66 | 628 | } |
8a953620 | 629 | |
9b171f46 MD |
630 | #ifdef DEBUG_YIELD |
631 | yield_active |= YIELD_READ; | |
632 | yield_active |= YIELD_WRITE; | |
633 | #endif | |
634 | ||
8a953620 | 635 | if (argc > 1) { |
26b5a74b MD |
636 | if (strcmp(argv[1], "-h") == 0 |
637 | || strcmp(argv[1], "--help") == 0) { | |
70469b43 | 638 | usage(argv); |
26b5a74b MD |
639 | goto end; |
640 | } | |
8a953620 | 641 | nreaders = strtoul(argv[1], NULL, 0); |
ad460058 MD |
642 | if (argc == 2) { |
643 | ok(!perftest(nreaders, cpustride), | |
644 | "perftest readers: %d, stride: %d", | |
645 | nreaders, cpustride); | |
646 | goto end; | |
647 | } | |
8a953620 MD |
648 | if (argc > 3) |
649 | cpustride = strtoul(argv[3], NULL, 0); | |
650 | if (strcmp(argv[2], "perf") == 0) | |
ad460058 MD |
651 | ok(!perftest(nreaders, cpustride), |
652 | "perftest readers: %d, stride: %d", | |
653 | nreaders, cpustride); | |
8a953620 | 654 | else if (strcmp(argv[2], "rperf") == 0) |
ad460058 MD |
655 | ok(!rperftest(nreaders, cpustride), |
656 | "rperftest readers: %d, stride: %d", | |
657 | nreaders, cpustride); | |
8a953620 | 658 | else if (strcmp(argv[2], "uperf") == 0) |
ad460058 MD |
659 | ok(!uperftest(nreaders, cpustride), |
660 | "uperftest readers: %d, stride: %d", | |
661 | nreaders, cpustride); | |
8a953620 | 662 | else if (strcmp(argv[2], "stress") == 0) |
ad460058 MD |
663 | ok(!stresstest(nreaders), |
664 | "stresstest readers: %d, stride: %d", | |
665 | nreaders, cpustride); | |
666 | else | |
70469b43 | 667 | usage(argv); |
ad460058 | 668 | } else { |
70469b43 | 669 | usage(argv); |
8a953620 | 670 | } |
ad460058 MD |
671 | end: |
672 | return exit_status(); | |
8a953620 | 673 | } |