/*
 * rcutorture.h: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *	./rcu <nreaders> rperf [ <cpustride> ]
 *		Run a read-side performance test with the specified
 *		number of readers spaced by <cpustride>.
 *		Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered
 *		CPUs from 0 to 30.
 *	./rcu <nupdaters> uperf [ <cpustride> ]
 *		Run an update-side performance test with the specified
 *		number of updaters and specified CPU spacing.
 *	./rcu <nreaders> perf [ <cpustride> ]
 *		Run a combined read/update performance test with the specified
 *		number of readers and one updater and specified CPU spacing.
 *		The readers run on the low-numbered CPUs and the updater
 *		on the highest-numbered CPU.
 *
 * The above tests produce output as follows:
 *
 * n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1
 * ns/read: 43.4707 ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds. The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
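 *
 * For example, the per-operation figures above follow from
 * duration * 10^9 * nthreads / n_ops: 1 s * 10^9 * 2 readers / 46008000
 * reads = 43.4707 ns/read, and 1 s * 10^9 * 1 updater / 146026 updates
 * = 6848.1 ns/update.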
 *
 *	./rcu <nreaders> stress
 *		Run a stress test with the specified number of readers and
 *		one updater. None of the threads are affinitized to any
 *		particular CPU.
 *
 * This test produces output as follows:
 *
 * n_reads: 114633217 n_updates: 3903415 n_mberror: 0
 * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation). The second
 * line lists the number of readers observing progressively more stale
 * data. A correct RCU implementation will have all but the first two
 * numbers zero.
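 *
 * In the sample above, 114618391 reads saw the current generation of the
 * structure, 14826 saw the immediately preceding generation, and no read
 * saw anything older than that.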
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
 */

/*
 * Test variables.
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <pthread.h>
#include "tap.h"

#define NR_TESTS 1

DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);

enum callrcu_type {
	CALLRCU_GLOBAL,
	CALLRCU_PERCPU,
	CALLRCU_PERTHREAD,
};
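
/*
 * How call_rcu() work is dispatched during the test: CALLRCU_GLOBAL uses the
 * default per-process call_rcu worker, CALLRCU_PERCPU creates one worker per
 * CPU via create_all_cpu_call_rcu_data(), and CALLRCU_PERTHREAD has each
 * updater thread create and install its own worker with
 * create_call_rcu_data() and set_thread_call_rcu_data().
 */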

enum writer_state {
	WRITER_STATE_SYNC_RCU,
	WRITER_STATE_CALL_RCU,
	WRITER_STATE_POLL_RCU,
};
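
/*
 * The stress-test updater cycles through these states, alternating between
 * synchronize_rcu(), call_rcu() with a callback that signals a condition
 * variable, and grace-period polling with start_poll_synchronize_rcu() /
 * poll_state_synchronize_rcu(). See advance_writer_state().
 */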

static enum callrcu_type callrcu_type = CALLRCU_GLOBAL;

long long n_reads = 0LL;
long n_updates = 0L;
int nthreadsrunning;
char argsbuf[64];

#define GOFLAG_INIT 0
#define GOFLAG_RUN 1
#define GOFLAG_STOP 2

volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
		= GOFLAG_INIT;
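
/*
 * goflag is polled in a tight loop by every test thread, so it is aligned on
 * a cache line (CAA_CACHE_LINE_SIZE) to keep it from false-sharing with
 * neighbouring data.
 */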

#define RCU_READ_RUN 1000

//MD
#define RCU_READ_NESTABLE

#ifdef RCU_READ_NESTABLE
#define rcu_read_lock_nest() rcu_read_lock()
#define rcu_read_unlock_nest() rcu_read_unlock()
#else /* #ifdef RCU_READ_NESTABLE */
#define rcu_read_lock_nest()
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */

#ifdef TORTURE_QSBR
#define mark_rcu_quiescent_state rcu_quiescent_state
#define put_thread_offline rcu_thread_offline
#define put_thread_online rcu_thread_online
#endif

#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do {} while (0)
#endif /* #ifndef mark_rcu_quiescent_state */

#ifndef put_thread_offline
#define put_thread_offline() do {} while (0)
#define put_thread_online() do {} while (0)
#define put_thread_online_delay() do {} while (0)
#else /* #ifndef put_thread_offline */
#define put_thread_online_delay() synchronize_rcu()
#endif /* #else #ifndef put_thread_offline */
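
/*
 * With the QSBR flavor (TORTURE_QSBR), readers must periodically announce a
 * quiescent state and mark themselves offline while blocked; with the other
 * flavors these hooks compile away to no-ops.
 */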

/*
 * Performance test.
 */

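/*
 * Reader performance thread: pins itself to CPU "me", waits for the start
 * signal, then executes empty rcu_read_lock()/rcu_read_unlock() critical
 * sections in batches of RCU_READ_RUN, accumulating the total in the
 * per-thread n_reads_pt counter.
 */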
static
void *rcu_read_perf_test(void *arg)
{
	int i;
	int me = (long)arg;
	long long n_reads_local = 0;

	rcu_register_thread();
	run_on(me);
	uatomic_inc(&nthreadsrunning);
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		for (i = 0; i < RCU_READ_RUN; i++) {
			rcu_read_lock();
			/* rcu_read_lock_nest(); */
			/* rcu_read_unlock_nest(); */
			rcu_read_unlock();
		}
		n_reads_local += RCU_READ_RUN;
		mark_rcu_quiescent_state();
	}
	__get_thread_var(n_reads_pt) += n_reads_local;
	put_thread_offline();
	rcu_unregister_thread();

	return (NULL);
}

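/*
 * Updater performance thread: repeatedly calls synchronize_rcu() and counts
 * completed grace periods in the per-thread n_updates_pt counter. In
 * CALLRCU_PERTHREAD mode it also sets up (and later frees) its own call_rcu
 * worker.
 */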
static
void *rcu_update_perf_test(void *arg __attribute__((unused)))
{
	long long n_updates_local = 0;

	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			diag("Successfully using per-thread call_rcu() worker.");
			set_thread_call_rcu_data(crdp);
		}
	}
	uatomic_inc(&nthreadsrunning);
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		n_updates_local++;
	}
	__get_thread_var(n_updates_pt) += n_updates_local;
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = get_thread_call_rcu_data();
		set_thread_call_rcu_data(NULL);
		call_rcu_data_free(crdp);
	}
	return NULL;
}

static
void perftestinit(void)
{
	init_per_thread(n_reads_pt, 0LL);
	init_per_thread(n_updates_pt, 0LL);
	uatomic_set(&nthreadsrunning, 0);
}

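/*
 * Common driver for the performance tests: waits until all "nthreads" test
 * threads have checked in, lets them run for "duration" seconds, stops them,
 * sums the per-thread counters, and reports ns/read and ns/update averaged
 * over the requested numbers of readers and updaters.
 */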
static
int perftestrun(int nthreads, int nreaders, int nupdaters)
{
	int t;
	int duration = 1;

	cmm_smp_mb();
	while (uatomic_read(&nthreadsrunning) < nthreads)
		(void) poll(NULL, 0, 1);
	goflag = GOFLAG_RUN;
	cmm_smp_mb();
	sleep(duration);
	cmm_smp_mb();
	goflag = GOFLAG_STOP;
	cmm_smp_mb();
	wait_all_threads();
	for_each_thread(t) {
		n_reads += per_thread(n_reads_pt, t);
		n_updates += per_thread(n_updates_pt, t);
	}
	diag("n_reads: %lld n_updates: %ld nreaders: %d nupdaters: %d duration: %d",
		n_reads, n_updates, nreaders, nupdaters, duration);
	diag("ns/read: %g ns/update: %g",
		((duration * 1000*1000*1000.*(double)nreaders) /
		(double)n_reads),
		((duration * 1000*1000*1000.*(double)nupdaters) /
		(double)n_updates));
	if (get_cpu_call_rcu_data(0)) {
		diag("Deallocating per-CPU call_rcu threads.\n");
		free_all_cpu_call_rcu_data();
	}
	return 0;
}

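/*
 * Combined read/update performance test: creates nreaders reader threads
 * spaced cpustride CPUs apart plus one updater thread, then drives them all
 * through perftestrun().
 */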
static
int perftest(int nreaders, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	}
	arg = (long)(i * cpustride);
	create_thread(rcu_update_perf_test, (void *)arg);
	return perftestrun(i + 1, nreaders, 1);
}

static
int rperftest(int nreaders, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nreaders; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_read_perf_test, (void *)arg);
	}
	return perftestrun(i, nreaders, 0);
}

static
int uperftest(int nupdaters, int cpustride)
{
	int i;
	long arg;

	perftestinit();
	init_per_thread(n_reads_pt, 0LL);
	for (i = 0; i < nupdaters; i++) {
		arg = (long)(i * cpustride);
		create_thread(rcu_update_perf_test, (void *)arg);
	}
	return perftestrun(i, 0, nupdaters);
}

/*
 * Stress test.
 */

#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
	int pipe_count;
	int mbtest;
};
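
/*
 * pipe_count records how many updates have been published since this element
 * was the current one, i.e. how stale it is when a reader sees it. mbtest is
 * set to 1 before the element is published and cleared just before the
 * element is recycled, so a reader that observes mbtest == 0 through
 * rcu_dereference() has witnessed a memory-ordering violation.
 */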

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0, 0 } };
struct rcu_stress *rcu_stress_current;
int rcu_stress_idx = 0;

int n_mberror = 0;
DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);

int garbage = 0;

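/*
 * Reader stress thread: under rcu_read_lock(), dereferences
 * rcu_stress_current, checks mbtest, does a little dummy work in a nested
 * read-side critical section, and histograms the observed pipe_count into
 * the per-thread rcu_stress_count array. Every 0x1000 iterations it goes
 * briefly offline to exercise the offline/online path.
 */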
static
void *rcu_read_stress_test(void *arg __attribute__((unused)))
{
	int i;
	int itercnt = 0;
	struct rcu_stress *p;
	int pc;

	rcu_register_thread();
	put_thread_offline();
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	put_thread_online();
	while (goflag == GOFLAG_RUN) {
		rcu_read_lock();
		p = rcu_dereference(rcu_stress_current);
		if (p->mbtest == 0)
			n_mberror++;
		rcu_read_lock_nest();
		for (i = 0; i < 100; i++)
			garbage++;
		rcu_read_unlock_nest();
		pc = p->pipe_count;
		rcu_read_unlock();
		if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
			pc = RCU_STRESS_PIPE_LEN;
		__get_thread_var(rcu_stress_count)[pc]++;
		__get_thread_var(n_reads_pt)++;
		mark_rcu_quiescent_state();
		if ((++itercnt % 0x1000) == 0) {
			put_thread_offline();
			put_thread_online_delay();
			put_thread_online();
		}
	}
	put_thread_offline();
	rcu_unregister_thread();

	return (NULL);
}

static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;

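/*
 * call_rcu() callback used by the stress updater: it runs from the call_rcu
 * worker after a grace period has elapsed and signals call_rcu_test_cond so
 * that the updater, which waits on the condition variable, knows the
 * on-stack rcu_head can safely be reused.
 */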
static
void rcu_update_stress_test_rcu(struct rcu_head *head __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_lock(&call_rcu_test_mutex);
	if (ret) {
		errno = ret;
		diag("pthread_mutex_lock: %s",
			strerror(errno));
		abort();
	}
	ret = pthread_cond_signal(&call_rcu_test_cond);
	if (ret) {
		errno = ret;
		diag("pthread_cond_signal: %s",
			strerror(errno));
		abort();
	}
	ret = pthread_mutex_unlock(&call_rcu_test_mutex);
	if (ret) {
		errno = ret;
		diag("pthread_mutex_unlock: %s",
			strerror(errno));
		abort();
	}
}

static
void advance_writer_state(enum writer_state *state)
{
	switch (*state) {
	case WRITER_STATE_SYNC_RCU:
		*state = WRITER_STATE_CALL_RCU;
		break;
	case WRITER_STATE_CALL_RCU:
		*state = WRITER_STATE_POLL_RCU;
		break;
	case WRITER_STATE_POLL_RCU:
		*state = WRITER_STATE_SYNC_RCU;
		break;
	}
}

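/*
 * Updater stress thread: advances the "pipeline" by recycling the next
 * element of rcu_stress_array, publishing it with rcu_assign_pointer(), and
 * bumping pipe_count on all the elements that are no longer current. It then
 * waits for a grace period using whichever mechanism the current
 * writer_state selects before moving on to the next update.
 */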
static
void *rcu_update_stress_test(void *arg __attribute__((unused)))
{
	int i;
	struct rcu_stress *p;
	struct rcu_head rh;
	enum writer_state writer_state = WRITER_STATE_SYNC_RCU;

	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		i = rcu_stress_idx + 1;
		if (i >= RCU_STRESS_PIPE_LEN)
			i = 0;
		p = &rcu_stress_array[i];
		p->mbtest = 0;
		cmm_smp_mb();
		p->pipe_count = 0;
		p->mbtest = 1;
		rcu_assign_pointer(rcu_stress_current, p);
		rcu_stress_idx = i;
		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
			if (i != rcu_stress_idx)
				rcu_stress_array[i].pipe_count++;
		switch (writer_state) {
		case WRITER_STATE_SYNC_RCU:
			synchronize_rcu();
			break;
		case WRITER_STATE_CALL_RCU:
		{
			int ret;

			ret = pthread_mutex_lock(&call_rcu_test_mutex);
			if (ret) {
				errno = ret;
				diag("pthread_mutex_lock: %s",
					strerror(errno));
				abort();
			}
			rcu_register_thread();
			call_rcu(&rh, rcu_update_stress_test_rcu);
			rcu_unregister_thread();
			/*
			 * Our MacOS X test machine with the following
			 * config:
			 * 15.6.0 Darwin Kernel Version 15.6.0
			 * root:xnu-3248.60.10~1/RELEASE_X86_64
			 * appears to have issues with liburcu-signal
			 * signal being delivered on top of
			 * pthread_cond_wait. It seems to make the
			 * thread continue, and therefore corrupt the
			 * rcu_head. Work around this issue by
			 * unregistering the RCU read-side thread
			 * immediately after call_rcu (call_rcu needs
			 * us to be registered RCU readers).
			 */
			ret = pthread_cond_wait(&call_rcu_test_cond,
					&call_rcu_test_mutex);
			if (ret) {
				errno = ret;
				diag("pthread_cond_wait: %s",
					strerror(errno));
				abort();
			}
			ret = pthread_mutex_unlock(&call_rcu_test_mutex);
			if (ret) {
				errno = ret;
				diag("pthread_mutex_unlock: %s",
					strerror(errno));
				abort();
			}
			break;
		}
		case WRITER_STATE_POLL_RCU:
		{
			struct urcu_gp_poll_state poll_state;

			rcu_register_thread();
			poll_state = start_poll_synchronize_rcu();
			rcu_unregister_thread();
			while (!poll_state_synchronize_rcu(poll_state))
				(void) poll(NULL, 0, 1); /* Wait for 1ms */
			break;
		}
		}
		n_updates++;
		advance_writer_state(&writer_state);
	}

	return NULL;
}

static
void *rcu_fake_update_stress_test(void *arg __attribute__((unused)))
{
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = create_call_rcu_data(0, -1);
		if (crdp != NULL) {
			diag("Successfully using per-thread call_rcu() worker.");
			set_thread_call_rcu_data(crdp);
		}
	}
	while (goflag == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);
	while (goflag == GOFLAG_RUN) {
		synchronize_rcu();
		(void) poll(NULL, 0, 1);
	}
	if (callrcu_type == CALLRCU_PERTHREAD) {
		struct call_rcu_data *crdp;

		crdp = get_thread_call_rcu_data();
		set_thread_call_rcu_data(NULL);
		call_rcu_data_free(crdp);
	}
	return NULL;
}

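/*
 * Stress-test driver: starts nreaders reader threads, one updater thread and
 * five "fake" updaters that merely call synchronize_rcu() for extra
 * contention, runs for 10 seconds, then reports the totals and the staleness
 * histogram. Returns 0 on success, -1 if any memory-ordering error was
 * detected.
 */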
static
int stresstest(int nreaders)
{
	int i;
	int t;
	long long *p;
	long long sum;

	init_per_thread(n_reads_pt, 0LL);
	for_each_thread(t) {
		p = &per_thread(rcu_stress_count, t)[0];
		for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
			p[i] = 0LL;
	}
	rcu_stress_current = &rcu_stress_array[0];
	rcu_stress_current->pipe_count = 0;
	rcu_stress_current->mbtest = 1;
	for (i = 0; i < nreaders; i++)
		create_thread(rcu_read_stress_test, NULL);
	create_thread(rcu_update_stress_test, NULL);
	for (i = 0; i < 5; i++)
		create_thread(rcu_fake_update_stress_test, NULL);
	cmm_smp_mb();
	goflag = GOFLAG_RUN;
	cmm_smp_mb();
	sleep(10);
	cmm_smp_mb();
	goflag = GOFLAG_STOP;
	cmm_smp_mb();
	wait_all_threads();
	for_each_thread(t)
		n_reads += per_thread(n_reads_pt, t);
	diag("n_reads: %lld n_updates: %ld n_mberror: %d",
		n_reads, n_updates, n_mberror);
	rdiag_start();
	rdiag("rcu_stress_count:");
	for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
		sum = 0LL;
		for_each_thread(t) {
			sum += per_thread(rcu_stress_count, t)[i];
		}
		rdiag(" %lld", sum);
	}
	rdiag_end();
	if (get_cpu_call_rcu_data(0)) {
		diag("Deallocating per-CPU call_rcu threads.");
		free_all_cpu_call_rcu_data();
	}
	if (!n_mberror)
		return 0;
	else
		return -1;
}

/*
 * Main program.
 */

static
void usage(char *argv[]) __attribute__((noreturn));

static
void usage(char *argv[])
{
	diag("Usage: %s nreaders [ perf | rperf | uperf | stress ] [ stride ] [ callrcu_global | callrcu_percpu | callrcu_perthread ]\n", argv[0]);
	exit(-1);
}

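/*
 * Parses the command line: argv[1] is the thread count, argv[2] selects the
 * test, argv[3] is the optional CPU stride and argv[4] the optional call_rcu
 * dispatch mode. Exactly one TAP test (NR_TESTS) is reported via ok().
 */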
int main(int argc, char *argv[])
{
	int nreaders = 1;
	int cpustride = 1;

	plan_tests(NR_TESTS);

	smp_init();
	//rcu_init();
	if (argc > 4) {
		const char *callrcu_str = argv[4];

		if (strcmp(callrcu_str, "callrcu_global") == 0) {
			callrcu_type = CALLRCU_GLOBAL;
		} else if (strcmp(callrcu_str, "callrcu_percpu") == 0) {
			callrcu_type = CALLRCU_PERCPU;
		} else if (strcmp(callrcu_str, "callrcu_perthread") == 0) {
			callrcu_type = CALLRCU_PERTHREAD;
		} else {
			usage(argv);
			goto end;
		}
	}

	switch (callrcu_type) {
	case CALLRCU_GLOBAL:
		diag("Using global per-process call_rcu thread.");
		break;
	case CALLRCU_PERCPU:
		diag("Using per-CPU call_rcu threads.");
		if (create_all_cpu_call_rcu_data(0))
			diag("create_all_cpu_call_rcu_data: %s",
				strerror(errno));
		break;
	case CALLRCU_PERTHREAD:
		diag("Using per-thread call_rcu() worker.");
		break;
	default:
		abort();
	}

#ifdef DEBUG_YIELD
	yield_active |= YIELD_READ;
	yield_active |= YIELD_WRITE;
#endif

	if (argc > 1) {
		if (strcmp(argv[1], "-h") == 0
				|| strcmp(argv[1], "--help") == 0) {
			usage(argv);
			goto end;
		}
		nreaders = strtoul(argv[1], NULL, 0);
		if (argc == 2) {
			ok(!perftest(nreaders, cpustride),
				"perftest readers: %d, stride: %d",
				nreaders, cpustride);
			goto end;
		}
		if (argc > 3)
			cpustride = strtoul(argv[3], NULL, 0);
		if (strcmp(argv[2], "perf") == 0)
			ok(!perftest(nreaders, cpustride),
				"perftest readers: %d, stride: %d",
				nreaders, cpustride);
		else if (strcmp(argv[2], "rperf") == 0)
			ok(!rperftest(nreaders, cpustride),
				"rperftest readers: %d, stride: %d",
				nreaders, cpustride);
		else if (strcmp(argv[2], "uperf") == 0)
			ok(!uperftest(nreaders, cpustride),
				"uperftest readers: %d, stride: %d",
				nreaders, cpustride);
		else if (strcmp(argv[2], "stress") == 0)
			ok(!stresstest(nreaders),
				"stresstest readers: %d, stride: %d",
				nreaders, cpustride);
		else
			usage(argv);
	} else {
		usage(argv);
	}
end:
	return exit_status();
}