// SPDX-FileCopyrightText: 2008 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * rcutorture.h: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *	./rcu <nreaders> rperf [ <cpustride> ]
 *		Run a read-side performance test with the specified
 *		number of readers spaced by <cpustride>.
 *		Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered
 *		CPUs from 0 to 30.
 *	./rcu <nupdaters> uperf [ <cpustride> ]
 *		Run an update-side performance test with the specified
 *		number of updaters and specified CPU spacing.
 *	./rcu <nreaders> perf [ <cpustride> ]
 *		Run a combined read/update performance test with the specified
 *		number of readers and one updater and specified CPU spacing.
 *		The readers run on the low-numbered CPUs and the updater
 *		on the highest-numbered CPU.
 *
 * The above tests produce output as follows:
 *
 * n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1
 * ns/read: 43.4707 ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds. The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
 *
 *	./rcu <nreaders> stress
 *		Run a stress test with the specified number of readers and
 *		one updater. None of the threads are bound to any specific CPU.
 *
 * This test produces output as follows:
 *
 * n_reads: 114633217 n_updates: 3903415 n_mberror: 0
 * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation). The second
 * line lists the number of readers observing progressively more stale
 * data. A correct RCU implementation will have all but the first two
 * numbers zero.
 */
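/*
 * For reference, the ns/read and ns/update figures are simply
 * (duration_in_seconds * 1e9 * nthreads) / total_operations, as computed in
 * perftestrun() below. A minimal illustrative helper (not part of the test):
 *
 *	static double ns_per_op(double duration_s, int nthreads, long long ops)
 *	{
 *		return duration_s * 1000. * 1000. * 1000. * nthreads / (double)ops;
 *	}
 */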
#include <errno.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "urcu-wait.h"

DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);
enum callrcu_type {
        CALLRCU_GLOBAL,
        CALLRCU_PERCPU,
        CALLRCU_PERTHREAD,
};

enum writer_state {
        WRITER_STATE_SYNC_RCU,
        WRITER_STATE_CALL_RCU,
        WRITER_STATE_POLL_RCU,
};

static enum callrcu_type callrcu_type = CALLRCU_GLOBAL;
long long n_reads = 0LL;
long long n_updates = 0LL;
int nthreadsrunning;

#define GOFLAG_INIT 0
#define GOFLAG_RUN  1
#define GOFLAG_STOP 2

volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
                = GOFLAG_INIT;

#define RCU_READ_RUN 1000
#define RCU_READ_NESTABLE

#ifdef RCU_READ_NESTABLE
#define rcu_read_lock_nest() rcu_read_lock()
#define rcu_read_unlock_nest() rcu_read_unlock()
#else /* #ifdef RCU_READ_NESTABLE */
#define rcu_read_lock_nest()
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */
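/*
 * When RCU_READ_NESTABLE is defined, the *_nest() macros expand to real
 * read-side primitives so the tests can exercise nested critical sections.
 * A minimal sketch of the pattern being exercised (illustrative only):
 *
 *	rcu_read_lock();
 *	rcu_read_lock_nest();
 *	... dereference RCU-protected data ...
 *	rcu_read_unlock_nest();
 *	rcu_read_unlock();
 */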
/*
 * These hooks map the torture test onto the QSBR flavor's explicit
 * quiescent-state primitives; where they are not defined, the fallbacks
 * below turn them into no-ops.
 */
#define mark_rcu_quiescent_state rcu_quiescent_state
#define put_thread_offline rcu_thread_offline
#define put_thread_online rcu_thread_online

#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do {} while (0)
#endif /* #ifndef mark_rcu_quiescent_state */

#ifndef put_thread_offline
#define put_thread_offline() do {} while (0)
#define put_thread_online() do {} while (0)
#define put_thread_online_delay() do {} while (0)
#else /* #ifndef put_thread_offline */
#define put_thread_online_delay() synchronize_rcu()
#endif /* #else #ifndef put_thread_offline */
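/*
 * With the QSBR flavor, every registered thread must periodically announce
 * a quiescent state, and must go "offline" before blocking. A minimal
 * sketch of the reader-loop pattern the helpers above support (illustrative
 * only, not part of the test):
 *
 *	rcu_register_thread();
 *	while (running) {
 *		rcu_read_lock();
 *		... read RCU-protected data ...
 *		rcu_read_unlock();
 *		mark_rcu_quiescent_state();	// announce a quiescent state
 *	}
 *	put_thread_offline();			// about to block
 *	rcu_unregister_thread();
 */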
/*
 * Performance test reader: spin in read-side critical sections, counting
 * how many were executed.
 */
void *rcu_read_perf_test(void *arg)
{
        int i;
        int me = (long)arg;
        long long n_reads_local = 0;

        rcu_register_thread();
        run_on(me);
        uatomic_inc(&nthreadsrunning);
        put_thread_offline();
        while (goflag == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
        put_thread_online();
        while (goflag == GOFLAG_RUN) {
                for (i = 0; i < RCU_READ_RUN; i++) {
                        rcu_read_lock();
                        /* rcu_read_lock_nest(); */
                        /* rcu_read_unlock_nest(); */
                        rcu_read_unlock();
                }
                n_reads_local += RCU_READ_RUN;
                mark_rcu_quiescent_state();
        }
        __get_thread_var(n_reads_pt) += n_reads_local;
        put_thread_offline();
        rcu_unregister_thread();

        return NULL;
}
/*
 * Performance test updater: repeatedly wait for a grace period, counting
 * how many completed.
 */
void *rcu_update_perf_test(void *arg __attribute__((unused)))
{
        long long n_updates_local = 0;

        if (callrcu_type == CALLRCU_PERTHREAD) {
                struct call_rcu_data *crdp;

                crdp = create_call_rcu_data(0, -1);
                if (crdp != NULL) {
                        diag("Successfully using per-thread call_rcu() worker.");
                        set_thread_call_rcu_data(crdp);
                }
        }
        uatomic_inc(&nthreadsrunning);
        while (goflag == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
        while (goflag == GOFLAG_RUN) {
                synchronize_rcu();
                n_updates_local++;
        }
        __get_thread_var(n_updates_pt) += n_updates_local;
        if (callrcu_type == CALLRCU_PERTHREAD) {
                struct call_rcu_data *crdp;

                crdp = get_thread_call_rcu_data();
                set_thread_call_rcu_data(NULL);
                call_rcu_data_free(crdp);
        }
        return NULL;
}
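/*
 * The per-thread call_rcu() worker lifecycle used above (and again in
 * rcu_fake_update_stress_test below) follows the liburcu call_rcu API:
 * create_call_rcu_data(flags, cpu_affinity) with a cpu_affinity of -1
 * creates an unbound worker, set_thread_call_rcu_data() routes this
 * thread's call_rcu() invocations to it, and the worker is detached and
 * freed with set_thread_call_rcu_data(NULL) + call_rcu_data_free().
 */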
/* Reset the counters shared by the performance tests. */
void perftestinit(void)
{
        init_per_thread(n_reads_pt, 0LL);
        init_per_thread(n_updates_pt, 0LL);
        uatomic_set(&nthreadsrunning, 0);
}
/*
 * Run one performance test: wait for all threads to check in, let them run
 * for "duration" seconds, then stop them and report the per-operation cost.
 */
int perftestrun(int nthreads, int nreaders, int nupdaters)
{
        int t;
        int duration = 1;

        while (uatomic_read(&nthreadsrunning) < nthreads)
                (void) poll(NULL, 0, 1);
        goflag = GOFLAG_RUN;
        sleep(duration);
        goflag = GOFLAG_STOP;
        wait_all_threads();
        for_each_thread(t) {
                n_reads += per_thread(n_reads_pt, t);
                n_updates += per_thread(n_updates_pt, t);
        }
        diag("n_reads: %lld n_updates: %lld nreaders: %d nupdaters: %d duration: %d",
             n_reads, n_updates, nreaders, nupdaters, duration);
        diag("ns/read: %g ns/update: %g",
             ((duration * 1000*1000*1000.*(double)nreaders) /
              n_reads),
             ((duration * 1000*1000*1000.*(double)nupdaters) /
              n_updates));
        if (get_cpu_call_rcu_data(0)) {
                diag("Deallocating per-CPU call_rcu threads.");
                free_all_cpu_call_rcu_data();
        }
        return 0;
}
/*
 * Combined read/update performance test: nreaders readers plus one updater,
 * spaced cpustride CPUs apart.
 */
int perftest(int nreaders, int cpustride)
{
        int i;
        long arg;

        perftestinit();
        for (i = 0; i < nreaders; i++) {
                arg = (long)(i * cpustride);
                create_thread(rcu_read_perf_test, (void *)arg);
        }
        arg = (long)(i * cpustride);
        create_thread(rcu_update_perf_test, (void *)arg);
        return perftestrun(i + 1, nreaders, 1);
}
/* Read-side-only performance test. */
int rperftest(int nreaders, int cpustride)
{
        int i;
        long arg;

        perftestinit();
        init_per_thread(n_reads_pt, 0LL);
        for (i = 0; i < nreaders; i++) {
                arg = (long)(i * cpustride);
                create_thread(rcu_read_perf_test, (void *)arg);
        }
        return perftestrun(i, nreaders, 0);
}
/* Update-side-only performance test. */
int uperftest(int nupdaters, int cpustride)
{
        int i;
        long arg;

        perftestinit();
        init_per_thread(n_reads_pt, 0LL);
        for (i = 0; i < nupdaters; i++) {
                arg = (long)(i * cpustride);
                create_thread(rcu_update_perf_test, (void *)arg);
        }
        return perftestrun(i, 0, nupdaters);
}
/*
 * Stress test.
 */

#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
        int pipe_count;
        int mbtest;
};

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0, 0 } };
struct rcu_stress *rcu_stress_current;
int rcu_stress_idx = 0;

int n_mberror = 0;
DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
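/*
 * How the stress pipeline works: rcu_stress_current always points at the
 * newest element of rcu_stress_array, and every update increments
 * pipe_count in all the other elements, so an element's pipe_count is the
 * number of grace periods that have elapsed since it was current. Each
 * reader buckets the pipe_count it observed into rcu_stress_count[], so
 * entries at index 2 and above indicate readers that saw data more than one
 * grace period old, which a correct RCU implementation must prevent. The
 * mbtest field is set before an element is published and checked by readers
 * to detect memory-ordering violations (counted in n_mberror).
 */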
/*
 * Stress test reader: sample rcu_stress_current and record how stale the
 * observed element was.
 */
void *rcu_read_stress_test(void *arg __attribute__((unused)))
{
        int i;
        int itercnt = 0;
        struct rcu_stress *p;
        int pc;

        rcu_register_thread();
        put_thread_offline();
        while (goflag == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
        put_thread_online();
        while (goflag == GOFLAG_RUN) {
                rcu_read_lock();
                p = rcu_dereference(rcu_stress_current);
                if (p->mbtest == 0)
                        n_mberror++;
                rcu_read_lock_nest();
                for (i = 0; i < 100; i++)
                        caa_cpu_relax();	/* stretch the critical section */
                rcu_read_unlock_nest();
                pc = p->pipe_count;
                rcu_read_unlock();
                if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
                        pc = RCU_STRESS_PIPE_LEN;
                __get_thread_var(rcu_stress_count)[pc]++;
                __get_thread_var(n_reads_pt)++;
                mark_rcu_quiescent_state();
                if ((++itercnt % 0x1000) == 0) {
                        put_thread_offline();
                        put_thread_online_delay();
                        put_thread_online();
                }
        }
        put_thread_offline();
        rcu_unregister_thread();

        return NULL;
}
static DEFINE_URCU_WAIT_QUEUE(call_rcu_waiters);

/* call_rcu() callback: wake up the updater parked on call_rcu_waiters. */
void rcu_update_stress_test_rcu(struct rcu_head *head __attribute__((unused)))
{
        struct urcu_waiters waiters;

        urcu_move_waiters(&waiters, &call_rcu_waiters);
        urcu_wake_all_waiters(&waiters);
}
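/*
 * Handshake between the updater and the callback above: in the
 * WRITER_STATE_CALL_RCU state the updater enqueues itself on
 * call_rcu_waiters, posts a call_rcu() callback, and busy-waits; once a
 * grace period has elapsed the callback runs and wakes it, so one "update"
 * completes per grace period just as with synchronize_rcu().
 */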
/* Cycle the updater through the three grace-period-wait mechanisms. */
void advance_writer_state(enum writer_state *state)
{
        switch (*state) {
        case WRITER_STATE_SYNC_RCU:
                *state = WRITER_STATE_CALL_RCU;
                break;
        case WRITER_STATE_CALL_RCU:
                *state = WRITER_STATE_POLL_RCU;
                break;
        case WRITER_STATE_POLL_RCU:
                *state = WRITER_STATE_SYNC_RCU;
                break;
        }
}
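/*
 * The WRITER_STATE_POLL_RCU case below uses liburcu's polling grace-period
 * API: start_poll_synchronize_rcu() takes a grace-period snapshot and
 * poll_state_synchronize_rcu() returns true once that grace period has
 * completed, letting the updater sleep in 1ms steps instead of blocking in
 * synchronize_rcu().
 */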
/*
 * Stress test updater: rotate rcu_stress_current through rcu_stress_array,
 * aging the non-current elements, and wait for a grace period after each
 * update using the mechanism selected by writer_state.
 */
void *rcu_update_stress_test(void *arg __attribute__((unused)))
{
        int i;
        struct rcu_stress *p;
        struct rcu_head rh;
        enum writer_state writer_state = WRITER_STATE_SYNC_RCU;

        rcu_register_thread();

        /* Offline for poll. */
        put_thread_offline();
        while (goflag == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
        put_thread_online();

        while (goflag == GOFLAG_RUN) {
                i = rcu_stress_idx + 1;
                if (i >= RCU_STRESS_PIPE_LEN)
                        i = 0;
                p = &rcu_stress_array[i];
                p->mbtest = 0;
                cmm_smp_mb();
                p->pipe_count = 0;
                p->mbtest = 1;
                rcu_assign_pointer(rcu_stress_current, p);
                rcu_stress_idx = i;
                for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
                        if (i != rcu_stress_idx)
                                rcu_stress_array[i].pipe_count++;
                switch (writer_state) {
                case WRITER_STATE_SYNC_RCU:
                        synchronize_rcu();
                        break;
                case WRITER_STATE_CALL_RCU:
                {
                        DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);

                        urcu_wait_add(&call_rcu_waiters, &wait);

                        call_rcu(&rh, rcu_update_stress_test_rcu);

                        /* Offline for busy-wait. */
                        put_thread_offline();
                        urcu_adaptative_busy_wait(&wait);
                        put_thread_online();
                        break;
                }
                case WRITER_STATE_POLL_RCU:
                {
                        struct urcu_gp_poll_state poll_state;

                        poll_state = start_poll_synchronize_rcu();

                        /* Offline for poll. */
                        put_thread_offline();
                        while (!poll_state_synchronize_rcu(poll_state))
                                (void) poll(NULL, 0, 1);	/* Wait for 1ms */
                        put_thread_online();
                        break;
                }
                }
                n_updates++;
                advance_writer_state(&writer_state);
        }

        rcu_unregister_thread();

        return NULL;
}
/*
 * Fake updater: generate additional grace-period traffic alongside the real
 * stress-test updater.
 */
void *rcu_fake_update_stress_test(void *arg __attribute__((unused)))
{
        if (callrcu_type == CALLRCU_PERTHREAD) {
                struct call_rcu_data *crdp;

                crdp = create_call_rcu_data(0, -1);
                if (crdp != NULL) {
                        diag("Successfully using per-thread call_rcu() worker.");
                        set_thread_call_rcu_data(crdp);
                }
        }
        while (goflag == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
        while (goflag == GOFLAG_RUN) {
                synchronize_rcu();
                (void) poll(NULL, 0, 1);
        }
        if (callrcu_type == CALLRCU_PERTHREAD) {
                struct call_rcu_data *crdp;

                crdp = get_thread_call_rcu_data();
                set_thread_call_rcu_data(NULL);
                call_rcu_data_free(crdp);
        }
        return NULL;
}
/*
 * Run the stress test: nreaders readers, one updater, and five fake
 * updaters, then report the staleness histogram.
 */
int stresstest(int nreaders)
{
        int i;
        int t;
        long long *p;
        long long sum;

        init_per_thread(n_reads_pt, 0LL);
        for_each_thread(t) {
                p = &per_thread(rcu_stress_count, t)[0];
                for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
                        p[i] = 0LL;
        }
        rcu_stress_current = &rcu_stress_array[0];
        rcu_stress_current->pipe_count = 0;
        rcu_stress_current->mbtest = 1;
        for (i = 0; i < nreaders; i++)
                create_thread(rcu_read_stress_test, NULL);
        create_thread(rcu_update_stress_test, NULL);
        for (i = 0; i < 5; i++)
                create_thread(rcu_fake_update_stress_test, NULL);
        goflag = GOFLAG_RUN;
        sleep(10);
        goflag = GOFLAG_STOP;
        wait_all_threads();
        for_each_thread(t)
                n_reads += per_thread(n_reads_pt, t);
        diag("n_reads: %lld n_updates: %lld n_mberror: %d",
             n_reads, n_updates, n_mberror);
        rdiag("rcu_stress_count:");
        for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
                sum = 0LL;
                for_each_thread(t)
                        sum += per_thread(rcu_stress_count, t)[i];
                rdiag(" %lld", sum);
        }
        if (get_cpu_call_rcu_data(0)) {
                diag("Deallocating per-CPU call_rcu threads.");
                free_all_cpu_call_rcu_data();
        }
        if (!n_mberror)
                return 0;
        else
                return -1;
}
void usage(char *argv[]) __attribute__((__noreturn__));

/* Print usage information and exit. */
void usage(char *argv[])
{
        diag("Usage: %s nreaders [ perf | rperf | uperf | stress ] [ stride ] [ callrcu_global | callrcu_percpu | callrcu_perthread ]\n",
             argv[0]);
        exit(-1);
}
#define NR_TESTS 1

int main(int argc, char *argv[])
{
        int nreaders = 1;
        int cpustride = 1;

        plan_tests(NR_TESTS);

        if (argc > 4) {
                const char *callrcu_str = argv[4];

                if (strcmp(callrcu_str, "callrcu_global") == 0) {
                        callrcu_type = CALLRCU_GLOBAL;
                } else if (strcmp(callrcu_str, "callrcu_percpu") == 0) {
                        callrcu_type = CALLRCU_PERCPU;
                } else if (strcmp(callrcu_str, "callrcu_perthread") == 0) {
                        callrcu_type = CALLRCU_PERTHREAD;
                } else {
                        usage(argv);
                }
        }

        switch (callrcu_type) {
        case CALLRCU_GLOBAL:
                diag("Using global per-process call_rcu thread.");
                break;
        case CALLRCU_PERCPU:
                diag("Using per-CPU call_rcu threads.");
                if (create_all_cpu_call_rcu_data(0))
                        diag("create_all_cpu_call_rcu_data: %s",
                             strerror(errno));
                break;
        case CALLRCU_PERTHREAD:
                diag("Using per-thread call_rcu() worker.");
                break;
        }

#ifdef DEBUG_YIELD
        yield_active |= YIELD_READ;
        yield_active |= YIELD_WRITE;
#endif

        if (argc < 2)
                usage(argv);
        if (strcmp(argv[1], "-h") == 0
                        || strcmp(argv[1], "--help") == 0) {
                usage(argv);
        }
        nreaders = strtoul(argv[1], NULL, 0);
        if (argc == 2) {
                ok(!perftest(nreaders, cpustride),
                   "perftest readers: %d, stride: %d",
                   nreaders, cpustride);
                return exit_status();
        }
        if (argc > 3)
                cpustride = strtoul(argv[3], NULL, 0);
        if (strcmp(argv[2], "perf") == 0)
                ok(!perftest(nreaders, cpustride),
                   "perftest readers: %d, stride: %d",
                   nreaders, cpustride);
        else if (strcmp(argv[2], "rperf") == 0)
                ok(!rperftest(nreaders, cpustride),
                   "rperftest readers: %d, stride: %d",
                   nreaders, cpustride);
        else if (strcmp(argv[2], "uperf") == 0)
                ok(!uperftest(nreaders, cpustride),
                   "uperftest readers: %d, stride: %d",
                   nreaders, cpustride);
        else if (strcmp(argv[2], "stress") == 0)
                ok(!stresstest(nreaders),
                   "stresstest readers: %d, stride: %d",
                   nreaders, cpustride);
        else
                usage(argv);

        return exit_status();
}