/*
 * rcutorture.h: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *      ./rcu <nreaders> rperf [ <cpustride> ]
 *              Run a read-side performance test with the specified
 *              number of readers spaced by <cpustride>.
 *              Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered
 *              CPUs from 0 to 30.
 *      ./rcu <nupdaters> uperf [ <cpustride> ]
 *              Run an update-side performance test with the specified
 *              number of updaters and specified CPU spacing.
 *      ./rcu <nreaders> perf [ <cpustride> ]
 *              Run a combined read/update performance test with the specified
 *              number of readers and one updater and specified CPU spacing.
 *              The readers run on the low-numbered CPUs and the updater
 *              on the highest-numbered CPU.
 *
 * The above tests produce output as follows:
 *
 * n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1
 * ns/read: 43.4707 ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds.  The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
 *
 *      ./rcu <nreaders> stress
 *              Run a stress test with the specified number of readers and
 *              one updater.  None of the threads are affinitized to any
 *              particular CPU.
 *
 * This test produces output as follows:
 *
 * n_reads: 114633217 n_updates: 3903415 n_mberror: 0
 * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation).  The second
 * line lists the number of readers observing progressively more stale
 * data.  A correct RCU implementation will have all but the first two
 * numbers be zero.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
 */
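/*
 * This file supplies main() and is normally #included from a tiny
 * flavor-specific driver rather than compiled on its own.  A minimal
 * sketch of such a driver (file and header names here are illustrative
 * assumptions, not part of this test):
 *
 *      #define _LGPL_SOURCE
 *      #include <urcu.h>
 *      #include "rcutorture.h"
 *
 * The driver chooses the RCU flavor; the code below is flavor-agnostic
 * apart from the quiescent-state hooks mapped further down.
 */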
/*
 * Test variables.
 */

DEFINE_PER_THREAD(long long, n_reads_pt);
DEFINE_PER_THREAD(long long, n_updates_pt);

long long n_reads = 0LL;
long n_updates = 0L;
int nthreadsrunning;

#define GOFLAG_INIT 0
#define GOFLAG_RUN  1
#define GOFLAG_STOP 2

int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))) = GOFLAG_INIT;
#define RCU_READ_RUN 1000

#define RCU_READ_NESTABLE

#ifdef RCU_READ_NESTABLE
#define rcu_read_lock_nest() rcu_read_lock()
#define rcu_read_unlock_nest() rcu_read_unlock()
#else /* #ifdef RCU_READ_NESTABLE */
#define rcu_read_lock_nest()
#define rcu_read_unlock_nest()
#endif /* #else #ifdef RCU_READ_NESTABLE */
/*
 * Map the torture-test hooks onto the quiescent-state API when building
 * against a QSBR flavor; other flavors fall through to the no-op and
 * synchronize_rcu()-based definitions below.  The TORTURE_QSBR guard is
 * assumed here; adjust it to match the flavor-selection macro used by
 * the enclosing build.
 */
#ifdef TORTURE_QSBR
#define mark_rcu_quiescent_state rcu_quiescent_state
#define put_thread_offline rcu_thread_offline
#define put_thread_online rcu_thread_online
#endif /* #ifdef TORTURE_QSBR */

#ifndef mark_rcu_quiescent_state
#define mark_rcu_quiescent_state() do ; while (0)
#endif /* #ifdef mark_rcu_quiescent_state */

#ifndef put_thread_offline
#define put_thread_offline() do ; while (0)
#define put_thread_online() do ; while (0)
#define put_thread_online_delay() do ; while (0)
#else /* #ifndef put_thread_offline */
#define put_thread_online_delay() synchronize_rcu()
#endif /* #else #ifndef put_thread_offline */
/*
 * Performance test.
 */

void *rcu_read_perf_test(void *arg)
{
        int i;
        int me = (long)arg;
        struct call_rcu_data *crdp;
        long long n_reads_local = 0;

        rcu_register_thread();
        run_on(me);
        uatomic_inc(&nthreadsrunning);
        while (goflag == GOFLAG_INIT)
                poll(NULL, 0, 1);
        mark_rcu_quiescent_state();
        while (goflag == GOFLAG_RUN) {
                for (i = 0; i < RCU_READ_RUN; i++) {
                        rcu_read_lock();
                        /* rcu_read_lock_nest(); */
                        /* rcu_read_unlock_nest(); */
                        rcu_read_unlock();
                }
                n_reads_local += RCU_READ_RUN;
                mark_rcu_quiescent_state();
        }
        __get_thread_var(n_reads_pt) += n_reads_local;
        put_thread_offline();
        crdp = get_thread_call_rcu_data();
        set_thread_call_rcu_data(NULL);
        call_rcu_data_free(crdp);
        rcu_unregister_thread();

        return (NULL);
}
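/*
 * Update-side performance test: each updater spins executing grace
 * periods.  Roughly one updater in 16 (when the random check below
 * fires) creates and installs its own per-thread call_rcu() worker, so
 * both the default and per-thread worker paths get exercised.
 */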
void *rcu_update_perf_test(void *arg)
{
        long long n_updates_local = 0;

        if ((random() & 0xf00) == 0) {
                struct call_rcu_data *crdp;

                crdp = create_call_rcu_data(0);
                if (crdp != NULL) {
                        fprintf(stderr,
                                "Using per-thread call_rcu() worker.\n");
                        set_thread_call_rcu_data(crdp);
                }
        }
        uatomic_inc(&nthreadsrunning);
        while (goflag == GOFLAG_INIT)
                poll(NULL, 0, 1);
        while (goflag == GOFLAG_RUN) {
                synchronize_rcu();
                n_updates_local++;
        }
        __get_thread_var(n_updates_pt) += n_updates_local;

        return (NULL);
}
void perftestinit(void)
{
        init_per_thread(n_reads_pt, 0LL);
        init_per_thread(n_updates_pt, 0LL);
        uatomic_set(&nthreadsrunning, 0);
}
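/*
 * Wait until all test threads have checked in, let them run for
 * "duration" seconds, then stop them, sum the per-thread counters, and
 * print both the raw totals and the per-operation cost in nanoseconds
 * (test time in nanoseconds times the number of threads of the given
 * type, divided by the operation count).
 */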
void perftestrun(int nthreads, int nreaders, int nupdaters)
{
        int t;
        int duration = 1;

        cmm_smp_mb();
        while (uatomic_read(&nthreadsrunning) < nthreads)
                poll(NULL, 0, 1);
        goflag = GOFLAG_RUN;
        cmm_smp_mb();
        sleep(duration);
        cmm_smp_mb();
        goflag = GOFLAG_STOP;
        cmm_smp_mb();
        wait_all_threads();
        for_each_thread(t) {
                n_reads += per_thread(n_reads_pt, t);
                n_updates += per_thread(n_updates_pt, t);
        }
        printf("n_reads: %lld n_updates: %ld nreaders: %d nupdaters: %d duration: %d\n",
               n_reads, n_updates, nreaders, nupdaters, duration);
        printf("ns/read: %g ns/update: %g\n",
               ((duration * 1000*1000*1000.*(double)nreaders) /
                (double)n_reads),
               ((duration * 1000*1000*1000.*(double)nupdaters) /
                (double)n_updates));
        if (get_cpu_call_rcu_data(0)) {
                fprintf(stderr,
                        "Deallocating per-CPU call_rcu threads.\n");
                free_all_cpu_call_rcu_data();
        }
        exit(0);
}
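/*
 * Combined test: readers are placed on CPUs 0, cpustride, 2*cpustride,
 * and so on, and the single updater goes on the next stride slot above
 * the highest-numbered reader.
 */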
void perftest(int nreaders, int cpustride)
{
        int i;
        long arg;

        perftestinit();
        for (i = 0; i < nreaders; i++) {
                arg = (long)(i * cpustride);
                create_thread(rcu_read_perf_test, (void *)arg);
        }
        arg = (long)(i * cpustride);
        create_thread(rcu_update_perf_test, (void *)arg);
        perftestrun(i + 1, nreaders, 1);
}
void rperftest(int nreaders, int cpustride)
{
        int i;
        long arg;

        perftestinit();
        init_per_thread(n_reads_pt, 0LL);
        for (i = 0; i < nreaders; i++) {
                arg = (long)(i * cpustride);
                create_thread(rcu_read_perf_test, (void *)arg);
        }
        perftestrun(i, nreaders, 0);
}
void uperftest(int nupdaters, int cpustride)
{
        int i;
        long arg;

        perftestinit();
        init_per_thread(n_reads_pt, 0LL);
        for (i = 0; i < nupdaters; i++) {
                arg = (long)(i * cpustride);
                create_thread(rcu_update_perf_test, (void *)arg);
        }
        perftestrun(i, 0, nupdaters);
}
/*
 * Stress test.
 */

#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
        int pipe_count;
        int mbtest;
};

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
struct rcu_stress *rcu_stress_current;
int rcu_stress_idx = 0;

int n_mberror = 0;
DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
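/*
 * Readers sample rcu_stress_current and histogram the pipe_count value
 * they observe.  The updater rotates rcu_stress_current around
 * rcu_stress_array and, on each update (each of which waits for a grace
 * period), increments pipe_count in every element that is no longer
 * current, so a pipe_count of N means the reader saw data that had been
 * obsolete for N grace periods.  With correct RCU a reader can see only
 * the current element or its immediate predecessor, so buckets 2 and up
 * must stay zero, and a reader must never observe mbtest == 0.
 */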
int garbage = 0;

void *rcu_read_stress_test(void *arg)
{
        int i;
        int itercnt = 0;
        struct rcu_stress *p;
        int pc;

        rcu_register_thread();
        while (goflag == GOFLAG_INIT)
                poll(NULL, 0, 1);
        mark_rcu_quiescent_state();
        while (goflag == GOFLAG_RUN) {
                rcu_read_lock();
                p = rcu_dereference(rcu_stress_current);
                if (p->mbtest == 0)
                        n_mberror++;
                rcu_read_lock_nest();
                for (i = 0; i < 100; i++)
                        garbage++;
                rcu_read_unlock_nest();
                pc = p->pipe_count;
                rcu_read_unlock();
                if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
                        pc = RCU_STRESS_PIPE_LEN;
                __get_thread_var(rcu_stress_count)[pc]++;
                __get_thread_var(n_reads_pt)++;
                mark_rcu_quiescent_state();
                if ((++itercnt % 0x1000) == 0) {
                        put_thread_offline();
                        put_thread_online_delay();
                        put_thread_online();
                }
        }
        put_thread_offline();
        rcu_unregister_thread();

        return (NULL);
}
static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;

void rcu_update_stress_test_rcu(struct rcu_head *head)
{
        if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
                perror("pthread_mutex_lock");
                exit(-1);
        }
        if (pthread_cond_signal(&call_rcu_test_cond) != 0) {
                perror("pthread_cond_signal");
                exit(-1);
        }
        if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
                perror("pthread_mutex_unlock");
                exit(-1);
        }
}
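/*
 * The call_rcu() callback above simply signals call_rcu_test_cond, so
 * the updater can post a callback and then block on the condition
 * variable, getting the effect of a synchronous grace-period wait
 * through the call_rcu() path.
 */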
void *rcu_update_stress_test(void *arg)
{
        int i;
        struct rcu_stress *p;
        struct rcu_head rh;

        while (goflag == GOFLAG_INIT)
                poll(NULL, 0, 1);
        while (goflag == GOFLAG_RUN) {
                i = rcu_stress_idx + 1;
                if (i >= RCU_STRESS_PIPE_LEN)
                        i = 0;
                p = &rcu_stress_array[i];
                p->mbtest = 0;
                cmm_smp_mb();
                p->pipe_count = 0;
                p->mbtest = 1;
                rcu_assign_pointer(rcu_stress_current, p);
                rcu_stress_idx = i;
                for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
                        if (i != rcu_stress_idx)
                                rcu_stress_array[i].pipe_count++;
                /* Alternate between synchronous and call_rcu() grace periods. */
                if (n_updates & 0x1)
                        synchronize_rcu();
                else {
                        if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
                                perror("pthread_mutex_lock");
                                exit(-1);
                        }
                        call_rcu(&rh, rcu_update_stress_test_rcu);
                        if (pthread_cond_wait(&call_rcu_test_cond,
                                              &call_rcu_test_mutex) != 0) {
                                perror("pthread_cond_wait");
                                exit(-1);
                        }
                        if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
                                perror("pthread_mutex_unlock");
                                exit(-1);
                        }
                }
                n_updates++;
        }

        return (NULL);
}
void *rcu_fake_update_stress_test(void *arg)
{
        if ((random() & 0xf00) == 0) {
                struct call_rcu_data *crdp;

                crdp = create_call_rcu_data(0);
                if (crdp != NULL) {
                        fprintf(stderr,
                                "Using per-thread call_rcu() worker.\n");
                        set_thread_call_rcu_data(crdp);
                }
        }
        while (goflag == GOFLAG_INIT)
                poll(NULL, 0, 1);
        while (goflag == GOFLAG_RUN) {
                synchronize_rcu();
                poll(NULL, 0, 1);
        }

        return (NULL);
}
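/*
 * Stress-test driver: starts the requested readers, one real updater,
 * and five "fake" updaters that only execute grace periods, runs the
 * mix for a fixed interval, then prints the totals and the staleness
 * histogram described in the header comment.
 */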
void stresstest(int nreaders)
{
        int i;
        int t;
        long long *p;
        long long sum;

        init_per_thread(n_reads_pt, 0LL);
        for_each_thread(t) {
                p = &per_thread(rcu_stress_count, t)[0];
                for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
                        p[i] = 0LL;
        }
        rcu_stress_current = &rcu_stress_array[0];
        rcu_stress_current->pipe_count = 0;
        rcu_stress_current->mbtest = 1;
        for (i = 0; i < nreaders; i++)
                create_thread(rcu_read_stress_test, NULL);
        create_thread(rcu_update_stress_test, NULL);
        for (i = 0; i < 5; i++)
                create_thread(rcu_fake_update_stress_test, NULL);
        cmm_smp_mb();
        goflag = GOFLAG_RUN;
        cmm_smp_mb();
        sleep(10);
        cmm_smp_mb();
        goflag = GOFLAG_STOP;
        cmm_smp_mb();
        wait_all_threads();
        for_each_thread(t)
                n_reads += per_thread(n_reads_pt, t);
        printf("n_reads: %lld n_updates: %ld n_mberror: %d\n",
               n_reads, n_updates, n_mberror);
        printf("rcu_stress_count:");
        for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
                sum = 0LL;
                for_each_thread(t) {
                        sum += per_thread(rcu_stress_count, t)[i];
                }
                printf(" %lld", sum);
        }
        printf("\n");
        if (get_cpu_call_rcu_data(0)) {
                fprintf(stderr,
                        "Deallocating per-CPU call_rcu threads.\n");
                free_all_cpu_call_rcu_data();
        }
        exit(0);
}
void usage(int argc, char *argv[])
{
        fprintf(stderr, "Usage: %s [nreaders [ perf | stress ] ]\n", argv[0]);
        exit(-1);
}
int main(int argc, char *argv[])
{
        int nreaders = 1;
        int cpustride = 1;

        smp_init();
        srandom(time(NULL));
        if (random() & 0x100) {
                fprintf(stderr, "Allocating per-CPU call_rcu threads.\n");
                if (create_all_cpu_call_rcu_data(0))
                        perror("create_all_cpu_call_rcu_data");
        }

#ifdef DEBUG_YIELD     /* assumed guard: yield injection exists only in debug builds */
        yield_active |= YIELD_READ;
        yield_active |= YIELD_WRITE;
#endif

        if (argc > 1) {
                nreaders = strtoul(argv[1], NULL, 0);
                if (argc == 2)
                        perftest(nreaders, cpustride);
                if (argc > 3)
                        cpustride = strtoul(argv[3], NULL, 0);
                if (strcmp(argv[2], "perf") == 0)
                        perftest(nreaders, cpustride);
                else if (strcmp(argv[2], "rperf") == 0)
                        rperftest(nreaders, cpustride);
                else if (strcmp(argv[2], "uperf") == 0)
                        uperftest(nreaders, cpustride);
                else if (strcmp(argv[2], "stress") == 0)
                        stresstest(nreaders);
                usage(argc, argv);
        }
        perftest(nreaders, cpustride);

        return 0;
}