X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=tests%2Fregression%2Frcutorture.h;h=f495cbd12259709721212a17b84a7e7744be905a;hb=c92c99041415698b57ca123e58a19b05189ae398;hp=3444f5bdbab5e815234cc6976e4fd91f7e3f66c6;hpb=ad46005890368f9c306f0c510b3d4b08c47b66f8;p=userspace-rcu.git

diff --git a/tests/regression/rcutorture.h b/tests/regression/rcutorture.h
index 3444f5b..f495cbd 100644
--- a/tests/regression/rcutorture.h
+++ b/tests/regression/rcutorture.h
@@ -1,3 +1,7 @@
+// SPDX-FileCopyrightText: 2008 Paul E. McKenney, IBM Corporation.
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
 /*
  * rcutorture.h: simple user-level performance/stress test of RCU.
  *
@@ -43,22 +47,6 @@
  * line lists the number of readers observing progressively more stale
  * data. A correct RCU implementation will have all but the first two
  * numbers non-zero.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
  */
 
 /*
@@ -68,11 +56,27 @@
 #include <stdlib.h>
 #include "tap.h"
 
+#include "urcu-wait.h"
+
 #define NR_TESTS	1
 
 DEFINE_PER_THREAD(long long, n_reads_pt);
 DEFINE_PER_THREAD(long long, n_updates_pt);
 
+enum callrcu_type {
+	CALLRCU_GLOBAL,
+	CALLRCU_PERCPU,
+	CALLRCU_PERTHREAD,
+};
+
+enum writer_state {
+	WRITER_STATE_SYNC_RCU,
+	WRITER_STATE_CALL_RCU,
+	WRITER_STATE_POLL_RCU,
+};
+
+static enum callrcu_type callrcu_type = CALLRCU_GLOBAL;
+
 long long n_reads = 0LL;
 long n_updates = 0L;
 int nthreadsrunning;
@@ -105,13 +109,13 @@ volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
 #endif
 
 #ifndef mark_rcu_quiescent_state
-#define mark_rcu_quiescent_state() do ; while (0)
+#define mark_rcu_quiescent_state() do {} while (0)
 #endif /* #ifdef mark_rcu_quiescent_state */
 
 #ifndef put_thread_offline
-#define put_thread_offline() do ; while (0)
-#define put_thread_online() do ; while (0)
-#define put_thread_online_delay() do ; while (0)
+#define put_thread_offline() do {} while (0)
+#define put_thread_online() do {} while (0)
+#define put_thread_online_delay() do {} while (0)
 #else /* #ifndef put_thread_offline */
 #define put_thread_online_delay() synchronize_rcu()
 #endif /* #else #ifndef put_thread_offline */
@@ -120,9 +124,9 @@ volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
  * Performance test.
  */
 
+static
 void *rcu_read_perf_test(void *arg)
 {
-	struct call_rcu_data *crdp;
 	int i;
 	int me = (long)arg;
 	long long n_reads_local = 0;
@@ -146,24 +150,22 @@ void *rcu_read_perf_test(void *arg)
 	}
 	__get_thread_var(n_reads_pt) += n_reads_local;
 	put_thread_offline();
-	crdp = get_thread_call_rcu_data();
-	set_thread_call_rcu_data(NULL);
-	call_rcu_data_free(crdp);
 	rcu_unregister_thread();
 
 	return (NULL);
 }
 
-void *rcu_update_perf_test(void *arg)
+static
+void *rcu_update_perf_test(void *arg __attribute__((unused)))
 {
 	long long n_updates_local = 0;
 
-	if ((random() & 0xf00) == 0) {
+	if (callrcu_type == CALLRCU_PERTHREAD) {
 		struct call_rcu_data *crdp;
 
 		crdp = create_call_rcu_data(0, -1);
 		if (crdp != NULL) {
-			diag("Using per-thread call_rcu() worker.");
+			diag("Successfully using per-thread call_rcu() worker.");
 			set_thread_call_rcu_data(crdp);
 		}
 	}
@@ -175,9 +177,17 @@ void *rcu_update_perf_test(void *arg)
 		n_updates_local++;
 	}
 	__get_thread_var(n_updates_pt) += n_updates_local;
+	if (callrcu_type == CALLRCU_PERTHREAD) {
+		struct call_rcu_data *crdp;
+
+		crdp = get_thread_call_rcu_data();
+		set_thread_call_rcu_data(NULL);
+		call_rcu_data_free(crdp);
+	}
 	return NULL;
 }
 
+static
 void perftestinit(void)
 {
 	init_per_thread(n_reads_pt, 0LL);
@@ -185,6 +195,7 @@ void perftestinit(void)
 	uatomic_set(&nthreadsrunning, 0);
 }
 
+static
 int perftestrun(int nthreads, int nreaders, int nupdaters)
 {
 	int t;
@@ -218,6 +229,7 @@ int perftestrun(int nthreads, int nreaders, int nupdaters)
 	return 0;
 }
 
+static
 int perftest(int nreaders, int cpustride)
 {
 	int i;
@@ -233,6 +245,7 @@ int perftest(int nreaders, int cpustride)
 	return perftestrun(i + 1, nreaders, 1);
 }
 
+static
 int rperftest(int nreaders, int cpustride)
 {
 	int i;
@@ -247,6 +260,7 @@ int rperftest(int nreaders, int cpustride)
 	return perftestrun(i, nreaders, 0);
 }
 
+static
 int uperftest(int nupdaters, int cpustride)
 {
 	int i;
@@ -272,7 +286,7 @@ struct rcu_stress {
 	int mbtest;
 };
 
-struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
+struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0, 0 } };
 struct rcu_stress *rcu_stress_current;
 int rcu_stress_idx = 0;
 
@@ -281,7 +295,8 @@ DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
 
 int garbage = 0;
 
-void *rcu_read_stress_test(void *arg)
+static
+void *rcu_read_stress_test(void *arg __attribute__((unused)))
 {
 	int i;
 	int itercnt = 0;
@@ -321,44 +336,49 @@ void *rcu_read_stress_test(void *arg)
 	return (NULL);
 }
 
-static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;
+static DEFINE_URCU_WAIT_QUEUE(call_rcu_waiters);
 
-void rcu_update_stress_test_rcu(struct rcu_head *head)
+static
+void rcu_update_stress_test_rcu(struct rcu_head *head __attribute__((unused)))
 {
-	int ret;
+	struct urcu_waiters waiters;
 
-	ret = pthread_mutex_lock(&call_rcu_test_mutex);
-	if (ret) {
-		errno = ret;
-		diag("pthread_mutex_lock: %s",
-			strerror(errno));
-		abort();
-	}
-	ret = pthread_cond_signal(&call_rcu_test_cond);
-	if (ret) {
-		errno = ret;
-		diag("pthread_cond_signal: %s",
-			strerror(errno));
-		abort();
-	}
-	ret = pthread_mutex_unlock(&call_rcu_test_mutex);
-	if (ret) {
-		errno = ret;
-		diag("pthread_mutex_unlock: %s",
-			strerror(errno));
-		abort();
+	urcu_move_waiters(&waiters, &call_rcu_waiters);
+	urcu_wake_all_waiters(&waiters);
+}
+
+static
+void advance_writer_state(enum writer_state *state)
+{
+	switch (*state) {
+	case WRITER_STATE_SYNC_RCU:
+		*state = WRITER_STATE_CALL_RCU;
+		break;
+	case WRITER_STATE_CALL_RCU:
+		*state = WRITER_STATE_POLL_RCU;
+		break;
+	case WRITER_STATE_POLL_RCU:
+		*state = WRITER_STATE_SYNC_RCU;
+		break;
 	}
 }
 
-void *rcu_update_stress_test(void *arg)
+static
+void *rcu_update_stress_test(void *arg __attribute__((unused)))
 {
 	int i;
 	struct rcu_stress *p;
 	struct rcu_head rh;
+	enum writer_state writer_state = WRITER_STATE_SYNC_RCU;
+
+	rcu_register_thread();
 
+	/* Offline for poll. */
+	put_thread_offline();
 	while (goflag == GOFLAG_INIT)
 		(void) poll(NULL, 0, 1);
+	put_thread_online();
+
 	while (goflag == GOFLAG_RUN) {
 		i = rcu_stress_idx + 1;
 		if (i >= RCU_STRESS_PIPE_LEN)
@@ -373,48 +393,56 @@ void *rcu_update_stress_test(void *arg)
 		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
 			if (i != rcu_stress_idx)
 				rcu_stress_array[i].pipe_count++;
-		if (n_updates & 0x1)
+		switch (writer_state) {
+		case WRITER_STATE_SYNC_RCU:
 			synchronize_rcu();
-		else {
-			int ret;
-
-			ret = pthread_mutex_lock(&call_rcu_test_mutex);
-			if (ret) {
-				errno = ret;
-				diag("pthread_mutex_lock: %s",
-					strerror(errno));
-				abort();
-			}
+			break;
+		case WRITER_STATE_CALL_RCU:
+		{
+			DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
+
+			urcu_wait_add(&call_rcu_waiters, &wait);
+
 			call_rcu(&rh, rcu_update_stress_test_rcu);
-			ret = pthread_cond_wait(&call_rcu_test_cond,
-					&call_rcu_test_mutex);
-			if (ret) {
-				errno = ret;
-				diag("pthread_cond_signal: %s",
-					strerror(errno));
-				abort();
-			}
-			ret = pthread_mutex_unlock(&call_rcu_test_mutex);
-			if (ret) {
-				errno = ret;
-				diag("pthread_mutex_unlock: %s",
-					strerror(errno));
-				abort();
-			}
+
+			/* Offline for busy-wait. */
+			put_thread_offline();
+			urcu_adaptative_busy_wait(&wait);
+			put_thread_online();
+			break;
+		}
+		case WRITER_STATE_POLL_RCU:
+		{
+			struct urcu_gp_poll_state poll_state;
+
+			poll_state = start_poll_synchronize_rcu();
+
+			/* Offline for poll. */
+			put_thread_offline();
+			while (!poll_state_synchronize_rcu(poll_state))
+				(void) poll(NULL, 0, 1);	/* Wait for 1ms */
+			put_thread_online();
+			break;
+		}
 		}
 		n_updates++;
+		advance_writer_state(&writer_state);
 	}
+
+	rcu_unregister_thread();
+
 	return NULL;
 }
 
-void *rcu_fake_update_stress_test(void *arg)
+static
+void *rcu_fake_update_stress_test(void *arg __attribute__((unused)))
 {
-	if ((random() & 0xf00) == 0) {
+	if (callrcu_type == CALLRCU_PERTHREAD) {
 		struct call_rcu_data *crdp;
 
 		crdp = create_call_rcu_data(0, -1);
 		if (crdp != NULL) {
-			diag("Using per-thread call_rcu() worker.");
+			diag("Successfully using per-thread call_rcu() worker.");
 			set_thread_call_rcu_data(crdp);
 		}
 	}
@@ -424,9 +452,17 @@ void *rcu_fake_update_stress_test(void *arg)
 		synchronize_rcu();
 		(void) poll(NULL, 0, 1);
 	}
+	if (callrcu_type == CALLRCU_PERTHREAD) {
+		struct call_rcu_data *crdp;
+
+		crdp = get_thread_call_rcu_data();
+		set_thread_call_rcu_data(NULL);
+		call_rcu_data_free(crdp);
+	}
 	return NULL;
 }
 
+static
 int stresstest(int nreaders)
 {
 	int i;
@@ -484,9 +520,13 @@ int stresstest(int nreaders)
  * Mainprogram.
  */
 
-void usage(int argc, char *argv[])
+static
+void usage(char *argv[]) __attribute__((__noreturn__));
+
+static
+void usage(char *argv[])
 {
-	diag("Usage: %s [nreaders [ perf | rperf | uperf | stress ] ]\n", argv[0]);
+	diag("Usage: %s nreaders [ perf | rperf | uperf | stress ] [ stride ] [ callrcu_global | callrcu_percpu | callrcu_perthread ]\n", argv[0]);
 	exit(-1);
 }
 
@@ -499,12 +539,36 @@ int main(int argc, char *argv[])
 
 	smp_init();
 	//rcu_init();
-	srandom(time(NULL));
-	if (random() & 0x100) {
-		diag("Allocating per-CPU call_rcu threads.");
+	if (argc > 4) {
+		const char *callrcu_str = argv[4];;
+
+		if (strcmp(callrcu_str, "callrcu_global") == 0) {
+			callrcu_type = CALLRCU_GLOBAL;
+		} else if (strcmp(callrcu_str, "callrcu_percpu") == 0) {
+			callrcu_type = CALLRCU_PERCPU;
+		} else if (strcmp(callrcu_str, "callrcu_perthread") == 0) {
+			callrcu_type = CALLRCU_PERTHREAD;
+		} else {
+			usage(argv);
+			goto end;
+		}
+	}
+
+	switch (callrcu_type) {
+	case CALLRCU_GLOBAL:
+		diag("Using global per-process call_rcu thread.");
+		break;
+	case CALLRCU_PERCPU:
+		diag("Using per-CPU call_rcu threads.");
 		if (create_all_cpu_call_rcu_data(0))
 			diag("create_all_cpu_call_rcu_data: %s",
 				strerror(errno));
+		break;
+	case CALLRCU_PERTHREAD:
+		diag("Using per-thread call_rcu() worker.");
+		break;
+	default:
+		abort();
 	}
 
 #ifdef DEBUG_YIELD
@@ -513,6 +577,11 @@
 #endif
 
 	if (argc > 1) {
+		if (strcmp(argv[1], "-h") == 0
+				|| strcmp(argv[1], "--help") == 0) {
+			usage(argv);
+			goto end;
+		}
 		nreaders = strtoul(argv[1], NULL, 0);
 		if (argc == 2) {
 			ok(!perftest(nreaders, cpustride),
@@ -539,11 +608,9 @@
 				"stresstest readers: %d, stride: %d",
 				nreaders, cpustride);
 		else
-			usage(argc, argv);
+			usage(argv);
 	} else {
-		ok(!perftest(nreaders, cpustride),
-			"perftest readers: %d, stride: %d",
-			nreaders, cpustride);
+		usage(argv);
 	}
 end:
 	return exit_status();
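
The WRITER_STATE_POLL_RCU case above is the only spot where the test exercises the grace-period polling API, and the pattern is easy to miss inside the switch. The sketch below lifts it into a standalone helper for illustration only; it is not part of the patch. It assumes it would be compiled in the same translation unit as rcutorture.h, so the flavor-mapped start_poll_synchronize_rcu(), poll_state_synchronize_rcu(), put_thread_offline() and put_thread_online() symbols are already visible, and wait_gp_poll() is a hypothetical helper name.

/*
 * Illustrative sketch, not part of the patch: wait for a grace period
 * by polling instead of blocking in synchronize_rcu(), mirroring the
 * WRITER_STATE_POLL_RCU case above.
 */
static
void wait_gp_poll(void)		/* hypothetical helper name */
{
	struct urcu_gp_poll_state poll_state;

	/* Snapshot the grace-period state to poll against. */
	poll_state = start_poll_synchronize_rcu();

	/*
	 * Go offline while sleeping: a registered thread that stays
	 * online without passing through a quiescent state would hold
	 * up the very grace period being waited for.
	 */
	put_thread_offline();
	while (!poll_state_synchronize_rcu(poll_state))
		(void) poll(NULL, 0, 1);	/* sleep 1ms between checks */
	put_thread_online();
}

Compared with the WRITER_STATE_CALL_RCU path, which enqueues a callback and blocks on a urcu-wait node until the call_rcu worker runs it, polling leaves the updater free to do other work between checks.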