X-Git-Url: https://git.lttng.org/?p=urcu.git;a=blobdiff_plain;f=tests%2Frcutorture.h;h=f8548d811ed8980061716cc797a63524d97524a4;hp=00b0b316b736c181aa3270d45a78b5403637db87;hb=a5bae03d59c22e57263b8610b9fed99738303cf0;hpb=833dbdb633ba44bfb71b4b965a83096d8e4e827b

diff --git a/tests/rcutorture.h b/tests/rcutorture.h
index 00b0b31..f8548d8 100644
--- a/tests/rcutorture.h
+++ b/tests/rcutorture.h
@@ -56,7 +56,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  *
  * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
  */
@@ -65,19 +65,22 @@
  * Test variables.
  */
 
+#include <stdlib.h>
+
 DEFINE_PER_THREAD(long long, n_reads_pt);
 DEFINE_PER_THREAD(long long, n_updates_pt);
 
 long long n_reads = 0LL;
 long n_updates = 0L;
-atomic_t nthreadsrunning;
+int nthreadsrunning;
 char argsbuf[64];
 
 #define GOFLAG_INIT 0
 #define GOFLAG_RUN 1
 #define GOFLAG_STOP 2
 
-int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT;
+volatile int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
+	= GOFLAG_INIT;
 
 #define RCU_READ_RUN 1000
 
@@ -92,6 +95,12 @@ int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT;
 #define rcu_read_unlock_nest()
 #endif /* #else #ifdef RCU_READ_NESTABLE */
 
+#ifdef TORTURE_QSBR
+#define mark_rcu_quiescent_state	rcu_quiescent_state
+#define put_thread_offline		rcu_thread_offline
+#define put_thread_online		rcu_thread_online
+#endif
+
 #ifndef mark_rcu_quiescent_state
 #define mark_rcu_quiescent_state() do ; while (0)
 #endif /* #ifdef mark_rcu_quiescent_state */
@@ -110,16 +119,18 @@ int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT;
 
 void *rcu_read_perf_test(void *arg)
 {
+	struct call_rcu_data *crdp;
 	int i;
 	int me = (long)arg;
 	long long n_reads_local = 0;
 
 	rcu_register_thread();
 	run_on(me);
-	atomic_inc(&nthreadsrunning);
+	uatomic_inc(&nthreadsrunning);
+	put_thread_offline();
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
-	mark_rcu_quiescent_state();
+	put_thread_online();
 	while (goflag == GOFLAG_RUN) {
 		for (i = 0; i < RCU_READ_RUN; i++) {
 			rcu_read_lock();
@@ -132,6 +143,9 @@ void *rcu_read_perf_test(void *arg)
 	}
 	__get_thread_var(n_reads_pt) += n_reads_local;
 	put_thread_offline();
+	crdp = get_thread_call_rcu_data();
+	set_thread_call_rcu_data(NULL);
+	call_rcu_data_free(crdp);
 	rcu_unregister_thread();
 
 	return (NULL);
@@ -141,7 +155,17 @@ void *rcu_update_perf_test(void *arg)
 {
 	long long n_updates_local = 0;
 
-	atomic_inc(&nthreadsrunning);
+	if ((random() & 0xf00) == 0) {
+		struct call_rcu_data *crdp;
+
+		crdp = create_call_rcu_data(0, -1);
+		if (crdp != NULL) {
+			fprintf(stderr,
+				"Using per-thread call_rcu() worker.\n");
+			set_thread_call_rcu_data(crdp);
+		}
+	}
+	uatomic_inc(&nthreadsrunning);
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
 	while (goflag == GOFLAG_RUN) {
@@ -156,7 +180,7 @@ void perftestinit(void)
 {
 	init_per_thread(n_reads_pt, 0LL);
 	init_per_thread(n_updates_pt, 0LL);
-	atomic_set(&nthreadsrunning, 0);
+	uatomic_set(&nthreadsrunning, 0);
 }
 
 void perftestrun(int nthreads, int nreaders, int nupdaters)
@@ -164,15 +188,15 @@
 	int t;
 	int duration = 1;
 
-	smp_mb();
-	while (atomic_read(&nthreadsrunning) < nthreads)
+	cmm_smp_mb();
+	while (uatomic_read(&nthreadsrunning) < nthreads)
 		poll(NULL, 0, 1);
 	goflag = GOFLAG_RUN;
-	smp_mb();
+	cmm_smp_mb();
 	sleep(duration);
-	smp_mb();
+	cmm_smp_mb();
 	goflag = GOFLAG_STOP;
-	smp_mb();
+	cmm_smp_mb();
 	wait_all_threads();
 	for_each_thread(t) {
 		n_reads += per_thread(n_reads_pt, t);
@@ -185,6 +209,10 @@
 		(double)n_reads),
 	       ((duration * 1000*1000*1000.*(double)nupdaters) /
 		(double)n_updates));
+	if (get_cpu_call_rcu_data(0)) {
+		fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
+		free_all_cpu_call_rcu_data();
+	}
 	exit(0);
 }
 
@@ -259,9 +287,10 @@ void *rcu_read_stress_test(void *arg)
 	int pc;
 
 	rcu_register_thread();
+	put_thread_offline();
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
-	mark_rcu_quiescent_state();
+	put_thread_online();
 	while (goflag == GOFLAG_RUN) {
 		rcu_read_lock();
 		p = rcu_dereference(rcu_stress_current);
@@ -290,10 +319,30 @@
 	return (NULL);
 }
 
+static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;
+
+void rcu_update_stress_test_rcu(struct rcu_head *head)
+{
+	if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
+		perror("pthread_mutex_lock");
+		exit(-1);
+	}
+	if (pthread_cond_signal(&call_rcu_test_cond) != 0) {
+		perror("pthread_cond_signal");
+		exit(-1);
+	}
+	if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
+		perror("pthread_mutex_unlock");
+		exit(-1);
+	}
+}
+
 void *rcu_update_stress_test(void *arg)
 {
 	int i;
 	struct rcu_stress *p;
+	struct rcu_head rh;
 
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
@@ -303,7 +352,7 @@
 			i = 0;
 		p = &rcu_stress_array[i];
 		p->mbtest = 0;
-		smp_mb();
+		cmm_smp_mb();
 		p->pipe_count = 0;
 		p->mbtest = 1;
 		rcu_assign_pointer(rcu_stress_current, p);
@@ -311,7 +360,24 @@
 		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
 			if (i != rcu_stress_idx)
 				rcu_stress_array[i].pipe_count++;
-		synchronize_rcu();
+		if (n_updates & 0x1)
+			synchronize_rcu();
+		else {
+			if (pthread_mutex_lock(&call_rcu_test_mutex) != 0) {
+				perror("pthread_mutex_lock");
+				exit(-1);
+			}
+			call_rcu(&rh, rcu_update_stress_test_rcu);
+			if (pthread_cond_wait(&call_rcu_test_cond,
+					      &call_rcu_test_mutex) != 0) {
+				perror("pthread_cond_wait");
+				exit(-1);
+			}
+			if (pthread_mutex_unlock(&call_rcu_test_mutex) != 0) {
+				perror("pthread_mutex_unlock");
+				exit(-1);
+			}
+		}
 		n_updates++;
 	}
 	return NULL;
@@ -319,6 +385,16 @@
 
 void *rcu_fake_update_stress_test(void *arg)
 {
+	if ((random() & 0xf00) == 0) {
+		struct call_rcu_data *crdp;
+
+		crdp = create_call_rcu_data(0, -1);
+		if (crdp != NULL) {
+			fprintf(stderr,
+				"Using per-thread call_rcu() worker.\n");
+			set_thread_call_rcu_data(crdp);
+		}
+	}
 	while (goflag == GOFLAG_INIT)
 		poll(NULL, 0, 1);
 	while (goflag == GOFLAG_RUN) {
@@ -349,13 +425,13 @@
 	create_thread(rcu_update_stress_test, NULL);
 	for (i = 0; i < 5; i++)
 		create_thread(rcu_fake_update_stress_test, NULL);
-	smp_mb();
+	cmm_smp_mb();
 	goflag = GOFLAG_RUN;
-	smp_mb();
+	cmm_smp_mb();
 	sleep(10);
-	smp_mb();
+	cmm_smp_mb();
 	goflag = GOFLAG_STOP;
-	smp_mb();
+	cmm_smp_mb();
 	wait_all_threads();
 	for_each_thread(t)
 		n_reads += per_thread(n_reads_pt, t);
@@ -370,6 +446,10 @@
 		printf(" %lld", sum);
 	}
 	printf("\n");
+	if (get_cpu_call_rcu_data(0)) {
+		fprintf(stderr, "Deallocating per-CPU call_rcu threads.\n");
+		free_all_cpu_call_rcu_data();
+	}
 	exit(0);
 }
 
@@ -390,6 +470,12 @@ int main(int argc, char *argv[])
 	smp_init();
 	//rcu_init();
+	srandom(time(NULL));
+	if (random() & 0x100) {
+		fprintf(stderr, "Allocating per-CPU call_rcu threads.\n");
+		if (create_all_cpu_call_rcu_data(0))
+			perror("create_all_cpu_call_rcu_data");
+	}
 
 #ifdef DEBUG_YIELD
 	yield_active |= YIELD_READ;
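
For readers tracing the control flow: the rcu_update_stress_test() hunks above make every other grace-period wait go through call_rcu() instead of synchronize_rcu(). The updater queues a callback and blocks on a condition variable that the callback signals once a grace period has elapsed. Below is a minimal standalone sketch of that handshake; it is not part of the patch, the names gp_done() and wait_for_grace_period() are illustrative, and it assumes the classic <urcu.h> header (default memb flavor, built with: gcc sketch.c -lurcu).

#include <pthread.h>
#include <stdio.h>
#include <urcu.h>

static pthread_mutex_t gp_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gp_cond = PTHREAD_COND_INITIALIZER;

/* Runs in the call_rcu worker thread after a grace period has elapsed. */
static void gp_done(struct rcu_head *head)
{
	pthread_mutex_lock(&gp_mutex);
	pthread_cond_signal(&gp_cond);
	pthread_mutex_unlock(&gp_mutex);
}

/* Emulate synchronize_rcu() on top of call_rcu(), as the patch does. */
static void wait_for_grace_period(void)
{
	struct rcu_head rh;	/* stays valid until gp_done() has run */

	pthread_mutex_lock(&gp_mutex);
	call_rcu(&rh, gp_done);
	/*
	 * Holding gp_mutex across call_rcu() means gp_done() cannot
	 * signal before we start waiting, so the wakeup is never lost.
	 * Like the patch, this does not recheck a predicate in a loop,
	 * so a spurious wakeup would end the wait early; production
	 * code would loop on an explicit "done" flag.
	 */
	pthread_cond_wait(&gp_cond, &gp_mutex);
	pthread_mutex_unlock(&gp_mutex);
}

int main(void)
{
	rcu_register_thread();
	wait_for_grace_period();
	printf("grace period complete\n");
	rcu_unregister_thread();
	return 0;
}

The design point the patch relies on is that the caller holds the mutex from before call_rcu() until pthread_cond_wait() releases it, so the callback cannot acquire the mutex and signal until the caller is actually waiting.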
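The (random() & 0xf00) branches above additionally route some threads' callbacks through a private call_rcu worker, which the reader teardown path then frees. A sketch of that worker lifecycle follows; again not part of the patch, exercise_private_worker() is a hypothetical name, while the call_rcu_data entry points are the liburcu ones the patch itself calls.

#include <stdio.h>
#include <urcu.h>

/* Route this thread's call_rcu() callbacks through a private worker,
 * then tear the worker down, mirroring the torture threads above. */
static void exercise_private_worker(void)
{
	struct call_rcu_data *crdp;

	/* flags = 0, cpu_affinity = -1: worker bound to no particular CPU. */
	crdp = create_call_rcu_data(0, -1);
	if (crdp == NULL) {
		fprintf(stderr, "no per-thread call_rcu worker\n");
		return;
	}
	set_thread_call_rcu_data(crdp);

	/* ... call_rcu() invocations from this thread now use crdp ... */

	/* Detach first, then free; callbacks still queued on the worker
	 * are handed off rather than lost when it is reaped. */
	crdp = get_thread_call_rcu_data();
	set_thread_call_rcu_data(NULL);
	call_rcu_data_free(crdp);
}

int main(void)
{
	rcu_register_thread();
	exercise_private_worker();
	rcu_unregister_thread();
	return 0;
}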