X-Git-Url: https://git.lttng.org/?p=urcu.git;a=blobdiff_plain;f=tests%2Ftest_urcu_assign.c;h=42d70c26ce8b15ef0ae074e83ca3c9632bc0dc00;hp=b1298a447762397c14a452f0b8bd4f2b2e4d6ec8;hb=4d0d66bb795d1ed938e11a97a4e5f71326e20c71;hpb=6982d6d71aeed16d2d929bd0ed221e8f444b706e

diff --git a/tests/test_urcu_assign.c b/tests/test_urcu_assign.c
index b1298a4..42d70c2 100644
--- a/tests/test_urcu_assign.c
+++ b/tests/test_urcu_assign.c
@@ -31,12 +31,15 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 
+#ifdef __linux__
+#include
+#endif
+
 /* hardcoded number of CPUs */
 #define NR_CPUS 16384
 
@@ -83,7 +86,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
 	while(l-- != 0)
-		cpu_relax();
+		caa_cpu_relax();
 }
 
 static int verbose_mode;
@@ -183,7 +186,8 @@ void rcu_copy_mutex_unlock(void)
 /*
  * malloc/free are reusing memory areas too quickly, which does not let us
  * test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which insures we never run over our tail.
  */
 #define ARRAY_SIZE (1048576 * nr_writers)
 #define ARRAY_POISON 0xDEADBEEF
@@ -195,7 +199,6 @@ static struct test_array *test_array_alloc(void)
 	struct test_array *ret;
 	int index;
 
-	rcu_copy_mutex_lock();
 	index = array_index % ARRAY_SIZE;
 	assert(test_array[index].a == ARRAY_POISON ||
 	       test_array[index].a == 0);
@@ -203,7 +206,6 @@ static struct test_array *test_array_alloc(void)
 	array_index++;
 	if (array_index == ARRAY_SIZE)
 		array_index = 0;
-	rcu_copy_mutex_unlock();
 	return ret;
 }
 
@@ -211,9 +213,7 @@ static void test_array_free(struct test_array *ptr)
 {
 	if (!ptr)
 		return;
-	rcu_copy_mutex_lock();
 	ptr->a = ARRAY_POISON;
-	rcu_copy_mutex_unlock();
 }
 
 void *thr_reader(void *_count)
@@ -231,7 +231,7 @@
 	while (!test_go)
 	{
 	}
-	smp_mb();
+	cmm_smp_mb();
 
 	for (;;) {
 		rcu_read_lock();
@@ -239,11 +239,11 @@
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		rcu_read_unlock();
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}
 
@@ -269,25 +269,25 @@ void *thr_writer(void *_count)
 	while (!test_go)
 	{
 	}
-	smp_mb();
+	cmm_smp_mb();
 
 	for (;;) {
+		rcu_copy_mutex_lock();
 		new = test_array_alloc();
 		new->a = 8;
-		rcu_copy_mutex_lock();
 		old = test_rcu_pointer;
 		rcu_assign_pointer(test_rcu_pointer, new);
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
-		rcu_copy_mutex_unlock();
 		synchronize_rcu();
 		if (old)
 			old->a = 0;
 		test_array_free(old);
+		rcu_copy_mutex_unlock();
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}
 
@@ -399,7 +399,7 @@ int main(int argc, char **argv)
 	printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
 			"main", pthread_self(), (unsigned long)gettid());
 
-	test_array = malloc(sizeof(*test_array) * ARRAY_SIZE);
+	test_array = calloc(1, sizeof(*test_array) * ARRAY_SIZE);
 	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
 	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
 	count_reader = malloc(sizeof(*count_reader) * nr_readers);
@@ -420,7 +420,7 @@ int main(int argc, char **argv)
 		exit(1);
 	}
 
-	smp_mb();
+	cmm_smp_mb();
 
 	test_go = 1;
 
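
The writer-side hunk above is the substance of the change: rcu_copy_mutex_lock()/rcu_copy_mutex_unlock() now bracket the whole alloc / publish / synchronize_rcu() / free sequence instead of only the pointer update, matching the updated comment that the mutex must cover both alloc and free of the circular-array allocator. The following is a minimal, self-contained sketch of that ordering, not part of the patch: it substitutes plain calloc()/free() for test_array_alloc()/test_array_free(), keeps only one writer iteration, and assumes liburcu's default flavour (#include <urcu.h>, built with -lurcu -lpthread).

/*
 * Sketch of the lock ordering used by the patched thr_writer() loop.
 * calloc()/free() stand in for the test's circular-array allocator;
 * error handling is elided for brevity.
 */
#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>

struct test_array { int a; };

static struct test_array *test_rcu_pointer;
static pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;

static void writer_iteration(void)
{
	struct test_array *new, *old;

	pthread_mutex_lock(&rcu_copy_mutex);		/* serialize writers */
	new = calloc(1, sizeof(*new));			/* alloc under the mutex */
	new->a = 8;
	old = test_rcu_pointer;
	rcu_assign_pointer(test_rcu_pointer, new);	/* publish new copy */
	synchronize_rcu();				/* wait out readers of 'old' */
	if (old)
		old->a = 0;
	free(old);					/* reclaim under the same mutex */
	pthread_mutex_unlock(&rcu_copy_mutex);
}

int main(void)
{
	writer_iteration();	/* the real test loops this in thr_writer() */
	free(test_rcu_pointer);
	return 0;
}

Holding the mutex across synchronize_rcu() serializes writers only; readers stay lock-free, taking rcu_read_lock()/rcu_read_unlock() and never touching rcu_copy_mutex, which is what the test is exercising.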