From d4267b0b37f1a7d35e23f0d7c045a973f6c3a6aa Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 30 Jun 2011 10:45:33 -0400
Subject: [PATCH] urcu tests: hold mutex across use of custom allocator

A thread preempted for a long period of time could, when scheduled
again, race with another thread that had meanwhile been allocating and
freeing entries, wrapping around the circular buffer. This race is only
triggered when the number of threads overcommits the number of
available CPUs.

Fix this by taking the mutex across both alloc and free.

Signed-off-by: Mathieu Desnoyers
---
 tests/test_qsbr.c        |  9 ++++-----
 tests/test_urcu.c        |  9 ++++-----
 tests/test_urcu_assign.c | 11 ++++-------
 tests/test_urcu_bp.c     |  9 ++++-----
 4 files changed, 16 insertions(+), 22 deletions(-)

diff --git a/tests/test_qsbr.c b/tests/test_qsbr.c
index 12346c3..1c5a696 100644
--- a/tests/test_qsbr.c
+++ b/tests/test_qsbr.c
@@ -185,7 +185,8 @@ void rcu_copy_mutex_unlock(void)
 /*
  * malloc/free are reusing memory areas too quickly, which does not let us
  * test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which insures we never run over our tail.
  */
 #define ARRAY_SIZE (1048576 * nr_writers)
 #define ARRAY_POISON 0xDEADBEEF
@@ -197,7 +198,6 @@ static struct test_array *test_array_alloc(void)
 	struct test_array *ret;
 	int index;
 
-	rcu_copy_mutex_lock();
 	index = array_index % ARRAY_SIZE;
 	assert(test_array[index].a == ARRAY_POISON ||
 	       test_array[index].a == 0);
@@ -205,7 +205,6 @@ static struct test_array *test_array_alloc(void)
 	array_index++;
 	if (array_index == ARRAY_SIZE)
 		array_index = 0;
-	rcu_copy_mutex_unlock();
 	return ret;
 }
 
@@ -213,9 +212,7 @@ static void test_array_free(struct test_array *ptr)
 {
 	if (!ptr)
 		return;
-	rcu_copy_mutex_lock();
 	ptr->a = ARRAY_POISON;
-	rcu_copy_mutex_unlock();
 }
 
 void *thr_reader(void *_count)
@@ -281,6 +278,7 @@ void *thr_writer(void *_count)
 	cmm_smp_mb();
 
 	for (;;) {
+		rcu_copy_mutex_lock();
 		new = test_array_alloc();
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
@@ -291,6 +289,7 @@ void *thr_writer(void *_count)
 		if (old)
 			old->a = 0;
 		test_array_free(old);
+		rcu_copy_mutex_unlock();
 		nr_writes++;
 		if (unlikely(!test_duration_write()))
 			break;
diff --git a/tests/test_urcu.c b/tests/test_urcu.c
index f1b2f21..884d77c 100644
--- a/tests/test_urcu.c
+++ b/tests/test_urcu.c
@@ -186,7 +186,8 @@ void rcu_copy_mutex_unlock(void)
 /*
  * malloc/free are reusing memory areas too quickly, which does not let us
  * test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which insures we never run over our tail.
  */
 #define ARRAY_SIZE (1048576 * nr_writers)
 #define ARRAY_POISON 0xDEADBEEF
@@ -198,7 +199,6 @@ static struct test_array *test_array_alloc(void)
 	struct test_array *ret;
 	int index;
 
-	rcu_copy_mutex_lock();
 	index = array_index % ARRAY_SIZE;
 	assert(test_array[index].a == ARRAY_POISON ||
 	       test_array[index].a == 0);
@@ -206,7 +206,6 @@ static struct test_array *test_array_alloc(void)
 	array_index++;
 	if (array_index == ARRAY_SIZE)
 		array_index = 0;
-	rcu_copy_mutex_unlock();
 	return ret;
 }
 
@@ -214,9 +213,7 @@ static void test_array_free(struct test_array *ptr)
 {
 	if (!ptr)
 		return;
-	rcu_copy_mutex_lock();
 	ptr->a = ARRAY_POISON;
-	rcu_copy_mutex_unlock();
 }
 
 void *thr_reader(void *_count)
@@ -279,6 +276,7 @@ void *thr_writer(void *_count)
 	cmm_smp_mb();
 
 	for (;;) {
+		rcu_copy_mutex_lock();
 		new = test_array_alloc();
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
@@ -288,6 +286,7 @@ void *thr_writer(void *_count)
 		if (old)
 			old->a = 0;
 		test_array_free(old);
+		rcu_copy_mutex_unlock();
 		nr_writes++;
 		if (unlikely(!test_duration_write()))
 			break;
diff --git a/tests/test_urcu_assign.c b/tests/test_urcu_assign.c
index 851b850..0d9ef85 100644
--- a/tests/test_urcu_assign.c
+++ b/tests/test_urcu_assign.c
@@ -186,7 +186,8 @@ void rcu_copy_mutex_unlock(void)
 /*
  * malloc/free are reusing memory areas too quickly, which does not let us
  * test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which insures we never run over our tail.
  */
 #define ARRAY_SIZE (1048576 * nr_writers)
 #define ARRAY_POISON 0xDEADBEEF
@@ -198,7 +199,6 @@ static struct test_array *test_array_alloc(void)
 	struct test_array *ret;
 	int index;
 
-	rcu_copy_mutex_lock();
 	index = array_index % ARRAY_SIZE;
 	assert(test_array[index].a == ARRAY_POISON ||
 	       test_array[index].a == 0);
@@ -206,7 +206,6 @@ static struct test_array *test_array_alloc(void)
 	array_index++;
 	if (array_index == ARRAY_SIZE)
 		array_index = 0;
-	rcu_copy_mutex_unlock();
 	return ret;
 }
 
@@ -214,9 +213,7 @@ static void test_array_free(struct test_array *ptr)
 {
 	if (!ptr)
 		return;
-	rcu_copy_mutex_lock();
 	ptr->a = ARRAY_POISON;
-	rcu_copy_mutex_unlock();
 }
 
 void *thr_reader(void *_count)
@@ -275,18 +272,18 @@ void *thr_writer(void *_count)
 	cmm_smp_mb();
 
 	for (;;) {
+		rcu_copy_mutex_lock();
 		new = test_array_alloc();
 		new->a = 8;
-		rcu_copy_mutex_lock();
 		old = test_rcu_pointer;
 		rcu_assign_pointer(test_rcu_pointer, new);
 		if (unlikely(wduration))
 			loop_sleep(wduration);
-		rcu_copy_mutex_unlock();
 		synchronize_rcu();
 		if (old)
 			old->a = 0;
 		test_array_free(old);
+		rcu_copy_mutex_unlock();
 		nr_writes++;
 		if (unlikely(!test_duration_write()))
 			break;
diff --git a/tests/test_urcu_bp.c b/tests/test_urcu_bp.c
index 52e7f45..ba80ae6 100644
--- a/tests/test_urcu_bp.c
+++ b/tests/test_urcu_bp.c
@@ -186,7 +186,8 @@ void rcu_copy_mutex_unlock(void)
 /*
  * malloc/free are reusing memory areas too quickly, which does not let us
  * test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which insures we never run over our tail.
  */
 #define ARRAY_SIZE (1048576 * nr_writers)
 #define ARRAY_POISON 0xDEADBEEF
@@ -198,7 +199,6 @@ static struct test_array *test_array_alloc(void)
 	struct test_array *ret;
 	int index;
 
-	rcu_copy_mutex_lock();
 	index = array_index % ARRAY_SIZE;
 	assert(test_array[index].a == ARRAY_POISON ||
 	       test_array[index].a == 0);
@@ -206,7 +206,6 @@ static struct test_array *test_array_alloc(void)
 	array_index++;
 	if (array_index == ARRAY_SIZE)
 		array_index = 0;
-	rcu_copy_mutex_unlock();
 	return ret;
 }
 
@@ -214,9 +213,7 @@ static void test_array_free(struct test_array *ptr)
 {
 	if (!ptr)
 		return;
-	rcu_copy_mutex_lock();
 	ptr->a = ARRAY_POISON;
-	rcu_copy_mutex_unlock();
 }
 
 void *thr_reader(void *_count)
@@ -275,6 +272,7 @@ void *thr_writer(void *_count)
 	cmm_smp_mb();
 
 	for (;;) {
+		rcu_copy_mutex_lock();
 		new = test_array_alloc();
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
@@ -284,6 +282,7 @@ void *thr_writer(void *_count)
 		if (old)
 			old->a = 0;
 		test_array_free(old);
+		rcu_copy_mutex_unlock();
 		nr_writes++;
 		if (unlikely(!test_duration_write()))
 			break;
-- 
2.34.1
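
For readers outside the liburcu tree, the following is a minimal,
self-contained sketch of the pattern this patch establishes: a
circular-array allocator whose alloc and free are covered by a single
mutex acquisition in the writer loop, so a long-preempted writer can
never observe another writer wrapping the array underneath it. This is
an illustration, not the test code itself: it uses plain pthreads,
replaces the RCU pointer exchange and grace period with a comment
marking where they would sit, and the names NR_WRITERS, writer_thread
and the fixed iteration count are invented for the example.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define NR_WRITERS	4
#define ARRAY_SIZE	(1024 * NR_WRITERS)	/* much larger than NR_WRITERS */
#define ARRAY_POISON	0xDEADBEEF

struct test_array {
	unsigned int a;
};

static struct test_array test_array[ARRAY_SIZE];
static int array_index;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold "lock". */
static struct test_array *test_array_alloc(void)
{
	struct test_array *ret;
	int index;

	index = array_index % ARRAY_SIZE;
	/* A live entry here would mean the head ran over the tail. */
	assert(test_array[index].a == ARRAY_POISON ||
	       test_array[index].a == 0);
	ret = &test_array[index];
	array_index++;
	if (array_index == ARRAY_SIZE)
		array_index = 0;
	return ret;
}

/* Caller must hold "lock". */
static void test_array_free(struct test_array *ptr)
{
	if (!ptr)
		return;
	ptr->a = ARRAY_POISON;
}

static void *writer_thread(void *arg)
{
	struct test_array *new, *old = NULL;
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++) {
		/* Hold the mutex across BOTH alloc and free, as in the patch. */
		pthread_mutex_lock(&lock);
		new = test_array_alloc();
		new->a = 8;
		/* Real test: rcu_xchg_pointer() + synchronize_rcu() go here. */
		if (old)
			old->a = 0;
		test_array_free(old);
		pthread_mutex_unlock(&lock);
		old = new;
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WRITERS];
	int i;

	for (i = 0; i < NR_WRITERS; i++)
		pthread_create(&tid[i], NULL, writer_thread, NULL);
	for (i = 0; i < NR_WRITERS; i++)
		pthread_join(tid[i], NULL);
	printf("no allocator reuse detected\n");
	return 0;
}

Build with e.g. "cc -pthread sketch.c". Narrowing the locking back to
the bodies of test_array_alloc() and test_array_free(), as before this
patch, can let the assert in test_array_alloc() fire when threads
heavily overcommit the available CPUs, which is the race the commit
message describes.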