summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
dc745ef)
A thread preempted for a long period of time could race, when scheduled
again, with another thread that had meanwhile been allocating/freeing
entries (thus wrapping around the available buffer). This race is
triggered only when overcommitting the number of threads compared to
the number of available CPUs.
Take the mutex across both alloc and free to fix this.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
struct test_array *ret;
int index;
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
}
void *thr_reader(void *_count)
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
if (old)
old->a = 0;
test_array_free(old);
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
nr_writes++;
if (unlikely(!test_duration_write()))
break;
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
struct test_array *ret;
int index;
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
}
void *thr_reader(void *_count)
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
if (old)
old->a = 0;
test_array_free(old);
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
nr_writes++;
if (unlikely(!test_duration_write()))
break;
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
struct test_array *ret;
int index;
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
}
void *thr_reader(void *_count)
new = test_array_alloc();
new->a = 8;
new = test_array_alloc();
new->a = 8;
old = test_rcu_pointer;
rcu_assign_pointer(test_rcu_pointer, new);
if (unlikely(wduration))
loop_sleep(wduration);
old = test_rcu_pointer;
rcu_assign_pointer(test_rcu_pointer, new);
if (unlikely(wduration))
loop_sleep(wduration);
- rcu_copy_mutex_unlock();
synchronize_rcu();
if (old)
old->a = 0;
test_array_free(old);
synchronize_rcu();
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
nr_writes++;
if (unlikely(!test_duration_write()))
break;
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
struct test_array *ret;
int index;
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
}
void *thr_reader(void *_count)
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
if (old)
old->a = 0;
test_array_free(old);
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
nr_writes++;
if (unlikely(!test_duration_write()))
break;