update version to 0.5.4
[urcu.git] / urcu-defer.c
diff --git a/urcu-defer.c b/urcu-defer.c
index 696ccae77db6c691c1172a6cd7c8b4eac5a88c09..3f596ae14882e4a635529baa663312b33b2ad247 100644
--- a/urcu-defer.c
+++ b/urcu-defer.c
@@ -3,7 +3,7 @@
  *
  * Userspace RCU library - batch memory reclamation
  *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -54,7 +54,7 @@ static int defer_thread_futex;
  * the reclamation tread.
  */
 static struct defer_queue __thread defer_queue;
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
 static pthread_t tid_defer;
 
 static void mutex_lock(pthread_mutex_t *mutex)
@@ -109,8 +109,8 @@ static unsigned long rcu_defer_num_callbacks(void)
 	struct defer_queue *index;
 
 	mutex_lock(&rcu_defer_mutex);
-	list_for_each_entry(index, &registry, list) {
-		head = LOAD_SHARED(index->head);
+	cds_list_for_each_entry(index, &registry, list) {
+		head = CMM_LOAD_SHARED(index->head);
 		num_items += head - index->tail;
 	}
 	mutex_unlock(&rcu_defer_mutex);
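This hunk shows the liburcu 0.5 namespacing in miniature: list primitives gain a cds_/CDS_ prefix (CDS_LIST_HEAD, cds_list_for_each_entry) and memory-model macros a cmm_/CMM_ prefix (CMM_LOAD_SHARED). As a caller-side sketch of the renamed list API (the item/my_registry/sum_items names are invented for illustration; the header path assumes the usual liburcu layout):

#include <urcu/list.h>			/* cds_list_* API (liburcu >= 0.5) */

struct item {
	int value;
	struct cds_list_head node;	/* embedded list node */
};

static CDS_LIST_HEAD(my_registry);	/* was: LIST_HEAD(my_registry) */

static int sum_items(void)
{
	struct item *pos;
	int sum = 0;

	/* was: list_for_each_entry(pos, &my_registry, node) */
	cds_list_for_each_entry(pos, &my_registry, node)
		sum += pos->value;
	return sum;
}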
@@ -123,13 +123,13 @@ static unsigned long rcu_defer_num_callbacks(void)
 static void wait_defer(void)
 {
 	uatomic_dec(&defer_thread_futex);
-	smp_mb();	/* Write futex before read queue */
+	cmm_smp_mb();	/* Write futex before read queue */
 	if (rcu_defer_num_callbacks()) {
-		smp_mb();	/* Read queue before write futex */
+		cmm_smp_mb();	/* Read queue before write futex */
 		/* Callbacks are queued, don't wait. */
 		uatomic_set(&defer_thread_futex, 0);
 	} else {
-		smp_rmb();	/* Read queue before read futex */
+		cmm_smp_rmb();	/* Read queue before read futex */
 		if (uatomic_read(&defer_thread_futex) == -1)
 			futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
 				      NULL, NULL, 0);
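Only the barrier names change here, but the hunk illustrates the futex sleep protocol: decrement the futex word, then re-check the work condition with a full barrier in between, so a concurrent producer either observes the -1 and issues a wake-up, or this thread observes the queued callback and skips the wait. A hedged sketch of the matching wake-up side, mirroring the pattern liburcu uses for its wake_up_defer() helper (that function is not part of this diff, so treat the exact body as an assumption; it relies on the file's uatomic_* and futex_noasync helpers):

static void wake_up_defer(void)
{
	/* Paired with wait_defer(): the producer's cmm_smp_mb() after
	 * publishing the queue head orders that write before this read
	 * of the futex word. */
	if (uatomic_read(&defer_thread_futex) == -1) {
		uatomic_set(&defer_thread_futex, 0);
		futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
			      NULL, NULL, 0);
	}
}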
@@ -152,22 +152,22 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
 	 */
 
 	for (i = queue->tail; i != head;) {
-		smp_rmb();       /* read head before q[]. */
-		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+		cmm_smp_rmb();       /* read head before q[]. */
+		p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
 		if (unlikely(DQ_IS_FCT_BIT(p))) {
 			DQ_CLEAR_FCT_BIT(p);
 			queue->last_fct_out = p;
-			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
 		} else if (unlikely(p == DQ_FCT_MARK)) {
-			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
 			queue->last_fct_out = p;
-			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
 		}
 		fct = queue->last_fct_out;
 		fct(p);
 	}
-	smp_mb();	/* push tail after having used q[] */
-	STORE_SHARED(queue->tail, i);
+	cmm_smp_mb();	/* push tail after having used q[] */
+	CMM_STORE_SHARED(queue->tail, i);
 }
 
 static void _rcu_defer_barrier_thread(void)
@@ -207,12 +207,12 @@ void rcu_defer_barrier(void)
 	struct defer_queue *index;
 	unsigned long num_items = 0;
 
-	if (list_empty(&registry))
+	if (cds_list_empty(&registry))
 		return;
 
 	mutex_lock(&rcu_defer_mutex);
-	list_for_each_entry(index, &registry, list) {
-		index->last_head = LOAD_SHARED(index->head);
+	cds_list_for_each_entry(index, &registry, list) {
+		index->last_head = CMM_LOAD_SHARED(index->head);
 		num_items += index->last_head - index->tail;
 	}
 	if (likely(!num_items)) {
@@ -223,7 +223,7 @@ void rcu_defer_barrier(void)
 		goto end;
 	}
 	synchronize_rcu();
-	list_for_each_entry(index, &registry, list)
+	cds_list_for_each_entry(index, &registry, list)
 		rcu_defer_barrier_queue(index, index->last_head);
 end:
 	mutex_unlock(&rcu_defer_mutex);
@@ -241,16 +241,16 @@ void _defer_rcu(void (*fct)(void *p), void *p)
 	 * thread.
 	 */
 	head = defer_queue.head;
-	tail = LOAD_SHARED(defer_queue.tail);
+	tail = CMM_LOAD_SHARED(defer_queue.tail);
 
 	/*
 	 * If queue is full, or reached threshold. Empty queue ourself.
 	 * Worse-case: must allow 2 supplementary entries for fct pointer.
 	 */
-	if (unlikely(sync || (head - tail >= DEFER_QUEUE_SIZE - 2))) {
+	if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
 		assert(head - tail <= DEFER_QUEUE_SIZE);
 		rcu_defer_barrier_thread();
-		assert(head - LOAD_SHARED(defer_queue.tail) == 0);
+		assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
 	}
 
 	if (unlikely(defer_queue.last_fct_in != fct)) {
@@ -261,13 +261,13 @@ void _defer_rcu(void (*fct)(void *p), void *p)
 		 * marker, write DQ_FCT_MARK followed by the function
 		 * pointer.
 		 */
-		_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+		_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
 			      DQ_FCT_MARK);
-		_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+		_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
 			      fct);
 	} else {
 		DQ_SET_FCT_BIT(fct);
-		_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+		_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
 			      fct);
 	}
 } else {
@@ -276,17 +276,17 @@ void _defer_rcu(void (*fct)(void *p), void *p)
 		 * If the data to encode is not aligned or the marker,
 		 * write DQ_FCT_MARK followed by the function pointer.
 		 */
-		_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+		_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
 			      DQ_FCT_MARK);
-		_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+		_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
 			      fct);
 		}
 	}
-	_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
-	smp_wmb();	/* Publish new pointer before head */
+	_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+	cmm_smp_wmb();	/* Publish new pointer before head */
 			/* Write q[] before head. */
-	STORE_SHARED(defer_queue.head, head);
-	smp_mb();	/* Write queue head before read futex */
+	CMM_STORE_SHARED(defer_queue.head, head);
+	cmm_smp_mb();	/* Write queue head before read futex */
 	/*
 	 * Wake-up any waiting defer thread.
 	 */
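The cmm_smp_wmb()/cmm_smp_mb() pair here is the producer half of the queue's publication protocol; the consumer half is the cmm_smp_rmb() in rcu_defer_barrier_queue() further up. A stripped-down, hypothetical single-producer/single-consumer version of the same ordering (QSIZE, publish and consume are invented names, not liburcu API):

#include <urcu/system.h>	/* CMM_LOAD_SHARED / CMM_STORE_SHARED */
#include <urcu/arch.h>		/* cmm_smp_wmb / cmm_smp_rmb */

#define QSIZE	128		/* must be a power of two */
#define QMASK	(QSIZE - 1)

static void *q[QSIZE];
static unsigned long q_head;	/* written by producer only */
static unsigned long q_tail;	/* written by consumer only */

static void publish(void *p)
{
	CMM_STORE_SHARED(q[q_head & QMASK], p);
	cmm_smp_wmb();			/* write q[] before head */
	CMM_STORE_SHARED(q_head, q_head + 1);
}

static void *consume(void)
{
	if (CMM_LOAD_SHARED(q_head) == q_tail)
		return NULL;		/* empty */
	cmm_smp_rmb();			/* read head before q[] */
	return CMM_LOAD_SHARED(q[q_tail++ & QMASK]);
}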
@@ -339,23 +339,26 @@ static void stop_defer_thread(void)
 	assert(!ret);
 }
 
-void rcu_defer_register_thread(void)
+int rcu_defer_register_thread(void)
 {
 	int was_empty;
 
 	assert(defer_queue.last_head == 0);
 	assert(defer_queue.q == NULL);
 	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
+	if (!defer_queue.q)
+		return -ENOMEM;
 
 	mutex_lock(&defer_thread_mutex);
 	mutex_lock(&rcu_defer_mutex);
-	was_empty = list_empty(&registry);
-	list_add(&defer_queue.list, &registry);
+	was_empty = cds_list_empty(&registry);
+	cds_list_add(&defer_queue.list, &registry);
 	mutex_unlock(&rcu_defer_mutex);
 
 	if (was_empty)
 		start_defer_thread();
 	mutex_unlock(&defer_thread_mutex);
+	return 0;
 }
 
 void rcu_defer_unregister_thread(void)
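Since rcu_defer_register_thread() now reports allocation failure instead of silently proceeding, callers should check the return value before using defer_rcu(). A minimal caller-side sketch, assuming the 0.5-era public headers urcu.h and urcu-defer.h (verify against your installed version; worker is an invented name):

#include <errno.h>
#include <stdio.h>
#include <urcu.h>
#include <urcu-defer.h>

static void *worker(void *arg)
{
	(void) arg;

	rcu_register_thread();
	if (rcu_defer_register_thread()) {	/* -ENOMEM on failure */
		fprintf(stderr, "cannot allocate defer queue\n");
		rcu_unregister_thread();
		return NULL;
	}

	/* ... defer_rcu(free, some_ptr); ... */

	rcu_defer_unregister_thread();
	rcu_unregister_thread();
	return NULL;
}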
@@ -364,11 +367,11 @@ void rcu_defer_unregister_thread(void)
 
 	mutex_lock(&defer_thread_mutex);
 	mutex_lock(&rcu_defer_mutex);
-	list_del(&defer_queue.list);
+	cds_list_del(&defer_queue.list);
 	_rcu_defer_barrier_thread();
 	free(defer_queue.q);
 	defer_queue.q = NULL;
-	is_empty = list_empty(&registry);
+	is_empty = cds_list_empty(&registry);
 	mutex_unlock(&rcu_defer_mutex);
 
 	if (is_empty)
@@ -378,5 +381,5 @@ void rcu_defer_unregister_thread(void)
 
 void rcu_defer_exit(void)
 {
-	assert(list_empty(&registry));
+	assert(cds_list_empty(&registry));
 }