wfcqueue test
diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 6e80fa98c8aea9ebd7a7a835ea8fa3c52546228e..13b24ff20b7c71c265c1fc5b0b3d17f1c28ce55c 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -40,6 +40,8 @@
 #include "urcu-pointer.h"
 #include "urcu/list.h"
 #include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
 
 /* Data structure that identifies a call_rcu thread. */
 
@@ -62,7 +64,7 @@ CDS_LIST_HEAD(call_rcu_data_list);
 
 /* Link a thread using call_rcu() to its call_rcu thread. */
 
-static __thread struct call_rcu_data *thread_call_rcu_data;
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
 
 /* Guard call_rcu thread creation. */
 
@@ -82,12 +84,20 @@ static struct call_rcu_data *default_call_rcu_data;
 
 /*
  * Pointer to array of pointers to per-CPU call_rcu_data structures
- * and # CPUs.
+ * and # CPUs. per_cpu_call_rcu_data is a RCU-protected pointer to an
+ * array of RCU-protected pointers to call_rcu_data. call_rcu acts as a
+ * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
+ * without mutex. The call_rcu_mutex protects updates.
  */
 
 static struct call_rcu_data **per_cpu_call_rcu_data;
 static long maxcpus;
 
+static void maxcpus_reset(void)
+{
+	maxcpus = 0;
+}
+
 /* Allocate the array if it has not already been allocated. */
 
 static void alloc_cpu_call_rcu_data(void)
@@ -104,7 +114,7 @@ static void alloc_cpu_call_rcu_data(void)
 	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
 	if (p != NULL) {
 		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
-		per_cpu_call_rcu_data = p;
+		rcu_set_pointer(&per_cpu_call_rcu_data, p);
 	} else {
 		if (!warned) {
 			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
@@ -123,6 +133,10 @@ static void alloc_cpu_call_rcu_data(void)
 static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
 static const long maxcpus = -1;
 
+static void maxcpus_reset(void)
+{
+}
+
 static void alloc_cpu_call_rcu_data(void)
 {
 }
@@ -138,20 +152,22 @@ static int sched_getcpu(void)
 
 static void call_rcu_lock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_lock(pmp) != 0) {
-		perror("pthread_mutex_lock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_lock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /* Release the specified pthread mutex. */
 
 static void call_rcu_unlock(pthread_mutex_t *pmp)
 {
-	if (pthread_mutex_unlock(pmp) != 0) {
-		perror("pthread_mutex_unlock");
-		exit(-1);
-	}
+	int ret;
+
+	ret = pthread_mutex_unlock(pmp);
+	if (ret)
+		urcu_die(ret);
 }
 
 #if HAVE_SCHED_SETAFFINITY
@@ -192,7 +208,7 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp)
 {
 	/* Write to call_rcu list before reading/writing futex */
 	cmm_smp_mb();
-	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
 		uatomic_set(&crdp->futex, 0);
 		futex_async(&crdp->futex, FUTEX_WAKE, 1,
 			    NULL, NULL, 0);
@@ -209,18 +225,18 @@ static void *call_rcu_thread(void *arg)
 	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
 	struct rcu_head *rhp;
 	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
+	int ret;
 
-	if (set_thread_cpu_affinity(crdp) != 0) {
-		perror("pthread_setaffinity_np");
-		exit(-1);
-	}
+	ret = set_thread_cpu_affinity(crdp);
+	if (ret)
+		urcu_die(errno);
 
 	/*
 	 * If callbacks take a read-side lock, we need to be registered.
 	 */
 	rcu_register_thread();
 
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 	if (!rt) {
 		uatomic_dec(&crdp->futex);
 		/* Decrement futex before reading call_rcu list */
@@ -295,12 +311,11 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 			       int cpu_affinity)
 {
 	struct call_rcu_data *crdp;
+	int ret;
 
 	crdp = malloc(sizeof(*crdp));
-	if (crdp == NULL) {
-		fprintf(stderr, "Out of memory.\n");
-		exit(-1);
-	}
+	if (crdp == NULL)
+		urcu_die(errno);
 	memset(crdp, '\0', sizeof(*crdp));
 	cds_wfq_init(&crdp->cbs);
 	crdp->qlen = 0;
@@ -310,10 +325,9 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	crdp->cpu_affinity = cpu_affinity;
 	cmm_smp_mb(); /* Structure initialized before pointer is planted. */
 	*crdpp = crdp;
-	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
-		perror("pthread_create");
-		exit(-1);
-	}
+	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+	if (ret)
+		urcu_die(ret);
 }
 
 /*
@@ -321,13 +335,18 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
  * CPU, returning NULL if there is none. We cannot automatically
  * created it because the platform we are running on might not define
  * sched_getcpu().
+ *
+ * The call to this function and use of the returned call_rcu_data
+ * should be protected by RCU read-side lock.
  */
 
 struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
 {
 	static int warned = 0;
+	struct call_rcu_data **pcpu_crdp;
 
-	if (per_cpu_call_rcu_data == NULL)
+	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
+	if (pcpu_crdp == NULL)
 		return NULL;
 	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
 		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
@@ -335,7 +354,7 @@ struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
 	}
 	if (cpu < 0 || maxcpus <= cpu)
 		return NULL;
-	return per_cpu_call_rcu_data[cpu];
+	return rcu_dereference(pcpu_crdp[cpu]);
 }
 
 /*
@@ -379,6 +398,10 @@ struct call_rcu_data *create_call_rcu_data(unsigned long flags,
  * the caller's responsibility to dispose of the removed structure.
  * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
  * (prior to NULLing it out, of course).
+ *
+ * The caller must wait for a grace-period to pass between return from
+ * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
+ * previous call rcu data as argument.
  */
 
 int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
@@ -396,12 +419,21 @@ int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
 		errno = EINVAL;
 		return -EINVAL;
 	}
-	call_rcu_unlock(&call_rcu_mutex);
+
 	if (per_cpu_call_rcu_data == NULL) {
+		call_rcu_unlock(&call_rcu_mutex);
 		errno = ENOMEM;
 		return -ENOMEM;
 	}
-	per_cpu_call_rcu_data[cpu] = crdp;
+
+	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
+		call_rcu_unlock(&call_rcu_mutex);
+		errno = EEXIST;
+		return -EEXIST;
+	}
+
+	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
+	call_rcu_unlock(&call_rcu_mutex);
 	return 0;
 }
 
@@ -432,13 +464,16 @@ struct call_rcu_data *get_default_call_rcu_data(void)
  * structure assigned to the CPU on which the thread is running,
  * followed by the default call_rcu_data structure. If there is not
  * yet a default call_rcu_data structure, one will be created.
+ *
+ * Calls to this function and use of the returned call_rcu_data should
+ * be protected by RCU read-side lock.
  */
 struct call_rcu_data *get_call_rcu_data(void)
 {
 	struct call_rcu_data *crd;
 
-	if (thread_call_rcu_data != NULL)
-		return thread_call_rcu_data;
+	if (URCU_TLS(thread_call_rcu_data) != NULL)
+		return URCU_TLS(thread_call_rcu_data);
 
 	if (maxcpus > 0) {
 		crd = get_cpu_call_rcu_data(sched_getcpu());
@@ -455,7 +490,7 @@ struct call_rcu_data *get_call_rcu_data(void)
 
 struct call_rcu_data *get_thread_call_rcu_data(void)
 {
-	return thread_call_rcu_data;
+	return URCU_TLS(thread_call_rcu_data);
 }
 
 /*
@@ -471,7 +506,7 @@ struct call_rcu_data *get_thread_call_rcu_data(void)
 
 void set_thread_call_rcu_data(struct call_rcu_data *crdp)
 {
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
 }
 
 /*
@@ -513,8 +548,13 @@ int create_all_cpu_call_rcu_data(unsigned long flags)
 		}
 		call_rcu_unlock(&call_rcu_mutex);
 		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
-			/* FIXME: Leaks crdp for now. */
-			return ret; /* Can happen on race. */
+			call_rcu_data_free(crdp);
+
+			/* it has been created by other thread */
+			if (ret == -EEXIST)
+				continue;
+
+			return ret;
 		}
 	}
 	return 0;
@@ -541,6 +581,8 @@ static void wake_call_rcu_thread(struct call_rcu_data *crdp)
  * need the first invocation of call_rcu() to be fast, make sure
  * to create a call_rcu thread first. One way to accomplish this is
  * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
+ *
+ * call_rcu must be called by registered RCU read-side threads.
  */
 
 void call_rcu(struct rcu_head *head,
@@ -550,10 +592,13 @@ void call_rcu(struct rcu_head *head,
 	cds_wfq_node_init(&head->next);
 	head->func = func;
 
+	/* Holding rcu read-side lock across use of per-cpu crdp */
+	rcu_read_lock();
 	crdp = get_call_rcu_data();
 	cds_wfq_enqueue(&crdp->cbs, &head->next);
 	uatomic_inc(&crdp->qlen);
 	wake_call_rcu_thread(crdp);
+	rcu_read_unlock();
 }
 
 /*
@@ -573,6 +618,10 @@ void call_rcu(struct rcu_head *head,
  *
  * We also silently refuse to free NULL pointers. This simplifies
  * the calling code.
+ *
+ * The caller must wait for a grace-period to pass between return from
+ * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
+ * previous call rcu data as argument.
  */
 void call_rcu_data_free(struct call_rcu_data *crdp)
 {
@@ -595,6 +644,8 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
 		cbs_tail = (struct cds_wfq_node **)
 			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+		/* Create default call rcu data if need be */
+		(void) get_default_call_rcu_data();
 		cbs_endprev = (struct cds_wfq_node **)
 			uatomic_xchg(&default_call_rcu_data, cbs_tail);
 		*cbs_endprev = cbs;
@@ -603,7 +654,10 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		wake_call_rcu_thread(default_call_rcu_data);
 	}
 
+	call_rcu_lock(&call_rcu_mutex);
 	cds_list_del(&crdp->list);
+	call_rcu_unlock(&call_rcu_mutex);
+
 	free(crdp);
 }
 
@@ -613,17 +667,38 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 void free_all_cpu_call_rcu_data(void)
 {
 	int cpu;
-	struct call_rcu_data *crdp;
+	struct call_rcu_data **crdp;
+	static int warned = 0;
 
 	if (maxcpus <= 0)
 		return;
+
+	crdp = malloc(sizeof(*crdp) * maxcpus);
+	if (!crdp) {
+		if (!warned) {
+			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
+		}
+		warned = 1;
+		return;
+	}
+
 	for (cpu = 0; cpu < maxcpus; cpu++) {
-		crdp = get_cpu_call_rcu_data(cpu);
-		if (crdp == NULL)
+		crdp[cpu] = get_cpu_call_rcu_data(cpu);
+		if (crdp[cpu] == NULL)
 			continue;
 		set_cpu_call_rcu_data(cpu, NULL);
-		call_rcu_data_free(crdp);
 	}
+	/*
+	 * Wait for call_rcu sites acting as RCU readers of the
+	 * call_rcu_data to become quiescent.
+	 */
+	synchronize_rcu();
+	for (cpu = 0; cpu < maxcpus; cpu++) {
+		if (crdp[cpu] == NULL)
+			continue;
+		call_rcu_data_free(crdp[cpu]);
+	}
+	free(crdp);
 }
 
 /*
@@ -658,6 +733,10 @@ void call_rcu_after_fork_child(void)
 	/* Release the mutex. */
 	call_rcu_unlock(&call_rcu_mutex);
 
+	/* Do nothing when call_rcu() has not been used */
+	if (cds_list_empty(&call_rcu_data_list))
+		return;
+
 	/*
 	 * Allocate a new default call_rcu_data structure in order
 	 * to get a working call_rcu thread to go with it.
@@ -665,6 +744,12 @@ void call_rcu_after_fork_child(void)
 	default_call_rcu_data = NULL;
 	(void)get_default_call_rcu_data();
 
+	/* Cleanup call_rcu_data pointers before use */
+	maxcpus_reset();
+	free(per_cpu_call_rcu_data);
+	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
+	URCU_TLS(thread_call_rcu_data) = NULL;
+
 	/* Dispose of all of the rest of the call_rcu_data structures. */
 	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
 		if (crdp == default_call_rcu_data)
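
Note on usage after this change: call_rcu() now takes the RCU read-side lock internally while it looks up the per-CPU call_rcu_data, so, per the new comment, it must be invoked from registered RCU read-side threads. A minimal, hypothetical caller sketch, assuming the default liburcu flavor headers of this era; struct foo and free_foo() are invented for illustration:

#include <stdlib.h>
#include <urcu.h>		/* default flavor; declares call_rcu() in this era */

struct foo {
	int value;
	struct rcu_head rcu;
};

static void free_foo(struct rcu_head *head)
{
	free(caa_container_of(head, struct foo, rcu));
}

static void *worker(void *arg)
{
	struct foo *p;

	rcu_register_thread();	/* required: call_rcu() acts as an RCU reader */
	p = malloc(sizeof(*p));
	if (p)
		call_rcu(&p->rcu, free_foo);	/* free_foo() runs after a grace period */
	rcu_unregister_thread();
	return NULL;
}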
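Similarly, per-CPU call_rcu_data pointers are now published with rcu_set_pointer() and read under RCU, set_cpu_call_rcu_data() returns -EEXIST when a worker is already assigned, and a grace period must separate unpublishing a structure from freeing it with call_rcu_data_free(). A hypothetical helper sketch following that rule (retire_cpu_worker() is an invented name; it mirrors the pattern the patch applies in free_all_cpu_call_rcu_data()):

static void retire_cpu_worker(int cpu)
{
	struct call_rcu_data *old;

	rcu_read_lock();
	old = get_cpu_call_rcu_data(cpu);	/* lookup is RCU-protected after this change */
	rcu_read_unlock();
	if (!old)
		return;

	(void) set_cpu_call_rcu_data(cpu, NULL);	/* unpublish the per-CPU pointer */

	/*
	 * call_rcu() callers may still be using "old" under their
	 * read-side lock; wait for them before freeing it.
	 */
	synchronize_rcu();
	call_rcu_data_free(old);
}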