projects
/
urcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Update version to 0.7.4
[urcu.git]
/
urcu-call-rcu-impl.h
diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 182e9b15bd96552ed5e4fb8125bee02da04a8715..13b24ff20b7c71c265c1fc5b0b3d17f1c28ce55c 100644
(file)
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -40,6 +40,8 @@
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
/* Data structure that identifies a call_rcu thread. */
/* Data structure that identifies a call_rcu thread. */
@@ -62,7 +64,7 @@
CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */
/* Link a thread using call_rcu() to its call_rcu thread. */
-static __thread struct call_rcu_data *thread_call_rcu_data;
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/* Guard call_rcu thread creation. */
/* Guard call_rcu thread creation. */
@@ -150,20 +152,22 @@
static int sched_getcpu(void)
static void call_rcu_lock(pthread_mutex_t *pmp)
{
static void call_rcu_lock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_lock(pmp) != 0) {
- perror("pthread_mutex_lock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_lock(pmp);
+ if (ret)
+ urcu_die(ret);
}
/* Release the specified pthread mutex. */
static void call_rcu_unlock(pthread_mutex_t *pmp)
{
}
/* Release the specified pthread mutex. */
static void call_rcu_unlock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_unlock(pmp) != 0) {
- perror("pthread_mutex_unlock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_unlock(pmp);
+ if (ret)
+ urcu_die(ret);
}
#if HAVE_SCHED_SETAFFINITY
}
#if HAVE_SCHED_SETAFFINITY
@@ -204,7 +208,7 @@
static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
/* Write to call_rcu list before reading/writing futex */
cmm_smp_mb();
{
/* Write to call_rcu list before reading/writing futex */
cmm_smp_mb();
-	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
uatomic_set(&crdp->futex, 0);
futex_async(&crdp->futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
uatomic_set(&crdp->futex, 0);
futex_async(&crdp->futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
@@ -221,18 +225,18 @@
static void *call_rcu_thread(void *arg)
struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
struct rcu_head *rhp;
int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
struct rcu_head *rhp;
int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
+ int ret;
- if (set_thread_cpu_affinity(crdp) != 0) {
- perror("pthread_setaffinity_np");
- exit(-1);
- }
+ ret = set_thread_cpu_affinity(crdp);
+ if (ret)
+ urcu_die(errno);
/*
* If callbacks take a read-side lock, we need to be registered.
*/
rcu_register_thread();
/*
* If callbacks take a read-side lock, we need to be registered.
*/
rcu_register_thread();
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
/* Decrement futex before reading call_rcu list */
if (!rt) {
uatomic_dec(&crdp->futex);
/* Decrement futex before reading call_rcu list */
@@ -307,12 +311,11 @@
static void call_rcu_data_init(struct call_rcu_data **crdpp,
int cpu_affinity)
{
struct call_rcu_data *crdp;
int cpu_affinity)
{
struct call_rcu_data *crdp;
+ int ret;
crdp = malloc(sizeof(*crdp));
crdp = malloc(sizeof(*crdp));
- if (crdp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(-1);
- }
+ if (crdp == NULL)
+ urcu_die(errno);
memset(crdp, '\0', sizeof(*crdp));
cds_wfq_init(&crdp->cbs);
crdp->qlen = 0;
memset(crdp, '\0', sizeof(*crdp));
cds_wfq_init(&crdp->cbs);
crdp->qlen = 0;
@@ -322,10 +325,9 @@
static void call_rcu_data_init(struct call_rcu_data **crdpp,
crdp->cpu_affinity = cpu_affinity;
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
*crdpp = crdp;
crdp->cpu_affinity = cpu_affinity;
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
*crdpp = crdp;
- if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
- perror("pthread_create");
- exit(-1);
- }
+ ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+ if (ret)
+ urcu_die(ret);
}
/*
}
/*
@@ -470,8 +472,8 @@
struct call_rcu_data *get_call_rcu_data(void)
{
struct call_rcu_data *crd;
{
struct call_rcu_data *crd;
-	if (thread_call_rcu_data != NULL)
-		return thread_call_rcu_data;
+	if (URCU_TLS(thread_call_rcu_data) != NULL)
+		return URCU_TLS(thread_call_rcu_data);
if (maxcpus > 0) {
crd = get_cpu_call_rcu_data(sched_getcpu());
if (maxcpus > 0) {
crd = get_cpu_call_rcu_data(sched_getcpu());
@@ -488,7 +490,7 @@
struct call_rcu_data *get_call_rcu_data(void)
struct call_rcu_data *get_thread_call_rcu_data(void)
{
struct call_rcu_data *get_thread_call_rcu_data(void)
{
-	return thread_call_rcu_data;
+	return URCU_TLS(thread_call_rcu_data);
}
/*
}
/*
@@ -504,7 +506,7 @@
struct call_rcu_data *get_thread_call_rcu_data(void)
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
-	thread_call_rcu_data = crdp;
+	URCU_TLS(thread_call_rcu_data) = crdp;
}
/*
}
/*
@@ -746,7 +748,7 @@
void call_rcu_after_fork_child(void)
maxcpus_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
maxcpus_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
-	thread_call_rcu_data = NULL;
+	URCU_TLS(thread_call_rcu_data) = NULL;
/* Dispose of all of the rest of the call_rcu_data structures. */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
/* Dispose of all of the rest of the call_rcu_data structures. */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
This page took
0.024751 seconds
and
4
git commands to generate.