#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
/* Data structure that identifies a call_rcu thread. */
/* Link a thread using call_rcu() to its call_rcu thread. */
-static __thread struct call_rcu_data *thread_call_rcu_data;
+/*
+ * DEFINE_URCU_TLS (urcu/tls-compat.h) replaces the raw __thread qualifier;
+ * presumably it falls back to pthread keys on toolchains without compiler
+ * TLS support — confirm against urcu/tls-compat.h. All accesses must now go
+ * through URCU_TLS().
+ */
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/* Guard call_rcu thread creation. */
static void call_rcu_lock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_lock(pmp) != 0) {
- perror("pthread_mutex_lock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_lock(pmp);
+ if (ret)
+ /*
+ * pthread_mutex_lock() returns the error number directly and
+ * does not set errno, so the old perror() call reported a
+ * stale errno value. Pass the return code to urcu_die().
+ */
+ urcu_die(ret);
}
/* Release the specified pthread mutex. */
static void call_rcu_unlock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_unlock(pmp) != 0) {
- perror("pthread_mutex_unlock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_unlock(pmp);
+ if (ret)
+ /*
+ * Like pthread_mutex_lock(), pthread_mutex_unlock() reports
+ * failure via its return value, not errno; forward it to
+ * urcu_die() instead of using perror().
+ */
+ urcu_die(ret);
}
#if HAVE_SCHED_SETAFFINITY
{
/* Write to call_rcu list before reading/writing futex */
cmm_smp_mb();
- if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+ if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
uatomic_set(&crdp->futex, 0);
futex_async(&crdp->futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
struct rcu_head *rhp;
int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
+ int ret;
- if (set_thread_cpu_affinity(crdp) != 0) {
- perror("pthread_setaffinity_np");
- exit(-1);
- }
+ ret = set_thread_cpu_affinity(crdp);
+ if (ret)
+ urcu_die(errno);
/*
* If callbacks take a read-side lock, we need to be registered.
*/
rcu_register_thread();
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
/* Decrement futex before reading call_rcu list */
int cpu_affinity)
{
struct call_rcu_data *crdp;
+ int ret;
crdp = malloc(sizeof(*crdp));
- if (crdp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(-1);
- }
+ if (crdp == NULL)
+ urcu_die(errno);
memset(crdp, '\0', sizeof(*crdp));
cds_wfq_init(&crdp->cbs);
crdp->qlen = 0;
crdp->cpu_affinity = cpu_affinity;
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
*crdpp = crdp;
- if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
- perror("pthread_create");
- exit(-1);
- }
+ ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+ if (ret)
+ urcu_die(ret);
}
/*
{
struct call_rcu_data *crd;
- if (thread_call_rcu_data != NULL)
- return thread_call_rcu_data;
+ if (URCU_TLS(thread_call_rcu_data) != NULL)
+ return URCU_TLS(thread_call_rcu_data);
if (maxcpus > 0) {
crd = get_cpu_call_rcu_data(sched_getcpu());
struct call_rcu_data *get_thread_call_rcu_data(void)
{
- return thread_call_rcu_data;
+ /* Read via URCU_TLS() to match the DEFINE_URCU_TLS declaration. */
+ return URCU_TLS(thread_call_rcu_data);
}
/*
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
- thread_call_rcu_data = crdp;
+ /* Store via URCU_TLS() to match the DEFINE_URCU_TLS declaration. */
+ URCU_TLS(thread_call_rcu_data) = crdp;
}
/*
/* Create default call rcu data if need be */
(void) get_default_call_rcu_data();
cbs_endprev = (struct cds_wfq_node **)
- uatomic_xchg(&default_call_rcu_data, cbs_tail);
- *cbs_endprev = cbs;
+ uatomic_xchg(&default_call_rcu_data->cbs.tail,
+ cbs_tail);
+ _CMM_STORE_SHARED(*cbs_endprev, cbs);
uatomic_add(&default_call_rcu_data->qlen,
uatomic_read(&crdp->qlen));
wake_call_rcu_thread(default_call_rcu_data);
maxcpus_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
- thread_call_rcu_data = NULL;
+ URCU_TLS(thread_call_rcu_data) = NULL;
/* Dispose of all of the rest of the call_rcu_data structures. */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {