*
* Userspace RCU library, "bulletproof" version.
*
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sys/mman.h>
-#include "urcu-bp-static.h"
+#include "urcu/wfqueue.h"
+#include "urcu/map/urcu-bp.h"
+#include "urcu/static/urcu-bp.h"
+#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
+
+#include "urcu-die.h"
+
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
#include "urcu-bp.h"
+#define _LGPL_SOURCE
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifdef __linux__
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ return mremap(old_address, old_size, new_size, flags);
+}
+#else
+
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+
+/*
+ * mremap wrapper for non-Linux systems that cannot support MREMAP_MAYMOVE.
+ * This is not a generic mremap replacement: it always fails, so the
+ * caller falls back on mapping a new chunk.
+ */
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ assert(!(flags & MREMAP_MAYMOVE));
+
+ return MAP_FAILED;
+}
+#endif
+
+/* Sleep delay in ms */
+#define RCU_SLEEP_DELAY_MS 10
+#define INIT_NR_THREADS 8
+#define ARENA_INIT_ALLOC \
+ sizeof(struct registry_chunk) \
+ + INIT_NR_THREADS * sizeof(struct rcu_reader)
+
+/*
+ * Number of active spinning attempts to check for reader quiescent
+ * state before sleeping via poll().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
+static
+int rcu_bp_refcount;
+
+static
+void __attribute__((constructor)) rcu_bp_init(void);
+static
+void __attribute__((destructor)) rcu_bp_exit(void);
-/* Sleep delay in us */
-#define RCU_SLEEP_DELAY 1000
-#define ARENA_INIT_ALLOC 16
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-void __attribute__((destructor)) urcu_bp_exit(void);
+static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+static int initialized;
-static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_key_t urcu_bp_key;
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+__DEFINE_URCU_TLS_GLOBAL(unsigned int, rand_yield);
#endif
/*
* Global grace period counter.
- * Contains the current RCU_GP_CTR_BIT.
+ * Contains the current RCU_GP_CTR_PHASE.
* Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by writer with mutex taken. Read by both writer and readers.
*/
-long urcu_gp_ctr = RCU_GP_COUNT;
+long rcu_gp_ctr = RCU_GP_COUNT;
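+/*
+ * Sketch (illustrative, not part of this patch): assuming the
+ * definitions from urcu/static/urcu-bp.h, the low-order bits of the
+ * counter hold the read-side nesting count and a single high bit holds
+ * the grace period phase:
+ *
+ *	#define RCU_GP_COUNT		(1UL << 0)
+ *	#define RCU_GP_CTR_PHASE	(1UL << (sizeof(long) << 2))
+ *	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
+ *
+ * A reader snapshot v still holds up the previous grace period iff
+ * (v & RCU_GP_CTR_NEST_MASK) && ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE),
+ * which is the test rcu_old_gp_ongoing() relies on.
+ */
+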
/*
* Pointer to registry elements. Written to only by each individual reader. Read
* by both the reader and the writers.
*/
-struct urcu_reader __thread *urcu_reader;
+__DEFINE_URCU_TLS_GLOBAL(struct rcu_reader *, rcu_reader);
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
+
+struct registry_chunk {
+ size_t data_len; /* data length */
+ size_t used; /* amount of data used */
+ struct cds_list_head node; /* chunk_list node */
+ char data[];
+};
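+
+/*
+ * Layout sketch (illustrative): each registry_chunk above is a single
+ * mapping whose flexible data[] array is carved into struct rcu_reader
+ * slots. With INIT_NR_THREADS == 8, the initial ARENA_INIT_ALLOC chunk
+ * looks like:
+ *
+ *	| registry_chunk header | rcu_reader[0] | ... | rcu_reader[7] |
+ *
+ * data_len counts only the data[] bytes, while used grows and shrinks
+ * by sizeof(struct rcu_reader) as slots are allocated and released.
+ */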
struct registry_arena {
- void *p;
- size_t len;
- size_t used;
+ struct cds_list_head chunk_list;
};
-static struct registry_arena registry_arena;
+static struct registry_arena registry_arena = {
+ .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
+};
-static void rcu_gc_registry(void);
+/* Saved fork signal mask, protected by rcu_gp_lock */
+static sigset_t saved_fork_signal_mask;
-static void internal_urcu_lock(void)
+static void mutex_lock(pthread_mutex_t *mutex)
{
int ret;
#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(&urcu_mutex);
- if (ret) {
- perror("Error in pthread mutex lock");
- exit(-1);
- }
+ ret = pthread_mutex_lock(mutex);
+ if (ret)
+ urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
- if (ret != EBUSY && ret != EINTR) {
- printf("ret = %d, errno = %d\n", ret, errno);
- perror("Error in pthread mutex lock");
- exit(-1);
- }
- if (urcu_reader.need_mb) {
- smp_mb();
- urcu_reader.need_mb = 0;
- smp_mb();
- }
+ while ((ret = pthread_mutex_trylock(mutex)) != 0) {
+ if (ret != EBUSY && ret != EINTR)
+ urcu_die(ret);
poll(NULL,0,10);
}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
-static void internal_urcu_unlock(void)
+static void mutex_unlock(pthread_mutex_t *mutex)
{
int ret;
- ret = pthread_mutex_unlock(&urcu_mutex);
- if (ret) {
- perror("Error in pthread mutex unlock");
- exit(-1);
- }
+ ret = pthread_mutex_unlock(mutex);
+ if (ret)
+ urcu_die(ret);
}
-/*
- * called with urcu_mutex held.
- */
-static void switch_next_urcu_qparity(void)
+void update_counter_and_wait(void)
{
- STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
-}
+ CDS_LIST_HEAD(qsreaders);
+ unsigned int wait_loops = 0;
+ struct rcu_reader *index, *tmp;
-void wait_for_quiescent_state(void)
-{
- LIST_HEAD(qsreaders);
- int wait_loops = 0;
- struct urcu_reader *index, *tmp;
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit qparity update to memory before waiting for other parity
+ * quiescent state. Failure to do so could result in the writer waiting
+ * forever while new readers are always accessing data (no progress).
+ * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
+ */
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
- if (list_empty(&registry))
- return;
/*
- * Wait for each thread urcu_reader.ctr count to become 0.
+ * Wait for each thread rcu_reader.ctr count to become 0.
*/
for (;;) {
- wait_loops++;
- list_for_each_entry_safe(index, tmp, &registry, head) {
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
+
+ cds_list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_old_gp_ongoing(&index->ctr))
- list_move(&index->head, &qsreaders);
+ cds_list_move(&index->node, &qsreaders);
}
- if (list_empty(&registry)) {
+ if (cds_list_empty(&registry)) {
break;
} else {
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
- usleep(RCU_SLEEP_DELAY);
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
+ (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
else
- cpu_relax();
+ caa_cpu_relax();
}
}
/* put back the reader list in the registry */
- list_splice(&qsreaders, &registry);
+ cds_list_splice(&qsreaders, &registry);
}
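+
+/*
+ * Note (illustrative, not part of this patch): synchronize_rcu() below
+ * flips the parity twice because a reader may be preempted between
+ * loading rcu_gp_ctr and storing the snapshot to its own ctr. Such a
+ * reader can later publish a stale phase that happens to equal the
+ * current one, so a single parity flip could let a grace period ignore
+ * it; waiting out both parities in turn closes that window.
+ */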
void synchronize_rcu(void)
{
sigset_t newmask, oldmask;
int ret;
- ret = sigemptyset(&newmask);
+ ret = sigfillset(&newmask);
assert(!ret);
- ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
assert(!ret);
- internal_urcu_lock();
+ mutex_lock(&rcu_gp_lock);
- /* Remove old registry elements */
- rcu_gc_registry();
+ if (cds_list_empty(&registry))
+ goto out;
/* All threads should read qparity before accessing data structure
- * where new ptr points to. Must be done within internal_urcu_lock
- * because it iterates on reader threads.*/
+ * where new ptr points to. */
/* Write new ptr before changing the qparity */
- smp_mb();
-
- switch_next_urcu_qparity(); /* 0 -> 1 */
-
- /*
- * Must commit qparity update to memory before waiting for parity
- * 0 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
-
- /*
- * Adding a smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 0 */
-
- /*
- * Must finish waiting for quiescent state for parity 0 before
- * committing qparity update to memory. Failure to do so could result in
- * the writer waiting forever while new readers are always accessing
- * data (no progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
+ update_counter_and_wait(); /* 0 -> 1, wait readers in parity 0 */
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
-
- switch_next_urcu_qparity(); /* 1 -> 0 */
-
- /*
- * Must commit qparity update to memory before waiting for parity
- * 1 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
+ cmm_smp_mb();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
+ * Wait for previous parity to be empty of readers.
*/
- smp_mb();
+ update_counter_and_wait(); /* 1 -> 0, wait readers in parity 1 */
/*
- * Wait for previous parity to be empty of readers.
+ * Finish waiting for reader threads before letting the old ptr being
+ * freed.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 1 */
-
- /* Finish waiting for reader threads before letting the old ptr being
- * freed. Must be done within internal_urcu_lock because it iterates on
- * reader threads. */
- smp_mb();
-
- internal_urcu_unlock();
+ cmm_smp_mb();
+out:
+ mutex_unlock(&rcu_gp_lock);
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
assert(!ret);
}
}
/*
- * only grow for now.
+ * Only grow for now. If the arena is empty, allocate an
+ * ARENA_INIT_ALLOC-sized chunk. Otherwise, try expanding the last
+ * chunk. If this fails, allocate a new chunk twice as big as the
+ * last chunk.
+ * Memory used by chunks _never_ moves. A chunk could theoretically be
+ * freed when all "used" slots are released, but we don't do it at this
+ * point.
*/
-static void resize_arena(struct registry_arena *arena, size_t len)
+static
+void expand_arena(struct registry_arena *arena)
{
- void *new_arena;
+ struct registry_chunk *new_chunk, *last_chunk;
+ size_t old_chunk_len, new_chunk_len;
+
+ /* No chunk. */
+ if (cds_list_empty(&arena->chunk_list)) {
+ assert(ARENA_INIT_ALLOC >=
+ sizeof(struct registry_chunk)
+ + sizeof(struct rcu_reader));
+ new_chunk_len = ARENA_INIT_ALLOC;
+ new_chunk = mmap(NULL, new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ bzero(new_chunk, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+ return; /* We're done. */
+ }
- new_arena = mmap(arena->p, len,
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE,
- -1, 0);
- /*
- * re-used the same region ?
- */
- if (new_arena == arena->p)
- return;
+ /* Try expanding last chunk. */
+ last_chunk = cds_list_entry(arena->chunk_list.prev,
+ struct registry_chunk, node);
+ old_chunk_len =
+ last_chunk->data_len + sizeof(struct registry_chunk);
+ new_chunk_len = old_chunk_len << 1;
+
+ /* Don't allow memory mapping to move, just expand. */
+ new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
+ new_chunk_len, 0);
+ if (new_chunk != MAP_FAILED) {
+ /* Should not have moved. */
+ assert(new_chunk == last_chunk);
+ bzero((char *) last_chunk + old_chunk_len,
+ new_chunk_len - old_chunk_len);
+ last_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ return; /* We're done. */
+ }
- memcpy(new_arena, arena->p, arena->len);
- bzero(new_arena + arena->len, len - arena->len);
- arena->p = new_arena;
+ /* Remap did not succeed, we need to add a new chunk. */
+ new_chunk = mmap(NULL, new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ bzero(new_chunk, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
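+
+/*
+ * Growth sketch (illustrative): if in-place mremap never succeeds
+ * (e.g. with the non-Linux wrapper above), the chunk list becomes a
+ * series of separate mappings of geometrically increasing size:
+ *
+ *	ARENA_INIT_ALLOC, 2 * ARENA_INIT_ALLOC, 4 * ARENA_INIT_ALLOC, ...
+ *
+ * Either way, existing chunks never move, which keeps the
+ * struct rcu_reader pointers handed out to readers stable.
+ */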
-/* Called with signals off and mutex locked */
-static void add_thread(void)
+static
+struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
- struct urcu_reader *urcu_reader_reg;
+ struct registry_chunk *chunk;
+ struct rcu_reader *rcu_reader_reg;
+ int expand_done = 0; /* Only allow expanding once per alloc */
+ size_t len = sizeof(struct rcu_reader);
+
+retry:
+ cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
+ if (chunk->data_len - chunk->used < len)
+ continue;
+ /* Find spot */
+ for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc) {
+ rcu_reader_reg->alloc = 1;
+ chunk->used += len;
+ return rcu_reader_reg;
+ }
+ }
+ }
- if (registry_arena.len
- < registry_arena.used + sizeof(struct urcu_reader))
- resize_arena(&registry_arena,
- max(registry_arena.len << 1, ARENA_INIT_ALLOC));
- /*
- * Find a free spot.
- */
- for (urcu_reader_reg = registry_arena.p;
- (void *)urcu_reader_reg < registry_arena.p + registry_arena.len;
- urcu_reader_reg++) {
- if (!urcu_reader_reg->alloc)
- break;
+ if (!expand_done) {
+ expand_arena(arena);
+ expand_done = 1;
+ goto retry;
}
- urcu_reader_reg->alloc = 1;
- registry_arena.used += sizeof(struct urcu_reader);
- /* Add to registry */
- urcu_reader_reg->tid = pthread_self();
- assert(urcu_reader_reg->ctr == 0);
- list_add(&urcu_reader_reg->head, &registry);
- urcu_reader = urcu_reader_reg;
+ return NULL;
}
/* Called with signals off and mutex locked */
-static void rcu_gc_registry(void)
+static
+void add_thread(void)
{
- struct urcu_reader *urcu_reader_reg;
- pthread_t tid;
+ struct rcu_reader *rcu_reader_reg;
int ret;
- for (urcu_reader_reg = registry_arena.p;
- (void *)urcu_reader_reg < registry_arena.p + registry_arena.len;
- urcu_reader_reg++) {
- if (!urcu_reader_reg->alloc)
+ rcu_reader_reg = arena_alloc(&registry_arena);
+ if (!rcu_reader_reg)
+ abort();
+ ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
+ if (ret)
+ abort();
+
+ /* Add to registry */
+ rcu_reader_reg->tid = pthread_self();
+ assert(rcu_reader_reg->ctr == 0);
+ cds_list_add(&rcu_reader_reg->node, &registry);
+ /*
+ * Reader threads point into the reader registry. This is why
+ * registry memory must never be relocated.
+ */
+ URCU_TLS(rcu_reader) = rcu_reader_reg;
+}
+
+/* Called with mutex locked */
+static
+void cleanup_thread(struct registry_chunk *chunk,
+ struct rcu_reader *rcu_reader_reg)
+{
+ rcu_reader_reg->ctr = 0;
+ cds_list_del(&rcu_reader_reg->node);
+ rcu_reader_reg->tid = 0;
+ rcu_reader_reg->alloc = 0;
+ chunk->used -= sizeof(struct rcu_reader);
+}
+
+static
+struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
+{
+ struct registry_chunk *chunk;
+
+ cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
continue;
- tid = urcu_reader_reg->tid;
- ret = pthread_kill(tid, 0);
- assert(ret != EINVAL);
- if (ret == ESRCH) {
- list_del(&urcu_reader_reg->head);
- urcu_reader_reg->alloc = 0;
- registry_arena.used -= sizeof(struct urcu_reader);
- }
+ if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
+ continue;
+ return chunk;
}
+ return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void remove_thread(struct rcu_reader *rcu_reader_reg)
+{
+ cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
+ URCU_TLS(rcu_reader) = NULL;
}
/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
sigset_t newmask, oldmask;
int ret;
- ret = sigemptyset(&newmask);
- assert(!ret);
- ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
- assert(!ret);
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
/*
* Check if a signal concurrently registered our thread since
- * the check in rcu_read_lock(). */
- if (urcu_reader)
+ * the check in rcu_read_lock().
+ */
+ if (URCU_TLS(rcu_reader))
goto end;
- internal_urcu_lock();
+ /*
+ * Take care of early registration before urcu_bp constructor.
+ */
+ rcu_bp_init();
+
+ mutex_lock(&rcu_gp_lock);
add_thread();
- internal_urcu_unlock();
+ mutex_unlock(&rcu_gp_lock);
end:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+}
+
+/* Disable signals, take mutex, remove from registry */
+static
+void rcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+
+ mutex_lock(&rcu_gp_lock);
+ remove_thread(rcu_reader_reg);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ rcu_bp_exit();
+}
+
+/*
+ * Remove the thread from the registry when it exits. This runs as the
+ * urcu_bp_key pthread TLS destructor at thread exit time.
+ */
+static
+void urcu_bp_thread_exit_notifier(void *rcu_key)
+{
+ rcu_bp_unregister(rcu_key);
+}
+
+static
+void rcu_bp_init(void)
+{
+ mutex_lock(&init_lock);
+ if (!rcu_bp_refcount++) {
+ int ret;
+
+ ret = pthread_key_create(&urcu_bp_key,
+ urcu_bp_thread_exit_notifier);
+ if (ret)
+ abort();
+ initialized = 1;
+ }
+ mutex_unlock(&init_lock);
+}
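+
+/*
+ * Lifetime sketch (illustrative, not part of this patch): a reader
+ * thread is registered lazily by its first rcu_read_lock(), which
+ * stores its registry slot into the urcu_bp_key TLS key (see
+ * add_thread()). At thread exit, pthreads runs the key destructor
+ * urcu_bp_thread_exit_notifier() on that value, so the thread is
+ * unregistered without any explicit call from the application.
+ */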
+
+static
+void rcu_bp_exit(void)
+{
+ mutex_lock(&init_lock);
+ if (!--rcu_bp_refcount) {
+ struct registry_chunk *chunk, *tmp;
+ int ret;
+
+ cds_list_for_each_entry_safe(chunk, tmp,
+ ®istry_arena.chunk_list, node) {
+ munmap(chunk, chunk->data_len
+ + sizeof(struct registry_chunk));
+ }
+ ret = pthread_key_delete(urcu_bp_key);
+ if (ret)
+ abort();
+ }
+ mutex_unlock(&init_lock);
+}
+
+/*
+ * Holding the rcu_gp_lock across fork makes sure fork() does not race
+ * with a concurrent thread holding this same lock. This ensures that
+ * the registry is in a coherent state in the child.
+ */
+void rcu_bp_before_fork(void)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ assert(!ret);
+ mutex_lock(&rcu_gp_lock);
+ saved_fork_signal_mask = oldmask;
+}
+
+void rcu_bp_after_fork_parent(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
+
+/*
+ * Prune all registry entries except our own thread, matching the Linux
+ * fork() behavior where only the calling thread survives in the child.
+ * Called with rcu_gp_lock held.
+ */
+static
+void urcu_bp_prune_registry(void)
+{
+ struct registry_chunk *chunk;
+ struct rcu_reader *rcu_reader_reg;
+
+ cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc)
+ continue;
+ if (rcu_reader_reg->tid == pthread_self())
+ continue;
+ cleanup_thread(chunk, rcu_reader_reg);
+ }
+ }
+}
+
+void rcu_bp_after_fork_child(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ urcu_bp_prune_registry();
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_gp_lock);
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
assert(!ret);
}
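+
+/*
+ * Usage sketch (illustrative, not part of this patch): an application
+ * mixing fork() with urcu-bp readers would typically wire these
+ * handlers up once at startup:
+ *
+ *	ret = pthread_atfork(rcu_bp_before_fork,
+ *			rcu_bp_after_fork_parent,
+ *			rcu_bp_after_fork_child);
+ *	if (ret)
+ *		abort();
+ */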
-void urcu_bp_exit()
+void *rcu_dereference_sym_bp(void *p)
+{
+ return _rcu_dereference(p);
+}
+
+void *rcu_set_pointer_sym_bp(void **p, void *v)
+{
+ cmm_wmb();
+ uatomic_set(p, v);
+ return v;
+}
+
+void *rcu_xchg_pointer_sym_bp(void **p, void *v)
{
- munmap(registry_arena.p, registry_arena.len);
+ cmm_wmb();
+ return uatomic_xchg(p, v);
}
+
+void *rcu_cmpxchg_pointer_sym_bp(void **p, void *old, void *_new)
+{
+ cmm_wmb();
+ return uatomic_cmpxchg(p, old, _new);
+}
+
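+/*
+ * Usage sketch (illustrative, not part of this patch): the _sym_bp
+ * wrappers above back the rcu_dereference()/rcu_set_pointer()/
+ * rcu_xchg_pointer()/rcu_cmpxchg_pointer() macros for applications
+ * that cannot use the LGPL static inlines. The names gp, create_node()
+ * and use() below are hypothetical:
+ *
+ *	// writer
+ *	newp = create_node();
+ *	oldp = rcu_xchg_pointer(&gp, newp);
+ *	synchronize_rcu();
+ *	free(oldp);
+ *
+ *	// reader
+ *	rcu_read_lock();
+ *	p = rcu_dereference(gp);
+ *	if (p)
+ *		use(p);
+ *	rcu_read_unlock();
+ */
+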
+DEFINE_RCU_FLAVOR(rcu_flavor);
+
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"