*
* Userspace RCU library
*
- * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
- * Distributed under GPLv2
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
+#include <errno.h>
+#include <poll.h>
+#include "urcu-static.h"
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"
-pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+#ifndef URCU_MB
+static int init_done;
+
+void __attribute__((constructor)) urcu_init(void);
+void __attribute__((destructor)) urcu_exit(void);
+#else
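+/*
+ * The URCU_MB flavor relies on real memory barriers on the read side, so
+ * there is no SIGURCU handler to install: urcu_init() is kept as an empty
+ * function so that callers need not know which flavor the library was
+ * built as.
+ */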
+void urcu_init(void)
+{
+}
+#endif
+
+static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+int gp_futex;
/*
* Global grace period counter.
* Contains the current RCU_GP_CTR_BIT.
- * Also has a RCU_GP_CTR_BIT of 1, to accelerate the reader fast path.
+ * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ * Written to only by writer with mutex taken. Read by both writer and readers.
*/
long urcu_gp_ctr = RCU_GP_COUNT;
+/*
+ * Written to only by each individual reader. Read by both the reader and the
+ * writers.
+ */
long __thread urcu_active_readers;
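+/*
+ * A reader holds up the grace period only while its urcu_active_readers
+ * snapshot is both active (non-zero nesting count in the low-order bits)
+ * and of the previous parity. A minimal sketch of the rcu_old_gp_ongoing()
+ * test from urcu-static.h, assuming a low-order nesting mask named
+ * RCU_GP_CTR_NEST_MASK:
+ *
+ *	v = LOAD_SHARED(*value);
+ *	return (v & RCU_GP_CTR_NEST_MASK) &&
+ *		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+ */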
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4
-struct reader_data {
+struct reader_registry {
pthread_t tid;
long *urcu_active_readers;
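+	/*
+	 * Points to this reader's TLS need_mb flag: set by writers to
+	 * request a memory barrier, cleared by the reader once it has
+	 * executed one.
+	 */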
+ char *need_mb;
};
#ifdef DEBUG_YIELD
unsigned int __thread rand_yield;
#endif
-static struct reader_data *reader_data;
+static struct reader_registry *registry;
+static char __thread need_mb;
static int num_readers, alloc_readers;
-#ifndef DEBUG_FULL_MB
-static int sig_done;
-#endif
-void internal_urcu_lock(void)
+static void internal_urcu_lock(void)
{
int ret;
+
+#ifndef DISTRUST_SIGNALS_EXTREME
ret = pthread_mutex_lock(&urcu_mutex);
if (ret) {
perror("Error in pthread mutex lock");
exit(-1);
}
+#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
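+	/*
+	 * Spin on trylock rather than blocking, servicing any pending
+	 * need_mb request between attempts: a writer already holding
+	 * urcu_mutex in force_mb_all_threads() would otherwise wait
+	 * forever for a thread blocked here to execute its barrier.
+	 */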
+ while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+ if (ret != EBUSY && ret != EINTR) {
+ printf("ret = %d, errno = %d\n", ret, errno);
+ perror("Error in pthread mutex lock");
+ exit(-1);
+ }
+ if (need_mb) {
+ smp_mb();
+ need_mb = 0;
+ smp_mb();
+ }
+		poll(NULL, 0, 10);
+ }
+#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
-void internal_urcu_unlock(void)
+static void internal_urcu_unlock(void)
{
int ret;
	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
- urcu_gp_ctr ^= RCU_GP_CTR_BIT;
+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}
-#ifdef DEBUG_FULL_MB
-static void force_mb_single_thread(pthread_t tid)
+#ifdef URCU_MB
+static void force_mb_single_thread(struct reader_registry *index)
{
smp_mb();
}

static void force_mb_all_threads(void)
{
smp_mb();
}
-#else
-
-static void force_mb_single_thread(pthread_t tid)
+#else /* #ifdef URCU_MB */
+static void force_mb_single_thread(struct reader_registry *index)
{
- assert(reader_data);
- sig_done = 0;
+ assert(registry);
/*
	 * pthread_kill has a smp_mb(). But beware: we would also have to
	 * assume that it flushes the cache on architectures with
	 * non-coherent caches. Let's play it safe and not assume anything:
	 * we use smp_mc() to make sure the cache flush is enforced.
- * smp_mb(); write sig_done before sending the signals
*/
- smp_mc(); /* write sig_done before sending the signals */
- pthread_kill(tid, SIGURCU);
+ *index->need_mb = 1;
+ smp_mc(); /* write ->need_mb before sending the signals */
+ pthread_kill(index->tid, SIGURCU);
+ smp_mb();
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
* BUSY-LOOP.
*/
- while (LOAD_SHARED(sig_done) < 1)
- cpu_relax();
- smp_mb(); /* read sig_done before ending the barrier */
+ while (*index->need_mb) {
+ poll(NULL, 0, 1);
+ }
+ smp_mb(); /* read ->need_mb before ending the barrier */
}
static void force_mb_all_threads(void)
{
- struct reader_data *index;
+ struct reader_registry *index;
/*
	 * Ask each thread to execute a smp_mb() so we can consider the
* compiler barriers around rcu read lock as real memory barriers.
*/
- if (!reader_data)
+ if (!registry)
return;
- sig_done = 0;
/*
	 * pthread_kill has a smp_mb(). But beware: we would also have to
	 * assume that it flushes the cache on architectures with
	 * non-coherent caches. Let's play it safe and not assume anything:
	 * we use smp_mc() to make sure the cache flush is enforced.
- * smp_mb(); write sig_done before sending the signals
*/
- smp_mc(); /* write sig_done before sending the signals */
- for (index = reader_data; index < reader_data + num_readers; index++)
+ for (index = registry; index < registry + num_readers; index++) {
+ *index->need_mb = 1;
+ smp_mc(); /* write need_mb before sending the signal */
pthread_kill(index->tid, SIGURCU);
+ }
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
- * BUSY-LOOP.
+ *
+	 * Note that the pthread_kill() in the loop below will never be
+	 * executed on systems that correctly deliver signals in a timely
+	 * manner. However, it is not uncommon for kernels to have bugs
+	 * that can result in lost or unduly delayed signals.
+	 *
+	 * If you see that pthread_kill() executing frequently, we suggest
+	 * testing the underlying kernel and filing the relevant bug
+	 * report. For Linux kernels, we recommend the Linux Test Project
+	 * (LTP).
*/
- while (LOAD_SHARED(sig_done) < num_readers)
- cpu_relax();
- smp_mb(); /* read sig_done before ending the barrier */
+ for (index = registry; index < registry + num_readers; index++) {
+ while (*index->need_mb) {
+ pthread_kill(index->tid, SIGURCU);
+ poll(NULL, 0, 1);
+ }
+ }
+ smp_mb(); /* read ->need_mb before ending the barrier */
+}
+#endif /* #else #ifdef URCU_MB */
+
+/*
+ * synchronize_rcu() waiting. Single thread.
+ */
+static void wait_gp(struct reader_registry *index)
+{
+ atomic_dec(&gp_futex);
+ force_mb_single_thread(index); /* Write futex before read reader_gp */
+ if (!rcu_old_gp_ongoing(index->urcu_active_readers)) {
+ /* Read reader_gp before write futex */
+ force_mb_single_thread(index);
+ /* Callbacks are queued, don't wait. */
+ atomic_set(&gp_futex, 0);
+ } else {
+ /* Read reader_gp before read futex */
+ force_mb_single_thread(index);
+ if (atomic_read(&gp_futex) == -1)
+ futex(&gp_futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0);
+ }
+}
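+
+/*
+ * The futex protocol above relies on a reader-side counterpart in
+ * urcu-static.h: a reader leaving its critical section that observes a
+ * blocked writer (gp_futex == -1) resets the futex and wakes the writer.
+ * A minimal sketch of what that helper is expected to look like (the
+ * name wake_up_gp is illustrative):
+ *
+ *	static inline void wake_up_gp(void)
+ *	{
+ *		if (atomic_read(&gp_futex) == -1) {
+ *			atomic_set(&gp_futex, 0);
+ *			futex(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
+ *		}
+ *	}
+ */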
-#endif
void wait_for_quiescent_state(void)
{
- struct reader_data *index;
+ struct reader_registry *index;
- if (!reader_data)
+ if (!registry)
return;
/*
	 * Wait for each thread's urcu_active_readers count to become 0.
*/
- for (index = reader_data; index < reader_data + num_readers; index++) {
+ for (index = registry; index < registry + num_readers; index++) {
int wait_loops = 0;
+#ifndef HAS_INCOHERENT_CACHES
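+		/*
+		 * Busy-wait for a bounded number of attempts in case the
+		 * reader is about to leave its critical section, then
+		 * block on gp_futex rather than burn CPU on a long-running
+		 * reader.
+		 */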
+ while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
+ if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
+ wait_gp(index);
+ } else {
+ cpu_relax();
+ }
+ }
+#else /* #ifndef HAS_INCOHERENT_CACHES */
/*
* BUSY-LOOP. Force the reader thread to commit its
* urcu_active_readers update to memory if we wait for too long.
*/
while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
- if (wait_loops++ == KICK_READER_LOOPS) {
- force_mb_single_thread(index->tid);
+ switch (wait_loops++) {
+ case RCU_QS_ACTIVE_ATTEMPTS:
+ wait_gp(index);
+ break;
+ case KICK_READER_LOOPS:
+ force_mb_single_thread(index);
wait_loops = 0;
- } else {
+ break;
+ default:
cpu_relax();
}
}
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
}
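+
+/*
+ * For orientation, the two-phase grace period in synchronize_rcu() below
+ * (only partially visible in this patch) proceeds roughly as follows,
+ * with memory barriers between the steps:
+ *
+ *	internal_urcu_lock();
+ *	force_mb_all_threads();
+ *	switch_next_urcu_qparity();	-- flip parity 0 -> 1
+ *	wait_for_quiescent_state();	-- wait out readers of parity 0
+ *	switch_next_urcu_qparity();	-- flip parity 1 -> 0
+ *	wait_for_quiescent_state();	-- wait out readers of parity 1
+ *	force_mb_all_threads();
+ *	internal_urcu_unlock();
+ */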
* 0 quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data (no
* progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
*/
- smp_mc();
+
+ /*
+ * Adding a smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ smp_mb();
/*
* Wait for previous parity to be empty of readers.
* committing qparity update to memory. Failure to do so could result in
* the writer waiting forever while new readers are always accessing
* data (no progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
*/
- smp_mc();
+
+ /*
+ * Adding a smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ smp_mb();
switch_next_urcu_qparity(); /* 1 -> 0 */
* 1 quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data (no
* progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
*/
- smp_mc();
+
+ /*
+ * Adding a smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ smp_mb();
/*
* Wait for previous parity to be empty of readers.
internal_urcu_unlock();
}
-void urcu_add_reader(pthread_t id)
+/*
+ * library wrappers to be used by non-LGPL compatible source code.
+ */
+
+void rcu_read_lock(void)
+{
+ _rcu_read_lock();
+}
+
+void rcu_read_unlock(void)
+{
+ _rcu_read_unlock();
+}
+
+void *rcu_dereference(void *p)
+{
+ return _rcu_dereference(p);
+}
+
+void *rcu_assign_pointer_sym(void **p, void *v)
+{
+ wmb();
+	return STORE_SHARED(*p, v);
+}
+
+void *rcu_xchg_pointer_sym(void **p, void *v)
+{
+ wmb();
+ return xchg(p, v);
+}
+
+void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
+{
+ wmb();
+ return cmpxchg(p, old, _new);
+}
+
+void *rcu_publish_content_sym(void **p, void *v)
{
- struct reader_data *oldarray;
+ void *oldptr;
- if (!reader_data) {
+ oldptr = _rcu_xchg_pointer(p, v);
+ synchronize_rcu();
+ return oldptr;
+}
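+
+/*
+ * A minimal usage sketch for the wrappers above, assuming a hypothetical
+ * RCU-protected global pointer gp and a writer-allocated new_foo:
+ *
+ *	struct foo *f, *old;
+ *
+ *	rcu_read_lock();			-- reader side
+ *	f = rcu_dereference(gp);
+ *	(read through f here)
+ *	rcu_read_unlock();
+ *
+ *	old = rcu_publish_content_sym((void **)&gp, new_foo);
+ *	free(old);	-- safe: the xchg and synchronize_rcu() are done
+ */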
+
+static void rcu_add_reader(pthread_t id)
+{
+ struct reader_registry *oldarray;
+
+ if (!registry) {
alloc_readers = INIT_NUM_THREADS;
num_readers = 0;
- reader_data =
- malloc(sizeof(struct reader_data) * alloc_readers);
+ registry =
+ malloc(sizeof(struct reader_registry) * alloc_readers);
}
if (alloc_readers < num_readers + 1) {
- oldarray = reader_data;
- reader_data = malloc(sizeof(struct reader_data)
+ oldarray = registry;
+ registry = malloc(sizeof(struct reader_registry)
* (alloc_readers << 1));
- memcpy(reader_data, oldarray,
- sizeof(struct reader_data) * alloc_readers);
+ memcpy(registry, oldarray,
+ sizeof(struct reader_registry) * alloc_readers);
alloc_readers <<= 1;
free(oldarray);
}
- reader_data[num_readers].tid = id;
+ registry[num_readers].tid = id;
/* reference to the TLS of _this_ reader thread. */
- reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
+ registry[num_readers].urcu_active_readers = &urcu_active_readers;
+ registry[num_readers].need_mb = &need_mb;
num_readers++;
}
* Never shrink (implementation limitation).
* This is O(nb threads). Eventually use a hash table.
*/
-void urcu_remove_reader(pthread_t id)
+static void rcu_remove_reader(pthread_t id)
{
- struct reader_data *index;
+ struct reader_registry *index;
- assert(reader_data != NULL);
- for (index = reader_data; index < reader_data + num_readers; index++) {
+ assert(registry != NULL);
+ for (index = registry; index < registry + num_readers; index++) {
if (pthread_equal(index->tid, id)) {
- memcpy(index, &reader_data[num_readers - 1],
- sizeof(struct reader_data));
- reader_data[num_readers - 1].tid = 0;
- reader_data[num_readers - 1].urcu_active_readers = NULL;
+			memcpy(index, &registry[num_readers - 1],
+ sizeof(struct reader_registry));
+ registry[num_readers - 1].tid = 0;
+ registry[num_readers - 1].urcu_active_readers = NULL;
num_readers--;
return;
}
assert(0);
}
-void urcu_register_thread(void)
+void rcu_register_thread(void)
{
internal_urcu_lock();
- urcu_add_reader(pthread_self());
+ urcu_init(); /* In case gcc does not support constructor attribute */
+ rcu_add_reader(pthread_self());
internal_urcu_unlock();
}
-void urcu_unregister_thread(void)
+void rcu_unregister_thread(void)
{
internal_urcu_lock();
- urcu_remove_reader(pthread_self());
+ rcu_remove_reader(pthread_self());
internal_urcu_unlock();
}
-#ifndef DEBUG_FULL_MB
-void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
+#ifndef URCU_MB
+static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
/*
* Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
*/
smp_mb();
- atomic_inc(&sig_done);
+ need_mb = 0;
+ smp_mb();
}
-void __attribute__((constructor)) urcu_init(void)
+/*
+ * urcu_init constructor. Called when the library is loaded, but also when
+ * reader threads call rcu_register_thread().
+ * Should only be called by a single thread at a given time. This is ensured by
+ * holding the internal_urcu_lock() from rcu_register_thread() or by running at
+ * library load time, which is not executed by multiple threads nor
+ * concurrently with rcu_register_thread() anyway.
+ */
+void urcu_init(void)
{
struct sigaction act;
int ret;
+ if (init_done)
+ return;
+ init_done = 1;
+
act.sa_sigaction = sigurcu_handler;
+ act.sa_flags = SA_SIGINFO | SA_RESTART;
+ sigemptyset(&act.sa_mask);
ret = sigaction(SIGURCU, &act, NULL);
if (ret) {
perror("Error in sigaction");
}
}
-void __attribute__((destructor)) urcu_exit(void)
+void urcu_exit(void)
{
struct sigaction act;
int ret;
exit(-1);
}
assert(act.sa_sigaction == sigurcu_handler);
- free(reader_data);
+ free(registry);
}
-#endif
+#endif /* #ifndef URCU_MB */