*
* Userspace RCU library
*
- * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
- * Distributed under LGPLv2.1
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"
-pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+#ifndef URCU_MB
+static int init_done;
+
+void __attribute__((constructor)) urcu_init(void);
+void __attribute__((destructor)) urcu_exit(void);
+#else
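+/*
+ * With URCU_MB, readers rely on real memory barriers rather than the
+ * SIGURCU signal, so there is no handler to install and urcu_init() can
+ * stay a no-op kept for API compatibility.
+ */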
+void urcu_init(void)
+{
+}
+#endif
+
+static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+
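+/*
+ * gp_futex protocol (sketch): 0 means no waiter is queued; the writer
+ * decrements it to -1 before blocking in futex(FUTEX_WAIT), and whichever
+ * side ends the wait resets it to 0 before (or instead of) FUTEX_WAKE.
+ */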
+int gp_futex;
/*
* Global grace period counter.
* Contains the current RCU_GP_CTR_BIT.
- * Also has a RCU_GP_CTR_BIT of 1, to accelerate the reader fast path.
+ * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by writer with mutex taken. Read by both writer and readers.
*/
long urcu_gp_ctr = RCU_GP_COUNT;
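+
+/*
+ * Sketch of the counter layout assumed here (the actual macros live in
+ * urcu-static.h; values reproduced for reference): the low half of the
+ * long counts read-side nesting, and RCU_GP_CTR_BIT flips once per grace
+ * period:
+ *
+ *	#define RCU_GP_COUNT		(1UL << 0)
+ *	#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
+ *	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
+ */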
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-long __thread urcu_active_readers;
-
-/* Thread IDs of registered readers */
-#define INIT_NUM_THREADS 4
-
-struct reader_registry {
- pthread_t tid;
- long *urcu_active_readers;
- char *need_mb;
-};
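+/*
+ * Sketch of the per-thread reader state assumed by this patch; the real
+ * definition lives in urcu-static.h (field types and order approximate):
+ *
+ *	struct urcu_reader {
+ *		long ctr;		(nesting count + gp parity)
+ *		char need_mb;		(writer requests a barrier)
+ *		struct list_head head;	(node in the global registry)
+ *		pthread_t tid;		(target of SIGURCU)
+ *	};
+ */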
+struct urcu_reader __thread urcu_reader;
#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif
-static struct reader_registry *registry;
-static char __thread need_mb;
-static int num_readers, alloc_readers;
+static LIST_HEAD(registry);
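+/*
+ * LIST_HEAD()/list_add()/list_for_each_entry() are the Linux-kernel-style
+ * intrusive list primitives bundled with the library (list.h). The registry
+ * is only modified or scanned with internal_urcu_lock held.
+ */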
-void internal_urcu_lock(void)
+static void internal_urcu_lock(void)
{
int ret;
perror("Error in pthread mutex lock");
exit(-1);
}
- if (need_mb) {
+ if (urcu_reader.need_mb) {
smp_mb();
- need_mb = 0;
+ urcu_reader.need_mb = 0;
smp_mb();
}
	poll(NULL, 0, 10);
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
-void internal_urcu_unlock(void)
+static void internal_urcu_unlock(void)
{
int ret;
STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}
-#ifdef DEBUG_FULL_MB
-#ifdef HAS_INCOHERENT_CACHES
-static void force_mb_single_thread(struct reader_registry *index)
+#ifdef URCU_MB
+#if 0 /* unused */
+static void force_mb_single_thread(struct urcu_reader *index)
{
smp_mb();
}
-#endif /* #ifdef HAS_INCOHERENT_CACHES */
+#endif /* #if 0 */
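+/*
+ * With URCU_MB, reader critical sections already issue real memory
+ * barriers, so a single local smp_mb() on the writer side is enough to
+ * pair with them.
+ */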
static void force_mb_all_threads(void)
{
smp_mb();
}
-#else /* #ifdef DEBUG_FULL_MB */
-#ifdef HAS_INCOHERENT_CACHES
-static void force_mb_single_thread(struct reader_registry *index)
+#else /* #ifdef URCU_MB */
+#if 0 /* unused */
+static void force_mb_single_thread(struct urcu_reader *index)
{
- assert(registry);
+	assert(!list_empty(&registry));
/*
* pthread_kill has a smp_mb(). But beware, we assume it performs
* a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
* cache flush is enforced.
*/
- *index->need_mb = 1;
+ index->need_mb = 1;
smp_mc(); /* write ->need_mb before sending the signals */
pthread_kill(index->tid, SIGURCU);
smp_mb();
* Wait for sighandler (and thus mb()) to execute on every thread.
* BUSY-LOOP.
*/
- while (*index->need_mb) {
+ while (index->need_mb) {
poll(NULL, 0, 1);
}
smp_mb(); /* read ->need_mb before ending the barrier */
}
-#endif /* #ifdef HAS_INCOHERENT_CACHES */
+#endif /* #if 0 */
static void force_mb_all_threads(void)
{
- struct reader_registry *index;
+ struct urcu_reader *index;
+
/*
	 * Ask each thread to execute an smp_mb() so we can consider the
	 * compiler barriers around rcu_read_lock() as real memory barriers.
*/
- if (!registry)
+	if (list_empty(&registry))
return;
/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
*/
- for (index = registry; index < registry + num_readers; index++) {
- *index->need_mb = 1;
+	list_for_each_entry(index, &registry, head) {
+ index->need_mb = 1;
smp_mc(); /* write need_mb before sending the signal */
pthread_kill(index->tid, SIGURCU);
}
* relevant bug report. For Linux kernels, we recommend getting
* the Linux Test Project (LTP).
*/
- for (index = registry; index < registry + num_readers; index++) {
- while (*index->need_mb) {
+	list_for_each_entry(index, &registry, head) {
+ while (index->need_mb) {
pthread_kill(index->tid, SIGURCU);
poll(NULL, 0, 1);
}
}
smp_mb(); /* read ->need_mb before ending the barrier */
}
-#endif /* #else #ifdef DEBUG_FULL_MB */
+#endif /* #else #ifdef URCU_MB */
+
+/*
+ * synchronize_rcu() waiting. Single thread.
+ */
+static void wait_gp(void)
+{
+ /* Read reader_gp before read futex */
+ force_mb_all_threads();
+ if (uatomic_read(&gp_futex) == -1)
+ futex(&gp_futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0);
+}
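+
+/*
+ * The wakeup that pairs with wait_gp() sits on the reader side, in
+ * _rcu_read_unlock() (urcu-static.h). A sketch of that helper, assuming
+ * the upstream name wake_up_gp():
+ *
+ *	static inline void wake_up_gp(void)
+ *	{
+ *		if (unlikely(uatomic_read(&gp_futex) == -1)) {
+ *			uatomic_set(&gp_futex, 0);
+ *			futex(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
+ *		}
+ *	}
+ */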
void wait_for_quiescent_state(void)
{
- struct reader_registry *index;
+ LIST_HEAD(qsreaders);
+ int wait_loops = 0;
+ struct urcu_reader *index, *tmp;
- if (!registry)
+	if (list_empty(&registry))
return;
/*
- * Wait for each thread urcu_active_readers count to become 0.
+	 * Wait for each thread's urcu_reader.ctr count to become 0.
*/
- for (index = registry; index < registry + num_readers; index++) {
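+	/*
+	 * Adaptive strategy: re-scan the registry while busy-looping for up
+	 * to RCU_QS_ACTIVE_ATTEMPTS iterations; past that point, advertise
+	 * this thread as a waiter (gp_futex = -1) and block in wait_gp()
+	 * until a reader wakes us.
+	 */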
+ for (;;) {
+ wait_loops++;
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ uatomic_dec(&gp_futex);
+ /* Write futex before read reader_gp */
+ force_mb_all_threads();
+ }
+
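+		/*
+		 * Readers seen in a quiescent state are moved to a private
+		 * list so later scans only revisit still-active readers; the
+		 * list is spliced back into the registry before returning.
+		 */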
+		list_for_each_entry_safe(index, tmp, &registry, head) {
+ if (!rcu_old_gp_ongoing(&index->ctr))
+ list_move(&index->head, &qsreaders);
+ }
+
#ifndef HAS_INCOHERENT_CACHES
- while (rcu_old_gp_ongoing(index->urcu_active_readers))
- cpu_relax();
+		if (list_empty(&registry)) {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ force_mb_all_threads();
+ uatomic_set(&gp_futex, 0);
+ }
+ break;
+ } else {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
+ wait_gp();
+ else
+ cpu_relax();
+ }
#else /* #ifndef HAS_INCOHERENT_CACHES */
- int wait_loops = 0;
/*
* BUSY-LOOP. Force the reader thread to commit its
- * urcu_active_readers update to memory if we wait for too long.
+ * urcu_reader.ctr update to memory if we wait for too long.
*/
- while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
- if (wait_loops++ == KICK_READER_LOOPS) {
- force_mb_single_thread(index);
+		if (list_empty(&registry)) {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ force_mb_all_threads();
+ uatomic_set(&gp_futex, 0);
+ }
+ break;
+ } else {
+ switch (wait_loops) {
+ case RCU_QS_ACTIVE_ATTEMPTS:
+ wait_gp();
+ break; /* only escape switch */
+ case KICK_READER_LOOPS:
+ force_mb_all_threads();
wait_loops = 0;
- } else {
+ break; /* only escape switch */
+ default:
cpu_relax();
}
}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
+ /* put back the reader list in the registry */
+	list_splice(&qsreaders, &registry);
}
void synchronize_rcu(void)
* Ensured by STORE_SHARED and LOAD_SHARED.
*/
+ /*
+ * Adding a smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ smp_mb();
+
/*
* Wait for previous parity to be empty of readers.
*/
* Ensured by STORE_SHARED and LOAD_SHARED.
*/
+ /*
+ * Adding a smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ smp_mb();
+
switch_next_urcu_qparity(); /* 1 -> 0 */
/*
* Ensured by STORE_SHARED and LOAD_SHARED.
*/
+ /*
+ * Adding a smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ smp_mb();
+
/*
* Wait for previous parity to be empty of readers.
*/
_rcu_read_unlock();
}
-void *rcu_dereference(void *p)
-{
- return _rcu_dereference(p);
-}
-
-void *rcu_assign_pointer_sym(void **p, void *v)
-{
- wmb();
- return STORE_SHARED(p, v);
-}
-
-void *rcu_xchg_pointer_sym(void **p, void *v)
-{
- wmb();
- return xchg(p, v);
-}
-
-void *rcu_publish_content_sym(void **p, void *v)
-{
- void *oldptr;
-
- oldptr = _rcu_xchg_pointer(p, v);
- synchronize_rcu();
- return oldptr;
-}
-
-static void rcu_add_reader(pthread_t id)
-{
- struct reader_registry *oldarray;
-
- if (!registry) {
- alloc_readers = INIT_NUM_THREADS;
- num_readers = 0;
- registry =
- malloc(sizeof(struct reader_registry) * alloc_readers);
- }
- if (alloc_readers < num_readers + 1) {
- oldarray = registry;
- registry = malloc(sizeof(struct reader_registry)
- * (alloc_readers << 1));
- memcpy(registry, oldarray,
- sizeof(struct reader_registry) * alloc_readers);
- alloc_readers <<= 1;
- free(oldarray);
- }
- registry[num_readers].tid = id;
- /* reference to the TLS of _this_ reader thread. */
- registry[num_readers].urcu_active_readers = &urcu_active_readers;
- registry[num_readers].need_mb = &need_mb;
- num_readers++;
-}
-
-/*
- * Never shrink (implementation limitation).
- * This is O(nb threads). Eventually use a hash table.
- */
-static void rcu_remove_reader(pthread_t id)
-{
- struct reader_registry *index;
-
- assert(registry != NULL);
- for (index = registry; index < registry + num_readers; index++) {
- if (pthread_equal(index->tid, id)) {
-			memcpy(index, &registry[num_readers - 1],
- sizeof(struct reader_registry));
- registry[num_readers - 1].tid = 0;
- registry[num_readers - 1].urcu_active_readers = NULL;
- num_readers--;
- return;
- }
- }
- /* Hrm not found, forgot to register ? */
- assert(0);
-}
-
void rcu_register_thread(void)
{
+ urcu_reader.tid = pthread_self();
+ assert(urcu_reader.need_mb == 0);
+ assert(urcu_reader.ctr == 0);
+
internal_urcu_lock();
- rcu_add_reader(pthread_self());
+ urcu_init(); /* In case gcc does not support constructor attribute */
+	list_add(&urcu_reader.head, &registry);
internal_urcu_unlock();
}
void rcu_unregister_thread(void)
{
internal_urcu_lock();
- rcu_remove_reader(pthread_self());
+ list_del(&urcu_reader.head);
internal_urcu_unlock();
}
-#ifndef DEBUG_FULL_MB
+#ifndef URCU_MB
static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
/*
* executed on.
*/
smp_mb();
- need_mb = 0;
+ urcu_reader.need_mb = 0;
smp_mb();
}
-void __attribute__((constructor)) urcu_init(void)
+/*
+ * urcu_init constructor. Called when the library is loaded, but also when
+ * reader threads call rcu_register_thread().
+ * Should only be called by a single thread at a given time. This is ensured
+ * by holding the internal_urcu_lock() from rcu_register_thread(), or by
+ * running at library load time, which is not executed by multiple threads
+ * nor concurrently with rcu_register_thread() anyway.
+ */
+void urcu_init(void)
{
struct sigaction act;
int ret;
+ if (init_done)
+ return;
+ init_done = 1;
+
act.sa_sigaction = sigurcu_handler;
+ act.sa_flags = SA_SIGINFO | SA_RESTART;
+ sigemptyset(&act.sa_mask);
ret = sigaction(SIGURCU, &act, NULL);
if (ret) {
perror("Error in sigaction");
}
}
-void __attribute__((destructor)) urcu_exit(void)
+void urcu_exit(void)
{
struct sigaction act;
int ret;
exit(-1);
}
assert(act.sa_sigaction == sigurcu_handler);
- free(registry);
+	assert(list_empty(&registry));
}
-#endif /* #ifndef DEBUG_FULL_MB */
+#endif /* #ifndef URCU_MB */
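+
+/*
+ * Typical use of the public API touched by this patch (illustrative
+ * sketch; do_something() and shared_ptr are placeholders):
+ *
+ *	rcu_register_thread();			(once per reader thread)
+ *	rcu_read_lock();
+ *	p = rcu_dereference(shared_ptr);
+ *	if (p)
+ *		do_something(p);
+ *	rcu_read_unlock();
+ *	...
+ *	synchronize_rcu();			(writer: wait for grace period)
+ *	rcu_unregister_thread();
+ */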