*
* Userspace RCU library
*
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
*/
#define _BSD_SOURCE
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
+#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
-#include "urcu-static.h"
+#include "urcu/wfqueue.h"
+#include "urcu/map/urcu.h"
+#include "urcu/static/urcu.h"
+#include "urcu-pointer.h"
+
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
#include "urcu.h"
+#define _LGPL_SOURCE
+
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * rcu_reader.ctr count to memory (there is no barrier in the reader
+ * per se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
+
+/*
+ * Active attempts to check for reader quiescent states before
+ * calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
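+
+/*
+ * Illustrative sketch (comment only, not compiled): these two constants
+ * drive the writer-side wait loop in update_counter_and_wait() below,
+ * roughly as follows (the KICK_READER_LOOPS case only exists on
+ * architectures with incoherent caches):
+ *
+ *	switch (++wait_loops) {
+ *	case RCU_QS_ACTIVE_ATTEMPTS:
+ *		wait_gp();			// block on gp_futex
+ *		break;
+ *	case KICK_READER_LOOPS:
+ *		smp_mb_master(RCU_MB_GROUP);	// signal stuck readers
+ *		wait_loops = 0;
+ *		break;
+ *	default:
+ *		caa_cpu_relax();		// active busy-wait
+ *	}
+ */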
#ifdef RCU_MEMBARRIER
static int init_done;
void __attribute__((destructor)) rcu_exit(void);
#endif
-static pthread_mutex_t rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-int gp_futex;
+int32_t gp_futex;
/*
* Global grace period counter.
* Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by writer with mutex taken. Read by both writer and readers.
*/
-long rcu_gp_ctr = RCU_GP_COUNT;
+unsigned long rcu_gp_ctr = RCU_GP_COUNT;
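+
+/*
+ * Reader-side counterpart, for context only (sketch paraphrased from the
+ * rcu_read_lock() fast path in urcu/static/urcu.h): an outermost
+ * read-side critical section snapshots rcu_gp_ctr (current phase bit
+ * plus the pre-added RCU_GP_COUNT) into rcu_reader.ctr, while nesting
+ * merely adds RCU_GP_COUNT:
+ *
+ *	tmp = rcu_reader.ctr;
+ *	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+ *		_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ *	else
+ *		_CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+ */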
/*
* Written to only by each individual reader. Read by both the reader and the
unsigned int __thread rand_yield;
#endif
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
-static void internal_rcu_lock(void)
+static void mutex_lock(pthread_mutex_t *mutex)
{
int ret;
#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(&rcu_mutex);
+ ret = pthread_mutex_lock(mutex);
if (ret) {
perror("Error in pthread mutex lock");
exit(-1);
}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(&rcu_mutex)) != 0) {
+ while ((ret = pthread_mutex_trylock(mutex)) != 0) {
if (ret != EBUSY && ret != EINTR) {
printf("ret = %d, errno = %d\n", ret, errno);
perror("Error in pthread mutex lock");
exit(-1);
}
- if (rcu_reader.need_mb) {
- smp_mb();
- rcu_reader.need_mb = 0;
- smp_mb();
+ if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
+ cmm_smp_mb();
+ _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
+ cmm_smp_mb();
}
poll(NULL,0,10);
}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
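+
+/*
+ * Illustrative interleaving (not compiled) of why the trylock loop above
+ * must service need_mb itself under DISTRUST_SIGNALS_EXTREME: the writer
+ * in force_mb_all_readers() busy-waits on our need_mb flag while holding
+ * rcu_gp_lock, so a thread spinning on that same lock has to acknowledge
+ * the request itself in case the signal is never reliably delivered:
+ *
+ *	writer					reader
+ *	------					------
+ *	mutex_lock(&rcu_gp_lock);		mutex_lock(&rcu_gp_lock)
+ *	CMM_STORE_SHARED(need_mb, 1);		  spins in trylock loop,
+ *	while (CMM_LOAD_SHARED(need_mb))	  sees need_mb set,
+ *		poll(NULL, 0, 1);		  issues cmm_smp_mb() and
+ *						  clears need_mb
+ */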
-static void internal_rcu_unlock(void)
+static void mutex_unlock(pthread_mutex_t *mutex)
{
int ret;
- ret = pthread_mutex_unlock(&rcu_mutex);
+ ret = pthread_mutex_unlock(mutex);
if (ret) {
perror("Error in pthread mutex unlock");
exit(-1);
}
}
-/*
- * called with rcu_mutex held.
- */
-static void switch_next_rcu_qparity(void)
-{
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
-}
-
#ifdef RCU_MEMBARRIER
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
- if (likely(has_sys_membarrier))
+ if (caa_likely(has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
else
- smp_mb();
+ cmm_smp_mb();
}
#endif
#ifdef RCU_MB
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
- smp_mb();
+ cmm_smp_mb();
}
#endif
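+
+/*
+ * For context (sketch paraphrased from urcu/static/urcu.h, assuming the
+ * RCU_MEMBARRIER flavor): smp_mb_master() above pairs with the
+ * reader-side smp_mb_slave(), which degrades to a simple compiler
+ * barrier when sys_membarrier() is available, shifting the full fence
+ * cost onto the (rare) writer:
+ *
+ *	static inline void smp_mb_slave(int group)
+ *	{
+ *		if (caa_likely(has_sys_membarrier))
+ *			cmm_barrier();
+ *		else
+ *			cmm_smp_mb();
+ *	}
+ */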
struct rcu_reader *index;
/*
- * Ask for each threads to execute a smp_mb() so we can consider the
+	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
* compiler barriers around rcu read lock as real memory barriers.
*/
- if (list_empty(®istry))
+ if (cds_list_empty(®istry))
return;
/*
- * pthread_kill has a smp_mb(). But beware, we assume it performs
+	 * pthread_kill implies a cmm_smp_mb(). But beware, we assume it performs
* a cache flush on architectures with non-coherent cache. Let's play
- * safe and don't assume anything : we use smp_mc() to make sure the
+	 * safe and not assume anything: we use cmm_smp_mc() to make sure the
* cache flush is enforced.
*/
- list_for_each_entry(index, ®istry, head) {
- index->need_mb = 1;
- smp_mc(); /* write need_mb before sending the signal */
+ cds_list_for_each_entry(index, ®istry, node) {
+ CMM_STORE_SHARED(index->need_mb, 1);
pthread_kill(index->tid, SIGRCU);
}
/*
* relevant bug report. For Linux kernels, we recommend getting
* the Linux Test Project (LTP).
*/
- list_for_each_entry(index, ®istry, head) {
- while (index->need_mb) {
+ cds_list_for_each_entry(index, ®istry, node) {
+ while (CMM_LOAD_SHARED(index->need_mb)) {
pthread_kill(index->tid, SIGRCU);
poll(NULL, 0, 1);
}
}
- smp_mb(); /* read ->need_mb before ending the barrier */
+ cmm_smp_mb(); /* read ->need_mb before ending the barrier */
}
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
force_mb_all_readers();
}
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
}
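+
+/*
+ * Wake-up counterpart of wait_gp(), for context only (sketch paraphrased
+ * from the reader-side static header): the reader leaving its outermost
+ * critical section wakes a writer blocked on gp_futex:
+ *
+ *	if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
+ *		uatomic_set(&gp_futex, 0);
+ *		futex_async(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
+ *	}
+ */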
-void wait_for_quiescent_state(void)
+void update_counter_and_wait(void)
{
- LIST_HEAD(qsreaders);
+ CDS_LIST_HEAD(qsreaders);
int wait_loops = 0;
struct rcu_reader *index, *tmp;
- if (list_empty(®istry))
- return;
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit rcu_gp_ctr update to memory before waiting for quiescent
+ * state. Failure to do so could result in the writer waiting forever
+ * while new readers are always accessing data (no progress). Enforce
+ * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
+ */
+ cmm_barrier();
+
+ /*
+	 * Adding a cmm_smp_mb() here is _not_ formally required, but it
+	 * makes the model easier to understand. It does not have a big
+	 * performance impact anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
/*
* Wait for each thread rcu_reader.ctr count to become 0.
*/
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
}
- list_for_each_entry_safe(index, tmp, ®istry, head) {
- if (!rcu_old_gp_ongoing(&index->ctr))
- list_move(&index->head, &qsreaders);
+ cds_list_for_each_entry_safe(index, tmp, ®istry, node) {
+ if (!rcu_gp_ongoing(&index->ctr))
+ cds_list_move(&index->node, &qsreaders);
}
#ifndef HAS_INCOHERENT_CACHES
- if (list_empty(®istry)) {
+ if (cds_list_empty(®istry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
wait_gp();
else
- cpu_relax();
+ caa_cpu_relax();
}
#else /* #ifndef HAS_INCOHERENT_CACHES */
/*
* BUSY-LOOP. Force the reader thread to commit its
* rcu_reader.ctr update to memory if we wait for too long.
*/
- if (list_empty(®istry)) {
+ if (cds_list_empty(®istry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
wait_loops = 0;
break; /* only escape switch */
default:
- cpu_relax();
+ caa_cpu_relax();
}
}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
/* put back the reader list in the registry */
- list_splice(&qsreaders, ®istry);
+ cds_list_splice(&qsreaders, ®istry);
}
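+
+/*
+ * For reference (sketch paraphrased from the static header): the
+ * rcu_gp_ongoing() predicate used above treats a reader as blocking the
+ * grace period only when it is inside a read-side critical section
+ * (non-zero nesting count) that started before the latest phase flip:
+ *
+ *	v = CMM_LOAD_SHARED(*ctr);
+ *	return (v & RCU_GP_CTR_NEST_MASK) &&
+ *		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
+ */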
void synchronize_rcu(void)
{
- internal_rcu_lock();
+ mutex_lock(&rcu_gp_lock);
+
+ if (cds_list_empty(®istry))
+ goto out;
/* All threads should read qparity before accessing data structure
- * where new ptr points to. Must be done within internal_rcu_lock
- * because it iterates on reader threads.*/
+	 * where the new ptr points to. Must be done within rcu_gp_lock
+	 * because it iterates on reader threads. */
/* Write new ptr before changing the qparity */
- smp_mb_heavy();
-
- switch_next_rcu_qparity(); /* 0 -> 1 */
-
- /*
- * Must commit qparity update to memory before waiting for parity
- * 0 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
-
- /*
- * Adding a smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- smp_mb();
+ smp_mb_master(RCU_MB_GROUP);
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 0 */
+ update_counter_and_wait(); /* 0 -> 1, wait readers in parity 0 */
/*
* Must finish waiting for quiescent state for parity 0 before
- * committing qparity update to memory. Failure to do so could result in
- * the writer waiting forever while new readers are always accessing
- * data (no progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
+ * committing next rcu_gp_ctr update to memory. Failure to do so could
+ * result in the writer waiting forever while new readers are always
+ * accessing data (no progress). Enforce compiler-order of load
+ * rcu_reader ctr before store to rcu_gp_ctr.
*/
+ cmm_barrier();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * Adding a cmm_smp_mb() here is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
-
- switch_next_rcu_qparity(); /* 1 -> 0 */
-
- /*
- * Must commit qparity update to memory before waiting for parity
- * 1 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
-
- /*
- * Adding a smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 1 */
+ update_counter_and_wait(); /* 1 -> 0, wait readers in parity 1 */
	/* Finish waiting for reader threads before letting the old ptr be
- * freed. Must be done within internal_rcu_lock because it iterates on
- * reader threads. */
- smp_mb_heavy();
-
- internal_rcu_unlock();
+ * freed. Must be done within rcu_gp_lock because it iterates on reader
+ * threads. */
+ smp_mb_master(RCU_MB_GROUP);
+out:
+ mutex_unlock(&rcu_gp_lock);
}
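+
+/*
+ * Writer-side usage sketch (illustrative only; foo_ptr, new_foo and the
+ * surrounding update-side locking are hypothetical):
+ *
+ *	struct foo *old = foo_ptr;
+ *	rcu_assign_pointer(foo_ptr, new_foo);	// publish new version
+ *	synchronize_rcu();			// wait out pre-existing readers
+ *	free(old);				// now safe to reclaim
+ */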
/*
{
rcu_reader.tid = pthread_self();
assert(rcu_reader.need_mb == 0);
- assert(rcu_reader.ctr == 0);
+ assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));
- internal_rcu_lock();
+ mutex_lock(&rcu_gp_lock);
rcu_init(); /* In case gcc does not support constructor attribute */
- list_add(&rcu_reader.head, ®istry);
- internal_rcu_unlock();
+ cds_list_add(&rcu_reader.node, ®istry);
+ mutex_unlock(&rcu_gp_lock);
}
void rcu_unregister_thread(void)
{
- internal_rcu_lock();
- list_del(&rcu_reader.head);
- internal_rcu_unlock();
+ mutex_lock(&rcu_gp_lock);
+ cds_list_del(&rcu_reader.node);
+ mutex_unlock(&rcu_gp_lock);
}
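+
+/*
+ * Reader-side usage sketch (illustrative; foo_ptr and do_something()
+ * are hypothetical). Each reader thread registers once before its
+ * first read-side critical section and unregisters before exiting:
+ *
+ *	rcu_register_thread();
+ *	...
+ *	rcu_read_lock();
+ *	p = rcu_dereference(foo_ptr);
+ *	if (p)
+ *		do_something(p);
+ *	rcu_read_unlock();
+ *	...
+ *	rcu_unregister_thread();
+ */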
#ifdef RCU_MEMBARRIER
if (init_done)
return;
init_done = 1;
- if (!membarrier(MEMBARRIER_EXPEDITED))
+ if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
has_sys_membarrier = 1;
}
#endif
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
/*
- * Executing this smp_mb() is the only purpose of this signal handler.
- * It punctually promotes barrier() into smp_mb() on every thread it is
+ * Executing this cmm_smp_mb() is the only purpose of this signal handler.
+	 * It momentarily promotes cmm_barrier() into cmm_smp_mb() on every thread it is
* executed on.
*/
- smp_mb();
- rcu_reader.need_mb = 0;
- smp_mb();
+ cmm_smp_mb();
+ _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
+ cmm_smp_mb();
}
/*
* rcu_init constructor. Called when the library is linked, but also when
* reader threads are calling rcu_register_thread().
* Should only be called by a single thread at a given time. This is ensured by
- * holing the internal_rcu_lock() from rcu_register_thread() or by running at
- * library load time, which should not be executed by multiple threads nor
- * concurrently with rcu_register_thread() anyway.
+ * holding the rcu_gp_lock from rcu_register_thread() or by running at library
+ * load time, which should not be executed by multiple threads nor concurrently
+ * with rcu_register_thread() anyway.
*/
void rcu_init(void)
{
exit(-1);
}
assert(act.sa_sigaction == sigrcu_handler);
- assert(list_empty(®istry));
+ assert(cds_list_empty(®istry));
}
+
#endif /* #ifdef RCU_SIGNAL */
+
+DEFINE_RCU_FLAVOR()
+
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"