*
* Userspace RCU QSBR library
*
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
+#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <errno.h>
#include <poll.h>
+#include "urcu-qsbr-map.h"
+
#define BUILD_QSBR_LIB
#include "urcu-qsbr-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
unsigned int __thread rand_yield;
#endif
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
static void mutex_lock(pthread_mutex_t *mutex)
{
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_rmb();
+ cmm_smp_rmb();
if (uatomic_read(&gp_futex) == -1)
futex_noasync(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
}
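+
+/*
+ * Editor's note (hedged sketch, not upstream code): the matching
+ * wake-up side lives in the static header as wake_up_gp(), which
+ * conceptually does:
+ *
+ *	if (uatomic_read(&gp_futex) == -1) {
+ *		uatomic_set(&gp_futex, 0);
+ *		futex_noasync(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
+ *	}
+ *
+ * so readers issue the FUTEX_WAKE syscall only when the writer has
+ * actually blocked (gp_futex == -1).
+ */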
-static void wait_for_quiescent_state(void)
+static void update_counter_and_wait(void)
{
- LIST_HEAD(qsreaders);
+ CDS_LIST_HEAD(qsreaders);
int wait_loops = 0;
struct rcu_reader *index, *tmp;
- if (list_empty(&registry))
- return;
+#if (CAA_BITS_PER_LONG < 64)
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+#else /* !(CAA_BITS_PER_LONG < 64) */
+ /* Increment current grace period (G.P.) counter */
+ CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+#endif /* !(CAA_BITS_PER_LONG < 64) */
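+
+/*
+ * Editor's sketch (assumes the rcu_gp_ongoing() inline from
+ * urcu-qsbr-static.h): the per-reader test used in the loop below
+ * reduces to:
+ *
+ *	v = CMM_LOAD_SHARED(*ctr);
+ *	return v && (v != rcu_gp_ctr);
+ *
+ * that is, a reader holds up the grace period only while it is online
+ * (ctr != 0) and still carries a counter value predating the update
+ * just made above.
+ */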
+
+ /*
+ * Must commit rcu_gp_ctr update to memory before waiting for
+ * quiescent state. Failure to do so could result in the writer
+ * waiting forever while new readers are always accessing data
+ * (no progress). Enforce compiler ordering of the store to
+ * rcu_gp_ctr before the load of each rcu_reader ctr.
+ */
+ cmm_barrier();
+
+ /*
+ * Add a cmm_smp_mb(), which is _not_ formally required but makes
+ * the model easier to understand. It does not have a big performance
+ * impact anyway, given that this is the write side.
+ */
+ cmm_smp_mb();
+
/*
* Wait for each thread rcu_reader.ctr to indicate a quiescent state
* (0 if offline, or the current rcu_gp_ctr value).
*/
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- smp_mb();
+ cmm_smp_mb();
}
- list_for_each_entry_safe(index, tmp, &registry, head) {
+ cds_list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_gp_ongoing(&index->ctr))
- list_move(&index->head, &qsreaders);
+ cds_list_move(&index->node, &qsreaders);
}
- if (list_empty(&registry)) {
+ if (cds_list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb();
+ cmm_smp_mb();
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
} else {
#ifndef HAS_INCOHERENT_CACHES
- cpu_relax();
+ caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
- smp_mb();
+ cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
}
}
/* put back the reader list in the registry */
- list_splice(&qsreaders, &registry);
+ cds_list_splice(&qsreaders, &registry);
}
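+
+/*
+ * Editor's sketch (hedged, mirroring the _rcu_quiescent_state()
+ * inline in the static header): readers terminate the loop above by
+ * periodically announcing quiescence, conceptually:
+ *
+ *	cmm_smp_mb();
+ *	_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ *	cmm_smp_mb();
+ *	wake_up_gp();
+ *
+ * copying the global counter into the per-thread ctr and waking the
+ * writer if it sleeps on the futex.
+ */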
/*
* Using a two-subphases algorithm for architectures with smaller
* than 64-bit long-size to ensure we do not encounter an overflow bug.
*/
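+
+/*
+ * Editor's note (hedged arithmetic): a monotonically increasing
+ * 32-bit counter could wrap after on the order of 2^31 grace
+ * periods, which a long-running process can plausibly reach; the
+ * parity flip in update_counter_and_wait() sidesteps wrap-around
+ * entirely. A 64-bit counter would need on the order of 2^63 grace
+ * periods to wrap, which is unreachable in practice, hence the
+ * simple increment.
+ */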
-#if (BITS_PER_LONG < 64)
-/*
- * called with rcu_gp_lock held.
- */
-static void switch_next_rcu_qparity(void)
-{
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
-}
-
+#if (CAA_BITS_PER_LONG < 64)
void synchronize_rcu(void)
{
unsigned long was_online;
/* All threads should read qparity before accessing data structure
* where new ptr points to.
*/
/* Write new ptr before changing the qparity */
- smp_mb();
+ cmm_smp_mb();
/*
* Mark the writer thread offline to make sure we don't wait for
- * our own quiescent state. This allows using synchronize_rcu() in
- * threads registered as readers.
+ * our own quiescent state. This allows using synchronize_rcu()
+ * in threads registered as readers.
*/
if (was_online)
- STORE_SHARED(rcu_reader.ctr, 0);
+ CMM_STORE_SHARED(rcu_reader.ctr, 0);
mutex_lock(&rcu_gp_lock);
- switch_next_rcu_qparity(); /* 0 -> 1 */
-
- /*
- * Must commit qparity update to memory before waiting for parity
- * 0 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
- */
+ if (cds_list_empty(&registry))
+ goto out;
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 0 */
+ update_counter_and_wait(); /* 0 -> 1, wait readers in parity 0 */
/*
* Must finish waiting for quiescent state for parity 0 before
- * committing qparity update to memory. Failure to do so could result in
- * the writer waiting forever while new readers are always accessing
- * data (no progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
+ * committing the next rcu_gp_ctr update to memory. Failure to
+ * do so could result in the writer waiting forever while new
+ * readers are always accessing data (no progress). Enforce
+ * compiler ordering of the load of each rcu_reader ctr before
+ * the store to rcu_gp_ctr.
*/
-
- switch_next_rcu_qparity(); /* 1 -> 0 */
+ cmm_barrier();
/*
- * Must commit qparity update to memory before waiting for parity
- * 1 quiescent state. Failure to do so could result in the writer
- * waiting forever while new readers are always accessing data (no
- * progress).
- * Ensured by STORE_SHARED and LOAD_SHARED.
+ * Add a cmm_smp_mb(), which is _not_ formally required but makes
+ * the model easier to understand. It does not have a big performance
+ * impact anyway, given that this is the write side.
*/
+ cmm_smp_mb();
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(); /* Wait readers in parity 1 */
-
+ update_counter_and_wait(); /* 1 -> 0, wait readers in parity 1 */
+out:
mutex_unlock(&rcu_gp_lock);
/*
* Finish waiting for reader threads before letting the old ptr be
* freed.
*/
if (was_online)
- _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
- smp_mb();
+ _CMM_STORE_SHARED(rcu_reader.ctr,
+ CMM_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb();
}
-#else /* !(BITS_PER_LONG < 64) */
+#else /* !(CAA_BITS_PER_LONG < 64) */
void synchronize_rcu(void)
{
unsigned long was_online;
/*
* Mark the writer thread offline to make sure we don't wait for
- * our own quiescent state. This allows using synchronize_rcu() in
- * threads registered as readers.
+ * our own quiescent state. This allows using synchronize_rcu()
+ * in threads registered as readers.
*/
- smp_mb();
+ cmm_smp_mb();
if (was_online)
- STORE_SHARED(rcu_reader.ctr, 0);
+ CMM_STORE_SHARED(rcu_reader.ctr, 0);
mutex_lock(&rcu_gp_lock);
- STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
- wait_for_quiescent_state();
+ if (cds_list_empty(&registry))
+ goto out;
+ update_counter_and_wait();
+out:
mutex_unlock(&rcu_gp_lock);
if (was_online)
- _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
- smp_mb();
+ _CMM_STORE_SHARED(rcu_reader.ctr,
+ CMM_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb();
}
-#endif /* !(BITS_PER_LONG < 64) */
+#endif /* !(CAA_BITS_PER_LONG < 64) */
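+
+/*
+ * Editor's usage sketch (hypothetical names shared_ptr, new_node,
+ * old): a writer typically publishes a new version, waits for a
+ * grace period, then reclaims the old one:
+ *
+ *	old = rcu_dereference(shared_ptr);
+ *	rcu_assign_pointer(shared_ptr, new_node);
+ *	synchronize_rcu();
+ *	free(old);
+ */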
/*
* library wrappers to be used by non-LGPL compatible source code.
assert(rcu_reader.ctr == 0);
mutex_lock(&rcu_gp_lock);
- list_add(&rcu_reader.head, &registry);
+ cds_list_add(&rcu_reader.node, &registry);
mutex_unlock(&rcu_gp_lock);
_rcu_thread_online();
}
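+
+/*
+ * Editor's usage sketch (hypothetical reader loop): with QSBR each
+ * reader thread registers itself and must announce its own quiescent
+ * states, since rcu_read_lock()/rcu_read_unlock() compile down to
+ * mere markers in this flavor:
+ *
+ *	rcu_register_thread();
+ *	for (;;) {
+ *		rcu_read_lock();
+ *		... read RCU-protected data ...
+ *		rcu_read_unlock();
+ *		rcu_quiescent_state();
+ *	}
+ *	rcu_unregister_thread();
+ */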
*/
_rcu_thread_offline();
mutex_lock(&rcu_gp_lock);
- list_del(&rcu_reader.head);
+ cds_list_del(&rcu_reader.node);
mutex_unlock(&rcu_gp_lock);
}
void rcu_exit(void)
{
- assert(list_empty(&registry));
+ assert(cds_list_empty(&registry));
}
+
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"