#define BUILD_QSBR_LIB
#include "urcu/static/urcu-qsbr.h"
#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
+
+#include "urcu-die.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
void __attribute__((destructor)) rcu_exit(void);
+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held all the way through the completion of awaiting
+ * for the grace period. It is sporadically released between iterations
+ * on the registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
int32_t gp_futex;
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-struct rcu_reader __thread rcu_reader;
+__DEFINE_URCU_TLS_GLOBAL(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+__DEFINE_URCU_TLS_GLOBAL(unsigned int, rand_yield);
#endif
static CDS_LIST_HEAD(registry);
#ifndef DISTRUST_SIGNALS_EXTREME
ret = pthread_mutex_lock(mutex);
- if (ret) {
- perror("Error in pthread mutex lock");
- exit(-1);
- }
+ if (ret)
+ urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
while ((ret = pthread_mutex_trylock(mutex)) != 0) {
- if (ret != EBUSY && ret != EINTR) {
- printf("ret = %d, errno = %d\n", ret, errno);
- perror("Error in pthread mutex lock");
- exit(-1);
- }
+ if (ret != EBUSY && ret != EINTR)
+ urcu_die(ret);
poll(NULL,0,10);
}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
int ret;
ret = pthread_mutex_unlock(mutex);
- if (ret) {
- perror("Error in pthread mutex unlock");
- exit(-1);
- }
+ if (ret)
+ urcu_die(ret);
}
/*
NULL, NULL, 0);
}
+/*
+ * Always called with rcu_registry_lock held. Releases this lock between
+ * iterations and grabs it again. Holds the lock when it returns.
+ */
static void update_counter_and_wait(void)
{
CDS_LIST_HEAD(qsreaders);
- int wait_loops = 0;
+ unsigned int wait_loops = 0;
struct rcu_reader *index, *tmp;
#if (CAA_BITS_PER_LONG < 64)
* quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data
* (no progress). Enforce compiler-order of store to rcu_gp_ctr
- * before load rcu_reader ctr.
+ * before load URCU_TLS(rcu_reader).ctr.
*/
cmm_barrier();
* Wait for each thread rcu_reader_qs_gp count to become 0.
*/
for (;;) {
- wait_loops++;
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_set(&gp_futex, -1);
/*
}
break;
} else {
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
wait_gp();
} else {
cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
}
}
/* put back the reader list in the registry */
{
unsigned long was_online;
- was_online = rcu_reader.ctr;
+ was_online = URCU_TLS(rcu_reader).ctr;
/* All threads should read qparity before accessing data structure
* where new ptr points to. In the "then" case, rcu_thread_offline
cmm_smp_mb();
mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
if (cds_list_empty(&registry))
goto out;
/*
* Wait for previous parity to be empty of readers.
+ * update_counter_and_wait() can release and grab again
+ * rcu_registry_lock internally.
*/
update_counter_and_wait(); /* 0 -> 1, wait readers in parity 0 */
* committing next rcu_gp_ctr update to memory. Failure to
* do so could result in the writer waiting forever while new
* readers are always accessing data (no progress). Enforce
- * compiler-order of load rcu_reader ctr before store to
+ * compiler-order of load URCU_TLS(rcu_reader).ctr before store to
* rcu_gp_ctr.
*/
cmm_barrier();
/*
* Wait for previous parity to be empty of readers.
+ * update_counter_and_wait() can release and grab again
+ * rcu_registry_lock internally.
*/
update_counter_and_wait(); /* 1 -> 0, wait readers in parity 1 */
out:
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
/*
{
unsigned long was_online;
- was_online = rcu_reader.ctr;
+ was_online = URCU_TLS(rcu_reader).ctr;
/*
* Mark the writer thread offline to make sure we don't wait for
cmm_smp_mb();
mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
if (cds_list_empty(&registry))
goto out;
+ /*
+ * update_counter_and_wait() can release and grab again
+ * rcu_registry_lock internally.
+ */
update_counter_and_wait();
out:
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
if (was_online)
void rcu_register_thread(void)
{
- rcu_reader.tid = pthread_self();
- assert(rcu_reader.ctr == 0);
+ URCU_TLS(rcu_reader).tid = pthread_self();
+ assert(URCU_TLS(rcu_reader).ctr == 0);
- mutex_lock(&rcu_gp_lock);
- cds_list_add(&rcu_reader.node, &registry);
- mutex_unlock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
+ cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
+ mutex_unlock(&rcu_registry_lock);
_rcu_thread_online();
}
* with a waiting writer.
*/
_rcu_thread_offline();
- mutex_lock(&rcu_gp_lock);
- cds_list_del(&rcu_reader.node);
- mutex_unlock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
+ cds_list_del(&URCU_TLS(rcu_reader).node);
+ mutex_unlock(&rcu_registry_lock);
}
void rcu_exit(void)