pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
-/* Global quiescent period parity */
-int urcu_qparity;
+/*
+ * Global grace period counter.
+ * Contains the current RCU_GP_CTR_BIT.
+ * Also has a low-order RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ * Written to only by writer with mutex taken. Read by both writer and readers.
+ */
+long urcu_gp_ctr = RCU_GP_COUNT;
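
For reference, a sketch of the counter layout this initializer relies on: the low-order bits of the long hold the read-side nesting count (stepped in RCU_GP_COUNT increments) and a single higher bit holds the grace-period parity. The exact definitions live in the urcu headers; the values below are an assumption for illustration only:

	/* Assumed layout, for illustration (not part of this patch): */
	#define RCU_GP_COUNT		(1UL << 0)                  /* nesting count increment */
	#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) * 4)) /* grace period parity bit */
	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)        /* extracts the nesting count */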
-int __thread urcu_active_readers[2];
+/*
+ * Written to only by each individual reader. Read by both the reader and the
+ * writers.
+ */
+long __thread urcu_active_readers;
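
The reader fast path mentioned above copies urcu_gp_ctr, which already carries a count of 1, straight into urcu_active_readers at the outermost nesting level, so no add is needed there. A sketch of the read-side pair under the counter-layout assumption above (barrier() is the usual compiler barrier; not part of this patch):

	static inline void rcu_read_lock(void)
	{
		long tmp = urcu_active_readers;

		if (!(tmp & RCU_GP_CTR_NEST_MASK))
			/* Outermost: snapshot gp counter, count already at 1. */
			urcu_active_readers = LOAD_SHARED(urcu_gp_ctr);
		else
			/* Nested: only bump the nesting count. */
			urcu_active_readers = tmp + RCU_GP_COUNT;
		barrier();	/* promoted to smp_mb() by the signal handler below */
	}

	static inline void rcu_read_unlock(void)
	{
		barrier();
		urcu_active_readers -= RCU_GP_COUNT;
	}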
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4
struct reader_data {
pthread_t tid;
- int *urcu_active_readers;
+ long *urcu_active_readers;
};
+#ifdef DEBUG_YIELD
+unsigned int yield_active;
+unsigned int __thread rand_yield;
+#endif
+
static struct reader_data *reader_data;
static int num_readers, alloc_readers;
+#ifndef DEBUG_FULL_MB
static int sig_done;
+#endif
-void rcu_write_lock(void)
+void internal_urcu_lock(void)
{
	int ret;
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}
-void rcu_write_unlock(void)
+void internal_urcu_unlock(void)
{
	int ret;
	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * called with urcu_mutex held.
 */
-static int switch_next_urcu_qparity(void)
+static void switch_next_urcu_qparity(void)
{
- int old_parity = urcu_qparity;
- urcu_qparity = 1 - old_parity;
- return old_parity;
+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
+}
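
STORE_SHARED() and LOAD_SHARED() bundle a volatile access with the cache primitives discussed below; on cache-coherent architectures they reduce to little more than a volatile load/store. A minimal sketch under that coherent-cache assumption:

	#define ACCESS_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
	#define LOAD_SHARED(p)		ACCESS_ONCE(p)
	#define STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })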
+
+#ifdef DEBUG_FULL_MB
+static void force_mb_single_thread(pthread_t tid)
+{
+ smp_mb();
+}
+
+static void force_mb_all_threads(void)
+{
+ smp_mb();
+}
+#else
+
+static void force_mb_single_thread(pthread_t tid)
+{
+ assert(reader_data);
+ sig_done = 0;
+ /*
+	 * pthread_kill has a smp_mb(), but we would also need it to perform
+	 * a cache flush on architectures with non-coherent caches. Let's play
+	 * it safe and not assume anything: we use smp_mc() to make sure the
+	 * cache flush is enforced.
+ */
+ smp_mc(); /* write sig_done before sending the signals */
+ pthread_kill(tid, SIGURCU);
+ /*
+	 * Wait for the sighandler (and thus smp_mb()) to execute on the
+	 * target thread. BUSY-LOOP.
+ */
+ while (LOAD_SHARED(sig_done) < 1)
+ cpu_relax();
+ smp_mb(); /* read sig_done before ending the barrier */
}
static void force_mb_all_threads(void)
{
struct reader_data *index;
/*
- * Ask for each threads to execute a mb() so we can consider the
+	 * Ask each thread to execute an smp_mb() so we can consider the
* compiler barriers around rcu read lock as real memory barriers.
*/
if (!reader_data)
return;
sig_done = 0;
- mb(); /* write sig_done before sending the signals */
+ /*
+	 * pthread_kill has a smp_mb(), but we would also need it to perform
+	 * a cache flush on architectures with non-coherent caches. Let's play
+	 * it safe and not assume anything: we use smp_mc() to make sure the
+	 * cache flush is enforced.
+ */
+ smp_mc(); /* write sig_done before sending the signals */
for (index = reader_data; index < reader_data + num_readers; index++)
pthread_kill(index->tid, SIGURCU);
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
* BUSY-LOOP.
*/
- while (sig_done < num_readers)
- barrier();
- mb(); /* read sig_done before ending the barrier */
+ while (LOAD_SHARED(sig_done) < num_readers)
+ cpu_relax();
+ smp_mb(); /* read sig_done before ending the barrier */
}
+#endif
-void wait_for_quiescent_state(int parity)
+void wait_for_quiescent_state(void)
{
struct reader_data *index;
if (!reader_data)
return;
- /* Wait for each thread urcu_active_readers count to become 0.
+ /*
+	 * Wait for each thread's urcu_active_readers count to become 0.
*/
for (index = reader_data; index < reader_data + num_readers; index++) {
+ int wait_loops = 0;
/*
- * BUSY-LOOP.
+ * BUSY-LOOP. Force the reader thread to commit its
+ * urcu_active_readers update to memory if we wait for too long.
*/
- while (index->urcu_active_readers[parity] != 0)
- barrier();
+ while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
+ if (wait_loops++ == KICK_READER_LOOPS) {
+ force_mb_single_thread(index->tid);
+ wait_loops = 0;
+ } else {
+ cpu_relax();
+ }
+ }
}
- /*
- * Locally : read *index->urcu_active_readers before freeing old
- * pointer.
- * Remote (reader threads) : Order urcu_qparity update and other
- * thread's quiescent state counter read.
- */
- force_mb_all_threads();
}
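
rcu_old_gp_ongoing() must report a reader as blocking the grace period only when it is inside a read-side critical section (non-zero nesting count) and its snapshot parity differs from the current one. A sketch consistent with the counter layout assumed earlier (KICK_READER_LOOPS is a threshold constant defined elsewhere; the writer owns urcu_gp_ctr under urcu_mutex, so it can read it plainly here):

	static inline int rcu_old_gp_ongoing(long *value)
	{
		long v;

		if (value == NULL)
			return 0;
		/* Read once so both tests see the same snapshot. */
		v = LOAD_SHARED(*value);
		return (v & RCU_GP_CTR_NEST_MASK) &&
			((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
	}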
-static void switch_qparity(void)
+void synchronize_rcu(void)
{
- int prev_parity;
+ internal_urcu_lock();
- /* All threads should read qparity before accessing data structure. */
- /* Write ptr before changing the qparity */
+	/* All threads should read qparity before accessing the data structure
+	 * pointed to by the new ptr. Must be done within internal_urcu_lock
+	 * because it iterates on reader threads. */
+ /* Write new ptr before changing the qparity */
force_mb_all_threads();
- prev_parity = switch_next_urcu_qparity();
+
+ switch_next_urcu_qparity(); /* 0 -> 1 */
+
+ /*
+ * Must commit qparity update to memory before waiting for parity
+ * 0 quiescent state. Failure to do so could result in the writer
+ * waiting forever while new readers are always accessing data (no
+ * progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
+ */
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(prev_parity);
-}
+ wait_for_quiescent_state(); /* Wait readers in parity 0 */
-void synchronize_rcu(void)
-{
- rcu_write_lock();
- switch_qparity();
- switch_qparity();
- rcu_write_unlock();
-}
+ /*
+ * Must finish waiting for quiescent state for parity 0 before
+ * committing qparity update to memory. Failure to do so could result in
+ * the writer waiting forever while new readers are always accessing
+ * data (no progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
+ */
-/*
- * Return old pointer, OK to free, no more reference exist.
- * Called under rcu_write_lock.
- */
-void *urcu_publish_content(void **ptr, void *new)
-{
- void *oldptr;
+ switch_next_urcu_qparity(); /* 1 -> 0 */
+
+ /*
+ * Must commit qparity update to memory before waiting for parity
+ * 1 quiescent state. Failure to do so could result in the writer
+ * waiting forever while new readers are always accessing data (no
+ * progress).
+ * Ensured by STORE_SHARED and LOAD_SHARED.
+ */
/*
- * We can publish the new pointer before we change the current qparity.
- * Readers seeing the new pointer while being in the previous qparity
- * window will make us wait until the end of the quiescent state before
- * we release the unrelated memory area. However, given we hold the
- * urcu_mutex, we are making sure that no further garbage collection can
- * occur until we release the mutex, therefore we guarantee that this
- * given reader will have completed its execution using the new pointer
- * when the next quiescent state window will be over.
+ * Wait for previous parity to be empty of readers.
*/
- oldptr = *ptr;
- *ptr = new;
+ wait_for_quiescent_state(); /* Wait readers in parity 1 */
- switch_qparity();
- switch_qparity();
+	/* Finish waiting for reader threads before letting the old ptr be
+	 * freed. Must be done within internal_urcu_lock because it iterates on
+	 * reader threads. */
+ force_mb_all_threads();
- return oldptr;
+ internal_urcu_unlock();
}
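
With urcu_publish_content() removed, a writer now swaps the pointer itself and calls synchronize_rcu() before reclaiming. A usage sketch (shared_ptr and struct foo are hypothetical; synchronize_rcu() itself issues force_mb_all_threads() first, which orders the publish against the parity switch):

	/* Writer side, serialized by the application's own lock: */
	struct foo *oldp, *newp = malloc(sizeof(*newp));

	newp->data = 42;
	oldp = shared_ptr;
	STORE_SHARED(shared_ptr, newp);	/* publish the new pointer */
	synchronize_rcu();		/* wait out all pre-existing readers */
	free(oldp);			/* no reader can still reference oldp */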
void urcu_add_reader(pthread_t id)
{
	/* ... (allocation/growth of the reader_data array elided) ... */
reader_data[num_readers].tid = id;
/* reference to the TLS of _this_ reader thread. */
- reader_data[num_readers].urcu_active_readers = urcu_active_readers;
+ reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
num_readers++;
}
void urcu_register_thread(void)
{
- rcu_write_lock();
+ internal_urcu_lock();
urcu_add_reader(pthread_self());
- rcu_write_unlock();
+ internal_urcu_unlock();
}
void urcu_unregister_thread(void)
{
- rcu_write_lock();
+ internal_urcu_lock();
urcu_remove_reader(pthread_self());
- rcu_write_unlock();
+ internal_urcu_unlock();
}
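
For completeness, the lifetime of a reader thread with these entry points (shared_ptr is again hypothetical; the read-side pair is sketched near the top):

	void *reader_thread(void *arg)
	{
		struct foo *f;

		urcu_register_thread();	/* before the first read-side critical section */

		rcu_read_lock();
		f = LOAD_SHARED(shared_ptr);
		/* ... dereference f only inside the critical section ... */
		rcu_read_unlock();

		urcu_unregister_thread();	/* before the thread exits */
		return NULL;
	}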
+#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
- mb();
+ /*
+	 * Executing this smp_mb() is the sole purpose of this signal handler.
+	 * It momentarily promotes the readers' compiler barrier()s into real
+	 * smp_mb()s on whichever thread it runs on.
+ */
+ smp_mb();
atomic_inc(&sig_done);
}
/* ... (constructor/destructor signal setup elided) ... */
	assert(act.sa_sigaction == sigurcu_handler);
free(reader_data);
}
+#endif