pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
-/* Global grace period counter */
-int urcu_gp_ctr;
+/*
+ * Global grace period counter.
+ * Contains the current RCU_GP_CTR_BIT.
+ * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ */
+long urcu_gp_ctr = RCU_GP_COUNT;
-int __thread urcu_active_readers;
+long __thread urcu_active_readers;
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4
struct reader_data {
pthread_t tid;
- int *urcu_active_readers;
+ long *urcu_active_readers;
};
#ifdef DEBUG_YIELD
static struct reader_data *reader_data;
static int num_readers, alloc_readers;
+#ifndef DEBUG_FULL_MB
static int sig_done;
+#endif
void internal_urcu_lock(void)
{
urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}
+#ifdef DEBUG_FULL_MB
+static void force_mb_all_threads(void)
+{
+ smp_mb();
+}
+#else
static void force_mb_all_threads(void)
{
struct reader_data *index;
/*
- * Ask for each threads to execute a mb() so we can consider the
+ * Ask each thread to execute a smp_mb() so we can consider the
* compiler barriers around rcu read lock as real memory barriers.
*/
if (!reader_data)
debug_yield_write();
sig_done = 0;
debug_yield_write();
- mb(); /* write sig_done before sending the signals */
+ smp_mb(); /* write sig_done before sending the signals */
debug_yield_write();
for (index = reader_data; index < reader_data + num_readers; index++) {
pthread_kill(index->tid, SIGURCU);
while (sig_done < num_readers)
barrier();
debug_yield_write();
- mb(); /* read sig_done before ending the barrier */
+ smp_mb(); /* read sig_done before ending the barrier */
debug_yield_write();
}
+#endif
void wait_for_quiescent_state(void)
{
while (rcu_old_gp_ongoing(index->urcu_active_readers))
barrier();
}
- /*
- * Locally : read *index->urcu_active_readers before freeing old
- * pointer.
- * Remote (reader threads) : Order urcu_qparity update and other
- * thread's quiescent state counter read.
- */
- force_mb_all_threads();
}
-static void switch_qparity(void)
+void synchronize_rcu(void)
{
- /* All threads should read qparity before accessing data structure. */
- /* Write ptr before changing the qparity */
+ /* All threads should read qparity before accessing data structure
+ * where the new ptr points. */
+ /* Write new ptr before changing the qparity */
force_mb_all_threads();
debug_yield_write();
- switch_next_urcu_qparity();
+
+ internal_urcu_lock();
+ debug_yield_write();
+
+ switch_next_urcu_qparity(); /* 0 -> 1 */
debug_yield_write();
/*
- * Wait for previous parity to be empty of readers.
+ * Must commit qparity update to memory before waiting for parity
+ * 0 quiescent state. Failure to do so could result in the writer
+ * waiting forever while new readers are always accessing data (no
+ * progress).
*/
- wait_for_quiescent_state();
-}
+ smp_mb();
-void synchronize_rcu(void)
-{
- debug_yield_write();
- internal_urcu_lock();
+ /*
+ * Wait for previous parity to be empty of readers.
+ */
+ wait_for_quiescent_state(); /* Wait readers in parity 0 */
debug_yield_write();
- switch_qparity();
+
+ /*
+ * Must finish waiting for quiescent state for parity 0 before
+ * committing qparity update to memory. Failure to do so could result in
+ * the writer waiting forever while new readers are always accessing
+ * data (no progress).
+ */
+ smp_mb();
+
+ switch_next_urcu_qparity(); /* 1 -> 0 */
debug_yield_write();
- switch_qparity();
+
+ /*
+ * Must commit qparity update to memory before waiting for parity
+ * 1 quiescent state. Failure to do so could result in the writer
+ * waiting forever while new readers are always accessing data (no
+ * progress).
+ */
+ smp_mb();
+
+ /*
+ * Wait for previous parity to be empty of readers.
+ */
+ wait_for_quiescent_state(); /* Wait readers in parity 1 */
debug_yield_write();
+
internal_urcu_unlock();
debug_yield_write();
+
+ /* All threads should finish using the data referred to by old ptr
+ * before decrementing their urcu_active_readers count */
+ /* Finish waiting for reader threads before letting the old ptr be
+ * freed. */
+ force_mb_all_threads();
+ debug_yield_write();
}
void urcu_add_reader(pthread_t id)
internal_urcu_unlock();
}
+#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
- mb();
+ smp_mb();
atomic_inc(&sig_done);
}
assert(act.sa_sigaction == sigurcu_handler);
free(reader_data);
}
+#endif