/*
 * NOTE(review): patch hunk over the writer-side globals.  The 0/1 parity
 * word (urcu_qparity) and the two-entry per-parity reader counters
 * (urcu_active_readers[2]) are replaced by one global grace-period
 * counter (urcu_gp_ctr) plus a single per-thread counter word.
 */
pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
-/* Global quiescent period parity */
-int urcu_qparity;
+/* Global grace period counter */
+int urcu_gp_ctr;
/*
 * Per-reader-thread counter.  Presumably the low bits carry the read-side
 * nesting count and the phase bit mirrors urcu_gp_ctr's RCU_GP_CTR_BIT --
 * TODO confirm against the RCU_GP_CTR_BIT definition (not in this hunk).
 */
-int __thread urcu_active_readers[2];
+int __thread urcu_active_readers;
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4
/*
 * NOTE(review): the '};' below closes struct reader_data, whose opening
 * and members are elided from this hunk.
 */
};
#ifdef DEBUG_YIELD
/* Debug knobs: yield_active widened to unsigned; per-thread PRNG state added. */
-int yield_active;
+unsigned int yield_active;
+unsigned int __thread rand_yield;
#endif
/* Dynamic array of registered reader threads (tid + counter pointer). */
static struct reader_data *reader_data;
/*
 * called with urcu_mutex held.
 */
/*
 * NOTE(review): return type changes int -> void.  The old code toggled
 * urcu_qparity between 0 and 1 and handed the previous parity back to the
 * caller; the new code just flips the phase bit (RCU_GP_CTR_BIT) inside
 * the global grace-period counter, so there is no previous value for the
 * caller to track -- see the matching switch_qparity() hunk in this patch.
 */
-static int switch_next_urcu_qparity(void)
+static void switch_next_urcu_qparity(void)
{
- int old_parity = urcu_qparity;
- urcu_qparity = 1 - old_parity;
- return old_parity;
+ urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}
/*
 * NOTE(review): only the header and closing lines of
 * force_mb_all_threads() are visible in this hunk -- its body is elided
 * by the diff, so no behavior can be asserted about it from here.
 */
static void force_mb_all_threads(void)
debug_yield_write();
}
/*
 * NOTE(review): drops the 'parity' argument -- with the unified
 * grace-period counter a reader's progress is judged from its own
 * counter word alone.  The for-loop that walks reader_data[] and
 * advances 'index' is elided from this hunk; only the per-reader
 * busy-wait is shown.
 */
-void wait_for_quiescent_state(void)
+void wait_for_quiescent_state(void)
{
struct reader_data *index;
/*
* BUSY-LOOP.
*/
/*
 * NOTE(review): rcu_old_gp_ongoing() is defined outside this hunk;
 * presumably it compares the reader's counter snapshot against the
 * current urcu_gp_ctr phase -- verify against its definition.
 */
- while (index->urcu_active_readers[parity] != 0)
+ while (rcu_old_gp_ongoing(index->urcu_active_readers))
barrier();
}
/*
/*
 * NOTE(review): with the unified grace-period counter there is no parity
 * value to carry between the flip and the wait, so local 'prev_parity'
 * disappears and both helper calls lose their argument/return value.
 * The sequence itself is unchanged: barrier all threads, flip the phase
 * bit, then busy-wait for readers still in the old phase.
 */
static void switch_qparity(void)
{
- int prev_parity;
-
/* All threads should read qparity before accessing data structure. */
/* Write ptr before changing the qparity */
force_mb_all_threads();
debug_yield_write();
- prev_parity = switch_next_urcu_qparity();
+ switch_next_urcu_qparity();
debug_yield_write();
/*
* Wait for previous parity to be empty of readers.
*/
- wait_for_quiescent_state(prev_parity);
+ wait_for_quiescent_state();
}
/*
 * NOTE(review): heavily elided hunk.  The '*ptr'/'new' lines below belong
 * to a pointer-publish path whose signature is not visible here
 * (synchronize_rcu(void) itself takes no arguments) -- the raw store is
 * replaced by rcu_assign_pointer(), which presumably inserts the write
 * barrier needed before publishing the new pointer; confirm against the
 * rcu_assign_pointer() definition (outside this hunk).
 */
void synchronize_rcu(void)
*/
oldptr = *ptr;
debug_yield_write();
- *ptr = new;
+ rcu_assign_pointer(*ptr, new);
debug_yield_write();
switch_qparity();
}
/*
 * NOTE(review): tail of the reader-registration function (its header is
 * elided).  Records the reader's tid and the ADDRESS of its thread-local
 * counter: the old code stored the int[2] array, which decayed to a
 * pointer implicitly; with urcu_active_readers now a plain int, an
 * explicit '&' is required to keep a reference to the TLS variable.
 */
reader_data[num_readers].tid = id;
/* reference to the TLS of _this_ reader thread. */
- reader_data[num_readers].urcu_active_readers = urcu_active_readers;
+ reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
num_readers++;
}