+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
+}
+
+#ifdef URCU_MB
+#if 0 /* unused */
+/*
+ * Force a full memory barrier with respect to one reader thread.
+ * URCU_MB flavor: a local smp_mb() is all this variant does —
+ * NOTE(review): presumably sufficient because in this flavor readers
+ * issue their own barriers in the read-side primitives, so no
+ * cross-thread signalling is required; confirm against the
+ * signal-based variant below. Currently unused (kept under #if 0).
+ */
+static void force_mb_single_thread(struct urcu_reader *index)
+{
+ smp_mb();
+}
+#endif //0
+
+/*
+ * Force a full memory barrier with respect to all reader threads.
+ * URCU_MB flavor: only a local smp_mb() is executed here —
+ * NOTE(review): presumably enough because readers in this flavor
+ * pair it with barriers of their own, unlike the signal-based
+ * flavor below which must interrupt every reader; verify against
+ * the read-side implementation.
+ */
+static void force_mb_all_threads(void)
+{
+ smp_mb();
+}
+#else /* #ifdef URCU_MB */
+#if 0 /* unused */
+static void force_mb_single_thread(struct urcu_reader *index)
+{
+ assert(!list_empty(&registry));
+ /*
+ * pthread_kill has a smp_mb(). But beware, we assume it performs
+ * a cache flush on architectures with non-coherent cache. Let's play
+ * safe and don't assume anything : we use smp_mc() to make sure the
+ * cache flush is enforced.
+ */
+ index->need_mb = 1;
+ smp_mc(); /* write ->need_mb before sending the signals */
+ pthread_kill(index->tid, SIGURCU);
+ smp_mb();
+ /*
+ * Wait for sighandler (and thus mb()) to execute on every thread.
+ * BUSY-LOOP.
+ */
+ while (index->need_mb) {
+ poll(NULL, 0, 1);
+ }
+ smp_mb(); /* read ->need_mb before ending the barrier */