+ for (;;) {
+ wait_loops++;
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ uatomic_dec(&gp_futex);
+ /* Write futex before read reader_gp */
+ force_mb_all_threads();
+ }
+
+ list_for_each_entry_safe(index, tmp, &registry, head) {
+ if (!rcu_old_gp_ongoing(&index->ctr))
+ list_move(&index->head, &qsreaders);
+ }
+
+#ifndef HAS_INCOHERENT_CACHES
+ if (list_empty(&registry)) {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ force_mb_all_threads();
+ uatomic_set(&gp_futex, 0);
+ }
+ break;
+ } else {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
+ wait_gp();
+ else
+ cpu_relax();
+ }
+#else /* #ifndef HAS_INCOHERENT_CACHES */
+ /*
+ * BUSY-LOOP. Force the reader thread to commit its
+ * rcu_reader.ctr update to memory if we wait for too long.
+ */
+ if (list_empty(&registry)) {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ force_mb_all_threads();
+ uatomic_set(&gp_futex, 0);
+ }
+ break;
+ } else {
+ switch (wait_loops) {
+ case RCU_QS_ACTIVE_ATTEMPTS:
+ wait_gp();
+ break; /* only escape switch */
+ case KICK_READER_LOOPS:
+ force_mb_all_threads();
+ wait_loops = 0;
+ break; /* only escape switch */
+ default:
+ cpu_relax();
+ }
+ }
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+ }
+ /* put back the reader list in the registry */
+ list_splice(&qsreaders, &registry);
+}
+
+void synchronize_rcu(void)
+{
+ internal_rcu_lock();
+
+ /* All threads should read qparity before accessing data structure
+ * where new ptr points to. Must be done within internal_rcu_lock
+ * because it iterates on reader threads.*/
+ /* Write new ptr before changing the qparity */