+ /*
+  * NOTE(review): patch fragment of an RCU grace-period wait loop
+  * (resembles liburcu's wait_for_readers()).  The enclosing function
+  * header and the tail of this loop are outside this hunk, so the
+  * comments below describe only what is visible here.
+  */
+ for (;;) {
+ /* Saturating count of busy-wait iterations (avoids overflow). */
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
+ /*
+  * After RCU_QS_ACTIVE_ATTEMPTS spins, switch from busy-waiting to
+  * futex-based blocking: arm the futex (-1) and mark each remaining
+  * reader as waited-upon so it knows to wake us on its next
+  * quiescent state.
+  */
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+ uatomic_set(&rcu_gp.futex, -1);
+ /*
+ * Write futex before write waiting (the other side
+ * reads them in the opposite order).
+ */
+ cmm_smp_wmb();
+ cds_list_for_each_entry(index, input_readers, node) {
+ _CMM_STORE_SHARED(index->waiting, 1);
+ }
+ /* Write futex before read reader_gp */
+ cmm_smp_mb();
+ }
+ /*
+  * Classify each reader still in input_readers by its observed
+  * state; _safe iteration because nodes are moved to other lists.
+  */
+ cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+ switch (rcu_reader_state(&index->ctr)) {
+ case RCU_READER_ACTIVE_CURRENT:
+ /*
+  * Reader holds the current snapshot: park it on
+  * cur_snap_readers when the caller supplied that list;
+  * otherwise treat it like an inactive reader below.
+  */
+ if (cur_snap_readers) {
+ cds_list_move(&index->node,
+ cur_snap_readers);
+ break;
+ }
+ /* Fall-through */
+ case RCU_READER_INACTIVE:
+ /* Reader has passed a quiescent state: done waiting on it. */
+ cds_list_move(&index->node, qsreaders);
+ break;
+ case RCU_READER_ACTIVE_OLD:
+ /*
+ * Old snapshot. Leaving node in
+ * input_readers will make us busy-loop
+ * until the snapshot becomes current or
+ * the reader becomes inactive.
+ */
+ break;
+ }
+ }
+
+ /*
+  * All readers accounted for: disarm the futex (if it was armed)
+  * and exit the wait loop.
+  */
+ if (cds_list_empty(input_readers)) {
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ cmm_smp_mb();
+ uatomic_set(&rcu_gp.futex, 0);
+ }
+ break;
+ } else {
+ /*
+  * Readers remain: block on the futex once past the spin
+  * threshold; the else branch (plain busy-wait / pause,
+  * presumably) continues past this hunk — not visible here.
+  */
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+ wait_gp();
+ } else {