}
}
+/*
+ * synchronize_rcu() waiting. Single thread.
+ */
static void wait_for_quiescent_state(void)
{
struct reader_registry *index;
* Wait for each thread rcu_reader_qs_gp count to become 0.
*/
for (index = registry; index < registry + num_readers; index++) {
+ int wait_loops = 0;
+
+ while (rcu_gp_ongoing(index->rcu_reader_qs_gp)) {
+ if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
+ sched_yield(); /* ideally sched_yield_to() */
+ } else {
#ifndef HAS_INCOHERENT_CACHES
- while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
- cpu_relax();
+ cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
- while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
- smp_mb();
+ smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+ }
+ }
}
}
internal_urcu_lock();
+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
+
switch_next_urcu_qparity(); /* 0 -> 1 */
/*
*/
wait_for_quiescent_state(); /* Wait readers in parity 1 */
+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
+
internal_urcu_unlock();
/*
STORE_SHARED(rcu_reader_qs_gp, 0);
internal_urcu_lock();
+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + RCU_GP_CTR);
wait_for_quiescent_state();
+ STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
internal_urcu_unlock();
if (was_online)
return STORE_SHARED(p, v);
}
/*
 * Compare-and-exchange an RCU-protected pointer.
 *
 * Atomically sets *p to _new iff *p == old.  The write barrier orders
 * all prior stores (initialization of the pointed-to object) before the
 * publication of the new pointer, so readers dereferencing it see a
 * fully-initialized object.
 *
 * Returns the previous value of *p (equal to old iff the exchange
 * took place).
 */
void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	wmb();
	return cmpxchg(p, old, _new);
}
+
void *rcu_xchg_pointer_sym(void **p, void *v)
{
wmb();