* CPUs rather than only to specific threads.
*/
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#ifdef HAVE_SCHED_GETCPU
+
+/*
+ * Portability wrapper around sched_getcpu(): returns the current CPU
+ * number on platforms that provide sched_getcpu(), or -1 otherwise.
+ * Decoupled from HAVE_SYSCONF so each feature test gates only the
+ * functionality that actually depends on it.
+ */
+static int urcu_sched_getcpu(void)
+{
+ return sched_getcpu();
+}
+
+#else /* #ifdef HAVE_SCHED_GETCPU */
+
+/* Fallback when sched_getcpu() is unavailable: report "unknown CPU". */
+static int urcu_sched_getcpu(void)
+{
+ return -1;
+}
+
+#endif /* #else #ifdef HAVE_SCHED_GETCPU */
+
+#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)
/*
* Pointer to array of pointers to per-CPU call_rcu_data structures
}
}
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/*
* per_cpu_call_rcu_data should be constant, but some functions below, used both
{
}
-static int sched_getcpu(void)
-{
- return -1;
-}
-
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/* Acquire the specified pthread mutex. */
uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
poll(NULL, 0, 1);
+ uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
+ cmm_smp_mb__after_uatomic_and();
rcu_register_thread();
}
* Return a pointer to the call_rcu_data structure for the specified
* CPU, returning NULL if there is none. We cannot automatically
* created it because the platform we are running on might not define
- * sched_getcpu().
+ * urcu_sched_getcpu().
*
* The call to this function and use of the returned call_rcu_data
* should be protected by RCU read-side lock.
return URCU_TLS(thread_call_rcu_data);
if (maxcpus > 0) {
- crd = get_cpu_call_rcu_data(sched_getcpu());
+ crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
if (crd)
return crd;
}
cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
+ poll(NULL, 0, 1);
+ }
call_rcu_unlock(&call_rcu_mutex);
}