* Inserts memory barriers on architectures that require them (currently only
* Alpha) and documents which pointers are protected by RCU.
*
+ * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative
+ * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
+ * data read before the pointer read by speculating the value of the pointer.
+ * Correct ordering is ensured because the pointer is read as a volatile access.
+ * This acts as a global side-effect operation, which forbids reordering of
+ * dependent memory operations. Note that such concern about dependency-breaking
+ * optimizations will eventually be taken care of by the "memory_order_consume"
+ * addition to the forthcoming C++ standard.
+ *
* Should match rcu_assign_pointer() or rcu_xchg_pointer().
*/
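/*
 * Illustrative sketch (not part of the patch): the volatile access the
 * comment above relies on can be written with GNU C typeof as below.
 * The _example_ helper names are hypothetical, not the library's own
 * definitions.
 */
#define _example_access_once(x)	(*(volatile __typeof__(x) *)&(x))
#define _example_load_shared(p)	_example_access_once(p)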
#define KICK_READER_LOOPS 10000
+#ifdef DEBUG_RCU
+#define rcu_assert(args...) assert(args)
+#else
+#define rcu_assert(args...)
+#endif
+
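+/*
+ * Usage sketch (hypothetical): rcu_assert() expands to nothing without
+ * DEBUG_RCU, so a sanity check like this costs nothing in release builds.
+ */
+static inline void _example_check_in_read_side(void)
+{
+	rcu_assert(urcu_active_readers & RCU_GP_CTR_NEST_MASK);
+}
+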
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)
-/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
-#ifdef DEBUG_FULL_MB
+/*
+ * Updates without URCU_MB are much slower. Account this in
+ * the delay.
+ */
+#ifdef URCU_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
}
#endif
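/*
 * Illustrative sketch (not part of the patch): how YIELD_READ and MAX_SLEEP
 * are typically consumed to inject random read-side delays under test.
 * Assumes <stdlib.h> and <unistd.h>; the _example_ names are hypothetical.
 */
extern unsigned int yield_active;	/* assumed declared by the library */

static inline void _example_debug_yield_read(unsigned int *seed)
{
	if (yield_active & YIELD_READ)
		if (rand_r(seed) & 0x1)
			usleep(rand_r(seed) % MAX_SLEEP);
}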
-#ifdef DEBUG_FULL_MB
+#ifdef URCU_MB
static inline void reader_barrier()
{
smp_mb();
tmp = urcu_active_readers;
/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
- /*
- * The data dependency "read urcu_gp_ctr, write urcu_active_readers",
- * serializes those two memory operations. The memory barrier in the
- * signal handler ensures we receive the proper memory commit barriers
- * required by _STORE_SHARED and _LOAD_SHARED whenever communication
- * with the writer is needed.
- */
- if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+ if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
- else
+ /*
+ * Set active readers count for outermost nesting level before
+ * accessing the pointer. See force_mb_all_threads().
+ */
+ reader_barrier();
+ } else {
_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
- /*
- * Increment active readers count before accessing the pointer.
- * See force_mb_all_threads().
- */
- reader_barrier();
+ }
}
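/*
 * For reference (values are assumptions, not taken from this patch):
 * urcu_active_readers packs a nesting count in its low bits and the
 * grace-period phase bit above them, e.g.:
 *
 *	#define RCU_GP_COUNT		(1UL << 0)
 *	#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
 *	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
 *
 * A zero nest mask therefore means "outermost read lock": snapshot
 * urcu_gp_ctr; otherwise only the nesting count is incremented.
 */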
static inline void _rcu_read_unlock(void)
/*
* Finish using rcu before decrementing the active readers count.
* See force_mb_all_threads().
+ * Formally only needed for outermost nesting level, but leave barrier
+ * in place for nested unlocks to remove a branch from the common case
+ * (no nesting).
*/
_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
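/*
 * Usage sketch (illustrative): a read-side critical section built on the
 * primitives above. rcu_read_lock()/rcu_read_unlock()/rcu_dereference()
 * are assumed to be the public wrappers over the _rcu_* versions; struct
 * node and use_node() are hypothetical.
 */
static void _example_reader(struct node **shared)
{
	struct node *p;

	rcu_read_lock();
	p = rcu_dereference(*shared);	/* dependency-ordered load */
	if (p)
		use_node(p);		/* p stays valid until unlock */
	rcu_read_unlock();
}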
/**
* _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
- * pointer to the data structure, which can be safely freed after waitin for a
+ * pointer to the data structure, which can be safely freed after waiting for a
* quiescent state using synchronize_rcu().
*/
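/*
 * Writer-side usage sketch (illustrative), matching the comment above:
 * publish a replacement, then reclaim the old copy only after a grace
 * period. make_node() and struct node are hypothetical; assumes <stdlib.h>.
 */
static void _example_update(struct node **shared)
{
	struct node *old, *new;

	new = make_node();
	old = rcu_xchg_pointer(shared, new);
	synchronize_rcu();	/* wait for pre-existing readers to drain */
	free(old);
}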