*/
#include <stdlib.h>
+#include <pthread.h>
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
/* x86 32/64 specific */
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
: "+m" (*v));
}
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
+
+struct __xchg_dummy {
+ unsigned long a[100];
+};
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
+ * strictly speaking the constraint is incomplete, since *ptr is really an
+ * output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
+{
+ switch (size) {
+ case 1:
+ asm volatile("xchgb %b0,%1"
+ : "=q" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 2:
+ asm volatile("xchgw %w0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 4:
+ asm volatile("xchgl %k0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 8:
+ asm volatile("xchgq %0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ }
+ return x;
+}
+
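/*
 * Illustrative sketch, not part of the patch: one way the xchg() macro
 * above can be used.  All names here are hypothetical.  Since xchg implies
 * the lock prefix (see the note above), the swap is atomic and acts as a
 * full memory barrier.
 */
static unsigned long example_flag;

static inline int example_test_and_set(void)
{
	/* Atomically store 1; a returned 0 means the flag was clear. */
	return xchg(&example_flag, 1UL) == 0;
}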
/* Nop everywhere except on Alpha. */
#define smp_read_barrier_depends()
#ifdef DEBUG_YIELD
#include <sched.h>
+#include <time.h>
#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)
#endif
/*
- * Limiting the nesting level to 256 to keep instructions small in the read
- * fast-path.
+ * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
+ * a full 8-bit, 16-bit or 32-bit bitmask for the lower-order bits.
*/
-#define RCU_GP_COUNT (1U << 0)
-#define RCU_GP_CTR_BIT (1U << 8)
+#define RCU_GP_COUNT (1UL << 0)
+/* Use a number of bits equal to half the architecture's long size */
+#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
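/*
 * Worked example, for illustration only: with 64-bit longs,
 * sizeof(long) << 2 == 32, so RCU_GP_CTR_BIT == 1UL << 32 and
 * RCU_GP_CTR_NEST_MASK == 0xffffffff.  The low 32 bits hold the read-side
 * nesting count and the upper bits hold the grace-period phase; with
 * 32-bit longs the split is 16/16 (mask 0xffff).
 */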
-/* Global quiescent period counter with low-order bits unused. */
-extern int urcu_gp_ctr;
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using a long rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
+ */
+extern long urcu_gp_ctr;
-extern int __thread urcu_active_readers;
+extern long __thread urcu_active_readers;
-static inline int rcu_old_gp_ongoing(int *value)
+static inline int rcu_old_gp_ongoing(long *value)
{
- int v;
+ long v;
if (value == NULL)
return 0;
static inline void rcu_read_lock(void)
{
- int tmp;
+ long tmp;
debug_yield_read();
tmp = urcu_active_readers;
debug_yield_read();
- if (!(tmp & RCU_GP_CTR_NEST_MASK))
- urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+ if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+ urcu_active_readers = urcu_gp_ctr;
else
urcu_active_readers = tmp + RCU_GP_COUNT;
debug_yield_read();
(p) = (v); \
})
-extern void *urcu_publish_content(void **ptr, void *new);
+#define rcu_xchg_pointer(p, v) \
+ ({ \
+ if (!__builtin_constant_p(v) || \
+ ((v) != NULL)) \
+ wmb(); \
+ xchg(p, v); \
+ })
+
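/*
 * Illustrative sketch, not part of the patch (names hypothetical): the
 * __builtin_constant_p() test above lets a constant NULL store skip the
 * wmb(), since there is no newly initialized data to order before the
 * pointer update.
 */
struct example_node { int data; };

static inline struct example_node *example_retract(struct example_node **slot)
{
	/* v is the compile-time constant NULL, so no wmb() is emitted. */
	return rcu_xchg_pointer(slot, NULL);
}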
extern void synchronize_rcu(void);
+/*
+ * Exchanges the pointer and waits for quiescent state.
+ * The pointer returned can be freed.
+ */
+#define urcu_publish_content(p, v) \
+ ({ \
+ void *oldptr; \
+ debug_yield_write(); \
+ oldptr = rcu_xchg_pointer(p, v); \
+ synchronize_rcu(); \
+ oldptr; \
+ })
+
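/*
 * Illustrative writer-side usage, not part of the patch (names
 * hypothetical): publish a new version of a shared structure and reclaim
 * the old one.  synchronize_rcu() inside the macro guarantees that no
 * pre-existing reader still holds the old pointer, so it can be freed.
 */
struct example_conf { int value; };
static struct example_conf *example_shared;

static inline void example_update(struct example_conf *new_conf)
{
	struct example_conf *old;

	old = urcu_publish_content(&example_shared, new_conf);
	free(old);	/* safe: the grace period has elapsed */
}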
/*
* Reader thread registration.
*/