Add missing memory barriers to ensure progress and remove an unnecessary ACCESS_ONCE
diff --git a/urcu.h b/urcu.h
index bc86d435845d6da33255a63d7d5d2fbdecb4aa76..c4a7992b4b7b166d527411d4ae5108cac174fc95 100644
--- a/urcu.h
+++ b/urcu.h
@@ -17,12 +17,15 @@
  * Distributed under GPLv2
  */
 
-#define __USE_GNU
 #include <stdlib.h>
+#include <pthread.h>
 
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
 /* x86 32/64 specific */
 #define mb() asm volatile("mfence":::"memory")
 #define rmb() asm volatile("lfence":::"memory")
@@ -64,7 +67,13 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 			     : "memory");
 		break;
 	case 4:
-		asm volatile("xchgl %0,%1"
+		asm volatile("xchgl %k0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 8:
+		asm volatile("xchgq %0,%1"
 			     : "=r" (x)
 			     : "m" (*__xg(ptr)), "0" (x)
 			     : "memory");
@@ -112,10 +121,19 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 #include <sched.h>
 #include <time.h>
 #include <pthread.h>
+#include <unistd.h>
 
 #define YIELD_READ	(1 << 0)
 #define YIELD_WRITE	(1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
@@ -123,14 +141,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -152,41 +170,63 @@ static inline void debug_yield_init(void)
 }
 #endif
 
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
+{
+	mb();
+}
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
- * Limiting the nesting level to 256 to keep instructions small in the read
- * fast-path.
+ * The trick here is that RCU_GP_CTR_BIT must be at a bit position that is a
+ * multiple of 8, so we can use a full 8-bit, 16-bit or 32-bit bitmask for the
+ * lower-order bits.
  */
-#define RCU_GP_COUNT		(1U << 0)
-#define RCU_GP_CTR_BIT		(1U << 8)
+#define RCU_GP_COUNT		(1UL << 0)
+/* Use the number of bits equal to half of the architecture long size */
+#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
 #define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
 
-/* Global quiescent period counter with low-order bits unused. */
-extern int urcu_gp_ctr;
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using an int rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
+ */
+extern long urcu_gp_ctr;
 
-extern int __thread urcu_active_readers;
+extern long __thread urcu_active_readers;
 
-static inline int rcu_old_gp_ongoing(int *value)
+static inline int rcu_old_gp_ongoing(long *value)
 {
-	int v;
+	long v;
 
 	if (value == NULL)
 		return 0;
 	debug_yield_write();
+	/*
+	 * Make sure both tests below are done on the same version of *value
+	 * to ensure consistency.
+	 */
 	v = ACCESS_ONCE(*value);
 	debug_yield_write();
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		 ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+		 ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
 
 static inline void rcu_read_lock(void)
 {
-	int tmp;
+	long tmp;
 
 	debug_yield_read();
 	tmp = urcu_active_readers;
 	debug_yield_read();
-	if (!(tmp & RCU_GP_CTR_NEST_MASK))
-		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+		urcu_active_readers = urcu_gp_ctr;
 	else
 		urcu_active_readers = tmp + RCU_GP_COUNT;
 	debug_yield_read();
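
To make the new counter layout concrete: on LP64, sizeof(long) == 8, so
sizeof(long) << 2 == 32 and RCU_GP_CTR_BIT is bit 32; the low 32 bits hold
the read-side nesting count. That is also why the bit position must be a
multiple of 8: the nest mask then covers a whole 8-, 16- or 32-bit unit. A
standalone sketch, not part of the patch (LP64 assumed for the numbers):

    #include <stdio.h>

    #define RCU_GP_COUNT		(1UL << 0)
    /* bit 32 on LP64, bit 16 on ILP32: half the bits of a long */
    #define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
    #define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

    int main(void)
    {
    	/* Writer side: parity bit set after one flip, count at 1. */
    	long gp = RCU_GP_COUNT | RCU_GP_CTR_BIT;

    	long readers = gp;	/* outermost rcu_read_lock(): copy gp */
    	readers += RCU_GP_COUNT;	/* nested rcu_read_lock(): bump low half */

    	printf("nest mask = %#lx\n", RCU_GP_CTR_NEST_MASK);	      /* 0xffffffff */
    	printf("nesting   = %lu\n", readers & RCU_GP_CTR_NEST_MASK);  /* 2 */
    	printf("parity    = %d\n", (readers & RCU_GP_CTR_BIT) != 0);  /* 1 */
    	return 0;
    }
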
@@ -194,14 +234,14 @@ static inline void rcu_read_lock(void)
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
+	read_barrier();
 	debug_yield_read();
 }
 
 static inline void rcu_read_unlock(void)
 {
 	debug_yield_read();
-	barrier();
+	read_barrier();
 	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
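
For orientation, a sketch of a reader built on these primitives, not part
of the patch (reader_peek, mydata and shared_ptr are hypothetical; only
rcu_read_lock(), rcu_read_unlock() and ACCESS_ONCE come from this header).
Without DEBUG_FULL_MB, read_barrier() compiles to a plain compiler barrier
and the writer supplies the real ordering through force_mb_all_threads(),
as the comments above note:

    #include "urcu.h"

    struct mydata {
    	int a;
    };

    /* Assumed: set up elsewhere and replaced by the writer thread. */
    extern struct mydata *shared_ptr;

    static int reader_peek(void)
    {
    	struct mydata *p;
    	int ret = 0;

    	rcu_read_lock();	/* publish this thread's counter, then read_barrier() */
    	p = ACCESS_ONCE(shared_ptr);	/* snapshot the shared pointer once */
    	if (p)
    		ret = p->a;	/* safe until unlock: the grace period
    				   cannot complete while we are nested */
    	rcu_read_unlock();	/* read_barrier(), then counter update */
    	return ret;
    }
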