Add support for x86 older than P4, with CONFIG_HAS_FENCE option
[urcu.git] / urcu.h
#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Assume P4 or newer */
#define CONFIG_HAS_FENCE 1

/* x86 32/64 specific */
#ifdef CONFIG_HAS_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence"::: "memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
#endif

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif

static inline void atomic_inc(int *v)
{
        asm volatile("lock; incl %0"
                     : "+m" (*v));
}

#define xchg(ptr, v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
        unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *         but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                   int size)
{
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
                             : "=q" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %k0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 8:
                asm volatile("xchgq %0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
        return x;
}

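/*
 * Example (illustrative sketch, not part of this header's API; 'global_ptr'
 * and 'struct mydata' are hypothetical names): xchg() atomically stores a
 * new value and returns the previous one, which is how rcu_xchg_pointer()
 * below swaps an RCU-protected pointer.
 *
 *	struct mydata *global_ptr;
 *
 *	void swap_in(struct mydata *new)
 *	{
 *		struct mydata *old;
 *
 *		old = xchg(&global_ptr, new);
 *		// 'old' may still be in use by readers until a grace period.
 *	}
 */
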
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

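/*
 * Example (illustrative; 'flag' is a hypothetical variable): putting the
 * two ACCESS_ONCE() invocations in separate C statements, as described
 * above, keeps the compiler from merging or reordering the two reads.
 *
 *	extern int flag;
 *
 *	int sample_twice(void)
 *	{
 *		int a = ACCESS_ONCE(flag);
 *		int b = ACCESS_ONCE(flag);	// distinct read, not merged with 'a'
 *		return a + b;
 *	}
 */
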
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                smp_read_barrier_depends(); \
                                (_________p1); \
                                })

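/*
 * Example (illustrative; 'rcu_ptr', 'struct mydata' and the helper are
 * hypothetical names): rcu_dereference() is meant to be used on the read
 * side, between the rcu_read_lock() and rcu_read_unlock() defined below.
 *
 *	struct mydata *p;
 *
 *	p = rcu_dereference(rcu_ptr);
 *	if (p)
 *		do_something_with(p->value);
 */
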
#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
        if (yield_active & YIELD_READ)
                if (rand_r(&rand_yield) & 0x1)
                        usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
        if (yield_active & YIELD_WRITE)
                if (rand_r(&rand_yield) & 0x1)
                        usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
        rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{

}
#endif

#ifdef DEBUG_FULL_MB
static inline void read_barrier(void)
{
        smp_mb();
}
#else
static inline void read_barrier(void)
{
        barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
 * a full 8-bit, 16-bit or 32-bit bitmask for the lower order bits.
 */
#define RCU_GP_COUNT (1UL << 0)
/* Use the number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

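/*
 * Worked example, derived from the definitions above: with a 32-bit long,
 * sizeof(long) << 2 == 16, so RCU_GP_CTR_BIT == 0x10000 and
 * RCU_GP_CTR_NEST_MASK == 0xffff; with a 64-bit long they are 1UL << 32
 * and 0xffffffff. The nest-mask bits of urcu_active_readers count read-side
 * nesting, while RCU_GP_CTR_BIT records which grace-period phase the reader
 * snapshotted from urcu_gp_ctr.
 */
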
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
        long v;

        if (value == NULL)
                return 0;
        /*
         * Make sure both tests below are done on the same version of *value
         * to ensure consistency.
         */
        v = ACCESS_ONCE(*value);
        return (v & RCU_GP_CTR_NEST_MASK) &&
                ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
        long tmp;

        tmp = urcu_active_readers;
        /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
        /* The data dependency "read urcu_gp_ctr, write urcu_active_readers"
         * serializes those two memory operations. */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
                urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
        else
                urcu_active_readers = tmp + RCU_GP_COUNT;
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
         */
        read_barrier();
}

static inline void rcu_read_unlock(void)
{
        read_barrier();
        /*
         * Finish using rcu before decrementing the active readers count.
         * See force_mb_all_threads().
         */
        urcu_active_readers -= RCU_GP_COUNT;
}

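/*
 * Example (illustrative sketch): read-side critical sections may nest;
 * only the outermost rcu_read_lock() snapshots urcu_gp_ctr, inner ones
 * merely add RCU_GP_COUNT to the nesting bits.
 *
 *	rcu_read_lock();
 *	rcu_read_lock();	// nested: nesting count goes to 2
 *	...
 *	rcu_read_unlock();
 *	rcu_read_unlock();	// nesting count back to 0
 */
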
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                (p) = (v); \
        })

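/*
 * Example (illustrative sketch; 'rcu_ptr', 'struct mydata' and the
 * allocation are hypothetical): fully initialize the structure first,
 * then publish it; rcu_assign_pointer() emits the wmb() that keeps the
 * initialization stores ahead of the pointer store.
 *
 *	struct mydata *new = malloc(sizeof(*new));
 *
 *	new->value = 42;
 *	rcu_assign_pointer(rcu_ptr, new);
 */
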
#define rcu_xchg_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                xchg(p, v); \
        })

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
        ({ \
                void *oldptr; \
                oldptr = rcu_xchg_pointer(p, v); \
                synchronize_rcu(); \
                oldptr; \
        })

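/*
 * Example (illustrative sketch; 'rcu_ptr' and 'struct mydata' are
 * hypothetical): swap in a new version and free the old one once all
 * pre-existing readers have finished.
 *
 *	void update(struct mydata *new)
 *	{
 *		struct mydata *old;
 *
 *		old = urcu_publish_content(&rcu_ptr, new);
 *		free(old);
 *	}
 */
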
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
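/*
 * Example (illustrative sketch): each reader thread registers itself
 * before its first read-side critical section and unregisters before it
 * exits, so that synchronize_rcu() knows which threads to wait for.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *
 *		rcu_read_lock();
 *		// ... rcu_dereference() protected data ...
 *		rcu_read_unlock();
 *
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */
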
#endif /* _URCU_H */