Add support for x86 older than P4, with CONFIG_HAS_FENCE option
[urcu.git] / urcu.h
#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Assume P4 or newer */
#define CONFIG_HAS_FENCE 1

/* x86 32/64 specific */
#ifdef CONFIG_HAS_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence"::: "memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
#endif
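/*
 * Informative note (not in the original header): mfence and lfence require
 * SSE2 (P4 or newer). For older x86 CPUs, comment out the CONFIG_HAS_FENCE
 * define above so the lock-prefixed "addl" fallback is used instead; a
 * locked instruction acts as a full memory barrier on x86.
 */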

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif
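/*
 * Illustrative sketch (not part of the original header): a typical
 * smp_wmb()/smp_rmb() pairing. "data" and "ready" are hypothetical shared
 * variables.
 *
 *	// writer
 *	data = 42;
 *	smp_wmb();	// order the data store before the flag store
 *	ready = 1;
 *
 *	// reader
 *	while (!ready)
 *		;
 *	smp_rmb();	// order the flag load before the data load
 *	assert(data == 42);
 */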

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}
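/*
 * Illustrative sketch (not part of the original header): xchg() atomically
 * swaps in a new value and returns the previous one. "current_cfg",
 * "new_cfg" and "struct cfg" are hypothetical names.
 *
 *	struct cfg *current_cfg;
 *
 *	struct cfg *install_cfg(struct cfg *new_cfg)
 *	{
 *		return xchg(&current_cfg, new_cfg);	// returns the old pointer
 *	}
 */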

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
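/*
 * Illustrative sketch (not part of the original header): per the comment
 * above, two reads that must stay distinct go in separate C statements.
 * "shared_flag" is a hypothetical shared variable.
 *
 *	extern int shared_flag;
 *
 *	int a = ACCESS_ONCE(shared_flag);	// first read, not merged away
 *	int b = ACCESS_ONCE(shared_flag);	// separate statement, re-read
 */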

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
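/*
 * Illustrative sketch (not part of the original header): typical use of
 * rcu_dereference() inside a read-side critical section. "global_ptr",
 * "struct foo" and use_field() are hypothetical.
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(global_ptr);
 *	if (p)
 *		use_field(p->field);
 *	rcu_read_unlock();
 */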

#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void read_barrier(void)
{
	smp_mb();
}
#else
static inline void read_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
 * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
 */
#define RCU_GP_COUNT (1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
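/*
 * Worked example (informative, not in the original header): with an 8-byte
 * long, RCU_GP_CTR_BIT is 1UL << 32 and RCU_GP_CTR_NEST_MASK is 0xffffffff;
 * with a 4-byte long, RCU_GP_CTR_BIT is 1UL << 16 and the nest mask is
 * 0xffff. The low half of the counter tracks read-side nesting, the bit
 * above it carries the grace-period parity.
 */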

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = ACCESS_ONCE(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/* The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	read_barrier();
}

static inline void rcu_read_unlock(void)
{
	read_barrier();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
}
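/*
 * Illustrative sketch (not part of the original header): read-side critical
 * sections may nest. Only the outermost rcu_read_lock() snapshots
 * urcu_gp_ctr; inner ones just add RCU_GP_COUNT to the nesting count.
 *
 *	rcu_read_lock();
 *	rcu_read_lock();	// nested: urcu_active_readers += RCU_GP_COUNT
 *	...
 *	rcu_read_unlock();
 *	rcu_read_unlock();	// outermost unlock drops the last count
 */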

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		(p) = (v); \
	})
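/*
 * Illustrative sketch (not part of the original header): publishing a newly
 * initialized structure. "global_ptr", "struct foo" and the field names are
 * hypothetical.
 *
 *	struct foo *new = malloc(sizeof(*new));
 *	new->a = 1;
 *	new->b = 2;
 *	rcu_assign_pointer(global_ptr, new);	// wmb() before the store
 */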

#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})
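/*
 * Illustrative sketch (not part of the original header): swapping in a new
 * version and freeing the old one once all pre-existing readers are done.
 * "global_ptr", "new" and "struct foo" are hypothetical.
 *
 *	struct foo *old;
 *
 *	old = urcu_publish_content(&global_ptr, new);
 *	free(old);	// safe: synchronize_rcu() already waited for readers
 */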

/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
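/*
 * Illustrative sketch (not part of the original header): a reader thread
 * registers itself before its first read-side critical section and
 * unregisters before exiting. "reader_thread" and "stop" are hypothetical.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		while (!stop) {
 *			rcu_read_lock();
 *			...
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */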

#endif /* _URCU_H */