#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* x86 32/64 specific */
#define mb()  asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")

static inline void atomic_inc(int *v)
{
        asm volatile("lock; incl %0"
                     : "+m" (*v));
}

#define xchg(ptr, v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
        unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                   int size)
{
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
                             : "=q" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %k0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 8:
                asm volatile("xchgq %0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
        return x;
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

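/*
 * Illustrative sketch, not part of the original header: without ACCESS_ONCE()
 * the compiler may hoist the load of the flag out of the loop and spin
 * forever on a stale value. "stop_flag" is a hypothetical variable used only
 * for this example.
 *
 *      extern int stop_flag;
 *
 *      static void wait_for_stop(void)
 *      {
 *              while (!ACCESS_ONCE(stop_flag))
 *                      barrier();
 *      }
 */
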
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                smp_read_barrier_depends(); \
                                (_________p1); \
                                })

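/*
 * Illustrative read-side sketch, not part of the original header; "struct
 * mydata" and "shared_data" are hypothetical names. The protected pointer may
 * only be dereferenced between rcu_read_lock() and rcu_read_unlock(), both
 * defined below.
 *
 *      struct mydata { int value; };
 *      extern struct mydata *shared_data;
 *
 *      static int read_value(void)
 *      {
 *              struct mydata *p;
 *              int v;
 *
 *              rcu_read_lock();
 *              p = rcu_dereference(shared_data);
 *              v = p ? p->value : -1;
 *              rcu_read_unlock();
 *              return v;
 *      }
 */
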
#define SIGURCU SIGUSR1

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>

#define YIELD_READ  (1 << 0)
#define YIELD_WRITE (1 << 1)

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
        if (yield_active & YIELD_READ)
                if (rand_r(&rand_yield) & 0x1)
                        sched_yield();
}

static inline void debug_yield_write(void)
{
        if (yield_active & YIELD_WRITE)
                if (rand_r(&rand_yield) & 0x1)
                        sched_yield();
}

static inline void debug_yield_init(void)
{
        rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

/*
 * The trick here is that the bit position of RCU_GP_CTR_BIT must be a
 * multiple of 8, so we can use a full 8-bit, 16-bit or 32-bit bitmask for
 * the lower-order bits.
 */
#define RCU_GP_COUNT            (1UL << 0)
/* Use a number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT          (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK    (RCU_GP_CTR_BIT - 1)

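/*
 * Worked example (illustrative, assuming 64-bit longs): sizeof(long) == 8, so
 * RCU_GP_CTR_BIT == 1UL << 32 and RCU_GP_CTR_NEST_MASK == 0xffffffff. The low
 * 32 bits of a reader's counter track read-side critical section nesting
 * (incremented by RCU_GP_COUNT), while bit 32 snapshots the grace-period
 * phase of urcu_gp_ctr. With 32-bit longs the same split gives a 16-bit
 * nesting count and bit 16 as the phase bit.
 */
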
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

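/*
 * Returns non-zero when the reader whose per-thread counter is *value is
 * inside a read-side critical section (non-zero nesting bits) that started
 * before the current grace period (its snapshot of the RCU_GP_CTR_BIT phase
 * bit differs from the current urcu_gp_ctr). The writer must wait for such
 * readers before a grace period can complete.
 */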
static inline int rcu_old_gp_ongoing(long *value)
{
        long v;

        if (value == NULL)
                return 0;
        debug_yield_write();
        v = ACCESS_ONCE(*value);
        debug_yield_write();
        return (v & RCU_GP_CTR_NEST_MASK) &&
                ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
        long tmp;

        debug_yield_read();
        tmp = urcu_active_readers;
        debug_yield_read();
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
                urcu_active_readers = urcu_gp_ctr;
        else
                urcu_active_readers = tmp + RCU_GP_COUNT;
        debug_yield_read();
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
         */
        barrier();
        debug_yield_read();
}

static inline void rcu_read_unlock(void)
{
        debug_yield_read();
        barrier();
        debug_yield_read();
        /*
         * Finish using rcu before decrementing the active readers count.
         * See force_mb_all_threads().
         */
        urcu_active_readers -= RCU_GP_COUNT;
        debug_yield_read();
}

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                (p) = (v); \
        })

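/*
 * Illustrative writer-side publication sketch, not part of the original
 * header, reusing the hypothetical "struct mydata" and "shared_data" names
 * from the read-side sketch above. The structure is fully initialized before
 * the pointer becomes visible to readers.
 *
 *      static void publish(struct mydata *new)
 *      {
 *              new->value = 42;
 *              rcu_assign_pointer(shared_data, new);
 *      }
 */
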
#define rcu_xchg_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                xchg(p, v); \
        })

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define urcu_publish_content(p, v) \
        ({ \
                void *oldptr; \
                debug_yield_write(); \
                oldptr = rcu_xchg_pointer(p, v); \
                synchronize_rcu(); \
                oldptr; \
        })

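/*
 * Illustrative update sketch, not part of the original header, again using
 * the hypothetical "shared_data" pointer. The old structure may be freed as
 * soon as urcu_publish_content() returns, because synchronize_rcu() has
 * waited for all pre-existing readers to finish.
 *
 *      static void update(struct mydata *new)
 *      {
 *              struct mydata *old;
 *
 *              old = urcu_publish_content(&shared_data, new);
 *              free(old);
 *      }
 */
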
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
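
/*
 * Illustrative reader-thread lifecycle sketch, not part of the original
 * header: a thread registers before entering read-side critical sections so
 * the writer can track its per-thread counter, and unregisters when done.
 * "done" is a hypothetical termination flag.
 *
 *      static void *reader_thread(void *arg)
 *      {
 *              urcu_register_thread();
 *              while (!ACCESS_ONCE(done)) {
 *                      rcu_read_lock();
 *                      ... dereference RCU-protected pointers here ...
 *                      rcu_read_unlock();
 *              }
 *              urcu_unregister_thread();
 *              return NULL;
 *      }
 */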

#endif /* _URCU_H */