#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#define __USE_GNU
#include <stdlib.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("" ::: "memory")

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)

/* x86 32/64 specific */
#define mb()    asm volatile("mfence" ::: "memory")
#define rmb()   asm volatile("lfence" ::: "memory")
#define wmb()   asm volatile("sfence" ::: "memory")
33 | ||
27b012e2 MD |
34 | static inline void atomic_inc(int *v) |
35 | { | |
36 | asm volatile("lock; incl %0" | |
f69f195a | 37 | : "+m" (*v)); |
27b012e2 MD |
38 | } |
39 | ||

#define xchg(ptr, v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
        unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *         but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                   int size)
{
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
                             : "=q" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %k0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 8:
                asm volatile("xchgq %0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
        return x;
}

/* No-op everywhere except on Alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)      ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                smp_read_barrier_depends(); \
                                (_________p1); \
                                })

#include <signal.h>     /* for SIGUSR1 */
#define SIGURCU SIGUSR1

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>

#define YIELD_READ      (1 << 0)
#define YIELD_WRITE     (1 << 1)

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
        if (yield_active & YIELD_READ)
                if (rand_r(&rand_yield) & 0x1)
                        sched_yield();
}

static inline void debug_yield_write(void)
{
        if (yield_active & YIELD_WRITE)
                if (rand_r(&rand_yield) & 0x1)
                        sched_yield();
}

static inline void debug_yield_init(void)
{
        rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

/*
 * Limiting the nesting level to 256 to keep instructions small in the read
 * fast-path.
 */
#define RCU_GP_COUNT            (1U << 0)
#define RCU_GP_CTR_BIT          (1U << 8)
#define RCU_GP_CTR_NEST_MASK    (RCU_GP_CTR_BIT - 1)

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern int urcu_gp_ctr;

extern int __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(int *value)
{
        int v;

        if (value == NULL)
                return 0;
        debug_yield_write();
        v = ACCESS_ONCE(*value);
        debug_yield_write();
        return (v & RCU_GP_CTR_NEST_MASK) &&
                ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
        int tmp;

        debug_yield_read();
        tmp = urcu_active_readers;
        debug_yield_read();
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
                urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
        else
                urcu_active_readers = tmp + RCU_GP_COUNT;
        debug_yield_read();
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
         */
        barrier();
        debug_yield_read();
}

static inline void rcu_read_unlock(void)
{
        debug_yield_read();
        barrier();
        debug_yield_read();
        /*
         * Finish using rcu before decrementing the active readers count.
         * See force_mb_all_threads().
         */
        urcu_active_readers -= RCU_GP_COUNT;
        debug_yield_read();
}

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                (p) = (v); \
        })

#define rcu_xchg_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                xchg(p, v); \
        })

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can be freed.
 */
#define urcu_publish_content(p, v) \
        ({ \
                void *oldptr; \
                debug_yield_write(); \
                oldptr = rcu_xchg_pointer(p, v); \
                synchronize_rcu(); \
                oldptr; \
        })

/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
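
/*
 * Illustrative sketch (not part of the original header): every reader thread
 * must register before its first rcu_read_lock() and unregister before
 * exiting, so that synchronize_rcu() knows which threads to wait for.
 *
 *     void *reader_thread(void *arg)
 *     {
 *             int i;
 *
 *             urcu_register_thread();
 *             for (i = 0; i < 1000; i++) {
 *                     rcu_read_lock();
 *                     // ... rcu_dereference() the protected data ...
 *                     rcu_read_unlock();
 *             }
 *             urcu_unregister_thread();
 *             return NULL;
 *     }
 */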

#endif /* _URCU_H */