#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#define __USE_GNU
#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* x86 32/64 specific */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")

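/*
 * Example (illustrative sketch only, not part of the original header): how
 * wmb()/rmb() pair up for simple message passing between two threads. The
 * names msg_data, msg_ready, produce() and consume() are hypothetical.
 *
 *	int msg_data;
 *	int msg_ready;
 *
 *	void produce(int d)
 *	{
 *		msg_data = d;
 *		wmb();			// order the data store before the flag store
 *		msg_ready = 1;
 *	}
 *
 *	int consume(void)
 *	{
 *		while (!ACCESS_ONCE(msg_ready))
 *			;		// spin until the flag is observed
 *		rmb();			// order the flag load before the data load
 *		return msg_data;
 *	}
 */
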
static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

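/*
 * Example (illustrative sketch only): atomic_inc() issues a locked
 * increment, so concurrent threads never lose updates to a shared counter.
 * The counter name below is hypothetical.
 *
 *	int nr_events;
 *
 *	void count_event(void)
 *	{
 *		atomic_inc(&nr_events);	// safe without any external lock
 *	}
 */
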
#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}

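/*
 * Example (illustrative sketch only): xchg() atomically stores a new value
 * and returns the old one, which makes it usable as a one-shot "claim"
 * operation. The flag name below is hypothetical.
 *
 *	int claimed;
 *
 *	int try_claim(void)
 *	{
 *		// Only the first caller sees 0; everyone else sees 1.
 *		return xchg(&claimed, 1) == 0;
 *	}
 */
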
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

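/*
 * Example (illustrative sketch only): without ACCESS_ONCE() the compiler
 * may hoist the load of a flag out of the loop and spin forever on a
 * cached value. The stop flag and work function below are hypothetical.
 *
 *	int stop;
 *
 *	void worker(void)
 *	{
 *		while (!ACCESS_ONCE(stop))	// refetched on every pass
 *			do_work();
 *	}
 */
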
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

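/*
 * Example (illustrative sketch only): fetch a shared pointer once with
 * rcu_dereference() and then work on the local copy. The struct and the
 * global pointer below are hypothetical.
 *
 *	struct foo { int a; };
 *	struct foo *gp;
 *
 *	int read_foo(void)
 *	{
 *		// Must run inside a read-side critical section.
 *		struct foo *p = rcu_dereference(gp);
 *		return p ? p->a : -1;
 *	}
 */
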
#define SIGURCU SIGUSR1

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

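/*
 * Example (illustrative sketch only): a test program built with
 * -DDEBUG_YIELD can widen race windows by enabling random yields in
 * readers, writers, or both. Each thread seeds its own rand_yield.
 *
 *	int main(void)
 *	{
 *		yield_active = YIELD_READ | YIELD_WRITE;
 *		debug_yield_init();	// seeds this thread's rand_yield
 *		// ... start reader and writer threads, each calling
 *		// debug_yield_init() before its first yield point ...
 *	}
 */
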
/*
 * Limiting the nesting level to 256 to keep instructions small in the read
 * fast-path.
 */
#define RCU_GP_COUNT		(1U << 0)
#define RCU_GP_CTR_BIT		(1U << 8)
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

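/*
 * Worked example (illustrative, derived from the definitions above) of the
 * resulting counter word:
 *
 *	bits 0-7 (RCU_GP_CTR_NEST_MASK): read-side nesting depth, so
 *		rcu_read_lock() can nest up to 255 deep without spilling
 *		into bit 8;
 *	bit 8 (RCU_GP_CTR_BIT): the grace-period phase, flipped by the
 *		writer once per grace period.
 *
 * E.g. a reader nested twice while the phase bit is set holds
 * 0x102 in urcu_active_readers: (1 << 8) plus 2 * RCU_GP_COUNT.
 */
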
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern int urcu_gp_ctr;

extern int __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(int *value)
{
	int v;

	if (value == NULL)
		return 0;
	debug_yield_write();
	v = ACCESS_ONCE(*value);
	debug_yield_write();
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}

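/*
 * Example (illustrative sketch only): the writer side can poll this
 * predicate to wait until a given reader has left the previous
 * grace-period phase. The helper below is hypothetical; the real wait
 * loop lives in urcu.c.
 *
 *	void wait_for_reader(int *reader_ctr)
 *	{
 *		while (rcu_old_gp_ongoing(reader_ctr))
 *			barrier();	// spin; a real loop may also yield
 *	}
 */
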
static inline void rcu_read_lock(void)
{
	int tmp;

	debug_yield_read();
	tmp = urcu_active_readers;
	debug_yield_read();
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	debug_yield_read();
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
	debug_yield_read();
}

static inline void rcu_read_unlock(void)
{
	debug_yield_read();
	barrier();
	debug_yield_read();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
	debug_yield_read();
}

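/*
 * Example (illustrative sketch only): a complete read-side critical
 * section. Sections may nest (see RCU_GP_CTR_NEST_MASK above); only the
 * outermost rcu_read_lock() snapshots urcu_gp_ctr. The struct and pointer
 * names are hypothetical.
 *
 *	struct foo { int a; };
 *	struct foo *gp;
 *
 *	int reader(void)
 *	{
 *		struct foo *p;
 *		int v;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gp);
 *		v = p ? p->a : -1;
 *		rcu_read_unlock();
 *		return v;
 *	}
 */
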
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		(p) = (v); \
	})

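/*
 * Example (illustrative sketch only): initialize the structure fully
 * before publishing it, and let rcu_assign_pointer() supply the write
 * barrier. The names below are hypothetical.
 *
 *	struct foo { int a; };
 *	struct foo *gp;
 *
 *	void publish_foo(int a)
 *	{
 *		struct foo *p = malloc(sizeof(*p));
 *		p->a = a;			// initialize before publication
 *		rcu_assign_pointer(gp, p);	// wmb(), then the pointer store
 *	}
 */
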
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		debug_yield_write(); \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

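/*
 * Example (illustrative sketch only): urcu_publish_content() swaps in the
 * new structure and returns the old one only after a grace period, so the
 * returned pointer can be freed immediately. The names are hypothetical.
 *
 *	struct foo *gp;
 *
 *	void update_foo(struct foo *new)
 *	{
 *		struct foo *old;
 *
 *		old = urcu_publish_content(&gp, new);
 *		free(old);	// no reader can still hold a reference
 *	}
 */
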
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
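
/*
 * Example (illustrative sketch only): every reader thread must register
 * before its first read-side critical section and unregister before it
 * exits, so that synchronize_rcu() knows which threads to wait for.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		// ... rcu_read_lock()/rcu_read_unlock() sections ...
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */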

#endif /* _URCU_H */