#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <sched.h>

#include <compiler.h>
#include <arch.h>

/*
 * Identify a shared load. An smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p)			\
	({				\
		smp_rmc();		\
		_LOAD_SHARED(p);	\
	})

/*
 * Identify a shared store. An smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing. Returns v.
 */
#define STORE_SHARED(x, v)		\
	({				\
		_STORE_SHARED(x, v);	\
		smp_wmc();		\
		(v);			\
	})
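
/*
 * Minimal usage sketch (illustrative, not part of the original header): a
 * writer publishes a flag with STORE_SHARED() (store followed by smp_wmc())
 * and a reader polls it with LOAD_SHARED() (smp_rmc() followed by the load).
 * The "ready" variable and the writer()/reader() functions are hypothetical
 * names used only for this example.
 *
 *	int ready;
 *
 *	void writer(void)
 *	{
 *		STORE_SHARED(ready, 1);
 *	}
 *
 *	void reader(void)
 *	{
 *		while (!LOAD_SHARED(ready))
 *			sched_yield();
 *	}
 */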
68 | ||
69 | /** | |
70 | * _rcu_dereference - reads (copy) a RCU-protected pointer to a local variable | |
71 | * into a RCU read-side critical section. The pointer can later be safely | |
72 | * dereferenced within the critical section. | |
73 | * | |
74 | * This ensures that the pointer copy is invariant thorough the whole critical | |
75 | * section. | |
76 | * | |
77 | * Inserts memory barriers on architectures that require them (currently only | |
78 | * Alpha) and documents which pointers are protected by RCU. | |
79 | * | |
809f4fde MD |
80 | * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative |
81 | * optimizations (e.g. VSS: Value Speculation Scheduling) does not perform the | |
82 | * data read before the pointer read by speculating the value of the pointer. | |
83 | * Correct ordering is ensured because the pointer is read as a volatile access. | |
84 | * This acts as a global side-effect operation, which forbids reordering of | |
015c702f MD |
85 | * dependent memory operations. Note that such concern about dependency-breaking |
86 | * optimizations will eventually be taken care of by the "memory_order_consume" | |
87 | * addition to forthcoming C++ standard. | |
809f4fde | 88 | * |
adcfce54 MD |
89 | * Should match rcu_assign_pointer() or rcu_xchg_pointer(). |
90 | */ | |
91 | ||
92 | #define _rcu_dereference(p) ({ \ | |
93 | typeof(p) _________p1 = LOAD_SHARED(p); \ | |
94 | smp_read_barrier_depends(); \ | |
95 | (_________p1); \ | |
96 | }) | |
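
/*
 * Illustrative sketch (not part of the original header): copy the protected
 * pointer once with _rcu_dereference() and work only on the copy; re-reading
 * the global pointer directly could observe a different value in the middle of
 * the critical section. "global_cfg" and "struct config" are hypothetical
 * names.
 *
 *	struct config *cfg = _rcu_dereference(global_cfg);
 *
 *	if (cfg)
 *		apply(cfg->timeout, cfg->retries);
 */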
97 | ||
98 | /* | |
99 | * This code section can only be included in LGPL 2.1 compatible source code. | |
100 | * See below for the function call wrappers which can be used in code meant to | |
101 | * be only linked with the Userspace RCU library. This comes with a small | |
102 | * performance degradation on the read-side due to the added function calls. | |
103 | * This is required to permit relinking with newer versions of the library. | |
104 | */ | |
105 | ||
106 | /* | |
107 | * The signal number used by the RCU library can be overridden with | |
108 | * -DSIGURCU= when compiling the library. | |
109 | */ | |
110 | #ifndef SIGURCU | |
111 | #define SIGURCU SIGUSR1 | |
112 | #endif | |
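
/*
 * For example (a hedged sketch; the exact invocation depends on the build
 * system), building the library with "cc -DSIGURCU=SIGUSR2 ..." would make it
 * use SIGUSR2 instead of the default SIGUSR1.
 */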
113 | ||
114 | /* | |
115 | * If a reader is really non-cooperative and refuses to commit its | |
116 | * urcu_active_readers count to memory (there is no barrier in the reader | |
117 | * per-se), kick it after a few loops waiting for it. | |
118 | */ | |
119 | #define KICK_READER_LOOPS 10000 | |
120 | ||
bc6c15bb | 121 | /* |
ae62b5e8 | 122 | * Active attempts to check for reader Q.S. before calling sched_yield(). |
bc6c15bb MD |
123 | */ |
124 | #define RCU_QS_ACTIVE_ATTEMPTS 100 | |
125 | ||
7ac06cef MD |
126 | #ifdef DEBUG_RCU |
127 | #define rcu_assert(args...) assert(args) | |
128 | #else | |
129 | #define rcu_assert(args...) | |
130 | #endif | |
131 | ||
adcfce54 MD |
132 | #ifdef DEBUG_YIELD |
133 | #include <sched.h> | |
134 | #include <time.h> | |
135 | #include <pthread.h> | |
136 | #include <unistd.h> | |
137 | ||
138 | #define YIELD_READ (1 << 0) | |
139 | #define YIELD_WRITE (1 << 1) | |
140 | ||
b4ce1526 | 141 | /* |
0a1d290b | 142 | * Updates without URCU_MB are much slower. Account this in |
b4ce1526 MD |
143 | * the delay. |
144 | */ | |
0a1d290b | 145 | #ifdef URCU_MB |
adcfce54 MD |
146 | /* maximum sleep delay, in us */ |
147 | #define MAX_SLEEP 50 | |
148 | #else | |
149 | #define MAX_SLEEP 30000 | |
150 | #endif | |
151 | ||
152 | extern unsigned int yield_active; | |
153 | extern unsigned int __thread rand_yield; | |
154 | ||
155 | static inline void debug_yield_read(void) | |
156 | { | |
157 | if (yield_active & YIELD_READ) | |
158 | if (rand_r(&rand_yield) & 0x1) | |
159 | usleep(rand_r(&rand_yield) % MAX_SLEEP); | |
160 | } | |
161 | ||
162 | static inline void debug_yield_write(void) | |
163 | { | |
164 | if (yield_active & YIELD_WRITE) | |
165 | if (rand_r(&rand_yield) & 0x1) | |
166 | usleep(rand_r(&rand_yield) % MAX_SLEEP); | |
167 | } | |
168 | ||
169 | static inline void debug_yield_init(void) | |
170 | { | |
171 | rand_yield = time(NULL) ^ pthread_self(); | |
172 | } | |
173 | #else | |
174 | static inline void debug_yield_read(void) | |
175 | { | |
176 | } | |
177 | ||
178 | static inline void debug_yield_write(void) | |
179 | { | |
180 | } | |
181 | ||
182 | static inline void debug_yield_init(void) | |
183 | { | |
184 | ||
185 | } | |
186 | #endif | |
187 | ||
0a1d290b | 188 | #ifdef URCU_MB |
adcfce54 MD |
189 | static inline void reader_barrier() |
190 | { | |
191 | smp_mb(); | |
192 | } | |
193 | #else | |
194 | static inline void reader_barrier() | |
195 | { | |
196 | barrier(); | |
197 | } | |
198 | #endif | |
199 | ||
200 | /* | |
201 | * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a | |
202 | * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits. | |
203 | */ | |
204 | #define RCU_GP_COUNT (1UL << 0) | |
205 | /* Use the amount of bits equal to half of the architecture long size */ | |
206 | #define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2)) | |
207 | #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1) | |
ae62b5e8 | 208 | #define RCU_GP_ONGOING (RCU_GP_CTR_BIT << 1) |
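
/*
 * Worked example (illustrative): on an LP64 architecture, sizeof(long) == 8,
 * so RCU_GP_CTR_BIT == 1UL << 32. Bits 0-31 then hold the nesting count
 * (RCU_GP_CTR_NEST_MASK == 0xffffffff), bit 32 carries the grace-period
 * parity, and RCU_GP_ONGOING is bit 33. With a 32-bit long, the split is
 * 16/16 instead.
 */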

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

struct urcu_reader_status {
	long active_readers;
	long gp_waiting;
};

extern struct urcu_reader_status __thread urcu_reader_status;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
238 | ||
239 | static inline void _rcu_read_lock(void) | |
240 | { | |
ae62b5e8 | 241 | long tmp, gp_ctr; |
adcfce54 | 242 | |
8b25e300 | 243 | tmp = urcu_reader_status.active_readers; |
adcfce54 | 244 | /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */ |
67ef1a2c | 245 | if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) { |
8b25e300 MD |
246 | /* |
247 | * volatile accesses can be reordered and optimized when within | |
248 | * the same statement. | |
249 | */ | |
250 | if (unlikely((gp_ctr = _LOAD_SHARED(urcu_gp_ctr)) | |
251 | & RCU_GP_ONGOING) && | |
252 | unlikely(LOAD_SHARED(urcu_reader_status.gp_waiting))) { | |
ae62b5e8 MD |
253 | sched_yield(); |
254 | gp_ctr = _LOAD_SHARED(urcu_gp_ctr); | |
255 | } | |
8b25e300 | 256 | _STORE_SHARED(urcu_reader_status.active_readers, gp_ctr); |
67ef1a2c MD |
257 | /* |
258 | * Set active readers count for outermost nesting level before | |
259 | * accessing the pointer. See force_mb_all_threads(). | |
260 | */ | |
261 | reader_barrier(); | |
262 | } else { | |
8b25e300 MD |
263 | _STORE_SHARED(urcu_reader_status.active_readers, |
264 | tmp + RCU_GP_COUNT); | |
67ef1a2c | 265 | } |
adcfce54 MD |
266 | } |
267 | ||
268 | static inline void _rcu_read_unlock(void) | |
269 | { | |
adcfce54 MD |
270 | /* |
271 | * Finish using rcu before decrementing the pointer. | |
272 | * See force_mb_all_threads(). | |
ae62b5e8 MD |
273 | * Formally only needed for outermost nesting level, but leave barrier |
274 | * in place for nested unlocks to remove a branch from the common case | |
275 | * (no nesting). | |
adcfce54 | 276 | */ |
ae62b5e8 | 277 | reader_barrier(); |
8b25e300 MD |
278 | _STORE_SHARED(urcu_reader_status.active_readers, |
279 | urcu_reader_status.active_readers - RCU_GP_COUNT); | |
adcfce54 MD |
280 | } |
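
/*
 * Read-side usage sketch (illustrative, not part of the original header):
 * bracket pointer accesses with _rcu_read_lock()/_rcu_read_unlock(); critical
 * sections may nest, the nesting count being kept in the low-order bits of
 * active_readers. "global_cfg", "struct config" and do_something() are
 * hypothetical names.
 *
 *	struct config *cfg;
 *
 *	_rcu_read_lock();
 *	cfg = _rcu_dereference(global_cfg);
 *	if (cfg)
 *		do_something(cfg);
 *	_rcu_read_unlock();
 */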
281 | ||
282 | /** | |
283 | * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure | |
284 | * meant to be read by RCU read-side critical sections. Returns the assigned | |
285 | * value. | |
286 | * | |
287 | * Documents which pointers will be dereferenced by RCU read-side critical | |
288 | * sections and adds the required memory barriers on architectures requiring | |
289 | * them. It also makes sure the compiler does not reorder code initializing the | |
290 | * data structure before its publication. | |
291 | * | |
292 | * Should match rcu_dereference_pointer(). | |
293 | */ | |
294 | ||
295 | #define _rcu_assign_pointer(p, v) \ | |
296 | ({ \ | |
297 | if (!__builtin_constant_p(v) || \ | |
298 | ((v) != NULL)) \ | |
299 | wmb(); \ | |
300 | STORE_SHARED(p, v); \ | |
301 | }) | |
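
/*
 * Writer-side usage sketch (illustrative, not part of the original header):
 * fully initialize the new object, then publish it; the wmb() issued by
 * _rcu_assign_pointer() orders the initialization before the store.
 * "global_cfg" and "struct config" are hypothetical names.
 *
 *	struct config *cfg = malloc(sizeof(*cfg));
 *
 *	cfg->timeout = 10;
 *	cfg->retries = 3;
 *	_rcu_assign_pointer(global_cfg, cfg);
 */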
302 | ||
4d1ce26f MD |
303 | /** |
304 | * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer | |
305 | * is as expected by "old". If succeeds, returns the previous pointer to the | |
306 | * data structure, which can be safely freed after waiting for a quiescent state | |
307 | * using synchronize_rcu(). If fails (unexpected value), returns old (which | |
308 | * should not be freed !). | |
309 | */ | |
310 | ||
311 | #define _rcu_cmpxchg_pointer(p, old, _new) \ | |
312 | ({ \ | |
313 | if (!__builtin_constant_p(_new) || \ | |
314 | ((_new) != NULL)) \ | |
315 | wmb(); \ | |
316 | cmpxchg(p, old, _new); \ | |
317 | }) | |
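
/*
 * Usage sketch (illustrative, not part of the original header): replace the
 * published pointer only if it still holds the value read earlier, then
 * reclaim the displaced object after a grace period. "global_cfg" and
 * "new_cfg" are hypothetical names.
 *
 *	struct config *old = _rcu_dereference(global_cfg);
 *	struct config *ret = _rcu_cmpxchg_pointer(&global_cfg, old, new_cfg);
 *
 *	if (ret == old) {
 *		synchronize_rcu();
 *		free(old);
 *	}
 */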
318 | ||
adcfce54 MD |
319 | /** |
320 | * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous | |
67ef1a2c | 321 | * pointer to the data structure, which can be safely freed after waiting for a |
adcfce54 MD |
322 | * quiescent state using synchronize_rcu(). |
323 | */ | |
324 | ||
325 | #define _rcu_xchg_pointer(p, v) \ | |
326 | ({ \ | |
327 | if (!__builtin_constant_p(v) || \ | |
328 | ((v) != NULL)) \ | |
329 | wmb(); \ | |
330 | xchg(p, v); \ | |
331 | }) | |
332 | ||
333 | /* | |
334 | * Exchanges the pointer and waits for quiescent state. | |
335 | * The pointer returned can be freed. | |
336 | */ | |
337 | #define _rcu_publish_content(p, v) \ | |
338 | ({ \ | |
339 | void *oldptr; \ | |
340 | oldptr = _rcu_xchg_pointer(p, v); \ | |
341 | synchronize_rcu(); \ | |
342 | oldptr; \ | |
343 | }) | |
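
/*
 * Usage sketch (illustrative, not part of the original header): swap in a new
 * version and free the displaced one; since _rcu_publish_content() already
 * calls synchronize_rcu(), the returned pointer can be freed immediately.
 * "global_cfg" and "new_cfg" are hypothetical names.
 *
 *	struct config *removed;
 *
 *	removed = _rcu_publish_content(&global_cfg, new_cfg);
 *	free(removed);
 */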
344 | ||
345 | #endif /* _URCU_STATIC_H */ |