/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sys/mman.h>

#include "urcu/arch.h"
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu-bp.h"
#include "urcu/static/urcu-bp.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu-bp.h"
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct rcu_reader)

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int rcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY = 0,
	MEMBARRIER_CMD_SHARED = (1 << 0),
};

static
void __attribute__((constructor)) rcu_bp_init(void);
static
void __attribute__((destructor)) rcu_bp_exit(void);

int urcu_bp_has_sys_membarrier;

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace period: it is
 * sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);

static CDS_LIST_HEAD(registry);

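/*
 * Each registry chunk is an mmap'd block: data[] is a flexible array
 * member holding struct rcu_reader slots, data_len is the usable
 * payload size (mapping size minus the chunk header), and used tracks
 * how many bytes of that payload are currently allocated.
 */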
struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

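/*
 * Memory barrier issued on behalf of the updater: when the
 * membarrier(2) system call is available, ask the kernel to issue
 * memory barriers on all running threads; otherwise fall back on a
 * local cmm_smp_mb().
 */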
static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier))
		(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
	else
		cmm_smp_mb();
}

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

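/*
 * Wait for a grace period: once this returns, all reader critical
 * sections that started before the call have completed. This is done
 * by flipping the rcu_gp.ctr phase bit and waiting, in two passes of
 * wait_for_readers(), for every registered reader to be quiescent or
 * to have observed the new phase.
 */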
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing the data structure
	 * pointed to by the new ptr. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for the other
	 * parity quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress). Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}

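/*
 * Reader-side usage sketch (illustrative only; "struct mydata",
 * "global_ptr" and use() are hypothetical application names). The
 * "bulletproof" flavor registers the calling thread lazily, so no
 * explicit thread registration is needed before rcu_read_lock():
 *
 *	struct mydata *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(global_ptr);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 */
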
/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = mmap(NULL, new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = mmap(NULL, new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

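/*
 * Allocate one struct rcu_reader slot from the arena. Scans existing
 * chunks for a free slot and expands the arena at most once per call;
 * returns NULL if no slot is found even after expansion.
 */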
static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(rcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}

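/* Return the registry chunk containing rcu_reader_reg, or NULL if none does. */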
static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(rcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check whether a signal handler concurrently registered our
	 * thread since the check in rcu_read_lock().
	 */
	if (URCU_TLS(rcu_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	rcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

/* Disable signals, take mutex, remove from registry */
static
void rcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	rcu_bp_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	rcu_bp_unregister(rcu_key);
}

static
void rcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!rcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
		if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED)) {
			urcu_bp_has_sys_membarrier = 1;
		}
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void rcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--rcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap(chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure fork() does not race with a concurrent thread executing with
 * any of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

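/*
 * Illustrative note: the three fork handlers above have the signature
 * expected by pthread_atfork(), so an application that forks while
 * using this flavor could, for instance, install them once at startup:
 *
 *	pthread_atfork(rcu_bp_before_fork,
 *		rcu_bp_after_fork_parent,
 *		rcu_bp_after_fork_child);
 *
 * This file does not install them itself.
 */
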
void *rcu_dereference_sym_bp(void *p)
{
	return _rcu_dereference(p);
}

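/*
 * Pointer publication wrappers: the cmm_wmb() orders initialization of
 * the newly published object before the pointer store, pairing with the
 * dependency ordering on the rcu_dereference() side.
 */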
void *rcu_set_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *rcu_xchg_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *rcu_cmpxchg_pointer_sym_bp(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"