/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/assert.h>
#include <urcu/config.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-bp.h>
#include <urcu/static/urcu-bp.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-bp.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	urcu_posix_assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC			\
	(sizeof(struct registry_chunk)		\
	+ INIT_NR_THREADS * sizeof(struct urcu_bp_reader))

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int urcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void __attribute__((constructor)) _urcu_bp_init(void);
static
void __attribute__((destructor)) urcu_bp_exit(void);
static void urcu_call_rcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the entire duration of the grace-period
 * wait: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct urcu_bp_gp urcu_bp_gp = { .ctr = URCU_BP_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct urcu_bp_reader *, urcu_bp_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_bp_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_bp_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_bp_reader_state(&index->ctr)) {
			case URCU_BP_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_BP_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_BP_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

void urcu_bp_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ URCU_BP_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
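
/*
 * Updater-side usage sketch (illustrative, not part of this library):
 * publish a new version of a structure, wait for a grace period, then
 * reclaim the old version. "global_cfg", "struct cfg" and the use of
 * the flavor's pointer-exchange wrapper (urcu_bp_xchg_pointer) are
 * hypothetical application-side assumptions.
 *
 *	struct cfg *new_cfg, *old_cfg;
 *
 *	new_cfg = malloc(sizeof(*new_cfg));
 *	new_cfg->value = 42;			// initialize before publishing
 *	old_cfg = urcu_bp_xchg_pointer(&global_cfg, new_cfg);
 *	urcu_bp_synchronize_rcu();		// wait out pre-existing readers
 *	free(old_cfg);				// no reader can still see old_cfg
 */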

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_bp_read_lock(void)
{
	_urcu_bp_read_lock();
}

void urcu_bp_read_unlock(void)
{
	_urcu_bp_read_unlock();
}

int urcu_bp_read_ongoing(void)
{
	return _urcu_bp_read_ongoing();
}
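
/*
 * Reader-side usage sketch (illustrative): the "bulletproof" flavor
 * registers the calling thread automatically on its first
 * urcu_bp_read_lock(), so no explicit setup is required. "global_cfg",
 * "struct cfg" and "use()" are the same hypothetical names as in the
 * updater sketch above.
 *
 *	struct cfg *c;
 *
 *	urcu_bp_read_lock();
 *	c = urcu_bp_dereference(global_cfg);	// assumed dereference wrapper
 *	if (c)
 *		use(c->value);
 *	urcu_bp_read_unlock();
 */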

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized
 * chunk. Else, try expanding the last chunk. If this fails, allocate a
 * new chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		urcu_posix_assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		urcu_posix_assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
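
/*
 * Sizing note (illustrative): with INIT_NR_THREADS = 8, the initial
 * chunk provides room for 8 reader slots. Each later expansion doubles
 * the whole mapping (chunk header included), so the slot capacity
 * roughly doubles on every expand_arena() call, while already
 * handed-out slots never move.
 */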

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	urcu_posix_assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(urcu_bp_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}

static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(urcu_bp_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void urcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(urcu_bp_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	_urcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void urcu_bp_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(urcu_bp_reader)))
		urcu_bp_register(); /* If not yet registered. */
}

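/*
 * Registration sketch (illustrative): explicit registration is optional
 * with this flavor, but a thread may call urcu_bp_register_thread() up
 * front to pay the registration cost at a predictable point rather than
 * inside its first read-side critical section.
 *
 *	static void *worker(void *arg)
 *	{
 *		urcu_bp_register_thread();	// optional with urcu-bp
 *		// ... read-side critical sections ...
 *		return NULL;
 *	}
 */
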
/* Disable signals, take mutex, remove from registry */
static
void urcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	urcu_bp_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	urcu_bp_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif

static
void urcu_bp_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	urcu_bp_sys_membarrier_status(available);
}

static
void _urcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!urcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		urcu_bp_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void urcu_bp_exit(void)
{
	urcu_call_rcu_exit();

	mutex_lock(&init_lock);
	if (!--urcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure we don't fork() while a concurrent thread is executing with
 * either of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void urcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void urcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct urcu_bp_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct urcu_bp_reader *) &chunk->data[0];
				rcu_reader_reg < (struct urcu_bp_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void urcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
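
/*
 * Fork-wiring sketch (illustrative assumption, not part of this file):
 * an application that forks while other threads may be inside read-side
 * critical sections can hook the three handlers above into
 * pthread_atfork():
 *
 *	static void setup_urcu_bp_fork_handlers(void)
 *	{
 *		(void) pthread_atfork(urcu_bp_before_fork,
 *			urcu_bp_after_fork_parent,
 *			urcu_bp_after_fork_child);
 *	}
 */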

void *urcu_bp_dereference_sym(void *p)
{
	return _rcu_dereference(p);
}

void *urcu_bp_set_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *urcu_bp_xchg_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *urcu_bp_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}

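/*
 * Ordering sketch (illustrative): the cmm_wmb() in the publication
 * wrappers above orders initialization of a new object before the store
 * that publishes its pointer, pairing with the address-dependency
 * ordering readers get through rcu_dereference(). "head", "node" and
 * their fields are hypothetical:
 *
 *	node->key = key;	// initialize fields first
 *	node->val = val;
 *	old = urcu_bp_cmpxchg_pointer(&head, NULL, node);	// then publish
 */
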
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"
#include "urcu-poll-impl.h"