liburcu-bp: Use membarrier private expedited when available
[urcu.git] / src / urcu-bp.c
CommitLineData
fdee2e6d
MD
1/*
2 * urcu-bp.c
3 *
4 * Userspace RCU library, "bulletproof" version.
5 *
6982d6d7 6 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
fdee2e6d
MD
7 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
24 */
25
71c811bf 26#define _LGPL_SOURCE
fdee2e6d
MD
27#include <stdio.h>
28#include <pthread.h>
29#include <signal.h>
30#include <assert.h>
31#include <stdlib.h>
32#include <string.h>
33#include <errno.h>
34#include <poll.h>
35#include <unistd.h>
3745305b 36#include <stdbool.h>
fdee2e6d
MD
37#include <sys/mman.h>
38
999991c6 39#include "urcu/arch.h"
d73fb81f 40#include "urcu/wfcqueue.h"
57760d44 41#include "urcu/map/urcu-bp.h"
af7c2dbe 42#include "urcu/static/urcu-bp.h"
618b2595 43#include "urcu-pointer.h"
bd252a04 44#include "urcu/tls-compat.h"
71c811bf 45
4a6d7378
MD
46#include "urcu-die.h"
47
fdee2e6d 48/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
71c811bf 49#undef _LGPL_SOURCE
fdee2e6d 50#include "urcu-bp.h"
71c811bf 51#define _LGPL_SOURCE
fdee2e6d 52
4c1ae2ea
MD
53#ifndef MAP_ANONYMOUS
54#define MAP_ANONYMOUS MAP_ANON
55#endif
56
c7eaf61c
MD
#ifdef __linux__
/*
 * Thin wrapper over Linux mremap(2): resize a mapping, optionally
 * allowing the kernel to move it (MREMAP_MAYMOVE).
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic: it always fails, so callers fall back to
 * allocating a new chunk (see expand_arena()).
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	/* Unused on this fallback path; silence -Wunused-parameter. */
	(void) old_address;
	(void) old_size;
	(void) new_size;

	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
82
9340c38d
MD
83/* Sleep delay in ms */
84#define RCU_SLEEP_DELAY_MS 10
95b94246
MD
85#define INIT_NR_THREADS 8
86#define ARENA_INIT_ALLOC \
87 sizeof(struct registry_chunk) \
88 + INIT_NR_THREADS * sizeof(struct rcu_reader)
fdee2e6d 89
b7b6a8f5
PB
90/*
91 * Active attempts to check for reader Q.S. before calling sleep().
92 */
93#define RCU_QS_ACTIVE_ATTEMPTS 100
94
76d6a951
MD
95static
96int rcu_bp_refcount;
97
999991c6
MD
/* If the headers do not support membarrier system call, fall back smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)		syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)		-ENOSYS
#endif

/*
 * membarrier(2) command values used by this library, duplicated here so
 * the build does not depend on recent kernel UAPI headers. Values match
 * the kernel's enum membarrier_cmd.
 */
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY = 0,
	MEMBARRIER_CMD_SHARED = (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
};
113
c1be8fb9
MD
114static
115void __attribute__((constructor)) rcu_bp_init(void);
116static
02be5561 117void __attribute__((destructor)) rcu_bp_exit(void);
fdee2e6d 118
d8d9a340 119#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
f541831e 120int urcu_bp_has_sys_membarrier;
d8d9a340 121#endif
f541831e 122
731ccb96
MD
123/*
124 * rcu_gp_lock ensures mutual exclusion between threads calling
125 * synchronize_rcu().
126 */
6abb4bd5 127static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
731ccb96
MD
128/*
129 * rcu_registry_lock ensures mutual exclusion between threads
130 * registering and unregistering themselves to/from the registry, and
131 * with threads reading that registry from synchronize_rcu(). However,
132 * this lock is not held all the way through the completion of awaiting
133 * for the grace period. It is sporadically released between iterations
134 * on the registry.
135 * rcu_registry_lock may nest inside rcu_gp_lock.
136 */
137static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
fdee2e6d 138
c1be8fb9
MD
139static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
140static int initialized;
141
142static pthread_key_t urcu_bp_key;
143
c13c2e55 144struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
fdee2e6d
MD
145
146/*
147 * Pointer to registry elements. Written to only by each individual reader. Read
148 * by both the reader and the writers.
149 */
2f661865 150DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);
fdee2e6d 151
16aa9ee8 152static CDS_LIST_HEAD(registry);
fdee2e6d 153
95b94246
MD
/*
 * One mmap'd chunk of the reader registry. Reader registration slots
 * (struct rcu_reader) are carved out of the flexible "data" array.
 * Chunk memory never moves once allocated: reader threads keep TLS
 * pointers into it (see add_thread()).
 */
struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];			/* flexible array holding reader slots */
};

/* Grow-only arena: an ordered list of registry chunks. */
struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};
fdee2e6d 168
4cf1675f
MD
169/* Saved fork signal mask, protected by rcu_gp_lock */
170static sigset_t saved_fork_signal_mask;
171
/*
 * Acquire @mutex, dying (urcu_die) on any unexpected pthread error.
 */
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	/*
	 * Paranoid variant: spin on trylock with a 10ms poll() sleep
	 * instead of blocking in pthread_mutex_lock().
	 */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL,0,10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
188
/*
 * Release @mutex, dying (urcu_die) on error.
 */
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
197
f541831e
MD
/*
 * Writer-side memory barrier. When the kernel's membarrier(2) private
 * expedited command is available (probed in rcu_sys_membarrier_init()),
 * use it to execute a barrier on all threads of the process; otherwise
 * fall back to a local smp_mb.
 */
static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}
207
731ccb96
MD
/*
 * Wait until every reader on @input_readers is either quiescent or has
 * observed the current rcu_gp.ctr value. Readers seen in the current
 * phase move to @cur_snap_readers (when non-NULL); quiescent readers
 * move to @qsreaders; readers still holding an old snapshot stay on
 * @input_readers and are polled again.
 *
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		/* Saturating counter: switch to sleeping after enough spins. */
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
265
/*
 * Wait for a grace period: returns once every reader registered at
 * entry has passed through a quiescent state. Blocks all signals for
 * the duration (readers may register from signal handlers), serializes
 * writers on rcu_gp_lock, and walks the registry under
 * rcu_registry_lock.
 */
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	/* No registered readers: trivially a grace period. */
	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr being
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
344
/*
 * library wrappers to be used by non-LGPL compatible source code.
 * Each forwards to the inline implementation in urcu/static/urcu-bp.h.
 */

/* Enter an RCU read-side critical section. */
void rcu_read_lock(void)
{
	_rcu_read_lock();
}

/* Exit an RCU read-side critical section. */
void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

/* Return non-zero if the calling thread is within a read-side critical section. */
int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
363
/*
 * Only grow for now. If empty, allocate a ARENA_INIT_ALLOC sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;	/* double the chunk size */

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		/* Zero only the newly-grown tail of the mapping. */
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
fdee2e6d 431
95b94246
MD
/*
 * Allocate a reader registration slot (struct rcu_reader) from the
 * arena. Scans every chunk for a free slot; expands the arena at most
 * once per call when none is found, then retries. Returns NULL if a
 * slot still cannot be found after expansion.
 */
static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		/* Skip chunks without enough unused space. */
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
464
/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	/*
	 * Store the slot as pthread-specific data so the key destructor
	 * (urcu_bp_thread_exit_notifier) runs at thread exit.
	 */
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(rcu_reader) = rcu_reader_reg;
}
489
c1be8fb9
MD
/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	/* NOTE(review): assumes pthread_t is an arithmetic type; not portable per POSIX. */
	rcu_reader_reg->tid = 0;
	/* Mark the slot free so arena_alloc() can reuse it. */
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}
501
502static
503struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
fdee2e6d 504{
95b94246 505 struct registry_chunk *chunk;
fdee2e6d 506
95b94246 507 cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
c1be8fb9
MD
508 if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
509 continue;
510 if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
511 continue;
512 return chunk;
513 }
514 return NULL;
515}
95b94246 516
c1be8fb9
MD
/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	/*
	 * find_chunk() is expected to succeed for a registered reader;
	 * cleanup_thread() dereferences the chunk unconditionally.
	 */
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(rcu_reader) = NULL;
}
524
/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	/* Block all signals while manipulating the registry. */
	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(rcu_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	rcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	/* Restore the caller's signal mask. */
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}
558
/* Disable signals, take mutex, remove from registry */
static
void rcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	/* Drop the library reference taken by rcu_bp_init(). */
	rcu_bp_exit();
}
581
/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 * Runs as the pthread key destructor; @rcu_key is the thread's
 * struct rcu_reader * stored by add_thread().
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	rcu_bp_unregister(rcu_key);
}
591
d8d9a340
MD
#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
/*
 * Forced sys_membarrier build: the command must be available at
 * runtime, otherwise the process cannot operate — abort.
 */
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
/*
 * Record runtime availability of membarrier private expedited; when
 * unavailable, smp_mb_master() falls back to cmm_smp_mb().
 */
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif
608
3745305b
MD
/*
 * Probe membarrier(2) support. If the private expedited command is
 * advertised by MEMBARRIER_CMD_QUERY, register the process for it
 * (required before first use) and report availability.
 */
static
void rcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	rcu_sys_membarrier_status(available);
}
625
c1be8fb9
MD
/*
 * Reference-counted library initialization: on the first reference,
 * create the thread-exit pthread key and probe membarrier(2) support.
 * Called both from the constructor and from early thread registration
 * (rcu_bp_register()), so it must be idempotent under init_lock.
 */
static
void rcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!rcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		rcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}
642
/*
 * Drop one library reference; on the last one, unmap every registry
 * chunk and delete the thread-exit key. Runs as the destructor and
 * from rcu_bp_unregister().
 */
static
void rcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--rcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			/* Mapping length = header + data area. */
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}
4cf1675f
MD
663
/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure fork() does not race with a concurrent thread executing with
 * any of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	/* Restored by the matching after-fork handlers. */
	saved_fork_signal_mask = oldmask;
}

/*
 * Parent side of the fork handlers: release the locks taken in
 * rcu_bp_before_fork() and restore the saved signal mask.
 */
void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
695
c1be8fb9
MD
696/*
697 * Prune all entries from registry except our own thread. Fits the Linux
731ccb96 698 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
c1be8fb9
MD
699 */
700static
701void urcu_bp_prune_registry(void)
702{
703 struct registry_chunk *chunk;
704 struct rcu_reader *rcu_reader_reg;
705
706 cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
707 for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
708 rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
709 rcu_reader_reg++) {
710 if (!rcu_reader_reg->alloc)
711 continue;
712 if (rcu_reader_reg->tid == pthread_self())
713 continue;
714 cleanup_thread(chunk, rcu_reader_reg);
715 }
716 }
717}
718
4cf1675f
MD
/*
 * Child side of the fork handlers: drop every registry entry except the
 * forking thread's (only it survives fork), then release the locks
 * taken in rcu_bp_before_fork() and restore the saved signal mask.
 */
void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
5e77fc1f 731
9b7981bb
MD
/* Out-of-line rcu_dereference for non-LGPL callers. */
void *rcu_dereference_sym_bp(void *p)
{
	return _rcu_dereference(p);
}

/* Out-of-line rcu_set_pointer: publish @v into *@p after a write barrier. */
void *rcu_set_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

/* Out-of-line rcu_xchg_pointer: atomic exchange, preceded by a write barrier. */
void *rcu_xchg_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

/* Out-of-line rcu_cmpxchg_pointer: compare-and-swap, preceded by a write barrier. */
void *rcu_cmpxchg_pointer_sym_bp(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
755
5e6b23a6 756DEFINE_RCU_FLAVOR(rcu_flavor);
541d828d 757
5e77fc1f 758#include "urcu-call-rcu-impl.h"
0376e7b2 759#include "urcu-defer-impl.h"
This page took 0.122987 seconds and 4 git commands to generate.