/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC					\
	sizeof(struct registry_chunk)				\
	+ INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int lttng_ust_urcu_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void _lttng_ust_urcu_init(void)
	__attribute__((constructor));
static
void lttng_ust_urcu_exit(void)
	__attribute__((destructor));

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace period wait: it
 * is sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};
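
/*
 * Informal layout sketch (illustrative only): each chunk is a single
 * mmap()ed region, with the registry_chunk header followed by an array
 * of lttng_ust_urcu_reader slots stored in data[]:
 *
 *   [ registry_chunk | reader slot 0 | reader slot 1 | ... ]
 *
 * Chunks are linked through registry_arena.chunk_list and, once mapped,
 * are never moved (and only unmapped when the library refcount drops to
 * zero), so reader threads can safely keep pointers into them.
 */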

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data structure
	 * pointed to by the new ptr.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab rcu_registry_lock again
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab rcu_registry_lock again
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
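
/*
 * Informal summary of the grace period scheme above: each reader
 * snapshots lttng_ust_urcu_gp.ctr when entering its outermost read-side
 * critical section. synchronize_rcu() first waits until every
 * registered reader is either quiescent or running against the current
 * counter, flips the LTTNG_UST_URCU_GP_CTR_PHASE bit, then waits again
 * until the remaining readers observe the new phase or become
 * quiescent. After both passes, any read-side critical section that was
 * in progress when synchronize_rcu() was called has completed, so the
 * caller may safely reclaim the data it unpublished beforehand.
 */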

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}
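
/*
 * Illustrative usage sketch (hypothetical caller code, not part of this
 * file): a reader thread registers itself once, brackets its accesses
 * with the wrappers above, and an updater publishes a new pointer and
 * waits for a grace period before freeing the old one. The pointer
 * helpers from <lttng/urcu/pointer.h> are assumed here, and
 * "global_cfg"/"new_cfg" are made-up names:
 *
 *	struct cfg *c, *old;
 *
 *	lttng_ust_urcu_register_thread();
 *	lttng_ust_urcu_read_lock();
 *	c = lttng_ust_rcu_dereference(global_cfg);
 *	if (c)
 *		read_fields_of(c);	// hypothetical helper
 *	lttng_ust_urcu_read_unlock();
 *
 * Updater side:
 *
 *	old = lttng_ust_rcu_xchg_pointer(&global_cfg, new_cfg);
 *	lttng_ust_urcu_synchronize_rcu();
 *	free(old);
 */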

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct lttng_ust_urcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
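
/*
 * Illustrative growth sequence (a sketch; actual sizes are platform
 * dependent): the first chunk is ARENA_INIT_ALLOC bytes, sized for
 * INIT_NR_THREADS reader slots. Once it fills up, expand_arena() first
 * tries to double that mapping in place with mremap(); if that fails
 * (e.g. on non-Linux systems where mremap_wrapper() always returns
 * MAP_FAILED), a new chunk twice the length of the previous one is
 * mmap()ed and appended to chunk_list.
 */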

static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow expanding once per alloc */
	size_t len = sizeof(struct lttng_ust_urcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct lttng_ust_urcu_reader);
}

static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before lttng_ust_urcu constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}

static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
				lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
				+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork makes sure
 * fork() does not race with a concurrent thread executing with any of
 * those locks held. This ensures that the registry and data protected
 * by rcu_gp_lock are in a coherent state in the child.
 */
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
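
/*
 * Illustrative wiring sketch (hypothetical, not done in this file): a
 * process that forks without exec'ing is expected to invoke the three
 * handlers above around fork(), for instance via pthread_atfork() or an
 * equivalent fork() wrapper:
 *
 *	pthread_atfork(lttng_ust_urcu_before_fork,
 *			lttng_ust_urcu_after_fork_parent,
 *			lttng_ust_urcu_after_fork_child);
 *
 * so that rcu_gp_lock, rcu_registry_lock and the reader registry are in
 * a consistent state on both sides of the fork.
 */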