Fix: pass private data to context callbacks
lttng-ust.git: liblttng-ust/lttng-ust-urcu.c
/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic: it always fails, and the caller is expected to
 * fall back on allocating a new mapping.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int lttng_ust_urcu_refcount;

/* If the headers do not support the membarrier system call, fall back on cmm_smp_mb(). */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

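/* Subset of the membarrier(2) commands used below; values mirror the kernel UAPI. */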
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void _lttng_ust_urcu_init(void)
	__attribute__((constructor));
static
void lttng_ust_urcu_exit(void)
	__attribute__((destructor));

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace-period wait: it
 * is sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

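/*
 * Issue a memory barrier on behalf of the grace-period write side: use
 * the membarrier private-expedited command when available to order
 * against all running threads of the process, otherwise fall back on a
 * local cmm_smp_mb().
 */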
static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
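/*
 * Adaptive waiting policy: busy-loop (caa_cpu_relax) for the first
 * RCU_QS_ACTIVE_ATTEMPTS iterations, then sleep RCU_SLEEP_DELAY_MS
 * between subsequent scans of the registry.
 */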
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

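/*
 * Wait for a grace period to elapse: readers are waited for in two
 * passes around a grace-period parity flip, until every registered
 * reader has been observed either quiescent or past the parity switch.
 * All signals are blocked while rcu_gp_lock and rcu_registry_lock are
 * held.
 */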
void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr.
	 * Write new ptr before changing the qparity.
	 */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for the other
	 * parity quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress). Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct lttng_ust_urcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

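/*
 * First-fit scan of all chunks for a free reader slot. If none is
 * found, expand the arena once and retry; return NULL if the second
 * scan still fails.
 */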
static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct lttng_ust_urcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

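/*
 * Allocate a registry slot for the calling thread, publish it on the
 * reader registry list and store it in the lttng_ust_urcu_key TLS key
 * so the thread-exit notifier can find it.
 */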
/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct lttng_ust_urcu_reader);
}

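/*
 * Return the chunk whose data area contains rcu_reader_reg, or NULL if
 * the registration does not belong to any chunk of the arena.
 */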
static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

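/*
 * Public registration entry point: registers the calling thread as an
 * RCU reader. Also reached from the read-side slow path (and from
 * lttng_ust_urcu_register_thread()) when the per-thread reader pointer
 * is still NULL.
 */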
/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before the lttng_ust_urcu
	 * constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

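/*
 * Query the kernel for membarrier support. If the private-expedited
 * command is advertised, register the process for it and record the
 * result through lttng_ust_urcu_sys_membarrier_status().
 */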
static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}

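/*
 * Library constructor, also reached through early registration. The
 * first caller creates the thread-exit pthread key and probes for
 * sys_membarrier support; further calls only bump the refcount.
 */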
static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
				lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

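/*
 * Library destructor, also called when the last registered thread
 * unregisters. When the refcount drops to zero, unmap every registry
 * chunk and delete the thread-exit pthread key.
 */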
static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork ensures
 * that fork() does not race with a concurrent thread holding any of
 * those locks. This guarantees that the registry and the data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

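/*
 * In the parent, simply release the locks taken in
 * lttng_ust_urcu_before_fork() and restore the saved signal mask.
 */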
void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from the registry except our own thread, matching
 * the Linux fork() behavior where only the forking thread exists in the
 * child. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

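/*
 * In the child, prune every other thread's registry entry before
 * releasing the locks and restoring the saved signal mask.
 */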
void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}