// SPDX-FileCopyrightText: 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * Userspace RCU library - batch memory reclamation with kernel API
 */
22 #include "compat-getcpu.h"
23 #include <urcu/assert.h>
24 #include <urcu/wfcqueue.h>
25 #include <urcu/call-rcu.h>
26 #include <urcu/pointer.h>
27 #include <urcu/list.h>
28 #include <urcu/futex.h>
29 #include <urcu/tls-compat.h>
32 #include "urcu-utils.h"
33 #include "compat-smp.h"
#define SET_AFFINITY_CHECK_PERIOD		(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK		(SET_AFFINITY_CHECK_PERIOD - 1)
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires to touch the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};
struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};
enum crdf_flags {
	CRDF_FLAG_JOIN_THREAD = (1 << 0),
};
/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/* If a given thread does not have its own call_rcu thread, this is default. */

static struct call_rcu_data *default_call_rcu_data;
static struct urcu_atfork *registered_rculfhash_atfork;
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))
/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is a RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as a
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long cpus_array_len;
static void cpus_array_len_reset(void)
{
	cpus_array_len = 0;
}
/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (cpus_array_len != 0)
		return;
	cpus_array_len = get_possible_cpus_array_len();
	if (cpus_array_len <= 0)
		return;
	p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}
#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long cpus_array_len = -1;

static void cpus_array_len_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}
/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}
/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#ifdef HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);

	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
{
	return 0;
}
#endif
static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	while (uatomic_read(&crdp->futex) == -1) {
		if (!futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior queued wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	while (uatomic_read(&completion->futex) == -1) {
		if (!futex_async(&completion->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior queued wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}
/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;
	sigset_t newmask, oldmask;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	rcu_set_pointer(crdpp, crdp);

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);

	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || cpus_array_len <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}
/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || cpus_array_len <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.
 *
 * The call to this function with intent to use the returned
 * call_rcu_data should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	struct call_rcu_data *crdp;

	crdp = rcu_dereference(default_call_rcu_data);
	if (crdp != NULL)
		return crdp;

	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data == NULL)
		call_rcu_data_init(&default_call_rcu_data, 0, -1);
	crdp = default_call_rcu_data;
	call_rcu_unlock(&call_rcu_mutex);

	return crdp;
}
/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (cpus_array_len > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}
/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}
/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
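
/*
 * Usage sketch (illustration only): switch the current thread to a
 * dedicated real-time call_rcu thread, as mentioned above.
 *
 *	struct call_rcu_data *crdp;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	if (crdp != NULL)
 *		set_thread_call_rcu_data(crdp);
 */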
/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (cpus_array_len <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < cpus_array_len; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by other thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
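
/*
 * Pairing sketch (illustration only): spawn the per-CPU workers at
 * program start and tear them down before exit, as documented above.
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		perror("create_all_cpu_call_rcu_data");
 *	...
 *	rcu_barrier();
 *	free_all_cpu_call_rcu_data();
 */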
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}
static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}
/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
static
void _call_rcu_data_free(struct call_rcu_data *crdp, unsigned int flags)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_lock(&call_rcu_mutex);
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		call_rcu_unlock(&call_rcu_mutex);
		/* Create default call rcu data if need be. */
		/* CBs queued here will be handed to the default list. */
		(void) get_default_call_rcu_data();
		call_rcu_lock(&call_rcu_mutex);
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	if (flags & CRDF_FLAG_JOIN_THREAD) {
		int ret;

		ret = pthread_join(get_call_rcu_thread(crdp), NULL);
		if (ret)
			urcu_die(ret);
	}
	free(crdp);
}

void call_rcu_data_free(struct call_rcu_data *crdp)
{
	_call_rcu_data_free(crdp, CRDF_FLAG_JOIN_THREAD);
}
/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (cpus_array_len <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * cpus_array_len);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}
static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}
static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}
/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling a rcu_barrier() within a RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(sizeof(*completion), 1);
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(sizeof(*work), 1);
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for the barrier callbacks to be invoked. */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	call_rcu_lock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->before_fork(atfork->priv);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}
/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_parent(atfork->priv);
	call_rcu_unlock(&call_rcu_mutex);
}
/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;
	struct urcu_atfork *atfork;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_child(atfork->priv);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	cpus_array_len_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		/*
		 * Do not join the thread because it does not exist in
		 * the child.
		 */
		_call_rcu_data_free(crdp, 0);
	}
}
void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
	if (CMM_LOAD_SHARED(registered_rculfhash_atfork))
		return;
	call_rcu_lock(&call_rcu_mutex);
	if (!registered_rculfhash_atfork)
		registered_rculfhash_atfork = atfork;
	call_rcu_unlock(&call_rcu_mutex);
}
/*
 * This unregistration function is deprecated, meant only for internal
 * use by rculfhash.
 */
__attribute__((__noreturn__))
void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
{
	urcu_die(EPERM);
}
/*
 * Teardown the default call_rcu worker thread if there are no queued
 * callbacks on process exit. This prevents leaking memory.
 *
 * Here is how an application can ensure graceful teardown of this
 * worker thread:
 *
 * - An application queuing call_rcu callbacks should invoke
 *   rcu_barrier() before it exits.
 * - When chaining call_rcu callbacks, the number of calls to
 *   rcu_barrier() on application exit must match at least the maximum
 *   number of chained callbacks.
 * - If an application chains callbacks endlessly, it would have to be
 *   modified to stop chaining callbacks when it detects an application
 *   exit (e.g. with a flag), and wait for quiescence with rcu_barrier()
 *   after setting that flag.
 * - The statements above apply to a library which queues call_rcu
 *   callbacks, only it needs to invoke rcu_barrier in its library
 *   destructor.
 *
 * Note that this function does not presume it is being called when the
 * application is single-threaded even though this is invoked from a
 * destructor: this function synchronizes against concurrent calls to
 * get_default_call_rcu_data().
 */
static void urcu_call_rcu_exit(void)
{
	struct call_rcu_data *crdp;
	bool teardown = true;

	if (default_call_rcu_data == NULL)
		return;
	call_rcu_lock(&call_rcu_mutex);
	/*
	 * If the application leaves callbacks in the default call_rcu
	 * worker queue, keep the default worker in place.
	 */
	crdp = default_call_rcu_data;
	if (!crdp) {
		teardown = false;
		goto unlock;
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		teardown = false;
		goto unlock;
	}
	rcu_set_pointer(&default_call_rcu_data, NULL);
unlock:
	call_rcu_unlock(&call_rcu_mutex);
	if (teardown) {
		synchronize_rcu();
		call_rcu_data_free(crdp);
	}
}