+// SPDX-FileCopyrightText: 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
/*
- * urcu-call-rcu.c
- *
* Userspace RCU library - batch memory reclamation with kernel API
- *
- * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define _LGPL_SOURCE
#include <urcu/ref.h>
#include "urcu-die.h"
#include "urcu-utils.h"
+#include "compat-smp.h"
#define SET_AFFINITY_CHECK_PERIOD (1U << 8) /* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK (SET_AFFINITY_CHECK_PERIOD - 1)
struct call_rcu_completion *completion;
};
+/*
+ * Flag bits for _call_rcu_data_free().
+ *
+ * CRDF_FLAG_JOIN_THREAD: pthread_join() the worker thread after
+ * removing its call_rcu_data. Pass 0 to skip joining, e.g. in the
+ * pthread_atfork child path where the worker thread does not exist.
+ */
+enum crdf_flags {
+ CRDF_FLAG_JOIN_THREAD = (1 << 0),
+};
+
/*
* List of all call_rcu_data structures to keep valgrind happy.
* Protected by call_rcu_mutex.
static struct call_rcu_data *default_call_rcu_data;
static struct urcu_atfork *registered_rculfhash_atfork;
-static unsigned long registered_rculfhash_atfork_refcount;
/*
* If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
*/
static struct call_rcu_data **per_cpu_call_rcu_data;
-static long maxcpus;
+static long cpus_array_len;
+/*
+ * Reset the cached possible-CPUs array length so it is re-evaluated
+ * on the next allocation attempt (the allocator returns early while
+ * cpus_array_len != 0).
+ */
-static void maxcpus_reset(void)
+static void cpus_array_len_reset(void)
{
- maxcpus = 0;
+ cpus_array_len = 0;
}
/* Allocate the array if it has not already been allocated. */
struct call_rcu_data **p;
static int warned = 0;
- if (maxcpus != 0)
+ if (cpus_array_len != 0)
return;
- maxcpus = sysconf(_SC_NPROCESSORS_CONF);
- if (maxcpus <= 0) {
+ cpus_array_len = get_possible_cpus_array_len();
+ if (cpus_array_len <= 0) {
return;
}
- p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
+ p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
if (p != NULL) {
- memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
+ memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
rcu_set_pointer(&per_cpu_call_rcu_data, p);
} else {
if (!warned) {
* constant.
*/
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
-static const long maxcpus = -1;
+static const long cpus_array_len = -1;
+/* No-op variant: without per-CPU data, cpus_array_len is a constant (-1). */
-static void maxcpus_reset(void)
+static void cpus_array_len_reset(void)
{
}
{
struct call_rcu_data *crdp;
int ret;
+ sigset_t newmask, oldmask;
crdp = malloc(sizeof(*crdp));
if (crdp == NULL)
cds_list_add(&crdp->list, &call_rcu_data_list);
crdp->cpu_affinity = cpu_affinity;
crdp->gp_count = 0;
- cmm_smp_mb(); /* Structure initialized before pointer is planted. */
- *crdpp = crdp;
+ rcu_set_pointer(crdpp, crdp);
+
+ ret = sigfillset(&newmask);
+ urcu_posix_assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ urcu_posix_assert(!ret);
+
ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
if (ret)
urcu_die(ret);
+
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ urcu_posix_assert(!ret);
}
/*
pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
if (pcpu_crdp == NULL)
return NULL;
- if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
+ if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
warned = 1;
}
- if (cpu < 0 || maxcpus <= cpu)
+ if (cpu < 0 || cpus_array_len <= cpu)
return NULL;
return rcu_dereference(pcpu_crdp[cpu]);
}
call_rcu_lock(&call_rcu_mutex);
alloc_cpu_call_rcu_data();
- if (cpu < 0 || maxcpus <= cpu) {
+ if (cpu < 0 || cpus_array_len <= cpu) {
if (!warned) {
fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
warned = 1;
/*
* Return a pointer to the default call_rcu_data structure, creating
- * one if need be. Because we never free call_rcu_data structures,
- * we don't need to be in an RCU read-side critical section.
+ * one if need be.
+ *
+ * The call to this function with intent to use the returned
+ * call_rcu_data should be protected by RCU read-side lock.
*/
struct call_rcu_data *get_default_call_rcu_data(void)
{
- if (default_call_rcu_data != NULL)
- return rcu_dereference(default_call_rcu_data);
+ struct call_rcu_data *crdp;
+
+ /* Fast path: the publisher stores with rcu_set_pointer(). */
+ crdp = rcu_dereference(default_call_rcu_data);
+ if (crdp != NULL)
+ return crdp;
+
+ /* Slow path: re-check under the mutex before creating. */
call_rcu_lock(&call_rcu_mutex);
- if (default_call_rcu_data != NULL) {
- call_rcu_unlock(&call_rcu_mutex);
- return default_call_rcu_data;
- }
- call_rcu_data_init(&default_call_rcu_data, 0, -1);
+ if (default_call_rcu_data == NULL)
+ call_rcu_data_init(&default_call_rcu_data, 0, -1);
+ crdp = default_call_rcu_data;
call_rcu_unlock(&call_rcu_mutex);
- return default_call_rcu_data;
+
+ return crdp;
}
/*
if (URCU_TLS(thread_call_rcu_data) != NULL)
return URCU_TLS(thread_call_rcu_data);
- if (maxcpus > 0) {
+ if (cpus_array_len > 0) {
crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
if (crd)
return crd;
call_rcu_lock(&call_rcu_mutex);
alloc_cpu_call_rcu_data();
call_rcu_unlock(&call_rcu_mutex);
- if (maxcpus <= 0) {
+ if (cpus_array_len <= 0) {
errno = EINVAL;
return -EINVAL;
}
errno = ENOMEM;
return -ENOMEM;
}
- for (i = 0; i < maxcpus; i++) {
+ for (i = 0; i < cpus_array_len; i++) {
call_rcu_lock(&call_rcu_mutex);
if (get_cpu_call_rcu_data(i)) {
call_rcu_unlock(&call_rcu_mutex);
* a list corruption bug in the 0.7.x series. The equivalent fix
* appeared in 0.6.8 for the stable-0.6 branch.
*/
-void call_rcu_data_free(struct call_rcu_data *crdp)
+static
+void _call_rcu_data_free(struct call_rcu_data *crdp, unsigned int flags)
{
if (crdp == NULL || crdp == default_call_rcu_data) {
return;
cds_list_del(&crdp->list);
call_rcu_unlock(&call_rcu_mutex);
+ if (flags & CRDF_FLAG_JOIN_THREAD) {
+ int ret;
+
+ ret = pthread_join(get_call_rcu_thread(crdp), NULL);
+ if (ret)
+ urcu_die(ret);
+ }
free(crdp);
}
+/*
+ * Public wrapper: free a call_rcu_data structure, joining its worker
+ * thread (CRDF_FLAG_JOIN_THREAD) before the memory is released.
+ */
+void call_rcu_data_free(struct call_rcu_data *crdp)
+{
+ _call_rcu_data_free(crdp, CRDF_FLAG_JOIN_THREAD);
+}
+
/*
* Clean up all the per-CPU call_rcu threads.
*/
struct call_rcu_data **crdp;
static int warned = 0;
- if (maxcpus <= 0)
+ if (cpus_array_len <= 0)
return;
- crdp = malloc(sizeof(*crdp) * maxcpus);
+ crdp = malloc(sizeof(*crdp) * cpus_array_len);
if (!crdp) {
if (!warned) {
fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
return;
}
- for (cpu = 0; cpu < maxcpus; cpu++) {
+ for (cpu = 0; cpu < cpus_array_len; cpu++) {
crdp[cpu] = get_cpu_call_rcu_data(cpu);
if (crdp[cpu] == NULL)
continue;
* call_rcu_data to become quiescent.
*/
synchronize_rcu();
- for (cpu = 0; cpu < maxcpus; cpu++) {
+ for (cpu = 0; cpu < cpus_array_len; cpu++) {
if (crdp[cpu] == NULL)
continue;
call_rcu_data_free(crdp[cpu]);
goto online;
}
- completion = calloc(sizeof(*completion), 1);
+ completion = calloc(1, sizeof(*completion));
if (!completion)
urcu_die(errno);
cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
struct call_rcu_completion_work *work;
- work = calloc(sizeof(*work), 1);
+ work = calloc(1, sizeof(*work));
if (!work)
urcu_die(errno);
work->completion = completion;
(void)get_default_call_rcu_data();
/* Cleanup call_rcu_data pointers before use */
- maxcpus_reset();
+ cpus_array_len_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
URCU_TLS(thread_call_rcu_data) = NULL;
if (crdp == default_call_rcu_data)
continue;
uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
- call_rcu_data_free(crdp);
+ /*
+ * Do not join the thread because it does not exist in
+ * the child.
+ */
+ _call_rcu_data_free(crdp, 0);
}
}
void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
+ /* Fast path: once a handler is published, later calls are no-ops. */
+ if (CMM_LOAD_SHARED(registered_rculfhash_atfork))
+ return;
call_rcu_lock(&call_rcu_mutex);
- if (registered_rculfhash_atfork_refcount++)
- goto end;
- registered_rculfhash_atfork = atfork;
-end:
+ /* First registration wins; re-check under the mutex. */
+ if (!registered_rculfhash_atfork)
+ registered_rculfhash_atfork = atfork;
call_rcu_unlock(&call_rcu_mutex);
}
+/*
+ * This unregistration function is deprecated, meant only for internal
+ * use by rculfhash.
+ *
+ * Unregistering is no longer supported: calling this aborts the
+ * process via urcu_die(EPERM), hence the noreturn attribute.
+ */
+__attribute__((__noreturn__))
void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
{
+ urcu_die(EPERM);
+}
+
+/*
+ * Teardown the default call_rcu worker thread if there are no queued
+ * callbacks on process exit. This prevents leaking memory.
+ *
+ * Here is how an application can ensure graceful teardown of this
+ * worker thread:
+ *
+ * - An application queuing call_rcu callbacks should invoke
+ * rcu_barrier() before it exits.
+ * - When chaining call_rcu callbacks, the number of calls to
+ * rcu_barrier() on application exit must match at least the maximum
+ * number of chained callbacks.
+ * - If an application chains callbacks endlessly, it would have to be
+ * modified to stop chaining callbacks when it detects an application
+ * exit (e.g. with a flag), and wait for quiescence with rcu_barrier()
+ * after setting that flag.
+ * - The statements above apply to a library which queues call_rcu
+ * callbacks, only it needs to invoke rcu_barrier in its library
+ * destructor.
+ *
+ * Note that this function does not presume it is being called when the
+ * application is single-threaded even though this is invoked from a
+ * destructor: this function synchronizes against concurrent calls to
+ * get_default_call_rcu_data().
+ */
+static void urcu_call_rcu_exit(void)
+{
+ struct call_rcu_data *crdp;
+ bool teardown = true;
+
+ /* Unlocked fast check; the pointer is re-read under the mutex below. */
+ if (default_call_rcu_data == NULL)
+ return;
call_rcu_lock(&call_rcu_mutex);
- if (--registered_rculfhash_atfork_refcount)
- goto end;
- registered_rculfhash_atfork = NULL;
-end:
+ /*
+ * If the application leaves callbacks in the default call_rcu
+ * worker queue, keep the default worker in place.
+ */
+ crdp = default_call_rcu_data;
+ if (!crdp) {
+ teardown = false;
+ goto unlock;
+ }
+ if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
+ teardown = false;
+ goto unlock;
+ }
+ /*
+ * Unpublish the default worker; concurrent callers of
+ * get_default_call_rcu_data() observing NULL may create a new one.
+ */
+ rcu_set_pointer(&default_call_rcu_data, NULL);
+unlock:
call_rcu_unlock(&call_rcu_mutex);
+ if (teardown) {
+ /* Wait for readers that may still reference crdp before freeing. */
+ synchronize_rcu();
+ call_rcu_data_free(crdp);
+ }
+}