+// SPDX-FileCopyrightText: 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+// SPDX-FileCopyrightText: 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
/*
- * workqueue.c
- *
* Userspace RCU library - Userspace workqeues
- *
- * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
- * Copyright (c) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
-#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sched.h>
#include "compat-getcpu.h"
-#include "urcu/wfcqueue.h"
-#include "urcu-call-rcu.h"
-#include "urcu-pointer.h"
-#include "urcu/list.h"
-#include "urcu/futex.h"
-#include "urcu/tls-compat.h"
-#include "urcu/ref.h"
+#include <urcu/assert.h>
+#include <urcu/wfcqueue.h>
+#include <urcu/pointer.h>
+#include <urcu/list.h>
+#include <urcu/futex.h>
+#include <urcu/tls-compat.h>
+#include <urcu/ref.h>
#include "urcu-die.h"
#include "workqueue.h"
struct urcu_workqueue {
/*
* We do not align head on a different cache-line than tail
- * mainly because call_rcu callback-invocation threads use
- * batching ("splice") to get an entire list of callbacks, which
- * effectively empties the queue, and requires to touch the tail
- * anyway.
+ * mainly because workqueue threads use batching ("splice") to
+ * get an entire list of callbacks, which effectively empties
+ * the queue, and requires to touch the tail anyway.
*/
struct cds_wfcq_tail cbs_tail;
struct cds_wfcq_head cbs_head;
* Losing affinity can be caused by CPU hotunplug/hotplug, or by
* cpuset(7).
*/
-#if HAVE_SCHED_SETAFFINITY
+#ifdef HAVE_SCHED_SETAFFINITY
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
cpu_set_t mask;
CPU_ZERO(&mask);
CPU_SET(workqueue->cpu_affinity, &mask);
-#if SCHED_SETAFFINITY_ARGS == 2
- ret = sched_setaffinity(0, &mask);
-#else
ret = sched_setaffinity(0, sizeof(mask), &mask);
-#endif
+
/*
* EINVAL is fine: can be caused by hotunplugged CPUs, or by
* cpuset(7). This is why we should always retry if we detect
return ret;
}
#else
-static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
+static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue __attribute__((unused)))
{
return 0;
}
{
/* Read condition before read futex */
cmm_smp_mb();
- if (uatomic_read(futex) != -1)
- return;
- while (futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
+ while (uatomic_read(futex) == -1) {
+ if (!futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
+ /*
+ * Prior wakeups queued by unrelated code
+ * using the same address can cause futex wait to
+ * return 0 even though the futex value is still
+ * -1 (spurious wakeups). Check the value again
+ * in user-space to validate whether it really
+ * differs from -1.
+ */
+ continue;
+ }
switch (errno) {
- case EWOULDBLOCK:
+ case EAGAIN:
/* Value already changed. */
return;
case EINTR:
/* Retry if interrupted by signal. */
- break; /* Get out of switch. */
+ break; /* Get out of switch. Check again. */
default:
/* Unexpected error. */
urcu_die(errno);
cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
&cbs_tmp_tail, &workqueue->cbs_head, &workqueue->cbs_tail);
- assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
- assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
+ urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
+ urcu_posix_assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
if (workqueue->grace_period_fct)
workqueue->grace_period_fct(workqueue, workqueue->priv);
cbcount = 0;
__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
&cbs_tmp_tail, cbs, cbs_tmp_n) {
- struct rcu_head *rhp;
+ struct urcu_work *uwp;
- rhp = caa_container_of(cbs,
- struct rcu_head, next);
- rhp->func(rhp);
+ uwp = caa_container_of(cbs,
+ struct urcu_work, next);
+ uwp->func(uwp);
cbcount++;
}
uatomic_sub(&workqueue->qlen, cbcount);
if (cds_wfcq_empty(&workqueue->cbs_head,
&workqueue->cbs_tail)) {
futex_wait(&workqueue->futex);
- (void) poll(NULL, 0, 10);
uatomic_dec(&workqueue->futex);
/*
* Decrement futex before reading
- * call_rcu list.
+ * urcu_work list.
*/
cmm_smp_mb();
- } else {
- (void) poll(NULL, 0, 10);
}
} else {
- (void) poll(NULL, 0, 10);
+ if (cds_wfcq_empty(&workqueue->cbs_head,
+ &workqueue->cbs_tail)) {
+ (void) poll(NULL, 0, 10);
+ }
}
if (workqueue->worker_after_wake_up_fct)
workqueue->worker_after_wake_up_fct(workqueue, workqueue->priv);
}
if (!rt) {
/*
- * Read call_rcu list before write futex.
+ * Read urcu_work list before write futex.
*/
cmm_smp_mb();
uatomic_set(&workqueue->futex, 0);
{
struct urcu_workqueue *workqueue;
int ret;
+ sigset_t newmask, oldmask;
workqueue = malloc(sizeof(*workqueue));
if (workqueue == NULL)
workqueue->cpu_affinity = cpu_affinity;
workqueue->loop_count = 0;
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
+
+ ret = sigfillset(&newmask);
+ urcu_posix_assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ urcu_posix_assert(!ret);
+
ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
if (ret) {
urcu_die(ret);
}
+
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ urcu_posix_assert(!ret);
+
return workqueue;
}
static void wake_worker_thread(struct urcu_workqueue *workqueue)
{
- if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_CALL_RCU_RT))
+ if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_WORKQUEUE_RT))
futex_wake_up(&workqueue->futex);
}
if (urcu_workqueue_destroy_worker(workqueue)) {
urcu_die(errno);
}
- assert(cds_wfcq_empty(&workqueue->cbs_head, &workqueue->cbs_tail));
+ urcu_posix_assert(cds_wfcq_empty(&workqueue->cbs_head, &workqueue->cbs_tail));
free(workqueue);
}
void urcu_workqueue_create_worker(struct urcu_workqueue *workqueue)
{
int ret;
+ sigset_t newmask, oldmask;
/* Clear workqueue state from parent. */
workqueue->flags &= ~URCU_WORKQUEUE_PAUSED;
workqueue->flags &= ~URCU_WORKQUEUE_PAUSE;
workqueue->tid = 0;
+
+ ret = sigfillset(&newmask);
+ urcu_posix_assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ urcu_posix_assert(!ret);
+
ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
if (ret) {
urcu_die(ret);
}
+
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ urcu_posix_assert(!ret);
}