Fix: don't wait after completion of a work queue job batch
[urcu.git] / src / workqueue.c
index 17ea835779b8bed4f4ab77fc70b0c5ba7d6ca8a3..39d0e077acc101b5e1588c9a1fae954050bd134c 100644
@@ -37,7 +37,6 @@
 
 #include "compat-getcpu.h"
 #include "urcu/wfcqueue.h"
-#include "urcu-call-rcu.h"
 #include "urcu-pointer.h"
 #include "urcu/list.h"
 #include "urcu/futex.h"
 struct urcu_workqueue {
        /*
         * We do not align head on a different cache-line than tail
-        * mainly because call_rcu callback-invocation threads use
-        * batching ("splice") to get an entire list of callbacks, which
-        * effectively empties the queue, and requires to touch the tail
-        * anyway.
+        * mainly because workqueue threads use batching ("splice") to
+        * get an entire list of callbacks, which effectively empties
+        * the queue, and requires to touch the tail anyway.
         */
        struct cds_wfcq_tail cbs_tail;
        struct cds_wfcq_head cbs_head;
@@ -223,11 +221,11 @@ static void *workqueue_thread(void *arg)
                        cbcount = 0;
                        __cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
                                        &cbs_tmp_tail, cbs, cbs_tmp_n) {
-                               struct rcu_head *rhp;
+                               struct urcu_work *uwp;
 
-                               rhp = caa_container_of(cbs,
-                                       struct rcu_head, next);
-                               rhp->func(rhp);
+                               uwp = caa_container_of(cbs,
+                                       struct urcu_work, next);
+                               uwp->func(uwp);
                                cbcount++;
                        }
                        uatomic_sub(&workqueue->qlen, cbcount);
@@ -240,15 +238,12 @@ static void *workqueue_thread(void *arg)
                        if (cds_wfcq_empty(&workqueue->cbs_head,
                                        &workqueue->cbs_tail)) {
                                futex_wait(&workqueue->futex);
-                               (void) poll(NULL, 0, 10);
                                uatomic_dec(&workqueue->futex);
                                /*
                                 * Decrement futex before reading
-                                * call_rcu list.
+                                * urcu_work list.
                                 */
                                cmm_smp_mb();
-                       } else {
-                               (void) poll(NULL, 0, 10);
                        }
                } else {
                        (void) poll(NULL, 0, 10);
@@ -258,7 +253,7 @@ static void *workqueue_thread(void *arg)
        }
        if (!rt) {
                /*
-                * Read call_rcu list before write futex.
+                * Read urcu_work list before write futex.
                 */
                cmm_smp_mb();
                uatomic_set(&workqueue->futex, 0);
@@ -309,7 +304,7 @@ struct urcu_workqueue *urcu_workqueue_create(unsigned long flags,
 
 static void wake_worker_thread(struct urcu_workqueue *workqueue)
 {
-       if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_CALL_RCU_RT))
+       if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_WORKQUEUE_RT))
                futex_wake_up(&workqueue->futex);
 }
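
The behavioural change is easier to see outside the diff. The sketch below is not the liburcu code: it is a minimal stand-in worker loop using pthreads, with a mutex/condvar in place of the futex and a plain FIFO list in place of the wfcqueue, and every toy_* name is made up for illustration. It shows the pattern after this fix: splice out the whole queue as one batch, run every work item, then go straight back to checking for more work instead of sleeping 10 ms once the batch completes.

/*
 * Minimal stand-in for the patched worker loop.  This is NOT the liburcu
 * code: pthreads replace the futex, a plain FIFO list replaces the
 * wfcqueue, and shutdown handling is omitted.  All toy_* names are
 * illustrative only.
 */
#include <pthread.h>
#include <stddef.h>

struct toy_work {
    struct toy_work *next;
    void (*func)(struct toy_work *work);
};

struct toy_queue {
    pthread_mutex_t lock;
    pthread_cond_t wake;            /* stands in for the futex */
    struct toy_work *head, **tailp;
};

static void toy_queue_init(struct toy_queue *q)
{
    pthread_mutex_init(&q->lock, NULL);
    pthread_cond_init(&q->wake, NULL);
    q->head = NULL;
    q->tailp = &q->head;
}

/* Enqueue a work item and wake the worker (cf. wake_worker_thread()). */
static void toy_queue_work(struct toy_queue *q, struct toy_work *w,
        void (*func)(struct toy_work *work))
{
    w->func = func;
    w->next = NULL;
    pthread_mutex_lock(&q->lock);
    *q->tailp = w;
    q->tailp = &w->next;
    pthread_cond_signal(&q->wake);
    pthread_mutex_unlock(&q->lock);
}

/*
 * Worker loop after the fix: take the whole batch, run every item, then
 * go straight back to checking/waiting -- no 10 ms poll() once the batch
 * is done.
 */
static void *toy_worker(void *arg)
{
    struct toy_queue *q = arg;

    for (;;) {
        struct toy_work *batch;

        pthread_mutex_lock(&q->lock);
        while (!q->head)
            pthread_cond_wait(&q->wake, &q->lock);
        batch = q->head;            /* empty the queue in one go */
        q->head = NULL;
        q->tailp = &q->head;
        pthread_mutex_unlock(&q->lock);

        while (batch) {
            struct toy_work *w = batch;

            batch = w->next;
            w->func(w);             /* invoke the work callback */
        }
        /*
         * Loop straight back to the wait: any work queued while the
         * batch ran is picked up immediately instead of after an
         * extra sleep.
         */
    }
    return NULL;                    /* not reached; shutdown omitted */
}

In the real workqueue_thread() the batch is taken with a wfcqueue splice and the idle path uses futex_wait()/futex_wake_up() together with the cmm_smp_mb() barriers shown in the hunks above; the fix only drops the unconditional poll(NULL, 0, 10) that used to run after a completed batch and after a futex wake-up, leaving the 10 ms sleep to the URCU_WORKQUEUE_RT polling path alone.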
 