+ mutex_unlock(&rcu_defer_mutex);
+}
+
+/*
+ * _defer_rcu - Queue an RCU callback.
+ */
+void _defer_rcu(void (*fct)(void *p), void *p)
+{
+ unsigned long head, tail;
+
+ /*
+ * Head is only modified by ourselves. Tail can be modified by the
+ * reclamation thread.
+ */
+ head = defer_queue.head;
+ tail = LOAD_SHARED(defer_queue.tail);
+
+ /*
+ * If the queue is full, or the sync flag is set, empty the queue
+ * ourselves.
+ * Worst case: we must allow 2 supplementary entries for the fct pointer.
+ */
+ if (unlikely(sync || (head - tail >= DEFER_QUEUE_SIZE - 2))) {
+ assert(head - tail <= DEFER_QUEUE_SIZE);
+ rcu_defer_barrier_thread();
+ assert(head - LOAD_SHARED(defer_queue.tail) == 0);
+ }
+
+ if (unlikely(defer_queue.last_fct_in != fct)) {
+ defer_queue.last_fct_in = fct;
+ if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
+ /*
+ * If the function pointer to encode is not aligned, or equals
+ * the marker, write DQ_FCT_MARK followed by the function
+ * pointer.
+ */
+ _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ DQ_FCT_MARK);
+ _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ fct);
+ } else {
+ DQ_SET_FCT_BIT(fct);
+ _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ fct);
+ }
+ } else {
+ if (unlikely(DQ_IS_FCT_BIT(p) || p == DQ_FCT_MARK)) {
+ /*
+ * If the data pointer to encode is not aligned, or equals the
+ * marker, write DQ_FCT_MARK followed by the function pointer.
+ */
+ _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ DQ_FCT_MARK);
+ _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ fct);
+ }
+ }
+ _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+ smp_wmb(); /* Publish new pointer before head */
+ /* Write q[] before head. */
+ STORE_SHARED(defer_queue.head, head);
+ smp_mb(); /* Write queue head before read futex */
+ /*
+ * Wake-up any waiting defer thread.
+ */
+ wake_up_defer();