  * Must be called after Q.S. is reached.
  */
 static void rcu_defer_barrier_queue(struct defer_queue *queue,
 				    unsigned long head)
 {
 	unsigned long i;
+	void (*fct)(void *p);
+	void *p;
 
 	/*
 	 * Tail is only modified when lock is held.
 	 * Head is only modified by owner thread.
 	 */
 
-	for (i = queue->tail; i != head; i++) {
+	for (i = queue->tail; i != head;) {
 		smp_rmb();	/* read head before q[]. */
-		free(LOAD_SHARED(queue->q[i & DEFER_QUEUE_MASK]));
+		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+		if (unlikely(DQ_IS_FCT_BIT(p))) {
+			/* Tagged entry: new callback function pointer. */
+			DQ_CLEAR_FCT_BIT(p);
+			queue->last_fct_out = p;
+			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+		} else if (unlikely(p == DQ_FCT_MARK)) {
+			/* Escape marker: next slot holds the untagged function. */
+			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+			queue->last_fct_out = p;
+			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+		}
+		fct = queue->last_fct_out;
+		fct(p);
 	}
 	smp_mb();	/* push tail after having used q[] */
 	STORE_SHARED(queue->tail, i);
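
The decode loop above relies on a pointer-tagging scheme defined in the urcu-defer headers. The following is a minimal, self-contained sketch of what the DQ_* helpers amount to, assuming callback functions are at least 2-byte aligned so the low address bit is free; the names match the identifiers used above, but the exact upstream definitions may differ:

#include <assert.h>

/* Low bit of a queue slot flags "this entry is a new callback function". */
#define DQ_FCT_BIT		(1UL << 0)
#define DQ_IS_FCT_BIT(x)	((unsigned long)(x) & DQ_FCT_BIT)
#define DQ_SET_FCT_BIT(x)	\
	((x) = (void *)((unsigned long)(x) | DQ_FCT_BIT))
#define DQ_CLEAR_FCT_BIT(x)	\
	((x) = (void *)((unsigned long)(x) & ~DQ_FCT_BIT))
/*
 * Escape marker for the rare function whose address already has the low
 * bit set (or equals the marker itself): the next slot then holds the
 * function pointer untagged.
 */
#define DQ_FCT_MARK		((void *)(~DQ_FCT_BIT))

static void dummy_cb(void *p)
{
	(void)p;
}

int main(void)
{
	/* Function-pointer to void * conversion: fine on POSIX systems. */
	void *p = (void *)dummy_cb;

	assert(!DQ_IS_FCT_BIT(p));	/* aligned, so taggable in place */
	DQ_SET_FCT_BIT(p);
	assert(DQ_IS_FCT_BIT(p));
	DQ_CLEAR_FCT_BIT(p);
	assert(p == (void *)dummy_cb);	/* round-trips losslessly */
	return 0;
}
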
 static void _rcu_defer_barrier_thread(void)
 {
-	unsigned long head;
+	unsigned long head, num_items;
 
 	head = defer_queue.head;
+	num_items = head - defer_queue.tail;
+	if (unlikely(!num_items))
+		return;	/* Nothing queued: skip the grace period. */
 	synchronize_rcu();
 	rcu_defer_barrier_queue(&defer_queue, head);
 }
 	internal_urcu_unlock(&urcu_defer_mutex);
 }
+/*
+ * rcu_defer_barrier - Execute all queued RCU callbacks.
+ *
+ * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
+ * All callbacks queued by the local thread prior to an rcu_defer_barrier()
+ * call are guaranteed to be executed.
+ * Callbacks queued by other threads concurrently with rcu_defer_barrier()
+ * execution are not guaranteed to be executed in the current batch (they
+ * may be left for the next batch). Such callbacks are guaranteed to run
+ * only if there is explicit synchronization between the thread adding to
+ * the queue and the thread issuing the rcu_defer_barrier() call.
+ */
 void rcu_defer_barrier(void)
 {
 	struct deferer_registry *index;
+	unsigned long num_items = 0;
 
 	if (!registry)
 		return;
 
 	internal_urcu_lock(&urcu_defer_mutex);
-	for (index = registry; index < registry + num_deferers; index++)
+	for (index = registry; index < registry + num_deferers; index++) {
 		index->last_head = LOAD_SHARED(index->defer_queue->head);
+		num_items += index->last_head - index->defer_queue->tail;
+	}
+	if (likely(!num_items)) {
+		/*
+		 * We skip the grace period because there are no queued
+		 * callbacks to execute.
+		 */
+		goto end;
+	}
 	synchronize_rcu();
 	for (index = registry; index < registry + num_deferers; index++)
 		rcu_defer_barrier_queue(index->defer_queue,
 					index->last_head);
+end:
 	internal_urcu_unlock(&urcu_defer_mutex);
 }
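
To make the new calling convention concrete, here is a sketch of an updater using the API after this change. The node type and the use of free() as the callback are illustrative, and threads are assumed to have registered with the defer mechanism beforehand (rcu_defer_register_thread() in today's liburcu; the registration names at the time of this patch may differ, and rcu_defer_queue() itself is known as defer_rcu() in later releases):

#include <stdlib.h>
#include <urcu.h>		/* rcu_assign_pointer(), synchronize_rcu() */
#include <urcu-defer.h>		/* rcu_defer_queue(), rcu_defer_barrier() */

struct node {
	int value;
};

static struct node *shared_node;	/* read under rcu_read_lock() */

/* Updater: publish a replacement, defer reclamation of the old node. */
static void replace_node(struct node *new_node)
{
	struct node *old = shared_node;

	rcu_assign_pointer(shared_node, new_node);
	/*
	 * Instead of paying for synchronize_rcu() on every update, queue
	 * the callback; one grace period is amortized over the batch.
	 */
	rcu_defer_queue(free, old);
}

/* E.g. at shutdown: force execution of everything queued so far. */
static void drain_callbacks(void)
{
	rcu_defer_barrier();
}
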
  * library wrappers to be used by non-LGPL compatible source code.
  */
 
-void rcu_defer_queue(void *p)
+void rcu_defer_queue(void (*fct)(void *p), void *p)
 {
-	_rcu_defer_queue(p);
+	_rcu_defer_queue(fct, p);
 }
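
The wrapper forwards to the inline _rcu_defer_queue(), which this excerpt does not show. Its enqueue side produces exactly the encoding that rcu_defer_barrier_queue() decodes above. A simplified sketch of the interesting part follows, reusing the DQ_* helpers sketched earlier; the per-thread queue is modeled with a last_fct_in field mirroring last_fct_out, and the queue-full path and memory barriers are reduced to comments:

#define DEFER_QUEUE_SIZE	(1UL << 12)	/* assumed; a power of two */
#define DEFER_QUEUE_MASK	(DEFER_QUEUE_SIZE - 1)

struct defer_queue_sketch {
	unsigned long head;		/* written by owner thread only */
	unsigned long tail;		/* advanced by the barrier */
	void (*last_fct_in)(void *);	/* last function enqueued */
	void *q[DEFER_QUEUE_SIZE];
};

static void defer_enqueue(struct defer_queue_sketch *dq,
			  void (*fct)(void *), void *p)
{
	unsigned long head = dq->head;
	void *fp = (void *)fct;	/* fn-ptr to void *: POSIX-sanctioned */

	if (dq->last_fct_in != fct) {
		/* Function changed since the last entry: encode it. */
		dq->last_fct_in = fct;
		if (DQ_IS_FCT_BIT(fp) || fp == DQ_FCT_MARK) {
			/* Untaggable address: escape with an explicit marker. */
			dq->q[head++ & DEFER_QUEUE_MASK] = DQ_FCT_MARK;
			dq->q[head++ & DEFER_QUEUE_MASK] = fp;
		} else {
			/* Common case: tag the low bit in place. */
			DQ_SET_FCT_BIT(fp);
			dq->q[head++ & DEFER_QUEUE_MASK] = fp;
		}
	}
	dq->q[head++ & DEFER_QUEUE_MASK] = p;
	/* Real code: smp_wmb() here, then STORE_SHARED(head). */
	dq->head = head;
}

With this layout, a run of callbacks sharing the same function costs one queue slot each: only the data pointer is stored, which is why the decode loop falls through to fct = queue->last_fct_out when it sees neither a tagged pointer nor the marker.
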
 static void rcu_add_deferer(pthread_t id)
 	registry[num_deferers].tid = id;
 	/* reference to the TLS of _this_ deferer thread. */
 	registry[num_deferers].defer_queue = &defer_queue;
+	registry[num_deferers].last_head = 0;
 	num_deferers++;
 }
 			sizeof(struct deferer_registry));
 	registry[num_deferers - 1].tid = 0;
 	registry[num_deferers - 1].defer_queue = NULL;
+	registry[num_deferers - 1].last_head = 0;
 	num_deferers--;
 	return;
 }