#define URCU_LFQ_PERMANENT_REF 128
-void _rcu_lfq_node_init(struct rcu_lfq_node *node)
+void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
{
node->next = NULL;
urcu_ref_init(&node->ref);
}
-void _rcu_lfq_init(struct rcu_lfq_queue *q)
+void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
+ void (*release)(struct urcu_ref *ref))
{
- _rcu_lfq_node_init(&q->init);
+ _cds_lfq_node_init_rcu(&q->init);
/* Make sure the initial node is never freed. */
urcu_ref_set(&q->init.ref, URCU_LFQ_PERMANENT_REF);
q->head = q->tail = &q->init;
+ q->release = release;
}
-void _rcu_lfq_enqueue(struct rcu_lfq_queue *q, struct rcu_lfq_node *node)
+/*
+ * Should be called under rcu read lock critical section.
+ */
+void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
+ struct cds_lfq_node_rcu *node)
{
urcu_ref_get(&node->ref);
+ node->queue = q;
/*
 * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
 * node before publication.
*/
for (;;) {
- struct rcu_lfq_node *tail, *next;
+ struct cds_lfq_node_rcu *tail, *next;
- rcu_read_lock();
tail = rcu_dereference(q->tail);
/*
* Typically expect tail->next to be NULL.
* Now move tail (another enqueue might beat
* us to it, that's fine).
*/
- uatomic_cmpxchg(&q->tail, tail, node);
- rcu_read_unlock();
+ (void) uatomic_cmpxchg(&q->tail, tail, node);
return;
} else {
/*
* Failure to append to current tail. Help moving tail
* further and retry.
*/
- uatomic_cmpxchg(&q->tail, tail, next);
- rcu_read_unlock();
+ (void) uatomic_cmpxchg(&q->tail, tail, next);
continue;
}
}
}
/*
- * The entry returned by dequeue must be taken care of by doing a urcu_ref_put,
- * which calls the release primitive when the reference count drops to zero. A
- * grace period must be waited after execution of the release callback before
- * performing the actual memory reclamation or modifying the rcu_lfq_node
- * structure.
+ * Should be called under rcu read lock critical section.
+ *
+ * The entry returned by dequeue must be taken care of by doing a
+ * urcu_ref_put(), whose release handler should do a call_rcu().
+ *
* In other words, the entry lfq node returned by dequeue must not be
* modified/re-used/freed until the reference count reaches zero and a grace
- * period has elapsed (after the refcount reached 0).
+ * period has elapsed.
*/
-struct rcu_lfq_node *
-_rcu_lfq_dequeue(struct rcu_lfq_queue *q, void (*release)(struct urcu_ref *))
+struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
for (;;) {
- struct rcu_lfq_node *head, *next;
+ struct cds_lfq_node_rcu *head, *next;
- rcu_read_lock();
head = rcu_dereference(q->head);
next = rcu_dereference(head->next);
if (next) {
if (uatomic_cmpxchg(&q->head, head, next) == head) {
- rcu_read_unlock();
- urcu_ref_put(&head->ref, release);
+ urcu_ref_put(&head->ref, q->release);
return next;
} else {
/* Concurrently pushed, retry */
- rcu_read_unlock();
continue;
}
} else {
/* Empty */
- rcu_read_unlock();
return NULL;
}
}