#ifndef _URCU_WFCQUEUE_STATIC_H
#define _URCU_WFCQUEUE_STATIC_H

/*
 * urcu/static/wfcqueue.h
 *
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. Inspired from
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 *
 * Mutual exclusion of __cds_wfcq_* API
 *
 * Unless otherwise stated, the caller must ensure mutual exclusion of
 * queue update operations "dequeue" and "splice" (for source queue).
 * Queue read operations "first" and "next", which are used by
 * "for_each" iterations, need to be protected against concurrent
 * "dequeue" and "splice" (for source queue) by the caller.
 * "enqueue", "splice" (for destination queue), and "empty" are the only
 * operations that can be used without any mutual exclusion.
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock.
 */
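/*
 * Usage sketch (illustrative only; "myq", "struct job" and process()
 * are hypothetical): a single consumer thread dequeues while any
 * number of threads enqueue concurrently, through the public API
 * declared in urcu/wfcqueue.h:
 *
 *	struct cds_wfcq_head myq_head;
 *	struct cds_wfcq_tail myq_tail;
 *
 *	struct job {
 *		struct cds_wfcq_node node;
 *		int id;
 *	};
 *
 *	cds_wfcq_init(&myq_head, &myq_tail);
 *
 *	// Producer side (any thread, no lock needed):
 *	cds_wfcq_node_init(&job->node);
 *	cds_wfcq_enqueue(&myq_head, &myq_tail, &job->node);
 *
 *	// Consumer side (takes the dequeue lock internally):
 *	struct cds_wfcq_node *n;
 *
 *	n = cds_wfcq_dequeue_blocking(&myq_head, &myq_tail);
 *	if (n)
 *		process(caa_container_of(n, struct job, node));
 */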
#define WFCQ_ADAPT_ATTEMPTS	10	/* Retry if being set */
#define WFCQ_WAIT		10	/* Wait 10 ms if being set */
/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	node->next = NULL;
}
/*
 * cds_wfcq_init: initialize wait-free queue.
 */
static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
	ret = pthread_mutex_init(&head->lock, NULL);
	assert(!ret);
}
/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline bool _cds_wfcq_empty(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/*
	 * Queue is empty if no node is pointed to by head->node.next nor
	 * tail->p. Even though the tail->p check is sufficient to find
	 * out whether the queue is empty, we first check head->node.next
	 * as a common case to ensure that dequeuers do not frequently
	 * access the enqueuer's tail->p cache line.
	 */
	return CMM_LOAD_SHARED(head->node.next) == NULL
		&& CMM_LOAD_SHARED(tail->p) == &head->node;
}
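/*
 * Example (sketch): since "empty" requires no mutual exclusion, a
 * monitoring thread may poll it while producers and the consumer run:
 *
 *	if (cds_wfcq_empty(&myq_head, &myq_tail))
 *		...	// queue observed empty at this instant
 */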
static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	ret = pthread_mutex_lock(&head->lock);
	assert(!ret);
}
static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	ret = pthread_mutex_unlock(&head->lock);
	assert(!ret);
}
static inline bool ___cds_wfcq_append(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to the data structure containing node, and the setting
	 * of node->next to NULL, before publication.
	 */
	old_tail = uatomic_xchg(&tail->p, new_tail);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(old_tail->next, new_head);
	/*
	 * Return false if queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}
/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Issues a full memory barrier before enqueue. No mutual exclusion is
 * required.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
static inline bool _cds_wfcq_enqueue(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}
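/*
 * Example (sketch): the return value of enqueue tells whether the
 * queue was previously empty, which a producer can use to wake up a
 * sleeping consumer (wakeup_consumer() is a hypothetical helper):
 *
 *	cds_wfcq_node_init(&job->node);
 *	if (!cds_wfcq_enqueue(&myq_head, &myq_tail, &job->node))
 *		wakeup_consumer();	// queue was empty before this node
 */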
/*
 * Waiting for enqueuer to complete enqueue, then returning the next node.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for enqueuer to complete enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (!blocking)
			return CDS_WFCQ_WOULDBLOCK;
		if (++attempt >= WFCQ_ADAPT_ATTEMPTS) {
			poll(NULL, 0, WFCQ_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}
static inline struct cds_wfcq_node *
___cds_wfcq_first(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(head, tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}
/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 1);
}
/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 0);
}
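/*
 * Example (sketch): iterating without dequeuing, holding the dequeue
 * lock to exclude concurrent dequeue/splice (inspect() is a
 * hypothetical helper):
 *
 *	struct cds_wfcq_node *node;
 *
 *	cds_wfcq_dequeue_lock(&myq_head, &myq_tail);
 *	__cds_wfcq_for_each_blocking(&myq_head, &myq_tail, node)
 *		inspect(caa_container_of(node, struct job, node));
 *	cds_wfcq_dequeue_unlock(&myq_head, &myq_tail);
 */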
static inline struct cds_wfcq_node *
___cds_wfcq_next(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case to ensure that iteration on nodes
	 * does not frequently access the enqueuer's tail->p cache line.
	 */
	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/* Load node->next before tail->p */
		cmm_smp_rmb();
		if (CMM_LOAD_SHARED(tail->p) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}
	/* Load node->next before loading next's content */
	cmm_smp_read_barrier_depends();
	return next;
}
/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 1);
}
/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct cds_wfcq_node *node, *next;

	if (_cds_wfcq_empty(head, tail))
		return NULL;

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK)
		return CDS_WFCQ_WOULDBLOCK;

	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue, the
		 * q->head.next will be set to the next node.
		 * The implicit memory barrier before
		 * uatomic_cmpxchg() orders load node->next
		 * before loading q->tail.
		 * The implicit memory barrier before uatomic_cmpxchg()
		 * also orders load q->head.next before loading node's
		 * content.
		 */
		_cds_wfcq_node_init(&head->node);
		if (uatomic_cmpxchg(&tail->p, node, &head->node) == node)
			return node;
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			head->node.next = node;
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	head->node.next = next;

	/* Load q->head.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}
/*
 * __cds_wfcq_dequeue_blocking: dequeue a node from the queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue(head, tail, 1);
}
/*
 * __cds_wfcq_dequeue_nonblocking: dequeue a node from a wait-free queue.
 *
 * Same as __cds_wfcq_dequeue_blocking, but returns CDS_WFCQ_WOULDBLOCK
 * if it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue(head, tail, 0);
}
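/*
 * Example (sketch): a latency-sensitive consumer (already holding
 * dequeue mutual exclusion) can use the nonblocking variant and do
 * other work instead of busy-waiting on an in-progress enqueue
 * (do_other_work() is a hypothetical helper):
 *
 *	struct cds_wfcq_node *n;
 *
 *	n = __cds_wfcq_dequeue_nonblocking(&myq_head, &myq_tail);
 *	if (n == CDS_WFCQ_WOULDBLOCK)
 *		do_other_work();	// retry the dequeue later
 *	else if (n)
 *		process(caa_container_of(n, struct job, node));
 */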
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct cds_wfcq_node *head, *tail;

	if (_cds_wfcq_empty(src_q_head, src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	head = ___cds_wfcq_node_sync_next(&src_q_head->node, blocking);
	if (head == CDS_WFCQ_WOULDBLOCK)
		return CDS_WFCQ_RET_WOULDBLOCK;
	_cds_wfcq_node_init(&src_q_head->node);

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	tail = uatomic_xchg(&src_q_tail->p, &src_q_head->node);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}
/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Dequeue/splice/iteration mutual exclusion for src_q should be ensured
 * by the caller.
 *
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 1);
}
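/*
 * Example (sketch): a consumer that has exclusive dequeue access to
 * the source queue can move all pending nodes into a local queue in
 * one shot, then process them without further synchronization
 * ("local_head"/"local_tail" are hypothetical):
 *
 *	struct cds_wfcq_head local_head;
 *	struct cds_wfcq_tail local_tail;
 *
 *	cds_wfcq_init(&local_head, &local_tail);
 *	__cds_wfcq_splice_blocking(&local_head, &local_tail,
 *			&myq_head, &myq_tail);
 */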
/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 0);
}
/*
 * cds_wfcq_dequeue_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
 * ensured.
 * It is valid to reuse and free a dequeued node immediately.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	struct cds_wfcq_node *retval;

	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_blocking(head, tail);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}
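/*
 * Example (sketch): draining the queue; a dequeued node may be reused
 * or freed immediately, so jobs can be released as they are pulled:
 *
 *	struct cds_wfcq_node *n;
 *
 *	while ((n = cds_wfcq_dequeue_blocking(&myq_head, &myq_tail)) != NULL)
 *		free(caa_container_of(n, struct job, node));
 */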
/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
 * ensured.
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}
#endif /* _URCU_WFCQUEUE_STATIC_H */