#ifndef _URCU_WFCQUEUE_STATIC_H
#define _URCU_WFCQUEUE_STATIC_H

/*
 * urcu/static/wfcqueue.h
 *
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. Inspired from
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 *
 * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
 *
 * Synchronization table:
 *
 * The external synchronization techniques described in the API below
 * are required between pairs marked with "X". No external
 * synchronization is required between pairs marked with "-".
 *
 * Legend:
 * [1] cds_wfcq_enqueue
 * [2] __cds_wfcq_splice (destination queue)
 * [3] __cds_wfcq_dequeue
 * [4] __cds_wfcq_splice (source queue)
 * [5] __cds_wfcq_first
 * [6] __cds_wfcq_next
 *
 *     [1] [2] [3] [4] [5] [6]
 * [1]  -   -   -   -   -   -
 * [2]  -   -   -   -   -   -
 * [3]  -   -   X   X   X   X
 * [4]  -   -   X   -   X   X
 * [5]  -   -   X   X   -   -
 * [6]  -   -   X   X   -   -
 *
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock.
 */

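/*
 * Usage sketch (illustrative, not part of the API below): a minimal
 * multi-producer/single-consumer setup built on the public wrappers
 * from urcu/wfcqueue.h. Producers enqueue without any lock; the
 * convenience dequeue takes the dequeue lock internally. The names
 * "struct mydata" and handle_data() are placeholders.
 *
 *	struct mydata {
 *		int value;
 *		struct cds_wfcq_node qnode;
 *	};
 *
 *	static struct cds_wfcq_head q_head;
 *	static struct cds_wfcq_tail q_tail;
 *
 *	Once, before use:
 *		cds_wfcq_init(&q_head, &q_tail);
 *
 *	Producer (any thread, no lock needed):
 *		cds_wfcq_node_init(&d->qnode);
 *		(void) cds_wfcq_enqueue(&q_head, &q_tail, &d->qnode);
 *
 *	Consumer (dequeue lock taken internally):
 *		struct cds_wfcq_node *qnode;
 *
 *		qnode = cds_wfcq_dequeue_blocking(&q_head, &q_tail);
 *		if (qnode)
 *			handle_data(caa_container_of(qnode,
 *					struct mydata, qnode));
 */
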
#define WFCQ_ADAPT_ATTEMPTS		10	/* Busy-loop attempts before waiting */
#define WFCQ_WAIT			10	/* Wait duration when busy-looping (ms) */

/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	node->next = NULL;
}

/*
 * cds_wfcq_init: initialize wait-free queue (with lock). Pair with
 * cds_wfcq_destroy().
 */
static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
	ret = pthread_mutex_init(&head->lock, NULL);
	assert(!ret);
}

/*
 * cds_wfcq_destroy: destroy wait-free queue (with lock). Pair with
 * cds_wfcq_init().
 */
static inline void _cds_wfcq_destroy(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret = pthread_mutex_destroy(&head->lock);
	assert(!ret);
}

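/*
 * Lifecycle sketch: the locked variant pairs init with destroy (the
 * internal mutex must be released), whereas a queue initialized with
 * the no-lock __cds_wfcq_init() below holds no mutex and needs no
 * destroy call. The public wrappers assumed here are cds_wfcq_init()
 * and cds_wfcq_destroy() from urcu/wfcqueue.h.
 *
 *	struct cds_wfcq_head head;
 *	struct cds_wfcq_tail tail;
 *
 *	cds_wfcq_init(&head, &tail);
 *	... enqueue/dequeue ...
 *	cds_wfcq_destroy(&head, &tail);
 */
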
/*
 * __cds_wfcq_init: initialize wait-free queue (without lock). Don't
 * pair with any destroy function.
 */
static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
}

/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 *
 * We perform the test on head->node.next to check if the queue is
 * possibly empty, but we confirm this by checking if the tail pointer
 * points to the head node, because the tail pointer is the
 * linearisation point of the enqueuers. Just checking the head next
 * pointer could make a queue appear empty if an enqueuer is preempted
 * for a long time between xchg() and setting the previous node's next
 * pointer.
 */
static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	/*
	 * The queue is empty if head->node.next points to no node and
	 * tail->p points back to the head node. Even though the
	 * tail->p check is sufficient to find out if the queue is
	 * empty, we first check head->node.next as a common case, to
	 * ensure that dequeuers do not frequently access the
	 * enqueuer's tail->p cache line.
	 */
	return CMM_LOAD_SHARED(head->node.next) == NULL
		&& CMM_LOAD_SHARED(tail->p) == &head->node;
}

static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	ret = pthread_mutex_lock(&head->lock);
	assert(!ret);
}

static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	ret = pthread_mutex_unlock(&head->lock);
	assert(!ret);
}

static inline bool ___cds_wfcq_append(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to the data structure containing node, and the
	 * setting of node->next to NULL, before publication.
	 */
	old_tail = uatomic_xchg(&tail->p, new_tail);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders the
	 * store to tail->p before the store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(old_tail->next, new_head);
	/*
	 * Return false if the queue was empty prior to adding the
	 * node, else return true.
	 */
	return old_tail != &head->node;
}

/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Issues a full memory barrier before enqueue. No mutual exclusion is
 * required.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
static inline bool _cds_wfcq_enqueue(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}

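/*
 * Sketch of the enqueue return value: a false return means the queue
 * was empty before this enqueue, which a producer can use as a cheap
 * "queue became non-empty" edge, e.g. to wake a sleeping consumer.
 * wake_consumer() is an illustrative placeholder.
 *
 *	cds_wfcq_node_init(&elem->qnode);
 *	if (!cds_wfcq_enqueue(&q_head, &q_tail, &elem->qnode))
 *		wake_consumer();
 */
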
/*
 * ___cds_wfcq_busy_wait: adaptive busy-wait.
 *
 * Returns 1 if nonblocking and needs to block, 0 otherwise.
 */
static inline bool
___cds_wfcq_busy_wait(int *attempt, int blocking)
{
	if (!blocking)
		return 1;
	if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
		(void) poll(NULL, 0, WFCQ_WAIT);	/* Wait for 10ms */
		*attempt = 0;
	} else {
		caa_cpu_relax();
	}
	return 0;
}

/*
 * Wait for an enqueuer to complete its enqueue, then return the next
 * node.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping, waiting for the enqueuer to complete
	 * its enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}

static inline struct cds_wfcq_node *
___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if queue is empty, first node otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 1);
}

/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_next(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to
	 * find out if we reached the end of the queue, we first check
	 * node->next as a common case, to ensure that iteration on
	 * nodes does not frequently access the enqueuer's tail->p
	 * cache line.
	 */
	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/* Load node->next before tail->p */
		cmm_smp_rmb();
		if (CMM_LOAD_SHARED(tail->p) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}
	/* Load node->next before loading next's content */
	cmm_smp_read_barrier_depends();
	return next;
}

/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if reached end of queue, non-NULL next queue node
 * otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 1);
}

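/*
 * Iteration sketch using the for-each macros from urcu/wfcqueue.h,
 * which are built on __cds_wfcq_first_blocking() and
 * __cds_wfcq_next_blocking(). The caller must provide
 * dequeue/splice/iteration mutual exclusion, e.g. by holding the
 * dequeue lock as below, or by iterating from a single thread.
 * inspect() is an illustrative placeholder.
 *
 *	struct cds_wfcq_node *qnode;
 *
 *	cds_wfcq_dequeue_lock(&q_head, &q_tail);
 *	__cds_wfcq_for_each_blocking(&q_head, &q_tail, qnode) {
 *		inspect(caa_container_of(qnode, struct mydata, qnode));
 *	}
 *	cds_wfcq_dequeue_unlock(&q_head, &q_tail);
 */
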
/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &head->node.
		 * head->node.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue,
		 * head->node.next will be set to the next node.
		 * The implicit memory barrier before
		 * uatomic_cmpxchg() orders the load of node->next
		 * before the load of tail->p.
		 * The implicit memory barrier before uatomic_cmpxchg()
		 * also orders the load of head->node.next before the
		 * load of the node's content.
		 */
		_cds_wfcq_node_init(&head->node);
		if (uatomic_cmpxchg(&tail->p, node, &head->node) == node) {
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			head->node.next = node;
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	head->node.next = next;

	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
}

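/*
 * Sketch of the state flag: CDS_WFCQ_STATE_LAST is set when the
 * dequeued node was the last one in the queue, letting a caller
 * detect the queue-emptied transition without a separate emptiness
 * check. The locked public wrapper from urcu/wfcqueue.h is assumed
 * here.
 *
 *	int state;
 *	struct cds_wfcq_node *qnode;
 *
 *	qnode = cds_wfcq_dequeue_with_state_blocking(&q_head, &q_tail,
 *			&state);
 *	if (qnode && (state & CDS_WFCQ_STATE_LAST))
 *		... queue just became empty ...
 */
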
458
459 /*
460 * ___cds_wfcq_dequeue_blocking: dequeue node from queue.
461 *
462 * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving
463 * state.
464 */
465 static inline struct cds_wfcq_node *
466 ___cds_wfcq_dequeue_blocking(cds_wfcq_head_ptr_t head,
467 struct cds_wfcq_tail *tail)
468 {
469 return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
470 }
471
/*
 * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but returns
 * CDS_WFCQ_WOULDBLOCK if it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
}

/*
 * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
}

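/*
 * Sketch of nonblocking dequeue: CDS_WFCQ_WOULDBLOCK is a sentinel
 * distinct from NULL (empty), so both cases must be told apart. A
 * caller may retry later or fall back to the blocking variant. The
 * caller must still provide dequeue/splice/iteration mutual
 * exclusion.
 *
 *	struct cds_wfcq_node *qnode;
 *
 *	qnode = __cds_wfcq_dequeue_nonblocking(&q_head, &q_tail);
 *	if (qnode == CDS_WFCQ_WOULDBLOCK)
 *		... an enqueue is in progress; retry later ...
 *	else if (!qnode)
 *		... queue is empty ...
 *	else
 *		... got a node ...
 */
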
/*
 * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		cds_wfcq_head_ptr_t u_dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t u_src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct __cds_wfcq_head *dest_q_head = u_dest_q_head._h;
	struct __cds_wfcq_head *src_q_head = u_src_q_head._h;
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where the queue is
	 * empty: only loads are required to check for emptiness.
	 */
	if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg(&src_q_head->node.next, NULL);
		if (head)
			break;	/* non-empty */
		if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders the
	 * store to src_q_head->node.next before the store to
	 * src_q_tail->p. This is required by concurrent enqueue on
	 * src_q, which exchanges the tail before updating the previous
	 * tail's next pointer.
	 */
	tail = uatomic_xchg(&src_q_tail->p, &src_q_head->node);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(__cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}

/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
		src_q_head, src_q_tail, 1);
}

/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
		src_q_head, src_q_tail, 0);
}

/*
 * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
 * ensured.
 * It is valid to reuse and free a dequeued node immediately.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *retval;

	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_cast(head),
			tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}

/*
 * cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
 * ensured.
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			cds_wfcq_head_cast(src_q_head), src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}

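/*
 * Batch-drain sketch: a consumer can splice the shared queue into a
 * thread-local queue in one operation, then walk the local queue
 * without holding any lock, since no other thread touches it. The
 * names local_head/local_tail are illustrative; handle_data() and
 * "struct mydata" are placeholders, and the for-each-safe macro from
 * urcu/wfcqueue.h is assumed.
 *
 *	struct cds_wfcq_head local_head;
 *	struct cds_wfcq_tail local_tail;
 *	struct cds_wfcq_node *qnode, *n;
 *
 *	cds_wfcq_init(&local_head, &local_tail);
 *	(void) cds_wfcq_splice_blocking(&local_head, &local_tail,
 *			&q_head, &q_tail);
 *	__cds_wfcq_for_each_blocking_safe(&local_head, &local_tail,
 *			qnode, n) {
 *		handle_data(caa_container_of(qnode, struct mydata, qnode));
 *	}
 *	cds_wfcq_destroy(&local_head, &local_tail);
 */
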
#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFCQUEUE_STATIC_H */