#ifndef _URCU_WFQUEUE_STATIC_H
#define _URCU_WFQUEUE_STATIC_H

/*
 * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

/*
 * Queue with wait-free enqueue/blocking dequeue.
 * This implementation adds a dummy head node when the queue is empty to ensure
 * we can always update the queue locklessly.
 *
 * Inspired from half-wait-free/half-blocking queue implementation done by
 * Paul E. McKenney.
 *
 * (An illustrative usage sketch follows the WFQ_* defines below.)
 */

#define WFQ_ADAPT_ATTEMPTS	10	/* Retry if being set */
#define WFQ_WAIT		10	/* Wait 10 ms if being set */
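
/*
 * Illustrative usage sketch (comment only, not part of this header): it
 * assumes the struct cds_wfq_node and struct cds_wfq_queue definitions from
 * wfqueue.h, plus a hypothetical caller-defined "struct my_item" that embeds
 * a queue node.
 *
 *	struct my_item {
 *		int value;
 *		struct cds_wfq_node qnode;
 *	};
 *
 * Producer side (any thread, wait-free):
 *
 *	item = malloc(sizeof(*item));
 *	item->value = 42;
 *	_cds_wfq_node_init(&item->qnode);
 *	_cds_wfq_enqueue(&q, &item->qnode);
 *
 * Consumer side (dequeuers are serialized by q->lock):
 *
 *	node = _cds_wfq_dequeue_blocking(&q);
 *	if (node) {
 *		item = caa_container_of(node, struct my_item, qnode);
 *		free(item);
 *	}
 *
 * The queue itself must have been set up once with _cds_wfq_init(&q).
 */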

static inline void _cds_wfq_node_init(struct cds_wfq_node *node)
{
	node->next = NULL;
}

static inline void _cds_wfq_init(struct cds_wfq_queue *q)
{
	int ret;

	_cds_wfq_node_init(&q->dummy);
	/* Set queue head and tail */
	q->head = &q->dummy;
	q->tail = &q->dummy.next;
	ret = pthread_mutex_init(&q->lock, NULL);
	assert(!ret);
}

static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
				    struct cds_wfq_node *node)
{
	struct cds_wfq_node **old_tail;

	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores to data
	 * structure containing node and setting node->next to NULL before
	 * publication.
	 */
	old_tail = uatomic_xchg(&q->tail, &node->next);
	/*
	 * At this point, dequeuers see a NULL old_tail->next, which indicates
	 * that the queue is being appended to. The following store will append
	 * "node" to the queue from a dequeuer perspective.
	 */
	CMM_STORE_SHARED(*old_tail, node);
}
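
/*
 * Worked example of the enqueue above (informational comment only). Starting
 * from an empty queue, where q->tail == &q->dummy.next and q->dummy.next is
 * NULL, enqueueing a node A proceeds in two steps:
 *
 *   1. uatomic_xchg(&q->tail, &A.next) atomically makes q->tail point to
 *      &A.next and returns the previous tail, &q->dummy.next. A concurrent
 *      dequeuer that loads q->dummy.next at this point still reads NULL, so
 *      it knows an append is in progress and waits in
 *      ___cds_wfq_node_sync_next().
 *   2. CMM_STORE_SHARED(q->dummy.next, &A) publishes A; dequeuers can now
 *      traverse dummy -> A.
 *
 * A second enqueue of a node B repeats the same two steps against &A.next.
 */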

/*
 * Waiting for enqueuer to complete enqueue and return the next node
 */
static inline struct cds_wfq_node *
___cds_wfq_node_sync_next(struct cds_wfq_node *node)
{
	struct cds_wfq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for enqueuer to complete enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
			poll(NULL, 0, WFQ_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else
			caa_cpu_relax();
	}

	return next;
}
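
/*
 * Behavioural note (informational): with the defaults above, a dequeuer that
 * races with a preempted enqueuer spins for up to WFQ_ADAPT_ATTEMPTS (10)
 * iterations of caa_cpu_relax(), then sleeps WFQ_WAIT (10) ms per further
 * round via poll(), so a stalled enqueuer costs roughly 10 ms per retry round
 * instead of continuously burning CPU.
 */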

/*
 * It is valid to reuse and free a dequeued node immediately.
 *
 * No need to go on a waitqueue here, as there is no possible state in which the
 * list could cause dequeue to busy-loop needlessly while waiting for another
 * thread to be scheduled. The queue appears empty until tail->next is set by
 * enqueues.
 */
static inline struct cds_wfq_node *
___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *node, *next;

	/*
	 * Queue is empty if it only contains the dummy node.
	 */
	if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
		return NULL;
	node = q->head;

	next = ___cds_wfq_node_sync_next(node);

	/*
	 * Move queue head forward.
	 */
	q->head = next;
	/*
	 * Requeue dummy node if we just dequeued it.
	 */
	if (node == &q->dummy) {
		_cds_wfq_node_init(node);
		_cds_wfq_enqueue(q, node);
		return ___cds_wfq_dequeue_blocking(q);
	}
	return node;
}
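
/*
 * Worked trace of the dummy requeue above (informational comment only):
 * starting from the empty state (q->head == &q->dummy), enqueue a node A and
 * then dequeue. The first pass pops the dummy, so the dummy is re-initialized
 * and re-enqueued behind A, and the recursive retry then returns A, leaving
 * q->head pointing at the dummy again. Callers therefore never see the dummy
 * node, only the nodes they enqueued.
 */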

static inline struct cds_wfq_node *
_cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *retnode;
	int ret;

	ret = pthread_mutex_lock(&q->lock);
	assert(!ret);
	retnode = ___cds_wfq_dequeue_blocking(q);
	ret = pthread_mutex_unlock(&q->lock);
	assert(!ret);

	return retnode;
}
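
/*
 * Consumer usage sketch (comment only): dequeuers typically drain the queue
 * in a loop, converting each returned node back to its enclosing structure.
 * "struct my_item" and its "qnode" member are the same hypothetical names
 * used in the sketch near the top of this file.
 *
 *	struct cds_wfq_node *node;
 *	struct my_item *item;
 *
 *	while ((node = _cds_wfq_dequeue_blocking(&q)) != NULL) {
 *		item = caa_container_of(node, struct my_item, qnode);
 *		free(item);
 *	}
 */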

#endif /* _URCU_WFQUEUE_STATIC_H */