/*
 * Source: urcu.git — include/urcu/static/wfqueue.h
 * (imported alongside commit "Fix: uatomic arm32: add missing release
 * barrier before uatomic_xchg")
 */
1 #ifndef _URCU_WFQUEUE_STATIC_H
2 #define _URCU_WFQUEUE_STATIC_H
3
4 /*
5 * wfqueue-static.h
6 *
7 * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
8 *
9 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
10 * dynamically with the userspace rcu library.
11 *
12 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * This library is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public
16 * License as published by the Free Software Foundation; either
17 * version 2.1 of the License, or (at your option) any later version.
18 *
19 * This library is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public
25 * License along with this library; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29 #include <pthread.h>
30 #include <assert.h>
31 #include <poll.h>
32 #include <urcu/compiler.h>
33 #include <urcu/uatomic.h>
34
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38
39 /*
40 * Queue with wait-free enqueue/blocking dequeue.
41 * This implementation adds a dummy head node when the queue is empty to ensure
42 * we can always update the queue locklessly.
43 *
44 * Inspired from half-wait-free/half-blocking queue implementation done by
45 * Paul E. McKenney.
46 */
47
48 #define WFQ_ADAPT_ATTEMPTS 10 /* Retry if being set */
49 #define WFQ_WAIT 10 /* Wait 10 ms if being set */
50
51 static inline void _cds_wfq_node_init(struct cds_wfq_node *node)
52 {
53 node->next = NULL;
54 }
55
56 static inline void _cds_wfq_init(struct cds_wfq_queue *q)
57 {
58 int ret;
59
60 _cds_wfq_node_init(&q->dummy);
61 /* Set queue head and tail */
62 q->head = &q->dummy;
63 q->tail = &q->dummy.next;
64 ret = pthread_mutex_init(&q->lock, NULL);
65 assert(!ret);
66 }
67
68 static inline void _cds_wfq_destroy(struct cds_wfq_queue *q)
69 {
70 int ret = pthread_mutex_destroy(&q->lock);
71 assert(!ret);
72 }
73
/*
 * Append "node" at the tail of the queue. Wait-free: a single
 * uatomic_xchg() swings the tail, then a plain shared store links the
 * node in. Caller is expected to have set node->next to NULL (e.g. via
 * _cds_wfq_node_init()) before enqueueing — TODO(review): confirm this
 * precondition against callers.
 */
static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
		struct cds_wfq_node *node)
{
	struct cds_wfq_node **old_tail;

	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores to data
	 * structure containing node and setting node->next to NULL before
	 * publication.
	 */
	old_tail = uatomic_xchg(&q->tail, &node->next);
	/*
	 * At this point, dequeuers see a NULL old_tail->next, which indicates
	 * that the queue is being appended to. The following store will append
	 * "node" to the queue from a dequeuer perspective.
	 */
	CMM_STORE_SHARED(*old_tail, node);
}
92
93 /*
94 * Waiting for enqueuer to complete enqueue and return the next node
95 */
96 static inline struct cds_wfq_node *
97 ___cds_wfq_node_sync_next(struct cds_wfq_node *node)
98 {
99 struct cds_wfq_node *next;
100 int attempt = 0;
101
102 /*
103 * Adaptative busy-looping waiting for enqueuer to complete enqueue.
104 */
105 while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
106 if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
107 (void) poll(NULL, 0, WFQ_WAIT); /* Wait for 10ms */
108 attempt = 0;
109 } else
110 caa_cpu_relax();
111 }
112
113 return next;
114 }
115
116 /*
117 * It is valid to reuse and free a dequeued node immediately.
118 *
119 * No need to go on a waitqueue here, as there is no possible state in which the
120 * list could cause dequeue to busy-loop needlessly while waiting for another
121 * thread to be scheduled. The queue appears empty until tail->next is set by
122 * enqueue.
123 */
/*
 * Dequeue one node, or return NULL if the queue is empty. Must be
 * called with q->lock held (see _cds_wfq_dequeue_blocking() below,
 * which takes the lock before calling here). May busy-wait/sleep in
 * ___cds_wfq_node_sync_next() while a concurrent enqueue completes.
 */
static inline struct cds_wfq_node *
___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *node, *next;

	/*
	 * Queue is empty if it only contains the dummy node.
	 */
	if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
		return NULL;
	node = q->head;

	/* Wait for a concurrent enqueuer to publish node->next, if needed. */
	next = ___cds_wfq_node_sync_next(node);

	/*
	 * Move queue head forward.
	 */
	q->head = next;
	/*
	 * Requeue dummy node if we just dequeued it.
	 */
	if (node == &q->dummy) {
		_cds_wfq_node_init(node);
		_cds_wfq_enqueue(q, node);
		/* Retry: the dummy is now behind the real nodes again. */
		return ___cds_wfq_dequeue_blocking(q);
	}
	return node;
}
152
153 static inline struct cds_wfq_node *
154 _cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
155 {
156 struct cds_wfq_node *retnode;
157 int ret;
158
159 ret = pthread_mutex_lock(&q->lock);
160 assert(!ret);
161 retnode = ___cds_wfq_dequeue_blocking(q);
162 ret = pthread_mutex_unlock(&q->lock);
163 assert(!ret);
164 return retnode;
165 }
166
167 #ifdef __cplusplus
168 }
169 #endif
170
171 #endif /* _URCU_WFQUEUE_STATIC_H */
/* (gitweb page-generation footer removed) */