// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H

/*
 * Userspace RCU library - Lock-Free Stack
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
 * linking dynamically with the userspace rcu library.
 */

#include <stdbool.h>
#include <pthread.h>
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Lock-free stack.
 *
 * Stack implementing push, pop, pop_all operations, as well as iterator
 * on the stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * The external synchronization techniques described in the API below
 * are required between pairs marked with "X". No external
 * synchronization is required between pairs marked with "-".
 *
 *                      cds_lfs_push   __cds_lfs_pop  __cds_lfs_pop_all
 * cds_lfs_push               -              -                -
 * __cds_lfs_pop              -              X                X
 * __cds_lfs_pop_all          -              X                -
 *
 * cds_lfs_pop_blocking and cds_lfs_pop_all_blocking use an internal
 * mutex to provide synchronization.
 */
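
/*
 * Usage sketch (illustrative only, not part of this API): a caller
 * embeds struct cds_lfs_node in its own structure, pushes nodes with
 * cds_lfs_push(), and pops them with the blocking helpers declared in
 * urcu/lfstack.h, which rely on the internal mutex. The struct myobj
 * type and get_obj_count() helper below are hypothetical; passing
 * &stack to cds_lfs_push() assumes the cds_lfs_stack_ptr_t transparent
 * union accepts a struct cds_lfs_stack pointer directly.
 *
 *	struct myobj {
 *		int value;
 *		struct cds_lfs_node node;
 *	};
 *
 *	struct cds_lfs_stack stack;
 *
 *	cds_lfs_init(&stack);
 *	for (int i = 0; i < get_obj_count(); i++) {
 *		struct myobj *obj = malloc(sizeof(*obj));
 *
 *		obj->value = i;
 *		cds_lfs_node_init(&obj->node);
 *		(void) cds_lfs_push(&stack, &obj->node);
 *	}
 *	for (;;) {
 *		struct cds_lfs_node *snode = cds_lfs_pop_blocking(&stack);
 *
 *		if (!snode)
 *			break;
 *		free(caa_container_of(snode, struct myobj, node));
 *	}
 *	cds_lfs_destroy(&stack);
 */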

/*
 * cds_lfs_node_init: initialize lock-free stack node.
 */
static inline
void _cds_lfs_node_init(struct cds_lfs_node *node __attribute__((unused)))
{
}

/*
 * cds_lfs_init: initialize lock-free stack (with lock). Pair with
 * cds_lfs_destroy().
 */
static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
	int ret;

	s->head = NULL;
	ret = pthread_mutex_init(&s->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_lfs_destroy: destroy lock-free stack (with lock). Pair with
 * cds_lfs_init().
 */
static inline
void _cds_lfs_destroy(struct cds_lfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);

	urcu_posix_assert(!ret);
}

/*
 * ___cds_lfs_init: initialize lock-free stack (without lock).
 * Don't pair with any destroy function.
 */
static inline
void ___cds_lfs_init(struct __cds_lfs_stack *s)
{
	s->head = NULL;
}

static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
	return head == NULL;
}

/*
 * cds_lfs_empty: return whether lock-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
	return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
}

/*
 * cds_lfs_push: push a node into the stack.
 *
 * Does not require any synchronization with other push nor pop.
 *
 * Operations before push are consistent when observed after associated pop.
 *
 * Lock-free stack push is not subject to ABA problem, so no need to
 * take the RCU read-side lock. Even if "head" changes between two
 * uatomic_cmpxchg() invocations here (being popped, and then pushed
 * again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, so it can
 * very well be reallocated between the two uatomic_cmpxchg().
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as old head for the following loop). The
 * upside of this scheme is to minimize the amount of cacheline traffic,
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
 * might be revisited after more thorough benchmarking on various
 * platforms.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
		struct cds_lfs_node *node)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head = NULL;
	struct cds_lfs_head *new_head =
		caa_container_of(node, struct cds_lfs_head, node);

	for (;;) {
		struct cds_lfs_head *old_head = head;

		/*
		 * node->next is still private at this point, no need to
		 * perform a _CMM_STORE_SHARED().
		 */
		node->next = &head->node;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		cmm_emit_legacy_smp_mb();
		head = uatomic_cmpxchg_mo(&s->head, old_head, new_head,
					CMM_SEQ_CST, CMM_SEQ_CST);
		if (old_head == head)
			break;
	}
	return !___cds_lfs_empty_head(head);
}
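
/*
 * A minimal sketch of how the push return value can be used (assumption:
 * the wakeup policy shown here is illustrative, not mandated by this
 * header; notify_consumer() is a hypothetical helper). Because
 * cds_lfs_push() returns false only when the stack was empty before the
 * push, a producer can limit consumer wakeups to that transition:
 *
 *	static void enqueue_work(cds_lfs_stack_ptr_t stack,
 *			struct cds_lfs_node *node)
 *	{
 *		if (!cds_lfs_push(stack, node)) {
 *			// Stack went from empty to non-empty: wake consumer.
 *			notify_consumer();
 *		}
 *	}
 */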

/*
 * __cds_lfs_pop: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * Operations after pop are consistent when observed before associated push.
 *
 * __cds_lfs_pop needs to be synchronized using one of the following
 * techniques:
 *
 * 1) Calling __cds_lfs_pop under rcu read lock critical section.
 *    Both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
 *    and __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	for (;;) {
		struct cds_lfs_head *head, *next_head;
		struct cds_lfs_node *next;

		head = uatomic_load(&s->head, CMM_CONSUME);
		if (___cds_lfs_empty_head(head))
			return NULL;	/* Empty stack */

		/*
		 * Read head before head->next. Matches the implicit
		 * memory barrier before uatomic_cmpxchg() in
		 * cds_lfs_push.
		 */
		next = uatomic_load(&head->node.next, CMM_RELAXED);
		next_head = caa_container_of(next,
				struct cds_lfs_head, node);
		if (uatomic_cmpxchg_mo(&s->head, head, next_head,
				CMM_SEQ_CST, CMM_SEQ_CST) == head) {
			cmm_emit_legacy_smp_mb();
			return &head->node;
		}
		/* busy-loop if head changed under us */
	}
}
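
/*
 * Usage sketch of synchronization technique 1) above (illustrative only):
 * urcu_memb_read_lock()/urcu_memb_read_unlock() stand for whichever RCU
 * flavor the application links against (here the urcu-memb flavor is
 * assumed), and struct myobj is the hypothetical enclosing type from the
 * sketch near the top of this file.
 *
 *	static struct myobj *pop_obj_rcu(cds_lfs_stack_ptr_t stack)
 *	{
 *		struct cds_lfs_node *snode;
 *
 *		urcu_memb_read_lock();
 *		snode = __cds_lfs_pop(stack);
 *		urcu_memb_read_unlock();
 *		if (!snode)
 *			return NULL;
 *		// The caller must wait for a grace period (e.g. call_rcu()
 *		// or synchronize_rcu()) before freeing or re-pushing the
 *		// returned node.
 *		return caa_container_of(snode, struct myobj, node);
 *	}
 */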

/*
 * __cds_lfs_pop_all: pop all nodes from a stack.
 *
 * __cds_lfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_lfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_lfs_pop:
 *
 * 1) If __cds_lfs_pop is called under rcu read lock critical section,
 *    both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
 *    __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head;

	/*
	 * The implicit memory barrier after uatomic_xchg() matches the
	 * implicit memory barrier before uatomic_cmpxchg() in
	 * cds_lfs_push. It ensures that all nodes of the returned list
	 * are consistent. There is no need to issue memory barriers when
	 * iterating on the returned list: the full memory barrier issued
	 * prior to each uatomic_cmpxchg() in cds_lfs_push (each of which
	 * writes to head) orders the writes to each node before the full
	 * memory barrier following this uatomic_xchg().
	 */
	head = uatomic_xchg_mo(&s->head, NULL, CMM_SEQ_CST);
	cmm_emit_legacy_smp_mb();
	return head;
}
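
/*
 * A minimal sketch of draining a stack with pop_all and walking the
 * returned list through the node next pointers (illustrative only;
 * struct myobj is hypothetical, and the public urcu/lfstack.h header
 * also offers iterator macros for this walk). The immediate free()
 * assumes the pop side is synchronized by mutual exclusion or a single
 * consumer (techniques 2 or 3); under technique 1 the free must be
 * deferred past a grace period.
 *
 *	static void drain_stack(cds_lfs_stack_ptr_t stack)
 *	{
 *		struct cds_lfs_head *lhead = __cds_lfs_pop_all(stack);
 *		struct cds_lfs_node *snode, *snext;
 *
 *		if (___cds_lfs_empty_head(lhead))
 *			return;
 *		for (snode = &lhead->node; snode; snode = snext) {
 *			struct myobj *obj = caa_container_of(snode,
 *					struct myobj, node);
 *
 *			snext = snode->next;
 *			free(obj);	// assumes heap-allocated nodes
 *		}
 *	}
 */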

/*
 * cds_lfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * cds_lfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * Call __cds_lfs_pop with an internal pop mutex held.
 */
static inline
struct cds_lfs_node *
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_node *retnode;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	retnode = ___cds_lfs_pop(stack);
	_cds_lfs_pop_unlock(s);
	return retnode;
}

/*
 * Call __cds_lfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_lfs_head *
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_head *rethead;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	rethead = ___cds_lfs_pop_all(stack);
	_cds_lfs_pop_unlock(s);
	return rethead;
}

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_LFSTACK_H */