urcu.git: include/urcu/static/wfstack.h
// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_STATIC_WFSTACK_H
#define _URCU_STATIC_WFSTACK_H

/*
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
 * linking dynamically with the userspace rcu library.
 */

#include <pthread.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CDS_WFS_END			((struct cds_wfs_head *) 0x1UL)
#define CDS_WFS_ADAPT_ATTEMPTS		10	/* Busy-wait attempts before sleeping */
#define CDS_WFS_WAIT			10	/* Sleep 10 ms between busy-wait rounds */

/*
 * Stack with wait-free push, blocking traversal.
 *
 * Stack implementing push, pop, pop_all operations, as well as iterator
 * on the stack head returned by pop_all.
 *
 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
 * cds_wfs_first.
 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
 * iteration on stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                           cds_wfs_push  __cds_wfs_pop  __cds_wfs_pop_all
 * cds_wfs_push                   -              -                -
 * __cds_wfs_pop                  -              X                X
 * __cds_wfs_pop_all              -              X                -
 *
 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
 * synchronization.
 */
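
/*
 * Example (illustrative sketch only, not part of this header): typical
 * use of the public wrappers from urcu/wfstack.h. The "myobj" type and
 * the helper functions below are hypothetical.
 *
 *	struct myobj {
 *		int value;
 *		struct cds_wfs_node node;
 *	};
 *
 *	struct cds_wfs_stack stack;	// initialized with cds_wfs_init()
 *
 *	void myobj_push(struct myobj *obj)
 *	{
 *		cds_wfs_node_init(&obj->node);
 *		(void) cds_wfs_push(&stack, &obj->node);
 *	}
 *
 *	struct myobj *myobj_pop(void)
 *	{
 *		struct cds_wfs_node *snode;
 *
 *		snode = cds_wfs_pop_blocking(&stack);
 *		if (!snode)
 *			return NULL;	// stack was empty
 *		return caa_container_of(snode, struct myobj, node);
 *	}
 *
 * Pair cds_wfs_init() with cds_wfs_destroy() once the stack is no
 * longer in use.
 */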

/*
 * cds_wfs_node_init: initialize wait-free stack node.
 */
static inline
void _cds_wfs_node_init(struct cds_wfs_node *node)
{
	node->next = NULL;
}

/*
 * __cds_wfs_init: initialize wait-free stack. Don't pair with
 * any destroy function.
 */
static inline void ___cds_wfs_init(struct __cds_wfs_stack *s)
{
	s->head = CDS_WFS_END;
}

/*
 * cds_wfs_init: initialize wait-free stack. Pair with
 * cds_wfs_destroy().
 */
static inline
void _cds_wfs_init(struct cds_wfs_stack *s)
{
	int ret;

	s->head = CDS_WFS_END;
	ret = pthread_mutex_init(&s->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfs_destroy: destroy wait-free stack. Pair with
 * cds_wfs_init().
 */
static inline
void _cds_wfs_destroy(struct cds_wfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);
	urcu_posix_assert(!ret);
}

static inline bool ___cds_wfs_end(void *node)
{
	return node == CDS_WFS_END;
}

/*
 * cds_wfs_empty: return whether wait-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;

	return ___cds_wfs_end(uatomic_load(&s->head, CMM_RELAXED));
}

/*
 * cds_wfs_push: push a node into the stack.
 *
 * Issues a full memory barrier before push. No mutual exclusion is
 * required.
 *
 * Operations before push are consistent when observed after associated pop.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *old_head, *new_head;

	urcu_posix_assert(node->next == NULL);
	new_head = caa_container_of(node, struct cds_wfs_head, node);
	/*
	 * The implicit memory barrier of uatomic_xchg_mo() orders earlier
	 * stores to the node (e.g. setting node->next to NULL) before its
	 * publication.
	 */
	cmm_emit_legacy_smp_mb();
	old_head = uatomic_xchg_mo(&s->head, new_head, CMM_SEQ_CST);
	/*
	 * At this point, dequeuers see a NULL node->next; they should
	 * busy-wait until node->next is set to old_head.
	 */
	uatomic_store(&node->next, &old_head->node, CMM_RELEASE);
	return !___cds_wfs_end(old_head);
}
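
/*
 * Example (hedged sketch): the return value of push can drive a wakeup
 * scheme where a consumer only needs to be signalled on the
 * empty-to-non-empty transition. wake_consumer() is a hypothetical
 * application function.
 *
 *	if (cds_wfs_push(&stack, &obj->node) == 0)
 *		wake_consumer();	// stack was empty before this push
 */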

/*
 * Wait for a concurrent push to complete its enqueue, then return the
 * next node.
 */
static inline struct cds_wfs_node *
___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping waiting for push to complete.
	 */
	while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
		if (!blocking)
			return CDS_WFS_WOULDBLOCK;
		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
			(void) poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
			attempt = 0;
		} else {
			caa_cpu_relax();
		}
	}

	return next;
}

static inline
struct cds_wfs_node *
___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
{
	struct cds_wfs_head *head, *new_head;
	struct cds_wfs_node *next;
	struct __cds_wfs_stack *s = u_stack._s;

	if (state)
		*state = 0;
	for (;;) {
		head = uatomic_load(&s->head, CMM_CONSUME);
		if (___cds_wfs_end(head)) {
			return NULL;
		}
		next = ___cds_wfs_node_sync_next(&head->node, blocking);
		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
			return CDS_WFS_WOULDBLOCK;
		}
		new_head = caa_container_of(next, struct cds_wfs_head, node);
		if (uatomic_cmpxchg_mo(&s->head, head, new_head,
				CMM_SEQ_CST, CMM_SEQ_CST) == head) {
			if (state && ___cds_wfs_end(new_head))
				*state |= CDS_WFS_STATE_LAST;
			cmm_emit_legacy_smp_mb();
			return &head->node;
		}
		if (!blocking) {
			return CDS_WFS_WOULDBLOCK;
		}
		/* busy-loop if head changed under us */
	}
}

/*
 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
 *
 * Returns NULL if stack is empty.
 *
 * Operations after pop are consistent when observed before associated push.
 *
 * __cds_wfs_pop_blocking needs to be synchronized using one of the
 * following techniques:
 *
 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
 *    section. The caller must wait for a grace period to pass before
 *    freeing the returned node or modifying the cds_wfs_node structure.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 1);
}

static inline
struct cds_wfs_node *
___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_blocking(u_stack, NULL);
}
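
/*
 * Example (illustrative sketch of synchronization technique 1 above):
 * pop under an RCU read-side critical section and defer reclaim by a
 * grace period. Assumes a urcu flavor header (e.g. urcu/urcu-memb.h)
 * and a hypothetical "myobj" type embedding a cds_wfs_node and an
 * rcu_head, with free_myobj_cb() as its call_rcu callback.
 *
 *	struct cds_wfs_node *snode;
 *
 *	rcu_read_lock();
 *	snode = __cds_wfs_pop_blocking(&stack);
 *	rcu_read_unlock();
 *	if (snode) {
 *		struct myobj *obj = caa_container_of(snode,
 *					struct myobj, node);
 *		call_rcu(&obj->rcu_head, free_myobj_cb);
 *	}
 */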

/*
 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_with_state_blocking, but returns
 * CDS_WFS_WOULDBLOCK if it needs to block.
 *
 * "state" saves state flags atomically sampled with pop operation.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack, int *state)
{
	return ___cds_wfs_pop(u_stack, state, 0);
}

/*
 * __cds_wfs_pop_nonblocking: pop a node from the stack.
 *
 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
 * it needs to block.
 */
static inline
struct cds_wfs_node *
___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack)
{
	return ___cds_wfs_pop_with_state_nonblocking(u_stack, NULL);
}
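
/*
 * Example (hedged sketch): a consumer that must not busy-wait can use
 * the nonblocking variant and distinguish the three possible outcomes:
 *
 *	struct cds_wfs_node *snode;
 *
 *	snode = __cds_wfs_pop_nonblocking(&stack);
 *	if (snode == CDS_WFS_WOULDBLOCK) {
 *		// concurrent push in progress: retry later
 *	} else if (snode == NULL) {
 *		// stack was empty
 *	} else {
 *		// popped a node; use caa_container_of() to get the object
 *	}
 */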

/*
 * __cds_wfs_pop_all: pop all nodes from a stack.
 *
 * Operations after pop are consistent when observed before associated push.
 *
 * __cds_wfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_wfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_wfs_pop_blocking:
 *
 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
 *    section, both __cds_wfs_pop_blocking and __cds_wfs_pop_all callers
 *    must wait for a grace period to pass before freeing the returned
 *    node or modifying the cds_wfs_node structure. However, no RCU
 *    read-side critical section is needed around __cds_wfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect
 *    __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
 *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
 */
static inline
struct cds_wfs_head *
___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
{
	struct __cds_wfs_stack *s = u_stack._s;
	struct cds_wfs_head *head;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_xchg() in cds_wfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg (each of which writes to head)
	 * order the writes to each node before the full memory barrier
	 * after this uatomic_xchg().
	 */
	head = uatomic_xchg_mo(&s->head, CDS_WFS_END, CMM_SEQ_CST);
	cmm_emit_legacy_smp_mb();
	/*
	 * Return the head unconditionally: cds_wfs_first() maps an
	 * empty popped stack (CDS_WFS_END) to NULL.
	 */
	return head;
}
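
/*
 * Example (illustrative sketch): in a multi-producer/single-consumer
 * scheme, the consumer can atomically take ownership of all queued
 * nodes, then traverse them without further synchronization with
 * concurrent pushers:
 *
 *	struct cds_wfs_head *head;
 *	struct cds_wfs_node *snode;
 *
 *	head = __cds_wfs_pop_all(&stack);
 *	for (snode = cds_wfs_first(head);
 *			snode != NULL;
 *			snode = cds_wfs_next_blocking(snode)) {
 *		// process caa_container_of(snode, struct myobj, node)
 *	}
 */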

/*
 * cds_wfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_lock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
{
	struct cds_wfs_node *retnode;
	cds_wfs_stack_ptr_t stack;

	_cds_wfs_pop_lock(s);
	stack.s = s;
	retnode = ___cds_wfs_pop_with_state_blocking(stack, state);
	_cds_wfs_pop_unlock(s);
	return retnode;
}

/*
 * Call _cds_wfs_pop_with_state_blocking without saving any state.
 */
static inline
struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	return _cds_wfs_pop_with_state_blocking(s, NULL);
}

/*
 * Call __cds_wfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_wfs_head *
_cds_wfs_pop_all_blocking(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *rethead;
	cds_wfs_stack_ptr_t stack;

	_cds_wfs_pop_lock(s);
	stack.s = s;
	rethead = ___cds_wfs_pop_all(stack);
	_cds_wfs_pop_unlock(s);
	return rethead;
}
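
/*
 * Example (hedged sketch): draining and freeing every element with the
 * public wrappers and the safe iteration macro from urcu/wfstack.h.
 * "myobj" is the hypothetical type from the earlier example.
 *
 *	struct cds_wfs_head *head;
 *	struct cds_wfs_node *snode, *sn;
 *
 *	head = cds_wfs_pop_all_blocking(&stack);
 *	cds_wfs_for_each_blocking_safe(head, snode, sn) {
 *		free(caa_container_of(snode, struct myobj, node));
 *	}
 */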

/*
 * cds_wfs_first: get first node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if popped stack is empty, top stack node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_first(struct cds_wfs_head *head)
{
	if (___cds_wfs_end(head))
		return NULL;
	return &head->node;
}

static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}

/*
 * cds_wfs_next_blocking: get next node of a popped stack.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 *
 * Used by for-like iteration macros in urcu/wfstack.h:
 * cds_wfs_for_each_blocking()
 * cds_wfs_for_each_blocking_safe()
 *
 * Returns NULL if reached end of popped stack, non-NULL next stack
 * node otherwise.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 1);
}

/*
 * cds_wfs_next_nonblocking: get next node of a popped stack.
 *
 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
 * needs to block.
 */
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	return ___cds_wfs_next(node, 0);
}
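
/*
 * Example (hedged sketch): nonblocking traversal of a popped stack.
 * Advancing may hit a node whose next pointer is not yet published by
 * a concurrent push:
 *
 *	snode = cds_wfs_next_nonblocking(snode);
 *	if (snode == CDS_WFS_WOULDBLOCK) {
 *		// a concurrent push has not yet linked the next node;
 *		// retry later instead of busy-waiting
 *	}
 */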

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_WFSTACK_H */