// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_POINTER_H
#define _URCU_POINTER_H

/*
 * Userspace RCU header. Operations on pointers.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#if defined(_LGPL_SOURCE) || defined(URCU_INLINE_SMALL_FUNCTIONS)

#include <urcu/static/pointer.h>

/*
 * rcu_dereference(ptr)
 *
 * Fetch an RCU-protected pointer. Typically used to copy the variable ptr to a
 * local variable.
 */
#define rcu_dereference		_rcu_dereference
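
/*
 * Illustrative sketch (not part of this header): a reader fetching an
 * RCU-protected pointer within a read-side critical section. The type
 * struct mystruct, the shared pointer rcu_ptr and use_data() are
 * hypothetical; rcu_read_lock()/rcu_read_unlock() come from the liburcu
 * flavor header in use (e.g. <urcu.h>), not from this file.
 *
 *	struct mystruct *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(rcu_ptr);
 *	if (p)
 *		use_data(p);
 *	rcu_read_unlock();
 *
 * When paired with rcu_assign_pointer()/rcu_set_pointer() on the update
 * side, rcu_dereference() ensures the reader observes the pointed-to data
 * as it was initialized before publication.
 */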

/*
 * type *rcu_cmpxchg_pointer(type **ptr, type *old, type *new)
 * type *rcu_xchg_pointer(type **ptr, type *new)
 * void rcu_set_pointer(type **ptr, type *new)
 *
 * RCU pointer updates.
 * @ptr: address of the pointer to modify
 * @old: old pointer value (expected)
 * @new: new pointer value
 *
 * return: old pointer value
 */
#define rcu_cmpxchg_pointer	_rcu_cmpxchg_pointer
#define rcu_xchg_pointer	_rcu_xchg_pointer
#define rcu_set_pointer		_rcu_set_pointer
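
/*
 * Illustrative sketch (not part of this header): replacing an RCU-protected
 * pointer from the update side. The names rcu_ptr, new_node and free_node()
 * are hypothetical; synchronize_rcu() is provided by the liburcu flavor in
 * use and must complete before the old object is reclaimed.
 *
 *	struct mynode *old;
 *
 *	old = rcu_xchg_pointer(&rcu_ptr, new_node);
 *	synchronize_rcu();
 *	if (old)
 *		free_node(old);
 *
 * synchronize_rcu() waits for all pre-existing readers before the old node
 * is freed. rcu_cmpxchg_pointer(&rcu_ptr, expected, new_node) would instead
 * perform the replacement only if the pointer still equals expected,
 * returning the value actually found.
 */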

#else /* !(defined(_LGPL_SOURCE) || defined(URCU_INLINE_SMALL_FUNCTIONS)) */

extern void *rcu_dereference_sym(void *p);
#define rcu_dereference(p)                                              \
	__extension__                                                   \
	({                                                              \
		__typeof__(p) _________p1 = URCU_FORCE_CAST(__typeof__(p), \
			rcu_dereference_sym(URCU_FORCE_CAST(void *, p))); \
		(_________p1);                                          \
	})

extern void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new);
#define rcu_cmpxchg_pointer(p, old, _new)                               \
	__extension__                                                   \
	({                                                              \
		__typeof__(*(p)) _________pold = (old);                 \
		__typeof__(*(p)) _________pnew = (_new);                \
		__typeof__(*(p)) _________p1 = URCU_FORCE_CAST(__typeof__(*(p)), \
			rcu_cmpxchg_pointer_sym(URCU_FORCE_CAST(void **, p), \
				_________pold,                          \
				_________pnew));                        \
		(_________p1);                                          \
	})

extern void *rcu_xchg_pointer_sym(void **p, void *v);
#define rcu_xchg_pointer(p, v)                                          \
	__extension__                                                   \
	({                                                              \
		__typeof__(*(p)) _________pv = (v);                     \
		__typeof__(*(p)) _________p1 = URCU_FORCE_CAST(__typeof__(*(p)), \
			rcu_xchg_pointer_sym(URCU_FORCE_CAST(void **, p), \
				_________pv));                          \
		(_________p1);                                          \
	})

/*
 * Note: rcu_set_pointer_sym returns @v because we don't want to break
 * the ABI. At the API level, rcu_set_pointer() now returns void. Use of
 * the return value is therefore deprecated, and will cause a build
 * error.
 */
extern void *rcu_set_pointer_sym(void **p, void *v);
#define rcu_set_pointer(p, v)                                           \
	do {                                                            \
		__typeof__(*(p)) _________pv = (v);                     \
		(void) rcu_set_pointer_sym(URCU_FORCE_CAST(void **, p), \
			_________pv);                                   \
	} while (0)
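
/*
 * As noted above, rcu_set_pointer() is meant to be used for its side effect
 * only; a sketch with hypothetical names:
 *
 *	rcu_set_pointer(&rcu_ptr, new_node);		correct, no value used
 *	old = rcu_set_pointer(&rcu_ptr, new_node);	rejected at build time
 *
 * Code that needs the previous pointer value should use rcu_xchg_pointer()
 * instead.
 */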

#endif /* !(defined(_LGPL_SOURCE) || defined(URCU_INLINE_SMALL_FUNCTIONS)) */

/*
 * void rcu_assign_pointer(type *ptr, type *new)
 *
 * Same as rcu_set_pointer, but takes the pointer to assign to rather than its
 * address as its first parameter. Provided for compatibility with the Linux
 * kernel RCU semantic.
 */
#define rcu_assign_pointer(p, v)	rcu_set_pointer((&p), (v))
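
/*
 * Illustrative sketch (hypothetical names): rcu_assign_pointer() mirrors the
 * Linux kernel interface by taking the pointer variable itself rather than
 * its address, so kernel-style code carries over unchanged:
 *
 *	node->data = 42;
 *	rcu_assign_pointer(rcu_ptr, node);
 *
 * which is equivalent to rcu_set_pointer(&rcu_ptr, node). A reader pairing
 * this with rcu_dereference(rcu_ptr) is guaranteed to observe the
 * initialized contents of *node.
 */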

#ifdef __cplusplus
}
#endif

#endif /* _URCU_POINTER_H */