#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired by libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>

#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

#define uatomic_set(addr, v)				\
do {							\
	ACCESS_ONCE(*(addr)) = (v);			\
} while (0)

#define uatomic_read(addr)	ACCESS_ONCE(*(addr))
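
/*
 * Usage sketch (hypothetical names, not part of this header): uatomic_set()
 * and uatomic_read() only force the compiler to perform the access exactly
 * once, without caching or refetching the value; they imply no memory
 * barrier and no atomic read-modify-write.
 *
 *	static int example_flag;
 *
 *	static void example_store(void)
 *	{
 *		uatomic_set(&example_flag, 1);
 *	}
 *
 *	static int example_load(void)
 *	{
 *		return uatomic_read(&example_flag);
 *	}
 */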

/*
 * Using an isync as the second barrier for exchange, to provide acquire
 * semantics. According to atomic_ops/sysdeps/gcc/powerpc.h, the documentation
 * is "fairly explicit that this also has acquire semantics".
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)						\
	((__typeof__(*(addr))) _uatomic_exchange((addr),		\
						(unsigned long)(v),	\
						sizeof(*(addr))))
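
/*
 * Usage sketch (hypothetical names, not part of this header): the isync
 * after the store-conditional gives uatomic_xchg() acquire semantics, which
 * is what the locking side of a test-and-set busy lock needs: accesses made
 * inside the critical section cannot be reordered before the exchange. The
 * release side would need its own barrier before clearing the word, which
 * is not shown here.
 *
 *	static unsigned int example_lock;	// 0 = free, 1 = held
 *
 *	static void example_spin_lock(void)
 *	{
 *		while (uatomic_xchg(&example_lock, 1) != 0)
 *			;			// spin while previously held
 *	}
 */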
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				\
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
						(unsigned long)(old),	\
						(unsigned long)(_new),	\
						sizeof(*(addr))))
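
/*
 * Usage sketch (hypothetical names, not part of this header): the usual
 * compare-and-swap retry loop. uatomic_cmpxchg() returns the value *addr
 * held before the operation, so the update succeeded if and only if that
 * return value equals the "old" value passed in.
 *
 *	static unsigned long example_max;
 *
 *	static void example_update_max(unsigned long v)
 *	{
 *		unsigned long cur = uatomic_read(&example_max);
 *
 *		while (cur < v) {
 *			unsigned long prev;
 *
 *			prev = uatomic_cmpxchg(&example_max, cur, v);
 *			if (prev == cur)
 *				break;		// we installed v
 *			cur = prev;		// lost a race, retry
 *		}
 *	}
 */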

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						(unsigned long)(v),	\
						sizeof(*(addr))))
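
/*
 * Usage sketch (hypothetical names, not part of this header): because
 * uatomic_add_return() hands back the post-addition value, it is a natural
 * fit for reference counting, where the thread that sees the count reach
 * zero performs the cleanup (free() here assumes <stdlib.h>).
 *
 *	struct example_obj {
 *		long refcount;		// starts at 1 for the creator
 *	};
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (uatomic_add_return(&obj->refcount, -1) == 0)
 *			free(obj);
 *	}
 */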

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

#define uatomic_add(addr, v)	(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)	(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)	uatomic_add((addr), 1)
#define uatomic_dec(addr)	uatomic_add((addr), -1)
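
/*
 * Usage sketch (hypothetical names, not part of this header): the helpers
 * above are thin wrappers, so uatomic_inc() still pays for the full
 * lwsync/isync-fenced add-return even though its result is discarded; a
 * plain shared event counter is the typical use.
 *
 *	static unsigned long example_events;
 *
 *	static void example_record_event(void)
 *	{
 *		uatomic_inc(&example_events);
 *	}
 */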

#endif /* _URCU_ARCH_UATOMIC_PPC_H */