/* urcu/uatomic_arch_s390.h — from urcu.git, commit b37e5f0fbf4a771aec326e04e3c6abc468f4a4ff */
1 #ifndef _URCU_ARCH_ATOMIC_S390_H
2 #define _URCU_ARCH_ATOMIC_S390_H
3
4 /*
5 * Atomic exchange operations for the S390 architecture. Based on information
6 * taken from the Principles of Operation Appendix A "Conditional Swapping
7 * Instructions (CS, CDS)".
8 *
9 * Copyright (c) 2009 Novell, Inc.
10 * Author: Jan Blunck <jblunck@suse.de>
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this software and associated documentation files (the "Software"), to
14 * deal in the Software without restriction, including without limitation the
15 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
16 * sell copies of the Software, and to permit persons to whom the Software is
17 * furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
30
/*
 * Fallback for compilers that do not predefine __SIZEOF_LONG__
 * (it is a GCC built-in macro): on s390x a long is 8 bytes,
 * on 31/32-bit s390 it is 4 bytes.
 */
#ifndef __SIZEOF_LONG__
#ifdef __s390x__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

/* Width of a long in bits; selects the 64-bit code paths below. */
#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif
42
/*
 * uatomic_set: plain store of v into *addr through ACCESS_ONCE
 * (defined elsewhere in liburcu; presumably a volatile cast that keeps
 * the compiler from tearing or caching the access — confirm there).
 * On s390, aligned loads/stores are atomic, so no instruction barrier
 * is emitted here.
 */
#define uatomic_set(addr, v)			\
do {						\
	ACCESS_ONCE(*(addr)) = (v);		\
} while (0)

/* uatomic_read: single read of *addr through ACCESS_ONCE (see above). */
#define uatomic_read(addr)	ACCESS_ONCE(*(addr))
49
/*
 * Atomically replace the 32-bit value at *addr with val and return the
 * previous contents, built from a COMPARE AND SWAP (CS) retry loop.
 *
 * %0 (result) is deliberately left uninitialized: the first CS compares
 * this indeterminate value against *addr and, on mismatch, CS itself
 * loads the current memory contents into %0, so the retry (brc 4 tests
 * condition code 1, i.e. compare failed) swaps with an up-to-date
 * expected value.  The loop exits once CS succeeds.
 */
static inline __attribute__((always_inline))
unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
{
	unsigned int result;

	__asm__ __volatile__(
		"0: cs %0,%2,%1\n"
		" brc 4,0b\n"
		: "=&r"(result), "=m" (*addr)
		: "r"(val), "m" (*addr)
		: "memory", "cc");

	return result;
}
64
#if (BITS_PER_LONG == 64)

/*
 * 64-bit variant of uatomic_exchange_32 using CSG (COMPARE AND SWAP
 * GRANDE).  Same retry structure: an indeterminate initial compare
 * value in %0 is refreshed by CSG on failure, and brc 4 loops until
 * the swap takes effect.  Returns the previous contents of *addr.
 * Only compiled on s390x, where long is 64 bits.
 */
static inline __attribute__((always_inline))
unsigned long uatomic_exchange_64(volatile unsigned long *addr,
				  unsigned long val)
{
	unsigned long result;

	__asm__ __volatile__(
		"0: csg %0,%2,%1\n"
		" brc 4,0b\n"
		: "=&r"(result), "=m" (*addr)
		: "r"(val), "m" (*addr)
		: "memory", "cc");

	return result;
}

#endif
84
/*
 * Size-dispatched atomic exchange backing the uatomic_xchg() macro.
 * Only operand sizes the architecture supports are accepted: 4 bytes
 * always, 8 bytes on 64-bit builds.  Any other size executes an
 * invalid opcode so the misuse traps instead of failing silently.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
{
	if (len == 4)
		return uatomic_exchange_32(addr, val);
#if (BITS_PER_LONG == 64)
	if (len == 8)
		return uatomic_exchange_64(addr, val);
#endif
	/* Unsupported operand size: force an illegal-instruction trap. */
	__asm__ __volatile__(".long 0xd00d00");

	return 0;
}
101
/*
 * uatomic_xchg: type-generic atomic exchange.  Dispatches on the
 * pointed-to object's size and casts the result back to that type.
 */
#define uatomic_xchg(addr, v)						    \
	(__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr)))
106
/*
 * Atomically add val to the 32-bit value at *addr; no return value.
 * Pattern: L loads the current value into old (%0), LR/AR compute
 * old + val into the scratch register result (%2), and CS stores the
 * sum only if *addr still equals old.  On failure CS reloads %0 with
 * the current memory contents and brc 4 (condition code 1) retries.
 */
static inline __attribute__((always_inline))
void uatomic_add_32(volatile unsigned int *addr, unsigned int val)
{
	unsigned int result, old;

	__asm__ __volatile__(
		" l %0, %1\n"
		"0: lr %2, %0\n"
		" ar %2, %3\n"
		" cs %0,%2,%1\n"
		" brc 4,0b\n"
		: "=&r"(old), "+m" (*addr),
		  "=&r"(result)
		: "r"(val)
		: "memory", "cc");
}
123
#if (BITS_PER_LONG == 64)

/*
 * 64-bit variant of uatomic_add_32 using the G-form instructions
 * (LG/LGR/AGR/CSG).  Same load + compare-and-swap retry loop; no
 * return value.  Only compiled on s390x.
 */
static inline __attribute__((always_inline))
void uatomic_add_64(volatile unsigned long *addr, unsigned long val)
{
	unsigned long result, old;

	__asm__ __volatile__(
		" lg %0, %1\n"
		"0: lgr %2, %0\n"
		" agr %2, %3\n"
		" csg %0,%2,%1\n"
		" brc 4,0b\n"
		: "=&r"(old), "+m" (*addr),
		  "=&r"(result)
		: "r"(val)
		: "memory", "cc");
}

#endif
144
/*
 * Size-dispatched atomic add backing the uatomic_add() macro; returns
 * nothing.  Accepts 4-byte operands always and 8-byte operands on
 * 64-bit builds; any other size traps via an invalid opcode so the
 * misuse cannot pass silently.
 */
static inline __attribute__((always_inline))
void _uatomic_add(void *addr, unsigned long val, int len)
{
	if (len == 4) {
		uatomic_add_32(addr, val);
		return;
	}
#if (BITS_PER_LONG == 64)
	if (len == 8) {
		uatomic_add_64(addr, val);
		return;
	}
#endif
	/* Unsupported operand size: force an illegal-instruction trap. */
	__asm__ __volatile__(".long 0xd00d00");
}
163
/* uatomic_add: type-generic atomic add, dispatched on sizeof(*addr). */
#define uatomic_add(addr, val)						\
	_uatomic_add((addr), (unsigned long)(val), sizeof(*(addr)))
166
/*
 * Single-shot 32-bit compare-and-swap: if *addr == old, store new into
 * *addr.  Either way, CS leaves the value that was found in memory in
 * %0 (old is a "+r" operand), which is returned — so callers detect
 * success by comparing the return value against the old they passed.
 * No retry loop: one CS attempt, as cmpxchg semantics require.
 */
static inline __attribute__((always_inline))
unsigned int uatomic_cmpxchg_32(volatile unsigned int *addr, unsigned int old,
				unsigned int new)
{
	__asm__ __volatile__(
		" cs %0,%2,%1\n"
		: "+r"(old), "+m"(*addr)
		: "r"(new)
		: "memory", "cc");

	return old;
}
179
#if (BITS_PER_LONG == 64)

/*
 * 64-bit variant of uatomic_cmpxchg_32 using CSG.  One attempt, no
 * retry; returns the value previously contained in *addr (== old on
 * success).  Only compiled on s390x.
 */
static inline __attribute__((always_inline))
unsigned long uatomic_cmpxchg_64(volatile unsigned long *addr,
				 unsigned long old, unsigned long new)
{
	__asm__ __volatile__(
		" csg %0,%2,%1\n"
		: "+r"(old), "+m"(*addr)
		: "r"(new)
		: "memory", "cc");

	return old;
}

#endif
196
/*
 * Size-dispatched compare-and-swap backing the uatomic_cmpxchg()
 * macro: if *addr == old, atomically store new; returns the value
 * previously contained in *addr (== old on success).  Unsupported
 * operand sizes trap via an invalid opcode.
 *
 * Must be static inline like every other helper in this header: a
 * non-static function definition here would be emitted in every
 * translation unit that includes this file and break linking with
 * multiple-definition errors.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long new, int len)
{
	switch (len) {
	case 4:
		return uatomic_cmpxchg_32(addr, old, new);
#if (BITS_PER_LONG == 64)
	case 8:
		return uatomic_cmpxchg_64(addr, old, new);
#endif
	default:
		/* Unsupported operand size: force an illegal-instruction trap. */
		__asm__ __volatile__(".long 0xd00d00");
	}

	return 0;
}
213
/*
 * uatomic_cmpxchg: type-generic compare-and-swap, dispatched on
 * sizeof(*addr); the returned previous value is cast back to the
 * pointed-to type.
 */
#define uatomic_cmpxchg(addr, old, new)				\
	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),		\
					       (unsigned long)(old),\
					       (unsigned long)(new),\
					       sizeof(*(addr)))

/* s390 always provides compare-and-swap, so CAS is unconditionally available. */
#define URCU_CAS_AVAIL()	1
221
222 #endif /* _URCU_ARCH_ATOMIC_S390_H */