Fix: x86 and s390: uatomic __hp() macro C++ support
[userspace-rcu.git] / include/urcu/uatomic/x86.h
#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

/*
 * The __hp() macro casts the void pointer @x to a pointer to a structure
 * containing an array of char of the specified size. This allows passing the
 * @addr arguments of the following inline functions as "m" and "+m" operands
 * to the assembly. The @size parameter should be a constant to support
 * compilers such as clang which do not support VLA. Create typedefs because
 * C++ does not allow types to be defined in casts.
 */

typedef struct { char v[1]; } __hp_1;
typedef struct { char v[2]; } __hp_2;
typedef struct { char v[4]; } __hp_4;
typedef struct { char v[8]; } __hp_8;

#define __hp(size, x)	((__hp_##size *)(x))
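
/*
 * Example (illustrative sketch, not part of the original header): __hp() only
 * affects how the compiler sizes an asm memory operand, it emits no code by
 * itself.  Given some "unsigned int *addr", the cast below tells gcc/clang
 * that exactly 4 bytes at addr are read and written by the asm:
 *
 *	__asm__ __volatile__(
 *		"lock; incl %0"
 *		: "+m"(*__hp(4, addr))	// expands to *((__hp_4 *)(addr))
 *		:
 *		: "memory");
 */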

#define _uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(1, addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(2, addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(4, addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(8, addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr),		      \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))
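
/*
 * Usage sketch (illustrative, not part of the original header): the cmpxchg
 * primitive returns the value found in *addr before the operation, so a
 * caller typically loops until that value matches the one its update was
 * based on.  A hypothetical lock-free fetch-and-increment:
 *
 *	static inline unsigned long example_fetch_inc(unsigned long *ctr)
 *	{
 *		unsigned long old, seen;
 *
 *		do {
 *			old = uatomic_read(ctr);
 *			seen = uatomic_cmpxchg(ctr, old, old + 1);
 *		} while (seen != old);
 *		return old;
 *	}
 */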

/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(1, addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(2, addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(4, addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(8, addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))
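
/*
 * Usage sketch (illustrative, not part of the original header): xchg
 * unconditionally stores the new value and returns the previous one, which
 * is handy for atomically stealing a whole list from concurrent producers
 * (struct node is hypothetical):
 *
 *	static inline struct node *example_steal_list(struct node **head)
 *	{
 *		return uatomic_xchg(head, NULL);	// detach everything
 *	}
 */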

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(1, addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(2, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(4, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(8, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
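
/*
 * Usage sketch (illustrative, not part of the original header):
 * uatomic_add_return() returns the value of the target *after* the addition,
 * e.g. to hand out unique, dense ticket numbers:
 *
 *	static inline unsigned long example_next_ticket(unsigned long *ctr)
 *	{
 *		return uatomic_add_return(ctr, 1);	// 1, 2, 3, ...
 *	}
 */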

/* uatomic_and */

static inline __attribute__((always_inline))
void __uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; andb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; andw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; andl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; andq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_and(addr, v)						    \
	(__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))

/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; orb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; orw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; orl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; orq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_or(addr, v)						    \
	(__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
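
/*
 * Usage sketch (illustrative, not part of the original header): uatomic_and()
 * and uatomic_or() atomically clear and set bits and return nothing.  The
 * flag value and helpers below are hypothetical:
 *
 *	#define EXAMPLE_FLAG_DIRTY	0x1UL
 *
 *	static inline void example_mark_dirty(unsigned long *flags)
 *	{
 *		uatomic_or(flags, EXAMPLE_FLAG_DIRTY);
 *	}
 *
 *	static inline void example_clear_dirty(unsigned long *flags)
 *	{
 *		uatomic_and(flags, ~EXAMPLE_FLAG_DIRTY);
 *	}
 */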

/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(1, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(2, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(4, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(8, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						    \
	(__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))


/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(1, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(2, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(4, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(8, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(1, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(2, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(4, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(8, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))

#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						    \
	((caa_likely(__rcu_cas_avail > 0))				    \
	? (_uatomic_##insn)						    \
		: ((caa_unlikely(__rcu_cas_avail < 0)			    \
			? ((__rcu_cas_init() > 0)			    \
				? (_uatomic_##insn)			    \
				: (compat_uatomic_##insn))		    \
			: (compat_uatomic_##insn))))
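
/*
 * Illustrative note (not part of the original header): on a 32-bit build with
 * CONFIG_RCU_COMPAT_ARCH, a call such as uatomic_inc(&x) goes through
 * UATOMIC_COMPAT(inc(&x)) and expands roughly to:
 *
 *	(caa_likely(__rcu_cas_avail > 0)
 *		? _uatomic_inc(&x)			// cmpxchg known present
 *		: (caa_unlikely(__rcu_cas_avail < 0)
 *			? (__rcu_cas_init() > 0
 *				? _uatomic_inc(&x)	// detected on first use
 *				: compat_uatomic_inc(&x))
 *			: compat_uatomic_inc(&x)))	// i386 fallback path
 *
 * so the detection cost is paid once and later calls take the likely branch.
 */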

/*
 * We leave the return value so we don't break the ABI, but remove the
 * return value from the API.
 */
extern unsigned long _compat_uatomic_set(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					    \
	((void) _compat_uatomic_set((addr),				    \
				caa_cast_long_keep_sign(_new),		    \
				sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					    \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		    \
					caa_cast_long_keep_sign(_new),	    \
					sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				    \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		    \
					caa_cast_long_keep_sign(old),	    \
					caa_cast_long_keep_sign(_new),	    \
					sizeof(*(addr))))

extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
#define compat_uatomic_and(addr, v)					    \
	(_compat_uatomic_and((addr),					    \
			caa_cast_long_keep_sign(v),			    \
			sizeof(*(addr))))

extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
#define compat_uatomic_or(addr, v)					    \
	(_compat_uatomic_or((addr),					    \
			caa_cast_long_keep_sign(v),			    \
			sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)				    \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))

#define compat_uatomic_add(addr, v)					    \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)					    \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					    \
	(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))

#define uatomic_and(addr, v)			\
		UATOMIC_COMPAT(and(addr, v))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#define uatomic_or(addr, v)			\
		UATOMIC_COMPAT(or(addr, v))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define cmm_smp_mb__before_uatomic_inc()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_inc()		cmm_barrier()

#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))
#define cmm_smp_mb__before_uatomic_dec()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec()		cmm_barrier()
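
/*
 * Usage sketch (illustrative, not part of the original header): portable code
 * pairs the uatomic operations with cmm_smp_mb__before/after_uatomic_*() when
 * it needs full ordering; on x86 the lock-prefixed instructions already imply
 * a full barrier, so these reduce to compiler barriers here.  Hypothetical
 * publish pattern:
 *
 *	static inline void example_publish(unsigned long *data,
 *					   unsigned long *generation)
 *	{
 *		uatomic_set(data, 42);
 *		cmm_smp_mb__before_uatomic_inc();	// order store before inc
 *		uatomic_inc(generation);
 *	}
 */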

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */