// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2010 Paolo Bonzini
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC
#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H

/*
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <stdint.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#define uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))

extern void abort(void);
#define uatomic_load_store_return_op(op, addr, v, mo)		\
	case CMM_SEQ_CST_FENCE:					\
		__typeof__((*addr)) _value = op(addr, v);	\
		cmm_smp_read_barrier_depends();			\
	case CMM_SEQ_CST_FENCE:
#define uatomic_load_store_op(op, addr, v, mo)			\
	case CMM_SEQ_CST_FENCE:					\
		cmm_smp_read_barrier_depends();			\
	case CMM_SEQ_CST_FENCE:
#define uatomic_store(addr, v, mo)				\
	case CMM_SEQ_CST_FENCE:					\
		uatomic_set(addr, v);				\
	case CMM_SEQ_CST_FENCE:
#define uatomic_and_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_and, addr, v, mo)

#define uatomic_or_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_or, addr, v, mo)

#define uatomic_add_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_add, addr, v, mo)

#define uatomic_sub_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_sub, addr, v, mo)

#define uatomic_inc_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, 1, mo)

#define uatomic_dec_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, -1, mo)
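/*
 * Usage sketch (illustrative only; assumes the public <urcu/uatomic.h>
 * wrapper and the cmm_memorder constants such as CMM_RELAXED): the *_mo
 * variants take an explicit memory-order argument.
 *
 *	static unsigned long hits;
 *
 *	static void count_hit(void)
 *	{
 *		// Plain statistics counter: relaxed ordering is enough.
 *		uatomic_inc_mo(&hits, CMM_RELAXED);
 *	}
 */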
/*
 * NOTE: We cannot simply do switch (_value == (old) ? mos : mof), otherwise
 * the compiler emits a -Wduplicated-cond warning.
 */
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)			\
	case CMM_SEQ_CST_FENCE:						\
		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old,	\
		if (_value == (old)) {					\
			cmm_smp_read_barrier_depends();			\
	case CMM_SEQ_CST_FENCE:						\
		cmm_smp_read_barrier_depends();				\
	case CMM_SEQ_CST_FENCE:
#define uatomic_xchg_mo(addr, v, mo)				\
	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)

#define uatomic_add_return_mo(addr, v, mo)			\
	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)

#define uatomic_sub_return_mo(addr, v, mo)			\
	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
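/*
 * Usage sketch (illustrative only; assumes the cmm_memorder constants from
 * <urcu/uatomic.h>): the *_return_mo and xchg_mo variants apply the
 * requested ordering and hand back a value.
 *
 *	static unsigned long next_id;
 *
 *	static unsigned long allocate_id(void)
 *	{
 *		// Returns the post-increment value, fully ordered.
 *		return uatomic_add_return_mo(&next_id, 1, CMM_SEQ_CST);
 *	}
 */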
#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
#define uatomic_load(addr, mo)						\
	case CMM_SEQ_CST_FENCE:						\
		__typeof__(*(addr)) _rcu_value = uatomic_read(addr);	\
		cmm_smp_read_barrier_depends();				\
	case CMM_SEQ_CST_FENCE:
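/*
 * Usage sketch (illustrative only; variable names are made up): a classic
 * message-passing pattern with uatomic_store()/uatomic_load() and explicit
 * release/acquire ordering.
 *
 *	static int payload;
 *	static int ready;
 *
 *	static void producer(void)
 *	{
 *		payload = 42;
 *		// Release: the payload write is visible before ready == 1.
 *		uatomic_store(&ready, 1, CMM_RELEASE);
 *	}
 *
 *	static int consumer(void)
 *	{
 *		// Acquire: pairs with the release store above.
 *		if (uatomic_load(&ready, CMM_ACQUIRE))
 *			return payload;
 *		return -1;
 *	}
 */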
#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))
void _uatomic_link_error(void)
{
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
}
#else
static inline __attribute__((always_inline, __noreturn__))
void _uatomic_link_error(void)
{
	abort();
}
#endif

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
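/*
 * Behaviour sketch (illustrative only): operations on a type whose size
 * matches none of the switch (len) cases below end up calling
 * _uatomic_link_error(). In optimized builds that symbol is declared but
 * never defined, so the mistake surfaces at link time; without
 * optimizations, the inline fallback above traps (or aborts) at run time.
 *
 *	// e.g. on a 32-bit target (CAA_BITS_PER_LONG == 32):
 *	static uint64_t big;
 *	// uatomic_add(&big, 1);	// no "case 8" -> unresolved
 *	//				// _uatomic_link_error at link time
 */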
#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1((uint8_t *) addr, old,
				_new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2((uint16_t *) addr, old,
				_new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4((uint32_t *) addr, old,
				_new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8((uint64_t *) addr, old,
				_new);
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),			      \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))
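/*
 * Usage sketch (illustrative only): uatomic_cmpxchg() returns the value
 * that was in memory before the operation, so a retry loop compares it
 * against the expected old value.
 *
 *	static unsigned long target;
 *
 *	static void saturating_inc(unsigned long max)
 *	{
 *		unsigned long old, oldt;
 *
 *		oldt = uatomic_read(&target);
 *		do {
 *			old = oldt;
 *			if (old >= max)
 *				return;
 *			oldt = uatomic_cmpxchg(&target, old, old + 1);
 *		} while (oldt != old);
 *	}
 */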
#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()
#endif
#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()
#endif
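/*
 * Usage sketch (illustrative only; the flag values are made up):
 * uatomic_or() and uatomic_and() are convenient for lock-free flag words.
 *
 *	#define FLAG_DIRTY	0x1UL
 *	#define FLAG_CLOSING	0x2UL
 *
 *	static unsigned long flags;
 *
 *	static void mark_dirty(void)
 *	{
 *		uatomic_or(&flags, FLAG_DIRTY);
 *	}
 *
 *	static void clear_dirty(void)
 *	{
 *		uatomic_and(&flags, ~FLAG_DIRTY);
 *	}
 */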
/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1((uint8_t *) addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2((uint16_t *) addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4((uint32_t *) addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8((uint64_t *) addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					      \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */
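/*
 * Usage sketch (illustrative only): uatomic_add_return() yields the value
 * *after* the addition (add-and-fetch), which makes it convenient for
 * reference counting.
 *
 *	static long refcount = 1;
 *
 *	static void put_ref(void (*release)(void))
 *	{
 *		if (uatomic_sub_return(&refcount, 1) == 0)
 *			release();
 *	}
 */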
#ifndef uatomic_xchg
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old;

		do {
			old = uatomic_read((uint8_t *) addr);
		} while (!__sync_bool_compare_and_swap_1((uint8_t *) addr,
				old, val));
		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old;

		do {
			old = uatomic_read((uint16_t *) addr);
		} while (!__sync_bool_compare_and_swap_2((uint16_t *) addr,
				old, val));
		return old;
	}
#endif
	case 4:
	{
		uint32_t old;

		do {
			old = uatomic_read((uint32_t *) addr);
		} while (!__sync_bool_compare_and_swap_4((uint32_t *) addr,
				old, val));
		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old;

		do {
			old = uatomic_read((uint64_t *) addr);
		} while (!__sync_bool_compare_and_swap_8((uint64_t *) addr,
				old, val));
		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */
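/*
 * Usage sketch (illustrative only): uatomic_xchg() stores the new value and
 * returns the previous one in a single atomic step.
 *
 *	static unsigned long state;
 *
 *	static unsigned long reset_state(void)
 *	{
 *		// Returns the state that was current before the reset.
 *		return uatomic_xchg(&state, 0);
 *	}
 */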
#else /* #ifndef uatomic_cmpxchg */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()
#endif /* #ifndef uatomic_and */
#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()
#endif /* #ifndef uatomic_or */
#ifndef uatomic_add_return
/* uatomic_add_return */
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
						old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
						old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
						old, old + val);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
						old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					      \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */
#ifndef uatomic_xchg
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
						old, val);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
						old, val);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
						old, val);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
						old, val);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */
#endif /* #else #ifndef uatomic_cmpxchg */
/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

#define uatomic_sub_return(addr, v)	\
	uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
#define uatomic_sub(addr, v)		\
	uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
#define cmm_smp_mb__before_uatomic_sub()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_sub()		cmm_smp_mb__after_uatomic_add()

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define cmm_smp_mb__before_uatomic_inc()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_inc()		cmm_smp_mb__after_uatomic_add()

#define uatomic_dec(addr)		uatomic_add((addr), -1)
#define cmm_smp_mb__before_uatomic_dec()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_dec()		cmm_smp_mb__after_uatomic_add()
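/*
 * Usage sketch (illustrative only): the cmm_smp_mb__before/after_uatomic_*()
 * macros let callers request full ordering around operations that, on some
 * architectures, do not imply a memory barrier by themselves.
 *
 *	static unsigned long pending_work;
 *
 *	static void queue_work(void)
 *	{
 *		// ... publish the work item ...
 *		cmm_smp_mb__before_uatomic_inc();
 *		uatomic_inc(&pending_work);
 *	}
 */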
#endif /* _URCU_UATOMIC_GENERIC_H */