X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=urcu%2Fuatomic_generic.h;h=cef58f382180ba1061b1bb2e37c7cc87c0b7f354;hb=bf33aaea06cbf8257cc49c38abca6d26c0c31c78;hp=6b4ef9e8ddb8e0ab1eabca7e1541e421ba87adbe;hpb=06f22bdbb0c4c4d5db42a2e2dc35818aa61415be;p=urcu.git

diff --git a/urcu/uatomic_generic.h b/urcu/uatomic_generic.h
index 6b4ef9e..cef58f3 100644
--- a/urcu/uatomic_generic.h
+++ b/urcu/uatomic_generic.h
@@ -29,11 +29,11 @@ extern "C" {
 #endif
 
 #ifndef uatomic_set
-#define uatomic_set(addr, v)	CAA_STORE_SHARED(*(addr), (v))
+#define uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))
 #endif
 
 #ifndef uatomic_read
-#define uatomic_read(addr)	CAA_LOAD_SHARED(*(addr))
+#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
 #endif
 
 #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
@@ -71,7 +71,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 #endif
 	case 4:
 		return __sync_val_compare_and_swap_4(addr, old, _new);
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 		return __sync_val_compare_and_swap_8(addr, old, _new);
 #endif
@@ -87,6 +87,78 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 						sizeof(*(addr))))
 
 
+/* uatomic_and */
+
+#ifndef uatomic_and
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val,
+		  int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		__sync_and_and_fetch_1(addr, val);
+		return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		__sync_and_and_fetch_2(addr, val);
+		return;
+#endif
+	case 4:
+		__sync_and_and_fetch_4(addr, val);
+		return;
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		__sync_and_and_fetch_8(addr, val);
+		return;
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)		\
+	(_uatomic_and((addr),		\
+		(unsigned long)(v),	\
+		sizeof(*(addr))))
+#endif
+
+/* uatomic_or */
+
+#ifndef uatomic_or
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val,
+		 int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		__sync_or_and_fetch_1(addr, val);
+		return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		__sync_or_and_fetch_2(addr, val);
+		return;
+#endif
+	case 4:
+		__sync_or_and_fetch_4(addr, val);
+		return;
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		__sync_or_and_fetch_8(addr, val);
+		return;
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_or(addr, v)		\
+	(_uatomic_or((addr),		\
+		(unsigned long)(v),	\
+		sizeof(*(addr))))
+#endif
+
 /* uatomic_add_return */
 
 #ifndef uatomic_add_return
@@ -105,7 +177,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 #endif
 	case 4:
 		return __sync_add_and_fetch_4(addr, val);
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 		return __sync_add_and_fetch_8(addr, val);
 #endif
@@ -162,7 +234,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 
 		return old;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long old;
@@ -186,6 +258,140 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 
 #else /* #ifndef uatomic_cmpxchg */
 
+#ifndef uatomic_and
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old, oldt;
+
+		oldt = uatomic_read((unsigned char *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
+		} while (oldt != old);
+		return;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old, oldt;
+
+		oldt = uatomic_read((unsigned short *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
+		} while (oldt != old);
+		return;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
+		} while (oldt != old);
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
+		} while (oldt != old);
+		return;
+	}
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)		\
+	(_uatomic_and((addr),		\
+		(unsigned long)(v),	\
+		sizeof(*(addr))))
+#endif /* #ifndef uatomic_and */
+
+#ifndef uatomic_or
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old, oldt;
+
+		oldt = uatomic_read((unsigned char *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
+		} while (oldt != old);
+		return;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old, oldt;
+
+		oldt = uatomic_read((unsigned short *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
+		} while (oldt != old);
+		return;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
+		} while (oldt != old);
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
+		} while (oldt != old);
+		return;
+	}
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_or(addr, v)		\
+	(_uatomic_or((addr),		\
+		(unsigned long)(v),	\
+		sizeof(*(addr))))
+#endif /* #ifndef uatomic_or */
+
 #ifndef uatomic_add_return
 /* uatomic_add_return */
 
@@ -233,7 +439,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
 
 		return old + val;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long old, oldt;
@@ -305,7 +511,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 
 		return old;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long old, oldt;
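
A minimal usage sketch of the uatomic_and()/uatomic_or() macros introduced above, together with uatomic_set()/uatomic_read(): atomically setting and clearing bits in a shared flag word. It assumes a liburcu build that exposes these macros through <urcu/uatomic.h>; the flag constants and the shared variable are invented for the illustration.

#include <stdio.h>
#include <urcu/uatomic.h>

#define FLAG_READY	0x01UL
#define FLAG_BUSY	0x02UL

static unsigned long flags;

int main(void)
{
	/* Plain atomic store (CMM_STORE_SHARED under the hood). */
	uatomic_set(&flags, 0UL);

	/* Atomically set both bits, then atomically clear FLAG_BUSY. */
	uatomic_or(&flags, FLAG_READY | FLAG_BUSY);
	uatomic_and(&flags, ~FLAG_BUSY);

	/* Prints "flags = 0x1": FLAG_READY still set, FLAG_BUSY cleared. */
	printf("flags = 0x%lx\n", uatomic_read(&flags));
	return 0;
}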