X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=urcu%2Fuatomic_arch_x86.h;h=9fedee65fac00d72a95545b2bcd38c909a786d30;hb=bf33aaea06cbf8257cc49c38abca6d26c0c31c78;hp=4e09afd7195aaa835a1478df76a7e9b0e508ff16;hpb=06f22bdbb0c4c4d5db42a2e2dc35818aa61415be;p=urcu.git

diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index 4e09afd..9fedee6 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -39,7 +39,7 @@ struct __uatomic_dummy {
 };
 #define __hp(x)        ((struct __uatomic_dummy *)(x))
 
-#define _uatomic_set(addr, v)  CAA_STORE_SHARED(*(addr), (v))
+#define _uatomic_set(addr, v)  CMM_STORE_SHARED(*(addr), (v))
 
 /* cmpxchg */
 
@@ -81,7 +81,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
                        : "memory");
                return result;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = old;
@@ -143,7 +143,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
                        : "memory");
                return result;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;
@@ -206,7 +206,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
                        : "memory");
                return result + (unsigned int)val;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result = val;
@@ -231,6 +231,114 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
                                            (unsigned long)(v),         \
                                            sizeof(*(addr))))
 
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; andb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; andw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; andl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; andq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_and(addr, v)                                          \
+       (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; orb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; orw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; orl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; orq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_or(addr, v)                                           \
+       (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
+
 /* uatomic_add */
 
 static inline __attribute__((always_inline))
@@ -264,7 +372,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
                        : "memory");
                return;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
@@ -319,7 +427,7 @@ void __uatomic_inc(void *addr, int len)
                        : "memory");
                return;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
@@ -372,7 +480,7 @@ void __uatomic_dec(void *addr, int len)
                        : "memory");
                return;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                __asm__ __volatile__(
@@ -392,7 +500,7 @@ void __uatomic_dec(void *addr, int len)
 
 #define _uatomic_dec(addr)     (__uatomic_dec((addr), sizeof(*(addr))))
 
-#if ((BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
 extern int __rcu_cas_avail;
 extern int __rcu_cas_init(void);
 
@@ -428,8 +536,22 @@ extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
                                                (unsigned long)(_new),         \
                                                sizeof(*(addr))))
 
-extern unsigned long _compat_uatomic_xchg(void *addr,
-                                         unsigned long _new, int len);
+extern unsigned long _compat_uatomic_and(void *addr,
+                                        unsigned long _new, int len);
+#define compat_uatomic_and(addr, v)                                    \
+       ((__typeof__(*(addr))) _compat_uatomic_and((addr),              \
+                                               (unsigned long)(v),     \
+                                               sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_or(void *addr,
+                                       unsigned long _new, int len);
+#define compat_uatomic_or(addr, v)                                     \
+       ((__typeof__(*(addr))) _compat_uatomic_or((addr),               \
+                                              (unsigned long)(v),      \
+                                              sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_add_return(void *addr,
+                                               unsigned long _new, int len);
 #define compat_uatomic_add_return(addr, v)                             \
        ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),       \
                                                (unsigned long)(v),     \
@@ -454,6 +576,10 @@ extern unsigned long _compat_uatomic_xchg(void *addr,
                UATOMIC_COMPAT(cmpxchg(addr, old, _new))
 #define uatomic_xchg(addr, v)                  \
                UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v)           \
+               UATOMIC_COMPAT(and(addr, v))
+#define uatomic_or(addr, v)            \
+               UATOMIC_COMPAT(or(addr, v))
 #define uatomic_add_return(addr, v)            \
                UATOMIC_COMPAT(add_return(addr, v))
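
Below the patch, a minimal usage sketch (not part of urcu.git) showing how the uatomic_and()/uatomic_or() primitives added here are meant to be called through the generic <urcu/uatomic.h> wrapper rather than this arch header directly; the flag names and the toy program are hypothetical, for illustration only.

/* usage-sketch.c -- hypothetical example; assumes liburcu headers are installed */
#include <assert.h>
#include <urcu/uatomic.h>

#define FLAG_READY     (1UL << 0)
#define FLAG_BUSY      (1UL << 1)

static unsigned long flags;

int main(void)
{
        uatomic_set(&flags, 0UL);
        /* atomically set both bits; on x86 this goes through the "lock; or" paths above */
        uatomic_or(&flags, FLAG_READY | FLAG_BUSY);
        /* atomically clear the busy bit; on x86 this goes through the "lock; and" paths above */
        uatomic_and(&flags, ~FLAG_BUSY);
        assert(uatomic_read(&flags) == FLAG_READY);
        return 0;
}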