diff --git a/tests/api_x86.h b/tests/api_x86.h
index 527221c..9147249 100644
--- a/tests/api_x86.h
+++ b/tests/api_x86.h
@@ -78,246 +78,7 @@
 /* #define CAA_CACHE_LINE_SIZE 64 */
 #define ____cacheline_internodealigned_in_smp \
-	__attribute__((__aligned__(1 << 6)))
-
-#define LOCK_PREFIX "lock ; "
-
-#if 0 /* duplicate with arch_atomic.h */
-
-/*
- * Atomic data structure, initialization, and access.
- */
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		((v)->counter)
-#define atomic_set(v, i)	(((v)->counter) = (i))
-
-/*
- * Atomic operations.
- */
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		:"+m" (v->counter)
-		:"ir" (i));
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
-		:"+m" (v->counter)
-		:"ir" (i));
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		:"ir" (i) : "memory");
-	return c;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
-		:"+m" (v->counter));
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
-{
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
-		:"+m" (v->counter));
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		: : "memory");
-	return c != 0;
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		: : "memory");
-	return c != 0;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"+m" (v->counter), "=qm" (c)
-		:"ir" (i) : "memory");
-	return c;
-}
-
-/**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
-	int __i;
-
-	__i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1;"
-		:"=r"(i)
-		:"m"(v->counter), "0"(i));
-	return i + __i;
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
-	return atomic_add_return(-i,v);
-}
-
-static inline unsigned int
-cmpxchg(volatile long *ptr, long oldval, long newval)
-{
-	unsigned long retval;
-
-	asm("# cmpxchg\n"
-	    "lock; cmpxchgl %4,(%2)\n"
-	    "# end atomic_cmpxchg4"
-	    : "=a" (retval), "=m" (*ptr)
-	    : "r" (ptr), "0" (oldval), "r" (newval), "m" (*ptr)
-	    : "cc");
-	return (retval);
-}
-
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
-		c = old;					\
-	}							\
-	c != (u);						\
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc_return(v)	(atomic_add_return(1,v))
-#define atomic_dec_return(v)	(atomic_sub_return(1,v))
-
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
-
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" (mask),"m" (*(addr)) : "memory")
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec()	cmm_barrier()
-#define smp_mb__after_atomic_dec()	cmm_barrier()
-#define smp_mb__before_atomic_inc()	cmm_barrier()
-#define smp_mb__after_atomic_inc()	cmm_barrier()
-
-#endif //0
+	__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
 
 /*
  * api_pthreads.h: API mapping to pthreads environment.
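
Two things happen in this hunk: the atomic_t API that duplicated arch_atomic.h, already compiled out under #if 0, is deleted outright, and the alignment attribute stops hard-coding a 64-byte boundary (1 << 6) in favor of the CAA_CACHE_LINE_SIZE constant, so it tracks whatever cache-line size the header configures. As a minimal sketch of what the macro is for, assuming a 64-byte line as in the commented-out define; the per_thread_count type and NR_THREADS below are hypothetical illustrations, not part of the file:

/* Sketch only: pad each per-thread counter to a full cache line so that
 * concurrent writers on different CPUs do not false-share a line. */
#define CAA_CACHE_LINE_SIZE 64		/* assumed x86 cache-line size */
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))

#define NR_THREADS 16			/* hypothetical thread count */

struct per_thread_count {
	unsigned long count;
} ____cacheline_internodealigned_in_smp;

static struct per_thread_count counts[NR_THREADS];

With the attribute applied, sizeof(struct per_thread_count) rounds up to a multiple of 64, so counts[i] and counts[i + 1] always land on distinct cache lines.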