X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=share%2Fkernelcompat.h;h=052f7688bb9f36d804529eca4ca65176fc04c428;hb=1ae7f0744f280e97ab1a2adc548b8fd9f2cb21a4;hp=f95d0d77e886d17090b41c8be0c556935e2df399;hpb=09938485689f3482ec845e52d5bf5e78c1093e27;p=ust.git

diff --git a/share/kernelcompat.h b/share/kernelcompat.h
index f95d0d7..052f768 100644
--- a/share/kernelcompat.h
+++ b/share/kernelcompat.h
@@ -1,6 +1,8 @@
 #ifndef KERNELCOMPAT_H
 #define KERNELCOMPAT_H
 
+#include
+
 #include "compiler.h"
 
 #include
@@ -40,12 +42,12 @@ static inline long IS_ERR(const void *ptr)
 
 /* FIXED SIZE INTEGERS */
 
-#include <stdint.h>
+//#include <stdint.h>
 
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
+//typedef uint8_t u8;
+//typedef uint16_t u16;
+//typedef uint32_t u32;
+//typedef uint64_t u64;
 
 #define min_t(type, x, y) ({ \
 	type __min1 = (x); \
@@ -98,14 +100,8 @@ typedef int spinlock_t;
 
 /* MEMORY BARRIERS */
 
-//#define smp_rmb() do {} while(0)
-//#define smp_wmb() do {} while(0)
-//#define smp_mb() do {} while(0)
 #define smp_mb__after_atomic_inc() do {} while(0)
 
-#define read_barrier_depends() do {} while(0)
-//#define smp_read_barrier_depends() do {} while(0)
-
 /* RCU */
 
 #include "urcu.h"
@@ -118,8 +114,6 @@ typedef int spinlock_t;
 
 #include <signal.h>
 
-typedef struct { sig_atomic_t counter; } atomic_t;
-
 static inline int atomic_dec_and_test(atomic_t *p)
 {
 	(p->counter)--;
@@ -153,39 +147,6 @@ static int atomic_read(atomic_t *p)
 	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
 				       (unsigned long)(n), sizeof(*(ptr))))
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("lock; cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("lock; cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("lock; cmpxchgl %k1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 8:
-		asm volatile("lock; cmpxchgq %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
 //#define local_cmpxchg cmpxchg
 #define local_cmpxchg(l, o, n)	(cmpxchg(&((l)->a.counter), (o), (n)))
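
The hunks above delete the hand-rolled compatibility primitives: the no-op memory barrier macros, the local "typedef struct { sig_atomic_t counter; } atomic_t;", and the x86-only __cmpxchg() switch written in inline assembly. The angle-bracket include added at the top of the file was lost in this plain-text rendering; judging from the removals, it presumably pulls in headers (such as liburcu's) that already provide barriers, atomics and cmpxchg. The sketch below is not part of the commit; it is a minimal illustration of how the surviving cmpxchg() wrapper could be delegated to liburcu's uatomic layer, assuming the urcu/uatomic.h API of current liburcu. The names kcompat_cmpxchg() and kcompat_cmpxchg_demo() are hypothetical.

#include <urcu/uatomic.h>	/* uatomic_cmpxchg(), uatomic_set(), uatomic_read() */

/* Portable stand-in for the deleted "lock; cmpxchg" assembly: liburcu
 * selects the right instruction sequence per architecture and operand
 * size, so no switch (size) is needed on the caller side. */
#define kcompat_cmpxchg(ptr, o, n)	uatomic_cmpxchg((ptr), (o), (n))

/* Tiny self-test; returns 1 when the exchanges behave as expected. */
static int kcompat_cmpxchg_demo(void)
{
	unsigned long word;

	uatomic_set(&word, 1UL);

	/* Old value matches: 2 is stored, the previous value (1) is returned. */
	if (kcompat_cmpxchg(&word, 1UL, 2UL) != 1UL)
		return 0;

	/* Old value no longer matches: nothing is stored, 2 is returned. */
	if (kcompat_cmpxchg(&word, 1UL, 3UL) != 2UL)
		return 0;

	return uatomic_read(&word) == 2UL;
}

Delegating to uatomic_cmpxchg() keeps kernelcompat.h free of per-architecture assembly, which appears to be the point of deleting the x86-only __cmpxchg() above.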