/*****************************************************************************
 * kernelutils-i386.h
 *
 * This file holds the code needed by LTT usertrace that comes from the
 * kernel headers. Since including kernel headers is not recommended in
 * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
 * (i.e. copied/pasted) from the original kernel headers (2.6.17).
 *
 * Martin Bisson, July 2006
 * Mathieu Desnoyers, August 2006
 */

#ifndef _KERNELUTILS_I386_H
#define _KERNELUTILS_I386_H

// We are careful, so we assume a possibly SMP machine
#define LOCK "lock ; "
#define LOCK_PREFIX "lock ; "




// From atomic.h


/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v) ((v)->counter)
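
/*
 * Illustrative usage (not part of the original header): atomic_read() is
 * a plain volatile load, so no lock prefix is needed.
 *
 *      atomic_t nr_events = { 0 };
 *      int snapshot = atomic_read(&nr_events);
 */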

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "addl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}
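
/*
 * Illustrative usage (not part of the original header): the lock prefix
 * makes the read-modify-write a single atomic operation, even on SMP.
 *
 *      atomic_t bytes_written = { 0 };
 *      atomic_add(42, &bytes_written);
 */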

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        int __i = i;
        __asm__ __volatile__(
                LOCK "xaddl %0, %1;"
                :"=r"(i)
                :"m"(v->counter), "0"(i));
        /* xaddl leaves the old value of v->counter in i. */
        return i + __i;
}
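
/*
 * Illustrative usage (not part of the original header): atomic_add_return()
 * is convenient for handing out unique sequence numbers across threads.
 *
 *      static atomic_t next_id = { 0 };
 *
 *      atomic_inc(&next_id);                           // id 1 is taken
 *      int my_id = atomic_add_return(1, &next_id);     // returns the post-add value, 2
 */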



// From system.h

/*
 * __xg() casts a pointer to a "huge" dummy struct so that the "m"
 * constraints in __cmpxchg() below refer to the memory at the exact
 * address we were given, not just a single long or a register alias.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}
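
/*
 * Illustrative usage (not part of the original header): the classic
 * compare-and-swap retry loop.  The update took effect iff cmpxchg()
 * returns the old value our new value was computed from.
 *
 *      static unsigned long flags = 0;
 *      unsigned long old, new;
 *
 *      do {
 *              old = flags;
 *              new = old | 0x1;        // set bit 0 atomically
 *      } while (cmpxchg(&flags, old, new) != old);
 */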


// From msr.h

/* rdtsc reads the 64-bit timestamp counter into edx:eax; the "=A"
 * constraint binds that register pair on i386. */
#define rdtscll(val) \
        __asm__ __volatile__("rdtsc" : "=A" (val))

// From timex.h

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles (void)
{
        unsigned long long ret;

        rdtscll(ret);
        return ret;
}
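
/*
 * Illustrative usage (not part of the original header): measuring an
 * interval in CPU cycles.  Raw TSC deltas are only meaningful if the
 * thread stays on one CPU and the TSC rate is constant.
 *
 *      cycles_t start = get_cycles();
 *      do_something();                 // hypothetical workload
 *      cycles_t elapsed = get_cycles() - start;
 */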


#endif // _KERNELUTILS_I386_H