/*****************************************************************************
 * kernelutils-x86_64.h
 *
 * This file holds the code needed by LTT usertrace that comes from the
 * kernel headers.  Since including kernel headers is not recommended in
 * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
 * (i.e. copied/pasted) from the original kernel headers (2.6.17).
 *
 * Martin Bisson, July 2006
 */

#ifndef _KERNELUTILS_X86_64_H
#define _KERNELUTILS_X86_64_H

#ifdef __cplusplus
extern "C" {
#endif

// We are careful, so we assume the machine may be SMP
#define LOCK "lock ; "
#define LOCK_PREFIX "lock ; "



// From atomic.h


/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	__asm__ __volatile__(
		LOCK "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	return i + __i;
}
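
/*
 * Example usage (an illustrative sketch, not part of the original header):
 * a shared event counter updated from several threads.  The names
 * "nr_events", "record_event" and "snapshot_events" are hypothetical.
 *
 *	static atomic_t nr_events = { 0 };
 *
 *	void record_event(void)
 *	{
 *		atomic_inc(&nr_events);
 *	}
 *
 *	int snapshot_events(void)
 *	{
 *		return atomic_read(&nr_events);
 *	}
 *
 * atomic_add_return() would be used instead when the caller also needs
 * the post-update value, e.g. to assign sequence numbers.
 */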




// From system.h

#define __xg(x) ((volatile long *)(x))


/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

/* The second operand is named "new_val" rather than "new" so that this
 * header stays compilable when included from C++ (where "new" is a
 * keyword), as the extern "C" wrapper above intends. */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new_val, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new_val), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new_val), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "r"(new_val), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "r"(new_val), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
				       (unsigned long)(n),sizeof(*(ptr))))
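
/*
 * Example usage (an illustrative sketch, not part of the original header):
 * a lock-free accumulator built on cmpxchg().  The loop re-reads the
 * current value and retries until no other thread raced in between;
 * success is detected by the returned value matching the one we read.
 * The names "total" and "add_to_total" are hypothetical.
 *
 *	static unsigned long total = 0;
 *
 *	void add_to_total(unsigned long n)
 *	{
 *		unsigned long old, seen;
 *
 *		do {
 *			old = total;
 *			seen = cmpxchg(&total, old, old + n);
 *		} while (seen != old);
 *	}
 */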




// From msr.h


#define rdtscll(val) do { \
	unsigned int __a,__d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)




// From timex.h

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles (void)
{
	unsigned long long ret;

	rdtscll(ret);
	return ret;
}
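
/*
 * Example usage (an illustrative sketch, not part of the original header):
 * timing a code region in TSC cycles.  "do_work" is a hypothetical
 * function, and printf() requires <stdio.h>.  Raw TSC deltas are only
 * meaningful if the thread stays on one CPU for the measured region.
 *
 *	cycles_t t0, t1;
 *
 *	t0 = get_cycles();
 *	do_work();
 *	t1 = get_cycles();
 *	printf("region took %llu cycles\n", t1 - t0);
 */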

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif // _KERNELUTILS_X86_64_H