update genevent for char ptr
[lttv.git] / ltt-usertrace / ltt / kernelutils-x86_64.h
/*****************************************************************************
 * kernelutils-x86_64.h
 *
 * This file holds the code needed by LTT usertrace that comes from the
 * kernel headers.  Since including kernel headers is not recommended in
 * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
 * by (i.e. copied/pasted from) the original kernel headers (2.6.17).
 *
 * Martin Bisson, July 2006
 */

#ifndef _KERNELUTILS_X86_64_H
#define _KERNELUTILS_X86_64_H

// We are careful, so we assume a possibly SMP machine
#define LOCK "lock ; "
#define LOCK_PREFIX "lock ; "




// From atomic.h


/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	__asm__ __volatile__(
		LOCK "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	return i + __i;
}
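
/*
 * Illustrative sketch (not part of the original header): how the atomic
 * helpers above are typically used to maintain a shared event counter.
 * The names nr_events and record_event are hypothetical.
 *
 *	static atomic_t nr_events = { 0 };
 *
 *	static void record_event(int batch)
 *	{
 *		int new_count;
 *
 *		atomic_inc(&nr_events);			// lock-prefixed incl
 *		atomic_add(batch, &nr_events);		// lock-prefixed addl
 *		// xaddl-based: returns the counter value after the addition
 *		new_count = atomic_add_return(batch, &nr_events);
 *		(void) new_count;
 *		// plain volatile read, no lock prefix needed
 *		(void) atomic_read(&nr_events);
 *	}
 */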




// From system.h

#define __xg(x) ((volatile long *)(x))


/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
				       (unsigned long)(n),sizeof(*(ptr))))
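
/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-exchange retry loop built on cmpxchg().  Success is
 * detected by checking that the returned value equals the OLD value
 * passed in.  The function name add_to_counter is hypothetical.
 *
 *	static void add_to_counter(unsigned long *counter, unsigned long delta)
 *	{
 *		unsigned long old, prev;
 *
 *		do {
 *			old = *counter;
 *			// returns the value found in *counter; if it is
 *			// still 'old', the store of old + delta succeeded
 *			prev = cmpxchg(counter, old, old + delta);
 *		} while (prev != old);
 *	}
 */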




// From msr.h


#define rdtscll(val) do { \
	unsigned int __a,__d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)




// From timex.h

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles (void)
{
	unsigned long long ret;

	rdtscll(ret);
	return ret;
}
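
/*
 * Illustrative sketch (not part of the original header): timestamping a
 * region of code with the TSC-based cycle counter.  do_some_work() is a
 * hypothetical placeholder.
 *
 *	static cycles_t time_some_work(void)
 *	{
 *		cycles_t start, end;
 *
 *		start = get_cycles();	// rdtsc, low/high halves combined
 *		do_some_work();
 *		end = get_cycles();
 *		return end - start;	// elapsed cycles, not wall-clock time
 *	}
 */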


#endif // _KERNELUTILS_X86_64_H