convert from svn repository: remove tags directory
[lttv.git] / trunk / obsolete / ltt-usertrace / ltt / kernelutils-i386.h
CommitLineData
4b18c4a6 1/*****************************************************************************
2 * kernelutils-i386.h
3 *
4 * This file holds the code needed by LTT usertrace that comes from the
5 * kernel headers. Since including kernel headers is not recommended in
6 * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
7 * (i.e. copied/pasted) from the original kernel headers (2.6.17).
8 *
9 * Martin Bisson, July 2006
10 * Mathieu Desnoyers, August 2006
11 */
12
13#ifndef _KERNELUTILS_I386_H
14#define _KERNELUTILS_I386_H
15
895ad115 16#ifdef __cplusplus
17extern "C" {
18#endif
19
// We are careful, so we assume a possibly SMP machine
// NOTE(review): on a strictly uniprocessor build the "lock" prefix could be
// dropped, but emitting it unconditionally is always correct (just slower).
#define LOCK "lock ; "
#define LOCK_PREFIX "lock ; "
23
24
4b18c4a6 25// From atomic.h
26
27
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;	/* volatile: force real loads/stores of counter */
34
35
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 * Note: this is a plain volatile load; it implies no memory barrier.
 */
#define atomic_read(v) ((v)->counter)
43
44/**
45 * atomic_add - add integer to atomic variable
46 * @i: integer value to add
47 * @v: pointer of type atomic_t
48 *
49 * Atomically adds @i to @v.
50 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	/*
	 * LOCK "addl" makes the read-modify-write of v->counter atomic on SMP.
	 * Constraints: "=m" marks the counter as written; the extra "m" input
	 * marks it as read too, so gcc cannot cache a stale copy; "ir" lets
	 * @i be an immediate or any register.
	 */
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
58
59/**
60 * atomic_inc - increment atomic variable
61 * @v: pointer of type atomic_t
62 *
63 * Atomically increments @v by 1.
64 */
static __inline__ void atomic_inc(atomic_t *v)
{
	/*
	 * LOCK "incl" performs the atomic increment in place; the "m" input
	 * operand (same location as the "=m" output) tells gcc the memory is
	 * both read and written.
	 */
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
72
73/**
74 * atomic_add_return - add and return
75 * @i: integer value to add
76 * @v: pointer of type atomic_t
77 *
78 * Atomically adds @i to @v and returns @i + @v
79 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;	/* keep the increment; xadd clobbers i below */
	/*
	 * LOCK "xaddl" atomically exchanges-and-adds: after it executes,
	 * v->counter == old + i and the register operand %0 (i) holds the
	 * old counter value ("0"(i) seeds it, "=r"(i) receives the result).
	 */
	__asm__ __volatile__(
		LOCK "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	/* old value + increment == the new counter value */
	return i + __i;
}
89
90
91
92
93// From system.h
94
/*
 * Oversized dummy type for the "m" constraints below: dereferencing
 * (struct __xchg_dummy *)ptr makes gcc treat the operand as a large,
 * opaque memory reference, so it will not assume only a 4-byte word is
 * touched or try to optimize the access.  (Standard kernel idiom from
 * the original system.h.)
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
97
98
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
/* Type-generic front end: dispatches to __cmpxchg() on sizeof(*(ptr))
 * and casts the returned previous value back to the pointed-to type. */
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
109
/*
 * __cmpxchg - atomic compare-and-exchange on a 1-, 2- or 4-byte object.
 * @ptr:    address of the object to update
 * @old:    value expected at *ptr
 * @newval: value stored iff *ptr == @old
 * @size:   sizeof(*ptr); only 1, 2 and 4 are handled here (no 8-byte case)
 *
 * Returns the value previously at *ptr; the exchange took place iff that
 * value equals @old.  The "=a"/"0" constraints implement the cmpxchg
 * calling convention: %eax holds @old on entry and the previous memory
 * value on exit.  The "memory" clobber orders the operation against other
 * memory accesses.
 *
 * NOTE(review): for an unsupported @size the switch falls through and
 * @old is returned with *ptr untouched — callers would wrongly see that
 * as a successful exchange.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
			 unsigned long newval, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		/* "q": byte-addressable register (%al/%bl/%cl/%dl) required
		 * for cmpxchgb */
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(newval), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(newval), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(newval), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
136
137
138// From msr.h
139
/* Read the CPU timestamp counter into the 64-bit lvalue @val.
 * "=A" binds the %edx:%eax register pair — the i386 convention for a
 * 64-bit value, matching rdtsc's output registers. */
#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
142
143// From timex.h
144
145typedef unsigned long long cycles_t;
146
147static inline cycles_t get_cycles (void)
148{
149 unsigned long long ret;
150
151 rdtscll(ret);
152 return ret;
153}
154
895ad115 155#ifdef __cplusplus
156} /* end of extern "C" */
157#endif
4b18c4a6 158
159#endif // _KERNELUTILS_I386_H
This page took 0.037121 seconds and 4 git commands to generate.