ust: generate traces readable in lttv
[ust.git] / share / kernelcompat.h
#ifndef KERNELCOMPAT_H
#define KERNELCOMPAT_H

#include "compiler.h"

#include <stddef.h>	/* offsetof() */
#include <string.h>

#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})
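/*
 * Hedged usage sketch for container_of() (the struct and variable names are
 * illustrative only, not part of this header): given a pointer to a member,
 * recover a pointer to the enclosing structure.
 *
 *	struct buffer { int id; pthread_mutex_t lock; };
 *	pthread_mutex_t *lp = &some_buffer.lock;
 *	struct buffer *buf = container_of(lp, struct buffer, lock);
 */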

#define KERN_DEBUG ""
#define KERN_NOTICE ""
#define KERN_INFO ""
#define KERN_ERR ""
#define KERN_ALERT ""
#define KERN_WARNING ""

/* ERROR OPS */

#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
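/*
 * Hedged usage sketch for the error-pointer helpers (function and variable
 * names are illustrative): a negative errno value is carried through a
 * pointer return value.
 *
 *	void *channel = create_channel();	// may return ERR_PTR(-ENOMEM)
 *	if (IS_ERR(channel))
 *		return PTR_ERR(channel);	// back to a negative errno
 */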


/* FIXED SIZE INTEGERS */

#include <stdint.h>

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#define min_t(type, x, y) ({ \
	type __min1 = (x); \
	type __min2 = (y); \
	__min1 < __min2 ? __min1 : __min2; })

#define max_t(type, x, y) ({ \
	type __max1 = (x); \
	type __max2 = (y); \
	__max1 > __max2 ? __max1 : __max2; })

/* MUTEXES */

#include <pthread.h>

#define DEFINE_MUTEX(m) pthread_mutex_t (m) = PTHREAD_MUTEX_INITIALIZER;
#define DECLARE_MUTEX(m) extern pthread_mutex_t (m);

#define mutex_lock(m) pthread_mutex_lock(m)
#define mutex_unlock(m) pthread_mutex_unlock(m)
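/*
 * Hedged usage sketch (the mutex name is illustrative): DEFINE_MUTEX()
 * expands to a statically initialized pthread mutex.
 *
 *	DEFINE_MUTEX(trace_mutex);
 *
 *	mutex_lock(&trace_mutex);
 *	// ... critical section ...
 *	mutex_unlock(&trace_mutex);
 */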

/* SPINLOCKS */

typedef int spinlock_t;

#define spin_lock(a) /* nothing */
#define spin_unlock(a) /* nothing */
#define spin_lock_init(a) /* nothing */

/* MALLOCATION */

#include <stdlib.h>

#define kmalloc(s, t) malloc(s)
#define kzalloc(s, t) zmalloc(s)
#define kfree(p) free((void *)p)
#define kstrdup(s, t) strdup(s)

#define zmalloc(s) calloc(1, s)

#define GFP_KERNEL
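/*
 * Note: the kernel gfp_t flag argument (e.g. GFP_KERNEL) is accepted for
 * source compatibility but ignored; every allocation goes through libc.
 */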

/* PRINTK */

#include <stdio.h>

#define printk(fmt, args...) printf(fmt, ## args)

/* MEMORY BARRIERS */

#define smp_rmb() do {} while (0)
#define smp_wmb() do {} while (0)
#define smp_mb() do {} while (0)
#define smp_mb__after_atomic_inc() do {} while (0)

#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)

/* RCU */

/* Publish the pointer; the associated write barrier is elided, like the
 * barrier stubs above. */
#define rcu_assign_pointer(a, b) do { (a) = (b); } while (0)

#define call_rcu_sched(a, b) do {} while (0)
#define rcu_barrier_sched() do {} while (0)
#define rcu_read_lock_sched_notrace() do {} while (0)
#define rcu_read_unlock_sched_notrace() do {} while (0)

/* ATOMICITY */

#include <signal.h>

typedef struct { sig_atomic_t counter; } atomic_t;

static inline int atomic_dec_and_test(atomic_t *p)
{
	(p->counter)--;
	return !p->counter;
}

static inline void atomic_set(atomic_t *p, int v)
{
	p->counter = v;
}

static inline void atomic_inc(atomic_t *p)
{
	p->counter++;
}

static inline int atomic_read(atomic_t *p)
{
	return p->counter;
}

#define atomic_long_t atomic_t
#define atomic_long_set atomic_set
#define atomic_long_read atomic_read
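/*
 * Note: unlike their kernel counterparts, the atomic_t helpers above are
 * plain read-modify-write operations on a sig_atomic_t; they are not atomic
 * with respect to concurrent updates from other threads. cmpxchg() below is
 * the only compare-and-swap primitive provided by this header.
 */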

#include "asm.h"

#define __xg(x) ((volatile long *)(x))

#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
				       (unsigned long)(n), sizeof(*(ptr))))

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;

	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile("lock; cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
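/*
 * Hedged usage sketch for cmpxchg() (the variable names are illustrative,
 * not part of this header): atomically replace *p with 2 only if it still
 * holds 1, and learn the previous value either way.
 *
 *	long p = 1;
 *	long prev = cmpxchg(&p, 1, 2);	// prev == 1, p is now 2
 *
 * Note that the "cmpxchgq" case (size 8) assumes an x86-64 build.
 */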

//#define local_cmpxchg cmpxchg
#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))

#define atomic_long_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))


/* LOCAL OPS */

//typedef int local_t;
typedef struct
{
	atomic_long_t a;
} local_t;

static inline void local_inc(local_t *l)
{
	(l->a.counter)++;
}

static inline void local_set(local_t *l, int v)
{
	l->a.counter = v;
}

static inline void local_add(int v, local_t *l)
{
	l->a.counter += v;
}

static inline int local_add_return(int v, local_t *l)
{
	return l->a.counter += v;
}

static inline int local_read(local_t *l)
{
	return l->a.counter;
}
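/*
 * Hedged usage sketch for the local ops (the counter name is illustrative):
 *
 *	local_t events;
 *	local_set(&events, 0);
 *	local_inc(&events);
 *	printf("%d\n", local_read(&events));	// prints 1
 *
 * As with atomic_t above, these are plain operations, not the per-CPU atomic
 * instructions the kernel provides.
 */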


/* ATTRIBUTES */

#define ____cacheline_aligned
#define __init
#define __exit

/* MATH */

static inline unsigned int hweight32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

static inline int fls(int x)
{
	int r;
//ust//	#ifdef CONFIG_X86_CMOV
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
//ust//	#else
//ust//	asm("bsrl %1,%0\n\t"
//ust//	    "jnz 1f\n\t"
//ust//	    "movl $-1,%0\n"
//ust//	    "1:" : "=r" (r) : "rm" (x));
//ust//	#endif
	return r + 1;
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
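/*
 * Hedged examples for the helpers above (values computed by hand):
 *
 *	hweight32(0xF0) == 4		// four bits set
 *	fls(0x10) == 5			// most significant set bit is bit 4
 *	get_count_order(24) == 5	// smallest order with 2^order >= 24
 */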


#include <unistd.h>

#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
/* Note: unlike the kernel's PAGE_MASK (~(PAGE_SIZE - 1)), this one is not
 * complemented. */
#define PAGE_MASK (PAGE_SIZE - 1)
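/*
 * Hedged example, assuming a 4096-byte page size:
 *
 *	ALIGN(13, 8) == 16
 *	PAGE_ALIGN(5000) == 8192	// rounded up to the next page boundary
 */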


/* ARRAYS */

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* TRACE CLOCK */

static inline u64 trace_clock_read64(void)
{
	return 0LL;
}

static inline unsigned int trace_clock_frequency(void)
{
	return 0LL;
}

static inline u32 trace_clock_freq_scale(void)
{
	return 0;
}
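/*
 * Note: the trace clock is stubbed out in this compatibility layer; all
 * three helpers currently return 0.
 */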

/* LISTS */

#define list_add_rcu list_add
#define list_for_each_entry_rcu list_for_each_entry


#define EXPORT_SYMBOL_GPL(a) /* nothing */

#define smp_processor_id() (-1)

#endif /* KERNELCOMPAT_H */