#ifndef KERNELCOMPAT_H
#define KERNELCOMPAT_H
+#include <kcompat.h>
+
#include "compiler.h"
#include <string.h>
/* FIXED SIZE INTEGERS */
-#include <stdint.h>
+//#include <stdint.h>
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
+//typedef uint8_t u8;
+//typedef uint16_t u16;
+//typedef uint32_t u32;
+//typedef uint64_t u64;
#define min_t(type, x, y) ({ \
type __min1 = (x); \
#define mutex_unlock(m) pthread_mutex_unlock(m)
-/* SPINLOCKS */
-
-typedef int spinlock_t;
-
-#define spin_lock(a) /* nothing */
-#define spin_unlock(a) /* nothing */
-#define spin_lock_init(a) /* nothing */
-
/* MALLOCATION */
/* MEMORY BARRIERS */
-#define smp_rmb() do {} while(0)
-#define smp_wmb() do {} while(0)
-#define smp_mb() do {} while(0)
#define smp_mb__after_atomic_inc() do {} while(0)
-#define read_barrier_depends() do {} while(0)
-#define smp_read_barrier_depends() do {} while(0)
-
/* RCU */
-#define rcu_assign_pointer(a, b) do {} while(0)
-#define call_rcu_sched(a,b) do {} while(0)
-#define rcu_barrier_sched() do {} while(0)
-#define rcu_read_lock_sched_notrace() do{} while (0)
-#define rcu_read_unlock_sched_notrace() do{} while (0)
+#include "urcu.h"
+/* call_rcu_sched(head, func) must invoke func only AFTER a grace period;
+ * wait first, then run the callback, inside do{}while(0) so the macro is
+ * safe as a single statement under an unbraced if/else. */
+#define call_rcu_sched(a,b) do { synchronize_rcu(); b(a); } while (0)
+#define rcu_barrier_sched() do {} while(0) /* this nop is ok if call_rcu_sched does a synchronize_rcu() */
+#define rcu_read_lock_sched_notrace() rcu_read_lock()
+#define rcu_read_unlock_sched_notrace() rcu_read_unlock()
/* ATOMICITY */
#include <signal.h>
-typedef struct { sig_atomic_t counter; } atomic_t;
-
static inline int atomic_dec_and_test(atomic_t *p)
{
(p->counter)--;
#include "asm.h"
-#define __xg(x) ((volatile long *)(x))
+//#define __xg(x) ((volatile long *)(x))
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- asm volatile("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- asm volatile("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- asm volatile("lock; cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- asm volatile("lock; cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
//#define local_cmpxchg cmpxchg
#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))
/* TRACE CLOCK */
+//ust// static inline u64 trace_clock_read64(void)
+//ust// {
+//ust// uint32_t low;
+//ust// uint32_t high;
+//ust// uint64_t retval;
+//ust// __asm__ volatile ("rdtsc\n" : "=a" (low), "=d" (high));
+//ust//
+//ust// retval = high;
+//ust// retval <<= 32;
+//ust// return retval | low;
+//ust// }
+/* gettimeofday() below needs <sys/time.h>; do not rely on <kcompat.h>
+ * pulling it in transitively. */
+#include <sys/time.h>
+
 static inline u64 trace_clock_read64(void)
{
- return 0LL;
+ struct timeval tv;
+ u64 retval;
+
+ gettimeofday(&tv, NULL);
+ retval = tv.tv_sec;
+ retval *= 1000000;
+ retval += tv.tv_usec;
+
+ return retval;
}
-static inline unsigned int trace_clock_frequency(void)
+static inline u64 trace_clock_frequency(void)
{
- return 0LL;
+ return 1000000LL;
}
static inline u32 trace_clock_freq_scale(void)
{
- return 0;
+ return 1;
}