[lttv.git] / ltt-usertrace / ltt / ltt-usertrace-ppc.h
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __LTT_USERTRACE_PPC_H
#define __LTT_USERTRACE_PPC_H

#include <stdint.h>	/* uint64_t, used by cycles_t and get_tb() */

#ifdef __powerpc64__
#include "ltt/atomic-ppc64.h"
#else
#include "ltt/atomic-ppc.h"
#endif

/*
 * Atomically exchange *p with val using a lwarx/stwcx. (load-reserve /
 * store-conditional) loop; retries until the conditional store succeeds.
 */
static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif	/* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}
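
/*
 * Usage sketch (illustrative, not part of the original header): xchg()
 * atomically stores a new value and hands back whatever was there before,
 * so a one-shot "was I first?" flag falls out directly.  example_flag and
 * example_set_flag are hypothetical names.
 */
#if 0	/* example only, never compiled */
static volatile unsigned int example_flag;

static inline int example_set_flag(void)
{
	/* tas(&example_flag) is shorthand for xchg(&example_flag, 1) */
	unsigned int old = xchg(&example_flag, 1);

	return old == 0;	/* non-zero only for the first caller */
}
#endif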

/*
 * Exchange a pointer value.  This goes through xchg_u32(), i.e. a 32-bit
 * swap, so it is only correct where pointers are 32 bits wide.
 */
static __inline__ void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}


#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomically set *p to new only if it currently holds old; returns the
 * value that was actually in memory, so the caller can tell whether the
 * update happened.
 */
static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#ifdef CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}


/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
  ({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
  })
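
/*
 * Usage sketch (illustrative, not part of the original header): cmpxchg()
 * returns the value that was really in memory, so a read-modify-write is
 * retried until no other thread changed the location in between.
 * example_counter and example_inc are hypothetical names.
 */
#if 0	/* example only, never compiled */
static volatile unsigned int example_counter;

static inline void example_inc(void)
{
	unsigned int old, new;

	do {
		old = example_counter;
		new = old + 1;
	} while (cmpxchg(&example_counter, old, new) != old);
}
#endif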


#define CPU_FTR_601			0x00000100

#define CLOCK_TICK_RATE	1193180 /* Underlying HZ */

typedef uint64_t cycles_t;

/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
static inline unsigned long get_tbl(void)
{
	unsigned long tbl;

//#if defined(CONFIG_403GCX)
//	asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
//#else
	asm volatile("mftb %0" : "=r" (tbl));
//#endif
	return tbl;
}

static inline unsigned int get_tbu(void)
{
	unsigned int tbu;

//#if defined(CONFIG_403GCX)
//	asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
//#else
	asm volatile("mftbu %0" : "=r" (tbu));
//#endif
	return tbu;
}



#ifdef __powerpc64__
static inline uint64_t get_tb(void)
{
	return mftb();
}
#else
static inline uint64_t get_tb(void)
{
	unsigned int tbhi, tblo, tbhi2;

	/*
	 * Read TBU, then TBL, then TBU again; if TBU changed, TBL wrapped
	 * between the two reads and the pair is inconsistent, so retry.
	 */
	do {
		tbhi = get_tbu();
		tblo = get_tbl();
		tbhi2 = get_tbu();
	} while (tbhi != tbhi2);

	return ((uint64_t)tbhi << 32) | tblo;
}
#endif

static inline cycles_t get_cycles(void)
{
	return get_tb();
}
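
/*
 * Usage sketch (illustrative, not part of the original header): get_cycles()
 * returns the current timebase value, so an elapsed interval is just the
 * difference of two reads.  Converting ticks to seconds needs the platform
 * timebase frequency, which this header does not provide.
 */
#if 0	/* example only, never compiled */
static inline void example_timing(void)
{
	cycles_t start, end;

	start = get_cycles();
	/* ... code being timed ... */
	end = get_cycles();

	/* end - start is the elapsed time in timebase ticks */
}
#endif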


#endif /* __LTT_USERTRACE_PPC_H */