/*
 * arch_ppc.h: Definitions for the PowerPC architecture, derived from Linux.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but only version 2 of the License given
 * that this comes from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 */
#ifndef _ARCH_PPC_H
#define _ARCH_PPC_H

#include <compiler.h>	/* for barrier() */

#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

/* On PowerPC, "sync" is a full barrier ordering both loads and stores. */
#define mb()    asm volatile("sync":::"memory")
#define rmb()   asm volatile("sync":::"memory")
#define wmb()   asm volatile("sync":::"memory")
/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mc()		arch_cache_flush()
 * #define rmc()	arch_cache_flush_read()
 * #define wmc()	arch_cache_flush_write()
 */

/* PowerPC is cache-coherent, so cache flushes reduce to compiler barriers. */
#define mc()    barrier()
#define rmc()   barrier()
#define wmc()   barrier()
/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
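/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the classic pointer-publication pattern using the primitives above.
 * smp_wmb() orders initialization before publication of the pointer, and
 * smp_read_barrier_depends() orders the dependent dereference on the one
 * architecture (Alpha) that needs it.  The struct and function names are
 * hypothetical; real users would typically wrap this in rcu_assign_pointer()
 * and rcu_dereference().
 */
struct example_data {
	int a;
};

static struct example_data *example_gp;

static inline void example_publish(struct example_data *p)
{
	p->a = 42;			/* initialize the structure */
	smp_wmb();			/* order init before publication */
	example_gp = p;			/* publish the pointer */
}

static inline int example_consume(void)
{
	struct example_data *p = example_gp;

	smp_read_barrier_depends();	/* order pointer load before deref */
	return p ? p->a : 0;
}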
static inline void cpu_relax(void)
{
	barrier();
}
/*
 * PPC405_ERR77 is a PowerPC 405 erratum workaround in the Linux kernel and is
 * a no-op here; lwsync/isync are emitted around atomic sequences for ordering
 * on SMP.
 */
#define PPC405_ERR77(ra,rb)
#define LWSYNC_ON_SMP "\n\tlwsync\n"
#define ISYNC_ON_SMP "\n\tisync\n"
#ifndef _INCLUDE_API_H

static __inline__ void atomic_inc(int *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (*v)
	: "r" (v)
	: "cc", "xer");
}

#endif /* #ifndef _INCLUDE_API_H */
/* Tell gcc that the object at *x may be arbitrarily large. */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
#ifndef _INCLUDE_API_H

/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.  (An illustrative use of the
 * resulting xchg() macro follows its definition below.)
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);
static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#endif /* #ifndef _INCLUDE_API_H */
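/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * xchg() can implement a minimal test-and-set spinlock.  The function names
 * example_lock()/example_unlock() are hypothetical.
 */
static inline void example_lock(int *lock)
{
	/* Atomically store 1; a previous value of 1 means the lock was held. */
	while (xchg(lock, 1))
		cpu_relax();
}

static inline void example_unlock(int *lock)
{
	smp_mb();	/* order the critical section before the release */
	*lock = 0;
}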
/* Read the lower and upper 32-bit halves of the PowerPC time-base register. */
#define mftbl()		({unsigned long rval;	\
			asm volatile("mftbl %0" : "=r" (rval)); rval;})
#define mftbu()		({unsigned long rval;	\
			asm volatile("mftbu %0" : "=r" (rval)); rval;})
typedef unsigned long long cycles_t;

static inline cycles_t get_cycles (void)
{
	long h;
	long l;

	for (;;) {
		h = mftbu();
		barrier();
		l = mftbl();
		barrier();
		/* Retry if the upper half changed while the lower was read. */
		if (mftbu() == h)
			return (((long long)h) << 32) + l;
	}
}
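/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * timing a code section with get_cycles().  The result is in time-base ticks,
 * not processor clock cycles; example_time_section() is a hypothetical name.
 */
static inline cycles_t example_time_section(void (*fn)(void))
{
	cycles_t start, end;

	start = get_cycles();
	fn();
	end = get_cycles();
	return end - start;
}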
#endif /* _ARCH_PPC_H */