Add compatibility support for older Intel CPUs
[urcu.git] / urcu/arch_x86.h
#ifndef _URCU_ARCH_X86_H
#define _URCU_ARCH_X86_H

/*
 * arch_x86.h: trivial definitions for the x86 architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/compiler.h>
#include "config.h"

/* Assume P4 or newer, where the mfence, lfence and sfence instructions are available. */
#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

#define CACHE_LINE_SIZE 128

#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif

#ifdef CONFIG_HAVE_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")
#else
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be
 * a no-op for these.
 */
#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#endif
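
/*
 * Pairing sketch (illustrative only; "data" and "flag" are hypothetical
 * shared variables, not part of this header). A writer publishing data
 * through a flag orders its stores with wmb(); the reader pairs it with
 * rmb():
 *
 *      Writer:                         Reader:
 *              data = 42;                      while (!flag)
 *              wmb();                                  continue;
 *              flag = 1;                       rmb();
 *                                              assert(data == 42);
 */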

/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mb()  mc()
 * #define rmb() rmc()
 * #define wmb() wmc()
 * #define mc()  arch_cache_flush()
 * #define rmc() arch_cache_flush_read()
 * #define wmc() arch_cache_flush_write()
 */

#define mc()  barrier()
#define rmc() barrier()
#define wmc() barrier()

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_mc()  mc()
#define smp_rmc() rmc()
#define smp_wmc() wmc()
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_mc()  barrier()
#define smp_rmc() barrier()
#define smp_wmc() barrier()
#endif

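/*
 * The smp_* forms compile down to a plain compiler barrier() on
 * uniprocessor builds, so portable code should prefer them. A minimal
 * Dekker-style sketch (the flag array and self/other indexes are
 * hypothetical):
 *
 *      flag[self] = 1;
 *      smp_mb();       (orders the flag store before the flag load)
 *      if (!flag[other])
 *              ... enter the critical section ...
 */
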
/* Nop everywhere except on Alpha. */
#define smp_read_barrier_depends()

/*
 * "rep; nop" is the PAUSE instruction: it hints to the CPU that this is
 * a spin-wait loop, which saves power and avoids the memory-order
 * mis-speculation penalty when the loop exits.
 */
static inline void rep_nop(void)
{
        asm volatile("rep; nop" : : : "memory");
}

static inline void cpu_relax(void)
{
        rep_nop();
}

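/*
 * Typical cpu_relax() use, in a busy-wait loop (sketch; "ready" is a
 * hypothetical shared flag):
 *
 *      while (!ready)
 *              cpu_relax();
 */
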
/*
 * Serialize core instruction execution. Also acts as a compiler barrier.
 */
#ifdef __PIC__
/*
 * Cannot use cpuid because it clobbers the ebx register and clashes
 * with -fPIC:
 * error: PIC register 'ebx' clobbered in 'asm'
 */
static inline void sync_core(void)
{
        mb();
}
#else
static inline void sync_core(void)
{
        asm volatile("cpuid" : : : "memory", "eax", "ebx", "ecx", "edx");
}
#endif

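/*
 * One common use of sync_core() is to serialize before reading the TSC,
 * so that earlier instructions cannot be reordered past the measurement
 * point (sketch; rdtscll() is defined below):
 *
 *      sync_core();
 *      rdtscll(start);
 *      ... code under test ...
 *      sync_core();
 *      rdtscll(stop);
 */
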
/* Read the 64-bit cycle counter with the rdtsc instruction. */
#define rdtscll(val)                                            \
        do {                                                    \
                unsigned int __a, __d;                          \
                asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
                (val) = ((unsigned long long)__a)               \
                        | (((unsigned long long)__d) << 32);    \
        } while (0)

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
        cycles_t ret = 0;

        rdtscll(ret);
        return ret;
}

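/*
 * Example cycle-count measurement (sketch; do_work() is a hypothetical
 * function, and raw TSC deltas are only meaningful if the thread is not
 * migrated between cores with unsynchronized counters):
 *
 *      cycles_t start, stop;
 *
 *      start = get_cycles();
 *      do_work();
 *      stop = get_cycles();
 *      printf("%llu cycles\n", (unsigned long long)(stop - start));
 */
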
#endif /* _URCU_ARCH_X86_H */