// SPDX-FileCopyrightText: 2010 Paolo Bonzini <pbonzini@redhat.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_ARCH_GENERIC_H
#define _URCU_ARCH_GENERIC_H

/*
 * arch_generic.h: common definitions for multiple architectures.
 */

#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef CAA_CACHE_LINE_SIZE
#define CAA_CACHE_LINE_SIZE	64
#endif

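/*
 * Example: a minimal sketch of what CAA_CACHE_LINE_SIZE is typically used
 * for, namely aligning/padding data to a cache line to avoid false sharing
 * between CPUs (the struct and field names below are hypothetical):
 *
 *	struct per_cpu_counter {
 *		uint64_t value;
 *	} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 */
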
#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
#define CONFIG_HAVE_MEM_COHERENCY
/*
 * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
 *
 * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple
 * compiler barrier; in addition, we provide defaults for cmm_mb (using
 * GCC builtins) as well as cmm_rmb and cmm_wmb (defaulting to cmm_mb).
 */

#ifdef CONFIG_RCU_USE_ATOMIC_BUILTINS

# ifndef cmm_smp_mb
# define cmm_smp_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
# endif

#endif /* CONFIG_RCU_USE_ATOMIC_BUILTINS */

/*
 * cmm_mb() expands to __sync_synchronize() instead of __atomic_thread_fence()
 * with SEQ_CST because the former "issues a full memory barrier" while the
 * latter "acts as a synchronization fence between threads", which is too weak
 * for what we want, for example with I/O devices.
 *
 * Even though __sync_synchronize() seems to be an alias for a sequentially
 * consistent atomic thread fence on every architecture with GCC and Clang,
 * this assumption might not hold in the future.  Therefore, the definitions
 * above are kept to ensure correct behavior in the future.
 *
 * The quoted descriptions above are taken from the GCC manual.
 */
#ifndef cmm_mb
#define cmm_mb()	__sync_synchronize()
#endif

#ifndef cmm_rmb
#define cmm_rmb()	cmm_mb()
#endif

#ifndef cmm_wmb
#define cmm_wmb()	cmm_mb()
#endif

#define cmm_mc()	cmm_barrier()
#define cmm_rmc()	cmm_barrier()
#define cmm_wmc()	cmm_barrier()
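
/*
 * Example: a minimal sketch of the I/O ordering case for which the full
 * barrier semantics of __sync_synchronize() are wanted (the buffer and
 * device register names below are hypothetical):
 *
 *	dma_desc->cmd = CMD_START;	(make the descriptor visible in memory)
 *	cmm_mb();			(full barrier before poking the device)
 *	doorbell_reg->kick = 1;		(hypothetical memory-mapped register)
 *
 * A compiler barrier or an SMP-only fence would not be enough here, since
 * the ordering must also be visible to the device, not only to other CPUs.
 */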
#else
/*
 * Architectures without cache coherency need something like the following:
 *
 * #define cmm_mc()	arch_cache_flush()
 * #define cmm_rmc()	arch_cache_flush_read()
 * #define cmm_wmc()	arch_cache_flush_write()
 *
 * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to
 * cmm_mc. cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
 *
 * #define cmm_mb()	cmm_mc()
 * #define cmm_rmb()	cmm_rmc()
 * #define cmm_wmb()	cmm_wmc()
 */

#ifndef cmm_mb
#define cmm_mb()	cmm_mc()
#endif

#ifndef cmm_rmb
#define cmm_rmb()	cmm_rmc()
#endif

#ifndef cmm_wmb
#define cmm_wmb()	cmm_wmc()
#endif

#ifndef cmm_rmc
#define cmm_rmc()	cmm_mc()
#endif

#ifndef cmm_wmc
#define cmm_wmc()	cmm_mc()
#endif
#endif

/* Nop everywhere except on alpha. */
#ifndef cmm_read_barrier_depends
#define cmm_read_barrier_depends()
#endif
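
/*
 * Example: a minimal sketch of the dependent-load case this barrier exists
 * for (the variable names are hypothetical).  A reader that loads a pointer
 * and then dereferences it needs cmm_read_barrier_depends() between the two
 * loads on Alpha; on every other architecture it expands to nothing:
 *
 *	p = global_ptr;			(load the published pointer)
 *	cmm_read_barrier_depends();	(order the dependent load on Alpha)
 *	v = p->value;			(dereference only after the barrier)
 */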

#ifdef CONFIG_RCU_SMP
#ifndef cmm_smp_mb
#define cmm_smp_mb()	cmm_mb()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb()	cmm_rmb()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb()	cmm_wmb()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc()	cmm_mc()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc()	cmm_rmc()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc()	cmm_wmc()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends()	cmm_read_barrier_depends()
#endif
#else
#ifndef cmm_smp_mb
#define cmm_smp_mb()	cmm_barrier()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb()	cmm_barrier()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb()	cmm_barrier()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc()	cmm_barrier()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc()	cmm_barrier()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc()	cmm_barrier()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends()
#endif
#endif
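
/*
 * Example: a minimal message-passing sketch using the cmm_smp_* variants
 * (the flag, data and function names are hypothetical).  On CONFIG_RCU_SMP
 * builds these are real memory barriers; on uniprocessor builds they degrade
 * to plain compiler barriers, which is all that is needed there:
 *
 *	writer:
 *		data = compute();
 *		cmm_smp_wmb();		(order the data store before the flag)
 *		CMM_STORE_SHARED(ready, 1);
 *
 *	reader:
 *		while (!CMM_LOAD_SHARED(ready))
 *			caa_cpu_relax();
 *		cmm_smp_rmb();		(order the flag load before the data load)
 *		use(data);
 *
 * CMM_STORE_SHARED()/CMM_LOAD_SHARED() come from <urcu/system.h>.
 */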

#ifndef caa_cpu_relax
#define caa_cpu_relax()		cmm_barrier()
#endif

#ifndef HAS_CAA_GET_CYCLES
#define HAS_CAA_GET_CYCLES

#if defined(__APPLE__)

#include <mach/mach.h>
#include <mach/clock.h>
#include <mach/mach_time.h>
#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	mach_timespec_t ts = { 0, 0 };
	static clock_serv_t clock_service;

	if (caa_unlikely(!clock_service)) {
		if (host_get_clock_service(mach_host_self(),
				SYSTEM_CLOCK, &clock_service))
			return -1ULL;
	}
	if (caa_unlikely(clock_get_time(clock_service, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#elif defined(CONFIG_RCU_HAVE_CLOCK_GETTIME)

#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	struct timespec ts;

	if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#else

#error caa_get_cycles() not implemented for this platform.

#endif

#endif /* HAS_CAA_GET_CYCLES */
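
/*
 * Example: a minimal sketch of timing a code section with caa_get_cycles()
 * (the do_work() and report_ns() helpers are hypothetical).  Note that the
 * generic fallbacks above return nanoseconds from a monotonic clock rather
 * than CPU cycles, and -1ULL on error:
 *
 *	caa_cycles_t start, end;
 *
 *	start = caa_get_cycles();
 *	do_work();			(hypothetical workload)
 *	end = caa_get_cycles();
 *	if (start != (caa_cycles_t) -1ULL && end != (caa_cycles_t) -1ULL)
 *		report_ns(end - start);	(hypothetical reporting helper)
 */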

#ifdef __cplusplus
}
#endif

#endif /* _URCU_ARCH_GENERIC_H */