urcu/arch/generic: Use atomic builtins if configured
include/urcu/arch/generic.h
// SPDX-FileCopyrightText: 2010 Paolo Bonzini <pbonzini@redhat.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_ARCH_GENERIC_H
#define _URCU_ARCH_GENERIC_H

/*
 * urcu/arch/generic.h: common definitions for multiple architectures.
 */

#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef CAA_CACHE_LINE_SIZE
#define CAA_CACHE_LINE_SIZE 64
#endif
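
/*
 * Illustrative sketch (hypothetical struct, not part of this header):
 * CAA_CACHE_LINE_SIZE is the typical alignment used to keep
 * independently-updated data on separate cache lines and avoid false
 * sharing:
 *
 *	struct counter {
 *		unsigned long count;
 *	} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 */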

#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
#define CONFIG_HAVE_MEM_COHERENCY
/*
 * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
 *
 * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple
 * compiler barrier; in addition, we provide defaults for cmm_mb (using
 * GCC builtins) as well as cmm_rmb and cmm_wmb (defaulting to cmm_mb).
 */

#ifdef CONFIG_RCU_USE_ATOMIC_BUILTINS

# ifndef cmm_smp_mb
# define cmm_smp_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
# endif

#endif /* CONFIG_RCU_USE_ATOMIC_BUILTINS */

/*
 * cmm_mb() expands to __sync_synchronize() instead of __atomic_thread_fence
 * with SEQ_CST because the former "issues a full memory barrier" while the
 * latter "acts as a synchronization fence between threads", which is too weak
 * for what we want, for example with I/O devices.
 *
 * Even though __sync_synchronize seems to be an alias for a sequentially
 * consistent atomic thread fence on every architecture on GCC and Clang,
 * this assumption might become untrue in the future. Therefore, the
 * definitions above are used to ensure correct behavior going forward.
 *
 * The quoted phrases above are taken from the GCC manual.
 */
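
/*
 * Illustrative sketch of why the full barrier matters for I/O (the variable
 * names below are hypothetical, not part of this header):
 *
 *	*dma_buf = payload;	// plain store to a device-visible buffer
 *	cmm_mb();		// full barrier: the buffer write is ordered
 *				// before the doorbell write below
 *	*doorbell_reg = 1;	// MMIO write that kicks the device
 */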
#ifndef cmm_mb
#define cmm_mb() __sync_synchronize()
#endif

#ifndef cmm_rmb
#define cmm_rmb() cmm_mb()
#endif

#ifndef cmm_wmb
#define cmm_wmb() cmm_mb()
#endif

#define cmm_mc() cmm_barrier()
#define cmm_rmc() cmm_barrier()
#define cmm_wmc() cmm_barrier()
#else
/*
 * Architectures without cache coherency need something like the following:
 *
 * #define cmm_mc() arch_cache_flush()
 * #define cmm_rmc() arch_cache_flush_read()
 * #define cmm_wmc() arch_cache_flush_write()
 *
 * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to
 * cmm_mc. cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
 *
 * #define cmm_mb() cmm_mc()
 * #define cmm_rmb() cmm_rmc()
 * #define cmm_wmb() cmm_wmc()
 */

#ifndef cmm_mb
#define cmm_mb() cmm_mc()
#endif

#ifndef cmm_rmb
#define cmm_rmb() cmm_rmc()
#endif

#ifndef cmm_wmb
#define cmm_wmb() cmm_wmc()
#endif

#ifndef cmm_rmc
#define cmm_rmc() cmm_mc()
#endif

#ifndef cmm_wmc
#define cmm_wmc() cmm_mc()
#endif
#endif

/* Nop everywhere except on alpha. */
#ifndef cmm_read_barrier_depends
#define cmm_read_barrier_depends()
#endif

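/*
 * Illustrative sketch (hypothetical variables; in practice rcu_dereference()
 * hides this barrier from callers): a dependency barrier orders a pointer
 * load against later loads through that pointer, a reordering only DEC
 * Alpha performs:
 *
 *	p = CMM_LOAD_SHARED(gp);	// load the shared pointer
 *	cmm_read_barrier_depends();	// expands to nothing except on Alpha
 *	v = p->value;			// dependent load through p
 */
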
#ifdef CONFIG_RCU_SMP
#ifndef cmm_smp_mb
#define cmm_smp_mb() cmm_mb()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb() cmm_rmb()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb() cmm_wmb()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc() cmm_mc()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc() cmm_rmc()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc() cmm_wmc()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends() cmm_read_barrier_depends()
#endif
#else
#ifndef cmm_smp_mb
#define cmm_smp_mb() cmm_barrier()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb() cmm_barrier()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb() cmm_barrier()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc() cmm_barrier()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc() cmm_barrier()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc() cmm_barrier()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends()
#endif
#endif

#ifndef caa_cpu_relax
#define caa_cpu_relax() cmm_barrier()
#endif

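/*
 * Illustrative sketch (hypothetical variables): the classic publish/consume
 * pairing of cmm_smp_wmb()/cmm_smp_rmb(), with caa_cpu_relax() in the wait
 * loop:
 *
 *	// Producer
 *	data = 42;			// plain store
 *	cmm_smp_wmb();			// order data before flag
 *	CMM_STORE_SHARED(flag, 1);
 *
 *	// Consumer
 *	while (!CMM_LOAD_SHARED(flag))
 *		caa_cpu_relax();	// polite busy-wait
 *	cmm_smp_rmb();			// order flag before data
 *	assert(data == 42);
 */
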
#ifndef HAS_CAA_GET_CYCLES
#define HAS_CAA_GET_CYCLES

#if defined(__APPLE__)

#include <mach/mach.h>
#include <mach/clock.h>
#include <mach/mach_time.h>
#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	mach_timespec_t ts = { 0, 0 };
	static clock_serv_t clock_service;

	if (caa_unlikely(!clock_service)) {
		if (host_get_clock_service(mach_host_self(),
				SYSTEM_CLOCK, &clock_service))
			return -1ULL;
	}
	if (caa_unlikely(clock_get_time(clock_service, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#elif defined(CONFIG_RCU_HAVE_CLOCK_GETTIME)

#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	struct timespec ts;

	if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#else

#error caa_get_cycles() not implemented for this platform.

#endif

#endif /* HAS_CAA_GET_CYCLES */

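/*
 * Usage sketch: caa_get_cycles() returns a monotonic timestamp (nanoseconds
 * in the generic fallbacks above, despite the "cycles" name) or -1ULL on
 * error, so intervals are measured by subtraction:
 *
 *	caa_cycles_t start = caa_get_cycles();
 *	do_work();			// hypothetical workload
 *	caa_cycles_t elapsed = caa_get_cycles() - start;
 */
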
#ifdef __cplusplus
}
#endif

#endif /* _URCU_ARCH_GENERIC_H */