ppc.h: use mftb on ppc
[urcu.git] / include / urcu / arch / ppc.h
... / ...
CommitLineData
1// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
2// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
3//
4// SPDX-License-Identifier: LGPL-2.1-or-later
5
6#ifndef _URCU_ARCH_PPC_H
7#define _URCU_ARCH_PPC_H
8
9/*
10 * arch_ppc.h: trivial definitions for the powerpc architecture.
11 */
12
13#include <urcu/compiler.h>
14#include <urcu/config.h>
15#include <urcu/syscall-compat.h>
16#include <stdint.h>
17
18#ifdef __cplusplus
19extern "C" {
20#endif
21
/*
 * Cache line size used for alignment/padding: sized for the largest
 * known line on this architecture family, the POWER5+ L3 (256 bytes).
 */
#define CAA_CACHE_LINE_SIZE 256

/*
 * The compiler defines __NO_LWSYNC__ on targets whose cores lack the
 * lightweight lwsync instruction; fall back to the heavier full
 * "sync" barrier there.
 */
#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE "sync\n"
#else
#define LWSYNC_OPCODE "lwsync\n"
#endif
30
/*
 * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
 * preserve ordering of cacheable vs. non-cacheable accesses, so it
 * should not be used to order with respect to MMIO operations. An
 * eieio+lwsync pair is also not enough for cmm_rmb, because it will
 * order cacheable and non-cacheable memory operations separately---i.e.
 * not the latter against the former.
 */
/* Full barrier: the "memory" clobber also stops compiler reordering. */
#define cmm_mb() __asm__ __volatile__ ("sync":::"memory")

/*
 * lwsync orders loads in cacheable memory with respect to other loads,
 * and stores in cacheable memory with respect to other stores.
 * Therefore, use it for barriers ordering accesses to cacheable memory
 * only.
 */
/* SMP read/write barriers: lwsync (or sync when __NO_LWSYNC__). */
#define cmm_smp_rmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
#define cmm_smp_wmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
49
/*
 * Read the low word of the time base into an unsigned long.
 * NOTE(review): the mftb mnemonic is used here instead of mftbl —
 * presumably because some assemblers reject mftbl (cf. the commit
 * title "use mftb on ppc"); on 32-bit this reads TB's low 32 bits,
 * on 64-bit it reads the full time base. Confirm against the
 * toolchains this header must support.
 */
#define mftbl()						\
	__extension__					\
	({						\
		unsigned long rval;			\
		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
		rval;					\
	})

/* Read the upper 32 bits of the time base into an unsigned long. */
#define mftbu()						\
	__extension__					\
	({						\
		unsigned long rval;			\
		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
		rval;					\
	})

/*
 * Read the full 64-bit time base in a single instruction.
 * NOTE(review): only meaningful on ppc64 — on 32-bit, a single mftb
 * cannot fill an unsigned long long (register pair); the 32-bit
 * caa_get_cycles() below uses the mftbu/mftbl re-read loop instead.
 */
#define mftb()						\
	__extension__					\
	({						\
		unsigned long long rval;		\
		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
		rval;					\
	})
73
/* This architecture provides its own caa_get_cycles() implementation. */
#define HAS_CAA_GET_CYCLES

/* Cycle counter value: full 64-bit time base contents. */
typedef uint64_t caa_cycles_t;
77
78#ifdef __powerpc64__
79static inline caa_cycles_t caa_get_cycles(void)
80{
81 return (caa_cycles_t) mftb();
82}
83#else
84static inline caa_cycles_t caa_get_cycles(void)
85{
86 unsigned long h, l;
87
88 for (;;) {
89 h = mftbu();
90 cmm_barrier();
91 l = mftbl();
92 cmm_barrier();
93 if (mftbu() == h)
94 return (((caa_cycles_t) h) << 32) + l;
95 }
96}
97#endif
98
/*
 * On Linux, define the membarrier system call number if not yet available in
 * the system headers.
 * NOTE(review): 365 should be the powerpc-specific __NR_membarrier value —
 * verify against the kernel's syscall table for this architecture.
 */
#if (defined(__linux__) && !defined(__NR_membarrier))
#define __NR_membarrier 365
#endif
106
107#ifdef __cplusplus
108}
109#endif
110
111#include <urcu/arch/generic.h>
112
113#endif /* _URCU_ARCH_PPC_H */
This page took 0.023417 seconds and 4 git commands to generate.