/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Maps the LTTng trace clock to either a kernel-provided trace clock or the
 * mainline monotonic clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/percpu-defs.h>

#include <lttng/kernel-version.h>
#include <asm/local.h>
#include <lttng/clock.h>
#include <wrapper/compiler.h>
#include <wrapper/random.h>
#include <blacklist/timekeeping.h>

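/*
 * May be set by a clock plugin module (see lttng/clock.h); when NULL, the
 * monotonic implementations below are used.
 */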
extern struct lttng_trace_clock *lttng_trace_clock;

/*
 * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
 * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
 * This is fixed by the patch "timekeeping: Fix __ktime_get_fast_ns()
 * regression".
 */
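/*
 * Assumed semantics: LTTNG_KERNEL_RANGE(a,b,c, d,e,f) matches kernel versions
 * in [a.b.c, d.e.f), i.e. the lower bound is inclusive and the upper bound
 * exclusive.
 */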
#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
	|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
	|| LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
	|| LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
#define LTTNG_CLOCK_NMI_SAFE_BROKEN
#endif

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,17,0) \
	&& BITS_PER_LONG == 64 \
	&& !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

#ifdef LTTNG_USE_NMI_SAFE_CLOCK

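/* Last timestamp returned on each CPU, used to enforce per-cpu monotonicity. */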
DECLARE_PER_CPU(u64, lttng_last_tsc);

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_tsc_ptr;

	/* Use the fast NMI-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
	last = *last_tsc_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
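	/*
	 * Unsigned arithmetic: if the mono_fast source went backwards
	 * ("now" < "last"), "now - last" wraps around to a value larger
	 * than U64_MAX / 2. Clamp "now" to "last" so the per-cpu clock
	 * never goes backwards.
	 */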
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_tsc_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

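/*
 * The boot ID is used as the clock UUID: it uniquely identifies the current
 * boot, which is the validity domain of the monotonic clock.
 */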
static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

#ifdef LTTNG_USE_NMI_SAFE_CLOCK
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline void put_trace_clock(void)
{
}

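/*
 * Dispatch helpers: when a clock plugin has installed lttng_trace_clock,
 * use its callbacks; otherwise fall back to the monotonic implementations.
 */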
static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		return ltc->read64();
	}
}

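/*
 * Illustrative only: with the per-cpu monotonicity guarantee above, two
 * successive reads on the same CPU never go backwards.
 *
 *	u64 t1 = trace_clock_read64();
 *	u64 t2 = trace_clock_read64();	-> t2 >= t1 on the same CPU
 */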
static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	/* Use the default UUID callback when the plugin provides none. */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		return ltc->description();
	}
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */