Commit | Line | Data |
---|---|---|
b7cdc182 | 1 | /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) |
9f36eaed | 2 | * |
886d51a3 | 3 | * wrapper/trace-clock.h |
f6c19f6e MD |
4 | * |
5 | * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic | |
6 | * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y. | |
7 | * | |
886d51a3 | 8 | * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
f6c19f6e MD |
9 | */ |
10 | ||
9f36eaed MJ |
11 | #ifndef _LTTNG_TRACE_CLOCK_H |
12 | #define _LTTNG_TRACE_CLOCK_H | |
13 | ||
f6c19f6e MD |
14 | #ifdef CONFIG_HAVE_TRACE_CLOCK |
15 | #include <linux/trace-clock.h> | |
16 | #else /* CONFIG_HAVE_TRACE_CLOCK */ | |
17 | ||
18 | #include <linux/hardirq.h> | |
19 | #include <linux/ktime.h> | |
20 | #include <linux/time.h> | |
21 | #include <linux/hrtimer.h> | |
b0725207 | 22 | #include <linux/percpu.h> |
fc8216ae | 23 | #include <linux/version.h> |
46e25482 | 24 | #include <linux/percpu-defs.h> |
b0725207 | 25 | #include <asm/local.h> |
5a2f5e92 MD |
26 | #include <lttng-kernel-version.h> |
27 | #include <lttng-clock.h> | |
84bbd9ea | 28 | #include <linux/random.h> |
fc8216ae | 29 | |
2754583e MD |
30 | extern struct lttng_trace_clock *lttng_trace_clock; |
31 | ||
60e1cd07 MD |
32 | /* |
33 | * We need clock values to be monotonically increasing per-cpu, which is | |
34 | * not strictly guaranteed by ktime_get_mono_fast_ns(). It is | |
35 | * straightforward to do on architectures with a 64-bit cmpxchg(), but | |
36 | * not so on architectures without 64-bit cmpxchg. For now, only enable | |
37 | * this feature on 64-bit architectures. | |
38 | */ | |
39 | ||
d0c04533 | 40 | #if BITS_PER_LONG == 64 |
60e1cd07 MD |
41 | #define LTTNG_USE_NMI_SAFE_CLOCK |
42 | #endif | |
b0725207 | 43 | |
60e1cd07 | 44 | #ifdef LTTNG_USE_NMI_SAFE_CLOCK |
b0725207 | 45 | |
60e1cd07 | 46 | DECLARE_PER_CPU(u64, lttng_last_tsc); |
b0725207 MD |
47 | |
48 | /* | |
0aaa7220 | 49 | * Sometimes called with preemption enabled. Can be interrupted. |
b0725207 MD |
50 | */ |
51 | static inline u64 trace_clock_monotonic_wrapper(void) | |
52 | { | |
60e1cd07 MD |
53 | u64 now, last, result; |
54 | u64 *last_tsc_ptr; | |
b0725207 MD |
55 | |
56 | /* Use fast nmi-safe monotonic clock provided by the Linux kernel. */ | |
0aaa7220 | 57 | preempt_disable(); |
46e25482 | 58 | last_tsc_ptr = this_cpu_ptr(<tng_last_tsc); |
60e1cd07 | 59 | last = *last_tsc_ptr; |
b0725207 MD |
60 | /* |
61 | * Read "last" before "now". It is not strictly required, but it ensures | |
62 | * that an interrupt coming in won't artificially trigger a case where | |
63 | * "now" < "last". This kind of situation should only happen if the | |
64 | * mono_fast time source goes slightly backwards. | |
65 | */ | |
66 | barrier(); | |
67 | now = ktime_get_mono_fast_ns(); | |
60e1cd07 MD |
68 | if (U64_MAX / 2 < now - last) |
69 | now = last; | |
70 | result = cmpxchg64_local(last_tsc_ptr, last, now); | |
0aaa7220 | 71 | preempt_enable(); |
b0725207 MD |
72 | if (result == last) { |
73 | /* Update done. */ | |
74 | return now; | |
75 | } else { | |
76 | /* | |
77 | * Update not done, due to concurrent update. We can use | |
78 | * "result", since it has been sampled concurrently with our | |
79 | * time read, so it should not be far from "now". | |
80 | */ | |
60e1cd07 | 81 | return result; |
b0725207 MD |
82 | } |
83 | } | |
84 | ||
60e1cd07 | 85 | #else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */ |
f6c19f6e MD |
86 | static inline u64 trace_clock_monotonic_wrapper(void) |
87 | { | |
88 | ktime_t ktime; | |
89 | ||
90 | /* | |
91 | * Refuse to trace from NMIs with this wrapper, because an NMI could | |
92 | * nest over the xtime write seqlock and deadlock. | |
93 | */ | |
94 | if (in_nmi()) | |
97ca2c54 | 95 | return (u64) -EIO; |
f6c19f6e MD |
96 | |
97 | ktime = ktime_get(); | |
cfaf9f3d | 98 | return ktime_to_ns(ktime); |
f6c19f6e | 99 | } |
60e1cd07 | 100 | #endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */ |
f6c19f6e | 101 | |
2754583e | 102 | static inline u64 trace_clock_read64_monotonic(void) |
f6c19f6e MD |
103 | { |
104 | return (u64) trace_clock_monotonic_wrapper(); | |
105 | } | |
106 | ||
2754583e | 107 | static inline u64 trace_clock_freq_monotonic(void) |
f6c19f6e | 108 | { |
a3ccff4f | 109 | return (u64) NSEC_PER_SEC; |
f6c19f6e MD |
110 | } |
111 | ||
2754583e | 112 | static inline int trace_clock_uuid_monotonic(char *uuid) |
f6c19f6e | 113 | { |
84bbd9ea MD |
114 | unsigned char *boot_id; |
115 | ||
116 | boot_id = get_kernel_boot_id(); | |
117 | sprintf(uuid, "%pU", boot_id); | |
118 | return 0; | |
f6c19f6e MD |
119 | } |
120 | ||
2754583e MD |
/* Short identifier of the default (monotonic) trace clock. */
static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}
125 | ||
/* Human-readable description of the default (monotonic) trace clock. */
static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}
130 | ||
60e1cd07 | 131 | #ifdef LTTNG_USE_NMI_SAFE_CLOCK |
f6c19f6e MD |
/*
 * Acquire the trace clock (NMI-safe variant). Logs (once) which clock
 * source is in use. Always succeeds.
 */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
60e1cd07 | 137 | #else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */ |
b0725207 MD |
/*
 * Acquire the trace clock (non-NMI-safe fallback). Logs (once) that NMI
 * context will not be traced. Always succeeds.
 */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
60e1cd07 | 143 | #endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */ |
f6c19f6e MD |
144 | |
/* Release the trace clock. No-op counterpart of get_trace_clock(). */
static inline void put_trace_clock(void)
{
}
148 | ||
2754583e MD |
149 | static inline u64 trace_clock_read64(void) |
150 | { | |
a8f2d0c7 | 151 | struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); |
2754583e MD |
152 | |
153 | if (likely(!ltc)) { | |
154 | return trace_clock_read64_monotonic(); | |
155 | } else { | |
156 | read_barrier_depends(); /* load ltc before content */ | |
157 | return ltc->read64(); | |
158 | } | |
159 | } | |
160 | ||
161 | static inline u64 trace_clock_freq(void) | |
162 | { | |
a8f2d0c7 | 163 | struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); |
2754583e MD |
164 | |
165 | if (!ltc) { | |
166 | return trace_clock_freq_monotonic(); | |
167 | } else { | |
168 | read_barrier_depends(); /* load ltc before content */ | |
169 | return ltc->freq(); | |
170 | } | |
171 | } | |
172 | ||
173 | static inline int trace_clock_uuid(char *uuid) | |
174 | { | |
a8f2d0c7 | 175 | struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); |
2754583e MD |
176 | |
177 | read_barrier_depends(); /* load ltc before content */ | |
178 | /* Use default UUID cb when NULL */ | |
179 | if (!ltc || !ltc->uuid) { | |
180 | return trace_clock_uuid_monotonic(uuid); | |
181 | } else { | |
182 | return ltc->uuid(uuid); | |
183 | } | |
184 | } | |
185 | ||
186 | static inline const char *trace_clock_name(void) | |
187 | { | |
a8f2d0c7 | 188 | struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); |
2754583e MD |
189 | |
190 | if (!ltc) { | |
191 | return trace_clock_name_monotonic(); | |
192 | } else { | |
193 | read_barrier_depends(); /* load ltc before content */ | |
194 | return ltc->name(); | |
195 | } | |
196 | } | |
197 | ||
198 | static inline const char *trace_clock_description(void) | |
199 | { | |
a8f2d0c7 | 200 | struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); |
2754583e MD |
201 | |
202 | if (!ltc) { | |
203 | return trace_clock_description_monotonic(); | |
204 | } else { | |
205 | read_barrier_depends(); /* load ltc before content */ | |
206 | return ltc->description(); | |
207 | } | |
208 | } | |
209 | ||
f6c19f6e MD |
210 | #endif /* CONFIG_HAVE_TRACE_CLOCK */ |
211 | ||
a90917c3 | 212 | #endif /* _LTTNG_TRACE_CLOCK_H */ |