#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

/*
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <asm/local.h>
#include "../lttng-kernel-version.h"
#include "random.h"	/* for wrapper_get_bootid() */

#if LTTNG_KERNEL_RANGE(3,10,0, 3,10,14) || LTTNG_KERNEL_RANGE(3,11,0, 3,11,3)
#error "Linux kernels 3.10 and 3.11 introduce a deadlock in the timekeeping subsystem. Fixed by commit 7bd36014460f793c19e7d6c94dab67b0afcfcb7f \"timekeeping: Fix HRTICK related deadlock from ntp lock changes\" in Linux."
#endif /* LTTNG_KERNEL_RANGE */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))

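/*
 * lttng_last_tsc caches, per CPU, the low-order bits (one "unsigned long"
 * worth) of the last timestamp returned on that CPU. It is only updated
 * with local_cmpxchg(), so interrupts nesting over the update cannot
 * corrupt it; the 32-bit fixup below uses it to detect low-word wraps.
 */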
DECLARE_PER_CPU(local_t, lttng_last_tsc);

#if (BITS_PER_LONG == 32)
/*
 * Fixup "src_now" using the 32 LSB from "last". We need to handle overflow and
 * underflow of the 32nd bit. "last" can be above, below or equal to the 32 LSB
 * of "src_now".
 */
static inline u64 trace_clock_fixup(u64 src_now, u32 last)
{
	u64 now;

	now = src_now & 0xFFFFFFFF00000000ULL;
	now |= (u64) last;
	/* Detect overflow or underflow between now and last. */
	if ((src_now & 0x80000000U) && !(last & 0x80000000U)) {
		/*
		 * If 32nd bit transitions from 1 to 0, and we move forward in
		 * time from "now" to "last", then we have an overflow.
		 */
		if (((s32) src_now - (s32) last) < 0)
			now += 0x0000000100000000ULL;
	} else if (!(src_now & 0x80000000U) && (last & 0x80000000U)) {
		/*
		 * If 32nd bit transitions from 0 to 1, and we move backward in
		 * time from "now" to "last", then we have an underflow.
		 */
		if (((s32) src_now - (s32) last) > 0)
			now -= 0x0000000100000000ULL;
	}
	return now;
}
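
/*
 * Worked example with hypothetical values: src_now = 0x00000001FFFFFFF0
 * and last = 0x00000010 means the 32 LSB wrapped between the two samples
 * ((s32) 0xFFFFFFF0 - (s32) 0x00000010 == -32 < 0), so a carry is applied
 * to the high word: the fixed-up time is 0x0000000200000010.
 */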
#else /* #if (BITS_PER_LONG == 32) */
/*
 * The fixup is pretty easy on 64-bit architectures: "last" is a 64-bit
 * value, so we can use last directly as current time.
 */
static inline u64 trace_clock_fixup(u64 src_now, u64 last)
{
	return last;
}
#endif /* #else #if (BITS_PER_LONG == 32) */

/*
 * Always called with preemption disabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now;
	unsigned long last, result;
	local_t *last_tsc;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	last_tsc = &__get_cpu_var(lttng_last_tsc);
	last = local_read(last_tsc);
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
	if (((long) now - (long) last) < 0)
		now = trace_clock_fixup(now, last);
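	/*
	 * Publish "now" as this CPU's last timestamp. An interrupt nesting
	 * over this code may have updated last_tsc between the local_read()
	 * above and this point; in that case the cmpxchg fails and "result"
	 * holds the concurrent sample.
	 */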
	result = local_cmpxchg(last_tsc, last, (unsigned long) now);
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return trace_clock_fixup(now, result);
	}
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;
	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

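/*
 * Interface consumed by the LTTng tracer core: trace_clock_read64()
 * returns monotonic time in nanoseconds, trace_clock_freq() the clock
 * frequency (NSEC_PER_SEC, i.e. nanosecond resolution), and
 * trace_clock_uuid() an identifier tying traces to this clock instance.
 */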
static inline u64 trace_clock_read64(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq(void)
{
	return (u64) NSEC_PER_SEC;
}

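/*
 * The kernel boot ID is used as the clock UUID: it is stable for the
 * whole uptime, so every trace taken during one boot carries the same
 * identifier and trace viewers can correlate them against one clock.
 */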
static inline int trace_clock_uuid(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
static inline int get_trace_clock(void)
{
	printk(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
static inline int get_trace_clock(void)
{
	printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

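/*
 * get_trace_clock()/put_trace_clock() bracket tracing sessions. The
 * mainline monotonic clock needs no setup or teardown, so the "put"
 * side is a no-op and the "get" side only logs which clock is in use.
 */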
static inline void put_trace_clock(void)
{
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */