#include <lttng/clock.h>
#include <wrapper/compiler.h>
#include <wrapper/random.h>
-#include <blacklist/timekeeping.h>
extern struct lttng_trace_clock *lttng_trace_clock;
*/
#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
- || LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
- || LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
+ || LTTNG_KERNEL_RANGE(4,4,20, 4,4,25))
#define LTTNG_CLOCK_NMI_SAFE_BROKEN
#endif
* this feature on 64-bit architectures.
*/
-#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,17,0) \
- && BITS_PER_LONG == 64 \
- && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
+#if (BITS_PER_LONG == 64 && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif
#ifdef LTTNG_USE_NMI_SAFE_CLOCK
-DECLARE_PER_CPU(u64, lttng_last_tsc);
+DECLARE_PER_CPU(u64, lttng_last_timestamp);
/*
* Sometimes called with preemption enabled. Can be interrupted.
static inline u64 trace_clock_monotonic_wrapper(void)
{
u64 now, last, result;
- u64 *last_tsc_ptr;
+ u64 *last_timestamp_ptr;
/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
preempt_disable();
- last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
- last = *last_tsc_ptr;
+ last_timestamp_ptr = this_cpu_ptr(&lttng_last_timestamp);
+ last = *last_timestamp_ptr;
/*
* Read "last" before "now". It is not strictly required, but it ensures
* that an interrupt coming in won't artificially trigger a case where
now = ktime_get_mono_fast_ns();
if (U64_MAX / 2 < now - last)
now = last;
- result = cmpxchg64_local(last_tsc_ptr, last, now);
+ result = cmpxchg64_local(last_timestamp_ptr, last, now);
preempt_enable();
if (result == last) {
/* Update done. */