#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
-#include <linux/version.h>
+#include <linux/percpu-defs.h>
+
+#include <lttng/kernel-version.h>
#include <asm/local.h>
-#include <lttng/kernel-version.h>
#include <lttng/clock.h>
#include <wrapper/compiler.h>
-#include <wrapper/percpu-defs.h>
#include <wrapper/random.h>
-#include <blacklist/timekeeping.h>
extern struct lttng_trace_clock *lttng_trace_clock;
*/
#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
- || LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
- || LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
+ || LTTNG_KERNEL_RANGE(4,4,20, 4,4,25))
#define LTTNG_CLOCK_NMI_SAFE_BROKEN
#endif
* this feature on 64-bit architectures.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
- && BITS_PER_LONG == 64 \
- && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
+#if (BITS_PER_LONG == 64 && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif
/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
preempt_disable();
- last_tsc_ptr = lttng_this_cpu_ptr(&lttng_last_tsc);
+ last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
last = *last_tsc_ptr;
/*
* Read "last" before "now". It is not strictly required, but it ensures
static inline u64 trace_clock_read64(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (likely(!ltc)) {
return trace_clock_read64_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->read64();
}
}
static inline u64 trace_clock_freq(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_freq_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->freq();
}
}
static inline int trace_clock_uuid(char *uuid)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
- read_barrier_depends(); /* load ltc before content */
/* Use default UUID cb when NULL */
if (!ltc || !ltc->uuid) {
return trace_clock_uuid_monotonic(uuid);
static inline const char *trace_clock_name(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_name_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->name();
}
}
static inline const char *trace_clock_description(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_description_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->description();
}
}