X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=liblttng-ust%2Fclock.h;h=a7e8eb7b49e062b1e5ebe9f24cd7d60278a4c64e;hb=702d1b7d91bc75f4fc01415fc625af1b834ddec4;hp=ed191b0bf9b7ca4b75a55ddbb17682e052b0ad4c;hpb=35897f8b2d311b756b81657dad9c53ef1c0fad8a;p=lttng-ust.git

diff --git a/liblttng-ust/clock.h b/liblttng-ust/clock.h
index ed191b0b..a7e8eb7b 100644
--- a/liblttng-ust/clock.h
+++ b/liblttng-ust/clock.h
@@ -24,34 +24,148 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
-/* TRACE CLOCK */
+#include "lttng-ust-uuid.h"
-/*
- * Currently using the kernel MONOTONIC clock, waiting for kernel-side
- * LTTng to implement mmap'd trace clock.
- */
+struct lttng_trace_clock {
+	uint64_t (*read64)(void);
+	uint64_t (*freq)(void);
+	int (*uuid)(char *uuid);
+	const char *(*name)(void);
+	const char *(*description)(void);
+};
+
+extern struct lttng_trace_clock *lttng_trace_clock;
+
+void lttng_ust_clock_init(void);
-/* Choosing correct trace clock */
+/* Use the kernel MONOTONIC clock. */
-static __inline__ uint64_t trace_clock_read64(void)
+static __inline__
+uint64_t trace_clock_read64_monotonic(void)
 {
 	struct timespec ts;
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-	return (ts.tv_sec * 1000000000) + ts.tv_nsec;
+	if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts))) {
+		ts.tv_sec = 0;
+		ts.tv_nsec = 0;
+	}
+	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
+}
+
+static __inline__
+uint64_t trace_clock_freq_monotonic(void)
+{
+	return 1000000000ULL;
+}
+
+static __inline__
+int trace_clock_uuid_monotonic(char *uuid)
+{
+	int ret = 0;
+	size_t len;
+	FILE *fp;
+
+	/*
+	 * boot_id needs to be read once before being used concurrently
+	 * to deal with a Linux kernel race. A fix is proposed for
+	 * upstream, but the work-around is needed for older kernels.
+	 */
+	fp = fopen("/proc/sys/kernel/random/boot_id", "r");
+	if (!fp) {
+		return -ENOENT;
+	}
+	len = fread(uuid, 1, LTTNG_UST_UUID_STR_LEN - 1, fp);
+	if (len < LTTNG_UST_UUID_STR_LEN - 1) {
+		ret = -EINVAL;
+		goto end;
+	}
+	uuid[LTTNG_UST_UUID_STR_LEN - 1] = '\0';
+end:
+	fclose(fp);
+	return ret;
+}
+
+static __inline__
+const char *trace_clock_name_monotonic(void)
+{
+	return "monotonic";
 }
-#if __i386__ || __x86_64__
-static __inline__ uint64_t trace_clock_frequency(void)
+static __inline__
+const char *trace_clock_description_monotonic(void)
 {
-	return 1000000000LL;
+	return "Monotonic Clock";
 }
-#endif /* #else #if __i386__ || __x86_64__ */
-static __inline__ uint32_t trace_clock_freq_scale(void)
+static __inline__
+uint64_t trace_clock_read64(void)
 {
-	return 1;
+	struct lttng_trace_clock *ltc = CMM_LOAD_SHARED(lttng_trace_clock);
+
+	if (caa_likely(!ltc)) {
+		return trace_clock_read64_monotonic();
+	} else {
+		cmm_read_barrier_depends();	/* load ltc before content */
+		return ltc->read64();
+	}
+}
+
+static __inline__
+uint64_t trace_clock_freq(void)
+{
+	struct lttng_trace_clock *ltc = CMM_LOAD_SHARED(lttng_trace_clock);
+
+	if (!ltc) {
+		return trace_clock_freq_monotonic();
+	} else {
+		cmm_read_barrier_depends();	/* load ltc before content */
+		return ltc->freq();
+	}
+}
+
+static __inline__
+int trace_clock_uuid(char *uuid)
+{
+	struct lttng_trace_clock *ltc = CMM_LOAD_SHARED(lttng_trace_clock);
+
+	cmm_read_barrier_depends();	/* load ltc before content */
+	/* Use default UUID cb when NULL */
+	if (!ltc || !ltc->uuid) {
+		return trace_clock_uuid_monotonic(uuid);
+	} else {
+		return ltc->uuid(uuid);
+	}
+}
+
+static __inline__
+const char *trace_clock_name(void)
+{
+	struct lttng_trace_clock *ltc = CMM_LOAD_SHARED(lttng_trace_clock);
+
+	if (!ltc) {
+		return trace_clock_name_monotonic();
+	} else {
+		cmm_read_barrier_depends();	/* load ltc before content */
+		return ltc->name();
+	}
+}
+
+static __inline__
+const char *trace_clock_description(void)
+{
+	struct lttng_trace_clock *ltc = CMM_LOAD_SHARED(lttng_trace_clock);
+
+	if (!ltc) {
+		return trace_clock_description_monotonic();
+	} else {
+		cmm_read_barrier_depends();	/* load ltc before content */
+		return ltc->description();
+	}
 }
 #endif /* _UST_CLOCK_H */
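For readers of this patch, the sketch below illustrates how the struct lttng_trace_clock callback table introduced above could be filled in by an override clock. It is an illustrative assumption, not part of the patch: the plugin_* names are hypothetical, CLOCK_MONOTONIC_RAW is chosen only to differ from the built-in monotonic fallback, and the publication step merely mirrors the reader-side CMM_LOAD_SHARED()/cmm_read_barrier_depends() pairing used by trace_clock_*(); how lttng-ust actually discovers and installs a clock plugin (via lttng_ust_clock_init()) is outside this diff.

/*
 * Hypothetical example (not part of this patch): an override clock
 * wired into the struct lttng_trace_clock callback table from clock.h.
 */
#include <time.h>
#include <stdint.h>
#include <stddef.h>
#include <urcu/system.h>	/* CMM_STORE_SHARED() */
#include <urcu/arch.h>		/* cmm_smp_wmb() */

#include "clock.h"		/* struct lttng_trace_clock, lttng_trace_clock */

static uint64_t plugin_read64(void)
{
	struct timespec ts;

	/* CLOCK_MONOTONIC_RAW is Linux-specific; used here for illustration. */
	if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts))
		return 0;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

static uint64_t plugin_freq(void)
{
	return 1000000000ULL;	/* nanosecond resolution */
}

static const char *plugin_name(void)
{
	return "monotonic_raw_example";
}

static const char *plugin_description(void)
{
	return "Example clock based on CLOCK_MONOTONIC_RAW";
}

static struct lttng_trace_clock plugin_clock = {
	.read64 = plugin_read64,
	.freq = plugin_freq,
	.uuid = NULL,	/* NULL falls back to the boot_id-based UUID above */
	.name = plugin_name,
	.description = plugin_description,
};

void plugin_install(void)
{
	/*
	 * Publish the table: order the stores so the callback contents
	 * are visible before the pointer, matching the
	 * cmm_read_barrier_depends() on the read side.
	 */
	cmm_smp_wmb();
	CMM_STORE_SHARED(lttng_trace_clock, &plugin_clock);
}

The uuid callback is deliberately left NULL in this sketch: trace_clock_uuid() above already falls back to trace_clock_uuid_monotonic() in that case, so the trace keeps a per-boot UUID even when an override clock is in use.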