From 2e8e974039487cb8b2b22dc01ffdd6c9931ec02b Mon Sep 17 00:00:00 2001
From: Olivier Dion
Date: Thu, 21 Mar 2024 14:51:07 -0400
Subject: [PATCH 1/9] ust-tracepoint-event: Add static check of sequence
 length type

Enforce at compile time that the length of a sequence uses an unsigned
type.

Change-Id: Ia8668a80eb0c0b81e8c03b208d7581e34af313fd
Signed-off-by: Olivier Dion
Signed-off-by: Mathieu Desnoyers
---
 include/lttng/ust-tracepoint-event.h | 32 ++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/include/lttng/ust-tracepoint-event.h b/include/lttng/ust-tracepoint-event.h
index baf98793..5d845ae1 100644
--- a/include/lttng/ust-tracepoint-event.h
+++ b/include/lttng/ust-tracepoint-event.h
@@ -255,6 +255,38 @@ void lttng_ust__event_template_proto___##_provider##___##_name(LTTNG_UST__TP_ARG
 };
 
 #include LTTNG_UST_TRACEPOINT_INCLUDE
+
+/*
+ * Stage 0.9.0
+ * Verifying sequence length types are of an unsigned type.
+ */
+
+/* Reset all macros within LTTNG_UST_TRACEPOINT_EVENT */
+#include
+#include
+#include
+
+/*
+ * Note that it is not possible to encode the length type as a C identifier,
+ * since it can be multiple tokens.
+ */
+#undef lttng_ust__field_sequence_encoded
+#define lttng_ust__field_sequence_encoded(_type, _item, _src, _byte_order, \
+		_length_type, _src_length, _encoding, _nowrite, \
+		_elem_type_base) \
+	lttng_ust_static_assert(!lttng_ust_is_signed_type(_length_type), \
+		"Length type " #_length_type " is not a unsigned integer type", \
+		Length_type_is_not_a_unsigned_integer_type);
+
+#undef LTTNG_UST_TP_FIELDS
+#define LTTNG_UST_TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
+
+#undef LTTNG_UST__TRACEPOINT_EVENT_CLASS
+#define LTTNG_UST__TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
+	_fields
+
+#include LTTNG_UST_TRACEPOINT_INCLUDE
+
 #if defined(__cplusplus)
 
 /*
-- 
2.34.1


From 4ce8959484725ec7ba08dcb6b6792db76d20451b Mon Sep 17 00:00:00 2001
From: Kienan Stewart
Date: Fri, 9 Feb 2024 14:48:29 -0500
Subject: [PATCH 2/9] docs: Add cases in which tracepoints in ctors/dtors may
 not work

Change-Id: I52666810322e26b3841ea1bca6f588b6c3e6f3f8
Signed-off-by: Kienan Stewart
Signed-off-by: Mathieu Desnoyers
---
 doc/man/lttng-ust.3.txt | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/doc/man/lttng-ust.3.txt b/doc/man/lttng-ust.3.txt
index 23d31cac..12f2d65d 100644
--- a/doc/man/lttng-ust.3.txt
+++ b/doc/man/lttng-ust.3.txt
@@ -688,6 +688,27 @@ NOTE: Neither `lttng_ust_tracepoint_enabled()` nor
 `lttng_ust_do_tracepoint()` have a `STAP_PROBEV()` call, so if you need it,
 you should emit this call yourself.
 
+Tracing in C/C++ constructors and destructors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As of LTTng-UST{nbsp}2.13, tracepoint definitions are implemented using
+compound literals. In the following cases, those compound literals are
+allocated on the heap:
+
+* g++{nbsp}<={nbsp}4.8 is used as the compiler or,
+* `LTTNG_UST_ALLOCATE_COMPOUND_LITERAL_ON_HEAP` is defined in the C pre-processor flags
+and the application is compiled with a C++ compiler
+
+When the compound literals are heap-allocated, there are some cases in which
+both C-style and C++ constructors and destructors will not be traced.
+
+1. C-style constructors and destructors in statically linked archives
+2. C-style constructors and destructors in the application itself
+3. Some C++-style constructors and destructors in the application and
+statically linked archives
+
+In the 3rd case above, which C++-style constructors and destructors will not
+be traced depends on the initialization order within each translation unit
+and across the entire program when all translation units are linked together.
 
 [[build-static]]
 Statically linking the tracepoint provider
-- 
2.34.1


From 9c3f3d0208bab524c94466b8cc6bb1dcbc4a906b Mon Sep 17 00:00:00 2001
From: Kienan Stewart
Date: Thu, 14 Mar 2024 11:39:12 -0400
Subject: [PATCH 3/9] docs: Add supported versions and fix-backport policy

Change-Id: I9ec43912652fc713484959e9315765f7e9d29a3e
Signed-off-by: Kienan Stewart
Signed-off-by: Mathieu Desnoyers
---
 README.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/README.md b/README.md
index f71649b4..657cba85 100644
--- a/README.md
+++ b/README.md
@@ -203,6 +203,23 @@ compiled in C++.
 To compile tracepoint probes in C++, you need G++ >= 4.7 or Clang >= 4.0.
 The C++ compilers need to support C++11.
 
+Supported versions
+------------------
+
+The LTTng project supports the last two released stable versions
+(e.g. stable-2.13 and stable-2.12).
+
+Fixes are backported from the master branch to the last stable version
+unless those fixes would break the ABI or API. Those fixes may be backported
+to the second-last stable version, depending on complexity and ABI/API
+compatibility.
+
+Security fixes are backported from the master branch to both the last
+stable version and the second-last stable version.
+
+New features are integrated into the master branch and not backported to the
+last stable branch.
+
 Contact
 -------
-- 
2.34.1


From 4b01076fea0f635af6af6762a8edce1be03e5d39 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 18 Apr 2024 11:25:55 -0400
Subject: [PATCH 4/9] Add close_range wrapper to liblttng-ust-fd.so

glibc 2.34 implements close_range(2), which is used by the ssh client
(amongst others). This needs to be overridden to make sure ssh does not
close lttng-ust file descriptors.

Signed-off-by: Mathieu Desnoyers
Change-Id: Ic4e0046499e1f010395aec71a48316b9d1e9bf3f
---
 src/common/ust-fd.h                   |  2 +
 src/lib/lttng-ust-common/fd-tracker.c | 59 +++++++++++++++++++++++++++
 src/lib/lttng-ust-fd/lttng-ust-fd.c   | 45 ++++++++++++++++++++
 3 files changed, 106 insertions(+)

diff --git a/src/common/ust-fd.h b/src/common/ust-fd.h
index 4e6c4a5d..85038231 100644
--- a/src/common/ust-fd.h
+++ b/src/common/ust-fd.h
@@ -25,5 +25,7 @@ void lttng_ust_unlock_fd_tracker(void);
 int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int));
 int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream));
 int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int));
+int lttng_ust_safe_close_range_fd(unsigned int first, unsigned int last, int flags,
+		int (*close_range_cb)(unsigned int, unsigned int, int));
 
 #endif /* _LTTNG_UST_FD_H */

diff --git a/src/lib/lttng-ust-common/fd-tracker.c b/src/lib/lttng-ust-common/fd-tracker.c
index f5e6ed21..a39869ef 100644
--- a/src/lib/lttng-ust-common/fd-tracker.c
+++ b/src/lib/lttng-ust-common/fd-tracker.c
@@ -466,3 +466,62 @@ int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
 end:
 	return ret;
 }
+
+/*
+ * Implement helper for close_range() override.
+ */ +int lttng_ust_safe_close_range_fd(unsigned int first, unsigned int last, int flags, + int (*close_range_cb)(unsigned int first, unsigned int last, int flags)) +{ + int ret = 0, i; + + lttng_ust_fd_tracker_alloc_tls(); + + /* + * Ensure the tracker is initialized when called from + * constructors. + */ + lttng_ust_fd_tracker_init(); + + if (first > last || last > INT_MAX) { + ret = -1; + errno = EINVAL; + goto end; + } + /* + * If called from lttng-ust, we directly call close_range + * without validating whether the FD is part of the tracked set. + */ + if (URCU_TLS(ust_fd_mutex_nest)) { + if (close_range_cb(first, last, flags) < 0) { + ret = -1; + goto end; + } + } else { + int last_check = last; + + if (last > lttng_ust_max_fd) + last_check = lttng_ust_max_fd; + lttng_ust_lock_fd_tracker(); + for (i = first; i <= last_check; i++) { + if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set)) + continue; + if (close_range_cb(i, i, flags) < 0) { + ret = -1; + /* propagate errno from close_range_cb. */ + lttng_ust_unlock_fd_tracker(); + goto end; + } + } + if (last > lttng_ust_max_fd) { + if (close_range_cb(lttng_ust_max_fd + 1, last, flags) < 0) { + ret = -1; + lttng_ust_unlock_fd_tracker(); + goto end; + } + } + lttng_ust_unlock_fd_tracker(); + } +end: + return ret; +} diff --git a/src/lib/lttng-ust-fd/lttng-ust-fd.c b/src/lib/lttng-ust-fd/lttng-ust-fd.c index d2ebcfbc..0360b6f2 100644 --- a/src/lib/lttng-ust-fd/lttng-ust-fd.c +++ b/src/lib/lttng-ust-fd/lttng-ust-fd.c @@ -21,6 +21,8 @@ static int (*__lttng_ust_fd_plibc_close)(int fd) = NULL; static int (*__lttng_ust_fd_plibc_fclose)(FILE *stream) = NULL; +static int (*__lttng_ust_fd_plibc_close_range)(unsigned int first, + unsigned int last, int flags) = NULL; /* * Use dlsym to find the original libc close() symbol and store it in @@ -60,6 +62,24 @@ void *_lttng_ust_fd_init_plibc_fclose(void) return __lttng_ust_fd_plibc_fclose; } +/* + * Use dlsym to find the original libc close_range() symbol and store it + * in __lttng_ust_fd_plibc_close_range. The close_range symbol only + * appears in glibc 2.34, so it is considered optional. + */ +static +void *_lttng_ust_fd_init_plibc_close_range(void) +{ + if (__lttng_ust_fd_plibc_close_range == NULL) { + __lttng_ust_fd_plibc_close_range = dlsym(RTLD_NEXT, "close_range"); + + if (__lttng_ust_fd_plibc_close_range == NULL) + __lttng_ust_fd_plibc_close_range = (void *) LTTNG_UST_DLSYM_FAILED_PTR; + } + + return __lttng_ust_fd_plibc_close_range; +} + static void _lttng_ust_fd_ctor(void) __attribute__((constructor)); @@ -75,6 +95,7 @@ void _lttng_ust_fd_ctor(void) */ (void) _lttng_ust_fd_init_plibc_close(); (void) _lttng_ust_fd_init_plibc_fclose(); + (void) _lttng_ust_fd_init_plibc_close_range(); } /* @@ -127,6 +148,30 @@ int fclose(FILE *stream) __lttng_ust_fd_plibc_fclose); } +/* + * Override the libc close_range() symbol with our own, allowing + * applications to close arbitrary file descriptors. If the fd is owned + * by lttng-ust, return -1, errno=EBADF instead of closing it. + * + * If dlsym failed to find the original libc close_range() symbol, + * return -1, errno=ENOSYS. + * + * There is a short window before the library constructor has executed where + * this wrapper could call dlsym() and thus not be async-signal-safe. + */ +int close_range(unsigned int first, unsigned int last, int flags) +{ + /* + * We can't retry dlsym here since close is async-signal-safe. 
+ */ + if (_lttng_ust_fd_init_plibc_close_range() == (void *) LTTNG_UST_DLSYM_FAILED_PTR) { + errno = ENOSYS; + return -1; + } + + return lttng_ust_safe_close_range_fd(first, last, flags, __lttng_ust_fd_plibc_close_range); +} + #if defined(__sun__) || defined(__FreeBSD__) /* Solaris and FreeBSD. */ void closefrom(int lowfd) -- 2.34.1 From 97572c0438845cee953ebd3e39615f78bfa405a7 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Wed, 20 Mar 2024 16:47:39 -0400 Subject: [PATCH 5/9] Introduce LTTNG_UST_MAP_POPULATE_POLICY environment variable Problem Statement ----------------- commit 4d4838bad480 ("Use MAP_POPULATE to reduce pagefault when available") was first introduced in tag v2.11.0 and never backported to stable branches. Its purpose was to reduce the tracer fast-path latency caused by handling minor page faults the first time a given application writes to each page of the ring buffer after mapping them. The discussion thread leading to this commit can be found here [1]. When using LTTng-UST for diagnosing real-time applications with very strict constraints, this added latency is unwanted. That commit introduced the MAP_POPULATE flag when mapping the ring buffer pages, which causes the kernel to pre-populate the page table entries (PTE). This has, however, unintended consequences for the following scenarios: * Short-lived applications which write very little to the ring buffer end up taking more time to start, because of the time it takes to pre-populate all the ring buffer pages, even though they typically won't be used by the application. * Containerized workloads using cpusets will also end up having longer application startup time than strictly required, and will populate PTE for ring buffers of CPUs which are not present in the cpuset. There are, therefore, two sets of irreconcilable requirements: short-lived and containerized workloads benefit from lazily populating the PTE, whereas real-time workloads benefit from pre-populating them. This will therefore require a tunable environment variable that will let the end-user choose the behavior for each application. Solution -------- Allow users to specify whether they want to pre-populate shared memory pages within the application with an environment variable. LTTNG_UST_MAP_POPULATE_POLICY If set, override the policy used to populate shared memory pages within the application. The expected values are: none Do not pre-populate any pages, take minor faults on first access while tracing. cpu_possible Pre-populate pages for all possible CPUs in the system, as listed by /sys/devices/system/cpu/possible. Default: none. If the policy is unknown, use the default. Choice of the default --------------------- Given that users with strict real-time constraints already have to setup their tracing with specific options (see the "--read-timer" lttng-enable-channel(3) option [2]), it makes sense that the default is to lazily populate the ring buffer PTE, and require users with real-time constraints to explicitly enable the pre-populate through an environment variable. Effect on default behavior -------------------------- The default behavior for ring buffer PTE mapping will be changing across LTTng-UST versions in the following way: - 2.10 and earlier: lazily populate PTE, - 2.11-2.13: pre-populate PTE, - 2.14: lazily populate PTE. LTTng-UST 2.14 will revert back to the 2.10 lazy populate scheme by default. 
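
To make the mechanism concrete, here is a minimal illustrative sketch
(editorial, not code from this patch; the patch itself threads a
"populate" flag through its allocators and mmap call sites via its
LTTNG_MAP_POPULATE compatibility define, and the helper name below is
hypothetical) of a mapping helper that pre-populates page table entries
only on request:

	#define _GNU_SOURCE
	#include <stdbool.h>
	#include <sys/mman.h>

	/* Map a shared memory fd, optionally pre-populating its PTEs. */
	static void *map_shm(int shmfd, size_t len, bool populate)
	{
		int flags = MAP_SHARED;

		if (populate)
			flags |= MAP_POPULATE;	/* Linux-specific flag. */
		return mmap(NULL, len, PROT_READ | PROT_WRITE, flags,
				shmfd, 0);
	}
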
[1] https://lists.lttng.org/pipermail/lttng-dev/2019-July/thread.html#29094 [2] https://lttng.org/docs/v2.13/#doc-channel-timers Signed-off-by: Mathieu Desnoyers Change-Id: I6743b08cd1fe0d956caaf6aad63005555bb9640e --- doc/man/lttng-ust.3.txt | 17 ++++ src/common/Makefile.am | 2 + src/common/counter/counter.c | 15 ++-- src/common/counter/shm.c | 38 +++++---- src/common/counter/shm.h | 7 +- src/common/getenv.c | 1 + src/common/macros.h | 25 +++++- src/common/populate.c | 86 ++++++++++++++++++++ src/common/populate.h | 18 ++++ src/common/ringbuffer/ring_buffer_backend.c | 7 +- src/common/ringbuffer/ring_buffer_frontend.c | 15 ++-- src/common/ringbuffer/shm.c | 36 +++++--- src/common/ringbuffer/shm.h | 6 +- tests/unit/libringbuffer/shm.c | 4 +- 14 files changed, 226 insertions(+), 51 deletions(-) create mode 100644 src/common/populate.c create mode 100644 src/common/populate.h diff --git a/doc/man/lttng-ust.3.txt b/doc/man/lttng-ust.3.txt index 12f2d65d..0864b9a3 100644 --- a/doc/man/lttng-ust.3.txt +++ b/doc/man/lttng-ust.3.txt @@ -1556,6 +1556,23 @@ affect application timings. documentation under https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`]. +`LTTNG_UST_MAP_POPULATE_POLICY`:: ++ +-- +If set, override the policy used to populate shared memory pages +within the application. The expected values are: + +`none`::: + Do not pre-populate any pages, take minor faults on first access + while tracing. + +`cpu_possible`::: + Pre-populate pages for all possible CPUs in the system, as + listed by `/sys/devices/system/cpu/possible`. +-- ++ +Default: `none`. If the policy is unknown, use the default. + `LTTNG_UST_REGISTER_TIMEOUT`:: Waiting time for the _registration done_ session daemon command before proceeding to execute the main program (milliseconds). diff --git a/src/common/Makefile.am b/src/common/Makefile.am index 05d08ade..ad889d16 100644 --- a/src/common/Makefile.am +++ b/src/common/Makefile.am @@ -171,6 +171,8 @@ libcommon_la_SOURCES = \ logging.h \ smp.c \ smp.h \ + populate.c \ + populate.h \ strutils.c \ strutils.h \ utils.c \ diff --git a/src/common/counter/counter.c b/src/common/counter/counter.c index 60edad0c..99a46af6 100644 --- a/src/common/counter/counter.c +++ b/src/common/counter/counter.c @@ -17,6 +17,7 @@ #include "common/bitmap.h" #include "common/smp.h" +#include "common/populate.h" #include "shm.h" static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension) @@ -84,13 +85,14 @@ static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int s if (counter->is_daemon) { /* Allocate and clear shared memory. */ shm_object = lttng_counter_shm_object_table_alloc(counter->object_table, - shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu); + shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu, + lttng_ust_map_populate_cpu_is_enabled(cpu)); if (!shm_object) return -ENOMEM; } else { /* Map pre-existing shared memory. 
*/ shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table, - shm_fd, shm_length); + shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu)); if (!shm_object) return -ENOMEM; } @@ -211,12 +213,13 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config int cpu, ret; int nr_handles = 0; int nr_cpus = get_possible_cpus_array_len(); + bool populate = lttng_ust_map_populate_is_enabled(); if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step, global_counter_fd, nr_counter_cpu_fds, counter_cpu_fds)) return NULL; - counter = zmalloc(sizeof(struct lib_counter)); + counter = zmalloc_populate(sizeof(struct lib_counter), populate); if (!counter) return NULL; counter->global_counters.shm_fd = -1; @@ -225,13 +228,13 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config if (lttng_counter_set_global_sum_step(counter, global_sum_step)) goto error_sum_step; counter->nr_dimensions = nr_dimensions; - counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions)); + counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate); if (!counter->dimensions) goto error_dimensions; for (dimension = 0; dimension < nr_dimensions; dimension++) counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension]; if (config->alloc & COUNTER_ALLOC_PER_CPU) { - counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus); + counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate); if (!counter->percpu_counters) goto error_alloc_percpu; for_each_possible_cpu(cpu) @@ -250,7 +253,7 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config if (config->alloc & COUNTER_ALLOC_PER_CPU) nr_handles += nr_cpus; /* Allocate table for global and per-cpu counters. 
*/ - counter->object_table = lttng_counter_shm_object_table_create(nr_handles); + counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate); if (!counter->object_table) goto error_alloc_object_table; diff --git a/src/common/counter/shm.c b/src/common/counter/shm.c index 8b65d1fc..6f7ae37a 100644 --- a/src/common/counter/shm.c +++ b/src/common/counter/shm.c @@ -69,12 +69,12 @@ error: return ret; } -struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj) +struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate) { struct lttng_counter_shm_object_table *table; - table = zmalloc(sizeof(struct lttng_counter_shm_object_table) + - max_nb_obj * sizeof(table->objects[0])); + table = zmalloc_populate(sizeof(struct lttng_counter_shm_object_table) + + max_nb_obj * sizeof(table->objects[0]), populate); if (!table) return NULL; table->size = max_nb_obj; @@ -84,10 +84,11 @@ struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(siz static struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table, size_t memory_map_size, - int cpu_fd) + int cpu_fd, bool populate) { - int shmfd, ret; struct lttng_counter_shm_object *obj; + int flags = MAP_SHARED; + int shmfd, ret; char *memory_map; if (cpu_fd < 0) @@ -121,9 +122,11 @@ struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struc obj->shm_fd_ownership = 0; obj->shm_fd = shmfd; + if (populate) + flags |= LTTNG_MAP_POPULATE; /* memory_map: mmap */ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, - MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0); + flags, shmfd, 0); if (memory_map == MAP_FAILED) { PERROR("mmap"); goto error_mmap; @@ -145,7 +148,7 @@ error_zero_file: static struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table, - size_t memory_map_size) + size_t memory_map_size, bool populate) { struct lttng_counter_shm_object *obj; void *memory_map; @@ -154,7 +157,7 @@ struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struc return NULL; obj = &table->objects[table->allocated_len]; - memory_map = zmalloc(memory_map_size); + memory_map = zmalloc_populate(memory_map_size, populate); if (!memory_map) goto alloc_error; @@ -197,13 +200,15 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct ltt size_t memory_map_size, enum lttng_counter_shm_object_type type, int cpu_fd, - int cpu) + int cpu, + bool populate) #else struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table, size_t memory_map_size, enum lttng_counter_shm_object_type type, int cpu_fd, - int cpu __attribute__((unused))) + int cpu __attribute__((unused)), + bool populate) #endif { struct lttng_counter_shm_object *shm_object; @@ -226,10 +231,11 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct ltt switch (type) { case LTTNG_COUNTER_SHM_OBJECT_SHM: shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size, - cpu_fd); + cpu_fd, populate); break; case LTTNG_COUNTER_SHM_OBJECT_MEM: - shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size); + shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size, + populate); break; default: assert(0); @@ -242,10 +248,10 @@ struct lttng_counter_shm_object 
*lttng_counter_shm_object_table_alloc(struct ltt } struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table, - int shm_fd, - size_t memory_map_size) + int shm_fd, size_t memory_map_size, bool populate) { struct lttng_counter_shm_object *obj; + int flags = MAP_SHARED; char *memory_map; if (table->allocated_len >= table->size) @@ -256,9 +262,11 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struc obj->shm_fd = shm_fd; obj->shm_fd_ownership = 1; + if (populate) + flags |= LTTNG_MAP_POPULATE; /* memory_map: mmap */ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, - MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0); + flags, shm_fd, 0); if (memory_map == MAP_FAILED) { PERROR("mmap"); goto error_mmap; diff --git a/src/common/counter/shm.h b/src/common/counter/shm.h index 689edb0a..1293a7b0 100644 --- a/src/common/counter/shm.h +++ b/src/common/counter/shm.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "common/logging.h" #include #include "shm_types.h" @@ -73,18 +74,18 @@ void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_cou #define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src) -struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj) +struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate) __attribute__((visibility("hidden"))); struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table, size_t memory_map_size, enum lttng_counter_shm_object_type type, const int cpu_fd, - int cpu) + int cpu, bool populate) __attribute__((visibility("hidden"))); struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table, - int shm_fd, size_t memory_map_size) + int shm_fd, size_t memory_map_size, bool populate) __attribute__((visibility("hidden"))); /* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */ diff --git a/src/common/getenv.c b/src/common/getenv.c index 7f7b8534..120225e6 100644 --- a/src/common/getenv.c +++ b/src/common/getenv.c @@ -42,6 +42,7 @@ static struct lttng_env lttng_env[] = { /* Env. var. which can be used in setuid/setgid executables. */ { "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, }, { "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, }, + { "LTTNG_UST_MAP_POPULATE_POLICY", LTTNG_ENV_NOT_SECURE, NULL, }, /* Env. var. which are not fetched in setuid/setgid executables. */ { "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, }, diff --git a/src/common/macros.h b/src/common/macros.h index 308a1dfc..e8965b38 100644 --- a/src/common/macros.h +++ b/src/common/macros.h @@ -8,9 +8,32 @@ #define _UST_COMMON_MACROS_H #include +#include +#include #include +/* + * calloc() does not always populate the page table for the allocated + * memory. Optionally enforce page table populate. 
+ */
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+	__attribute__((always_inline));
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+{
+	if (populate) {
+		void *ret = malloc(len);
+
+		if (ret == NULL)
+			return ret;
+		bzero(ret, len);
+		return ret;
+	} else {
+		return calloc(len, 1);
+	}
+}
+
 /*
  * Memory allocation zeroed
  */
@@ -20,7 +43,7 @@ void *zmalloc(size_t len)
 static inline
 void *zmalloc(size_t len)
 {
-	return calloc(len, 1);
+	return zmalloc_populate(len, false);
 }
 
 #define max_t(type, x, y)	\
diff --git a/src/common/populate.c b/src/common/populate.c
new file mode 100644
index 00000000..b7f6bcce
--- /dev/null
+++ b/src/common/populate.c
@@ -0,0 +1,86 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2024 Mathieu Desnoyers
+ */
+
+#define _LGPL_SOURCE
+#include "common/getenv.h"
+#include "common/logging.h"
+#include "common/populate.h"
+
+enum populate_policy {
+	POPULATE_UNSET,
+
+	POPULATE_NONE,
+	POPULATE_CPU_POSSIBLE,
+
+	POPULATE_UNKNOWN,
+};
+
+static enum populate_policy map_populate_policy = POPULATE_UNSET;
+
+static void init_map_populate_policy(void)
+{
+	const char *populate_env_str;
+
+	if (map_populate_policy != POPULATE_UNSET)
+		return;
+
+	populate_env_str = lttng_ust_getenv("LTTNG_UST_MAP_POPULATE_POLICY");
+	if (!populate_env_str) {
+		map_populate_policy = POPULATE_NONE;
+		return;
+	}
+	if (!strcmp(populate_env_str, "none")) {
+		map_populate_policy = POPULATE_NONE;
+	} else if (!strcmp(populate_env_str, "cpu_possible")) {
+		map_populate_policy = POPULATE_CPU_POSSIBLE;
+	} else {
+		/*
+		 * populate_env_str is an untrusted environment variable
+		 * input (can be provided to setuid/setgid binaries), so
+		 * don't even try to print it.
+		 */
+		WARN("Unknown policy for LTTNG_UST_MAP_POPULATE_POLICY environment variable.");
+		map_populate_policy = POPULATE_UNKNOWN;
+	}
+}
+
+/*
+ * Return the shared page populate policy for global pages. Returns true
+ * if shared memory pages should be pre-populated, false otherwise.
+ */
+bool lttng_ust_map_populate_is_enabled(void)
+{
+	init_map_populate_policy();
+
+	switch (map_populate_policy) {
+	case POPULATE_UNKNOWN:	/* Fall-through */
+	case POPULATE_NONE:
+		return false;
+	case POPULATE_CPU_POSSIBLE:
+		return true;
+	default:
+		abort();
+	}
+	return false;
+}
+
+/*
+ * Return the shared page populate policy based on the @cpu number
+ * provided as input. Returns true if shared memory pages should be
+ * pre-populated, false otherwise.
+ *
+ * The @cpu argument is currently unused except for negative value
+ * validation. It is present to eventually match cpu affinity or cpu
+ * online masks if those features are added in the future.
+ */
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+{
+	/* Reject invalid cpu number.
*/ + if (cpu < 0) + return false; + + return lttng_ust_map_populate_is_enabled(); +} diff --git a/src/common/populate.h b/src/common/populate.h new file mode 100644 index 00000000..f65c4851 --- /dev/null +++ b/src/common/populate.h @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright (C) 2024 Mathieu Desnoyers + */ + +#ifndef _UST_COMMON_POPULATE_H +#define _UST_COMMON_POPULATE_H + +#include + +bool lttng_ust_map_populate_cpu_is_enabled(int cpu) + __attribute__((visibility("hidden"))); + +bool lttng_ust_map_populate_is_enabled(void) + __attribute__((visibility("hidden"))); + +#endif /* _UST_COMMON_POPULATE_H */ diff --git a/src/common/ringbuffer/ring_buffer_backend.c b/src/common/ringbuffer/ring_buffer_backend.c index 27c335d5..82dc247a 100644 --- a/src/common/ringbuffer/ring_buffer_backend.c +++ b/src/common/ringbuffer/ring_buffer_backend.c @@ -21,6 +21,7 @@ #include "common/smp.h" #include "shm.h" #include "common/align.h" +#include "common/populate.h" /** * lib_ring_buffer_backend_allocate - allocate a channel buffer @@ -346,7 +347,8 @@ int channel_backend_init(struct channel_backend *chanb, struct shm_object *shmobj; shmobj = shm_object_table_alloc(handle->table, shmsize, - SHM_OBJECT_SHM, stream_fds[i], i); + SHM_OBJECT_SHM, stream_fds[i], i, + lttng_ust_map_populate_cpu_is_enabled(i)); if (!shmobj) goto end; align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer)); @@ -365,7 +367,8 @@ int channel_backend_init(struct channel_backend *chanb, struct lttng_ust_ring_buffer *buf; shmobj = shm_object_table_alloc(handle->table, shmsize, - SHM_OBJECT_SHM, stream_fds[0], -1); + SHM_OBJECT_SHM, stream_fds[0], -1, + lttng_ust_map_populate_is_enabled()); if (!shmobj) goto end; align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer)); diff --git a/src/common/ringbuffer/ring_buffer_frontend.c b/src/common/ringbuffer/ring_buffer_frontend.c index 5dcc0be7..f3f82e82 100644 --- a/src/common/ringbuffer/ring_buffer_frontend.c +++ b/src/common/ringbuffer/ring_buffer_frontend.c @@ -63,6 +63,7 @@ #include "shm.h" #include "rb-init.h" #include "common/compat/errno.h" /* For ENODATA */ +#include "common/populate.h" /* Print DBG() messages about events lost only every 1048576 hits */ #define DBG_PRINT_NR_LOST (1UL << 20) @@ -980,6 +981,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c struct shm_object *shmobj; unsigned int nr_streams; int64_t blocking_timeout_ms; + bool populate = lttng_ust_map_populate_is_enabled(); if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) nr_streams = get_possible_cpus_array_len(); @@ -1006,12 +1008,12 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c read_timer_interval)) return NULL; - handle = zmalloc(sizeof(struct lttng_ust_shm_handle)); + handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate); if (!handle) return NULL; /* Allocate table for channel + per-cpu buffers */ - handle->table = shm_object_table_create(1 + get_possible_cpus_array_len()); + handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate); if (!handle->table) goto error_table_alloc; @@ -1026,7 +1028,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c /* Allocate normal memory for channel (not shared) */ shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM, - -1, -1); + -1, -1, populate); if (!shmobj) goto error_append; /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */ @@ -1089,13 +1091,14 @@ 
struct lttng_ust_shm_handle *channel_handle_create(void *data, { struct lttng_ust_shm_handle *handle; struct shm_object *object; + bool populate = lttng_ust_map_populate_is_enabled(); - handle = zmalloc(sizeof(struct lttng_ust_shm_handle)); + handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate); if (!handle) return NULL; /* Allocate table for channel + per-cpu buffers */ - handle->table = shm_object_table_create(1 + get_possible_cpus_array_len()); + handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate); if (!handle->table) goto error_table_alloc; /* Add channel object */ @@ -1124,7 +1127,7 @@ int channel_handle_add_stream(struct lttng_ust_shm_handle *handle, /* Add stream object */ object = shm_object_table_append_shm(handle->table, shm_fd, wakeup_fd, stream_nr, - memory_map_size); + memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr)); if (!object) return -EINVAL; return 0; diff --git a/src/common/ringbuffer/shm.c b/src/common/ringbuffer/shm.c index a1ef3d69..347f9af0 100644 --- a/src/common/ringbuffer/shm.c +++ b/src/common/ringbuffer/shm.c @@ -69,12 +69,12 @@ error: return ret; } -struct shm_object_table *shm_object_table_create(size_t max_nb_obj) +struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate) { struct shm_object_table *table; - table = zmalloc(sizeof(struct shm_object_table) + - max_nb_obj * sizeof(table->objects[0])); + table = zmalloc_populate(sizeof(struct shm_object_table) + + max_nb_obj * sizeof(table->objects[0]), populate); if (!table) return NULL; table->size = max_nb_obj; @@ -84,9 +84,11 @@ struct shm_object_table *shm_object_table_create(size_t max_nb_obj) static struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table, size_t memory_map_size, - int stream_fd) + int stream_fd, + bool populate) { int shmfd, waitfd[2], ret, i; + int flags = MAP_SHARED; struct shm_object *obj; char *memory_map; @@ -145,9 +147,11 @@ struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table, obj->shm_fd_ownership = 0; obj->shm_fd = shmfd; + if (populate) + flags |= LTTNG_MAP_POPULATE; /* memory_map: mmap */ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, - MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0); + flags, shmfd, 0); if (memory_map == MAP_FAILED) { PERROR("mmap"); goto error_mmap; @@ -178,7 +182,7 @@ error_pipe: static struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table, - size_t memory_map_size) + size_t memory_map_size, bool populate) { struct shm_object *obj; void *memory_map; @@ -188,7 +192,7 @@ struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table, return NULL; obj = &table->objects[table->allocated_len]; - memory_map = zmalloc(memory_map_size); + memory_map = zmalloc_populate(memory_map_size, populate); if (!memory_map) goto alloc_error; @@ -255,13 +259,15 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table, size_t memory_map_size, enum shm_object_type type, int stream_fd, - int cpu) + int cpu, + bool populate) #else struct shm_object *shm_object_table_alloc(struct shm_object_table *table, size_t memory_map_size, enum shm_object_type type, int stream_fd, - int cpu __attribute__((unused))) + int cpu __attribute__((unused)), + bool populate) #endif { struct shm_object *shm_object; @@ -284,10 +290,11 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table, switch (type) { case SHM_OBJECT_SHM: shm_object = _shm_object_table_alloc_shm(table, 
memory_map_size, - stream_fd); + stream_fd, populate); break; case SHM_OBJECT_MEM: - shm_object = _shm_object_table_alloc_mem(table, memory_map_size); + shm_object = _shm_object_table_alloc_mem(table, memory_map_size, + populate); break; default: assert(0); @@ -301,8 +308,9 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table, struct shm_object *shm_object_table_append_shm(struct shm_object_table *table, int shm_fd, int wakeup_fd, uint32_t stream_nr, - size_t memory_map_size) + size_t memory_map_size, bool populate) { + int flags = MAP_SHARED; struct shm_object *obj; char *memory_map; int ret; @@ -328,9 +336,11 @@ struct shm_object *shm_object_table_append_shm(struct shm_object_table *table, goto error_fcntl; } + if (populate) + flags |= LTTNG_MAP_POPULATE; /* memory_map: mmap */ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, - MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0); + flags, shm_fd, 0); if (memory_map == MAP_FAILED) { PERROR("mmap"); goto error_mmap; diff --git a/src/common/ringbuffer/shm.h b/src/common/ringbuffer/shm.h index 6e4f7f7b..944410d8 100644 --- a/src/common/ringbuffer/shm.h +++ b/src/common/ringbuffer/shm.h @@ -71,19 +71,19 @@ void _set_shmp(struct shm_ref *ref, struct shm_ref src) #define set_shmp(ref, src) _set_shmp(&(ref)._ref, src) -struct shm_object_table *shm_object_table_create(size_t max_nb_obj) +struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate) __attribute__((visibility("hidden"))); struct shm_object *shm_object_table_alloc(struct shm_object_table *table, size_t memory_map_size, enum shm_object_type type, const int stream_fd, - int cpu) + int cpu, bool populate) __attribute__((visibility("hidden"))); struct shm_object *shm_object_table_append_shm(struct shm_object_table *table, int shm_fd, int wakeup_fd, uint32_t stream_nr, - size_t memory_map_size) + size_t memory_map_size, bool populate) __attribute__((visibility("hidden"))); /* mem ownership is passed to shm_object_table_append_mem(). */ diff --git a/tests/unit/libringbuffer/shm.c b/tests/unit/libringbuffer/shm.c index 13c74c3d..8b78da25 100644 --- a/tests/unit/libringbuffer/shm.c +++ b/tests/unit/libringbuffer/shm.c @@ -34,12 +34,12 @@ int main(void) ok(shmfd > 0, "Open a POSIX shm fd"); /* Create a dummy shm object table to test the allocation function */ - table = shm_object_table_create(1); + table = shm_object_table_create(1, false); ok(table, "Create a shm object table"); assert(table); /* This function sets the initial size of the shm with ftruncate and zeros it */ - shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1); + shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1, false); ok(shmobj, "Allocate the shm object table"); assert(shmobj); -- 2.34.1 From b3044227626e747a84452afacb72277ba1596867 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Thu, 2 May 2024 10:41:49 -0400 Subject: [PATCH 6/9] fix: handle EINTR correctly in get_cpu_mask_from_sysfs If the read() in get_cpu_mask_from_sysfs() fails with EINTR, the code is supposed to retry, but the while loop condition has (bytes_read > 0), which is false when read() fails with EINTR. The result is that the code exits the loop, having only read part of the string. Use (bytes_read != 0) in the while loop condition instead, since the (bytes_read < 0) case is already handled in the loop. 
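
For illustration, here is a minimal sketch of the corrected loop shape
(editorial, with a hypothetical function name, not the lttng-ust code
verbatim). In a do/while loop, "continue" jumps to the controlling
expression, so after an EINTR the (bytes_read != 0) condition keeps
looping where (bytes_read > 0) would wrongly exit:

	#include <errno.h>
	#include <unistd.h>

	static ssize_t read_full(int fd, char *buf, size_t max_bytes)
	{
		size_t total_bytes_read = 0;
		ssize_t bytes_read;

		do {
			bytes_read = read(fd, buf + total_bytes_read,
					max_bytes - total_bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR)
					continue;	/* Re-tests: -1 != 0 retries. */
				return -1;	/* Real error. */
			}
			total_bytes_read += bytes_read;
		} while (max_bytes > total_bytes_read && bytes_read != 0);

		return total_bytes_read;
	}
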
Original fix in liburcu from Benjamin Marzinski:
commit 9922f33e2986 ("fix: handle EINTR correctly in get_cpu_mask_from_sysfs")

Signed-off-by: Mathieu Desnoyers
Change-Id: I885a0fb98e5a7cfb9a8bd180c8e64b20926ff58c
---
 src/common/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/common/smp.c b/src/common/smp.c
index 36967ccc..10b9954a 100644
--- a/src/common/smp.c
+++ b/src/common/smp.c
@@ -167,7 +167,7 @@ int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const char *path)
 
 		total_bytes_read += bytes_read;
 		assert(total_bytes_read <= max_bytes);
-	} while (max_bytes > total_bytes_read && bytes_read > 0);
+	} while (max_bytes > total_bytes_read && bytes_read != 0);
 
 	/*
 	 * Make sure the mask read is a null terminated string.
-- 
2.34.1


From 5cc8729236a95d784e9561abbcb93a0fce90890c Mon Sep 17 00:00:00 2001
From: Kienan Stewart
Date: Thu, 2 May 2024 16:51:45 -0400
Subject: [PATCH 7/9] docs: Correct GitHub URLs in lttng-ust.3

The branches follow the format `stable-X.YZ` rather than `vX.YZ`.

Furthermore, when rendering the man pages from source, the URLs were
omitted completely as the substitution `{lttng_version}` was not
defined. This hasn't been an issue for the published HTML versions as
those are produced via a different script in the `lttng-www` project
which presumably sets the substitution properly.

Change-Id: Ib96c99df13ddf724e128f95e7ce7c74b2c10c766
Signed-off-by: Kienan Stewart
Signed-off-by: Mathieu Desnoyers
---
 configure.ac            | 3 +++
 doc/man/Makefile.am     | 7 ++++---
 doc/man/lttng-ust.3.txt | 7 ++++---
 3 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/configure.ac b/configure.ac
index a1c12412..05c343b7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -609,6 +609,9 @@ AC_SUBST(AM_CPPFLAGS)
 
 AC_SUBST(JNI_CPPFLAGS)
 
+# Used in man pages
+AC_SUBST([LTTNG_UST_MAJOR_VERSION], ust_version_major)
+AC_SUBST([LTTNG_UST_MINOR_VERSION], ust_version_minor)
 
 ##                                          ##
 ## Output files generated by configure      ##

diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 29c06739..94510d4e 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -76,13 +76,14 @@ xmlto_verbose_out_ = $(xmlto_verbose_out_@AM_DEFAULT_V@)
 xmlto_verbose_out_0 = 2>/dev/null
 
 # Tools to execute:
-ADOC = $(asciidoc_verbose)$(ASCIIDOC) -f $(ASCIIDOC_CONF) -d manpage \
+ADOC = $(asciidoc_verbose)$(ASCIIDOC) -v -f $(ASCIIDOC_CONF) -d manpage \
 	-a mansource="LTTng" \
 	-a manmanual="LTTng Manual" \
-	-a manversion="$(PACKAGE_VERSION)"
+	-a manversion="$(PACKAGE_VERSION)" \
+	-a lttng_version="$(LTTNG_UST_MAJOR_VERSION).$(LTTNG_UST_MINOR_VERSION)"
 
 ADOC_DOCBOOK = $(ADOC) -b docbook
 
-XTO = $(xmlto_verbose)$(XMLTO) -m $(XSL_FILE) man
+XTO = $(xmlto_verbose)$(XMLTO) -v -m $(XSL_FILE) man
 
 # Recipes:
 %.1.xml: $(srcdir)/%.1.txt $(COMMON_DEPS)

diff --git a/doc/man/lttng-ust.3.txt b/doc/man/lttng-ust.3.txt
index 0864b9a3..601ebec8 100644
--- a/doc/man/lttng-ust.3.txt
+++ b/doc/man/lttng-ust.3.txt
@@ -1230,8 +1230,9 @@ if (lttng_ust_loaded) {
 
 [[example]]
 EXAMPLE
 -------
+
 NOTE: A few examples are available in the
-https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples[`doc/examples`]
+https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples[`doc/examples`]
 directory of LTTng-UST's source tree.
 
 This example shows all the features documented in the previous
@@ -1545,7 +1546,7 @@ affect application timings.
 	Path to the shared object which acts as the clock override plugin.
 	An example of such a plugin can be found in the LTTng-UST
 	documentation under
-	https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
+	https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
 
 `LTTNG_UST_DEBUG`::
 	If set, enable `liblttng-ust`'s debug and error output.
@@ -1554,7 +1555,7 @@ affect application timings.
 	Path to the shared object which acts as the `getcpu()` override
 	plugin. An example of such a plugin can be found in the LTTng-UST
 	documentation under
-	https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+	https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
 
 `LTTNG_UST_MAP_POPULATE_POLICY`::
 +
-- 
2.34.1


From 373ea80ac0db5072e995140f1dbdbf4f0b1bdaad Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 2 May 2024 17:22:14 -0400
Subject: [PATCH 8/9] Rename "tsc" to "timestamp"

Naming timestamps "TSC" or "tsc" is a historical artefact dating from
the implementation of libringbuffer, where the initial intent was to
use the x86 "rdtsc" instruction directly; that is not what was
ultimately implemented.

Rename uses of "TSC" and "tsc" to "timestamp" to clarify things and to
avoid requiring reviewers to be fluent in the x86 instruction set.

Signed-off-by: Mathieu Desnoyers
Change-Id: I8e7e2ad9cd2d2427485fc6adbc340fccde14ca2f
---
 .../ringbuffer-clients/metadata-template.h    |  6 +--
 src/common/ringbuffer-clients/template.h      | 38 +++++++-------
 src/common/ringbuffer/backend_types.h         |  2 +-
 src/common/ringbuffer/frontend_api.h          | 35 ++++++------
 src/common/ringbuffer/frontend_internal.h     | 50 +++++++++---------
 src/common/ringbuffer/frontend_types.h        |  4 +-
 src/common/ringbuffer/ring_buffer_backend.c   |  4 +-
 src/common/ringbuffer/ring_buffer_frontend.c  | 46 ++++++++--------
 src/common/ringbuffer/ringbuffer-config.h     | 14 +++---
 9 files changed, 100 insertions(+), 99 deletions(-)

diff --git a/src/common/ringbuffer-clients/metadata-template.h b/src/common/ringbuffer-clients/metadata-template.h
index 56d95516..080288d5 100644
--- a/src/common/ringbuffer-clients/metadata-template.h
+++ b/src/common/ringbuffer-clients/metadata-template.h
@@ -93,7 +93,7 @@ static size_t client_packet_header_size(void)
 }
 
 static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
-		uint64_t tsc __attribute__((unused)),
+		uint64_t timestamp __attribute__((unused)),
 		unsigned int subbuf_idx,
 		struct lttng_ust_shm_handle *handle)
 {
@@ -125,7 +125,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
  * subbuffer. data_size is between 1 and subbuf_size.
*/ static void client_buffer_end(struct lttng_ust_ring_buffer *buf, - uint64_t tsc __attribute__((unused)), + uint64_t timestamp __attribute__((unused)), unsigned int subbuf_idx, unsigned long data_size, struct lttng_ust_shm_handle *handle, const struct lttng_ust_ring_buffer_ctx *ctx) @@ -193,7 +193,7 @@ static const struct lttng_ust_ring_buffer_config client_config = { .cb.buffer_create = client_buffer_create, .cb.buffer_finalize = client_buffer_finalize, - .tsc_bits = 0, + .timestamp_bits = 0, .alloc = RING_BUFFER_ALLOC_GLOBAL, .sync = RING_BUFFER_SYNC_GLOBAL, .mode = RING_BUFFER_MODE_TEMPLATE, diff --git a/src/common/ringbuffer-clients/template.h b/src/common/ringbuffer-clients/template.h index fe8f8e02..58a8400d 100644 --- a/src/common/ringbuffer-clients/template.h +++ b/src/common/ringbuffer-clients/template.h @@ -19,8 +19,8 @@ #include "common/clock.h" #include "common/ringbuffer/frontend_types.h" -#define LTTNG_COMPACT_EVENT_BITS 5 -#define LTTNG_COMPACT_TSC_BITS 27 +#define LTTNG_COMPACT_EVENT_BITS 5 +#define LTTNG_COMPACT_TIMESTAMP_BITS 27 /* * Keep the natural field alignment for _each field_ within this structure if @@ -156,7 +156,7 @@ size_t record_header_size( case 1: /* compact */ padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t)); offset += padding; - if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { + if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) { offset += sizeof(uint32_t); /* id and timestamp */ } else { /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */ @@ -172,7 +172,7 @@ size_t record_header_size( padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t)); offset += padding; offset += sizeof(uint16_t); - if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { + if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) { offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t)); offset += sizeof(uint32_t); /* timestamp */ } else { @@ -235,14 +235,14 @@ void lttng_write_event_header(const struct lttng_ust_ring_buffer_config *config, event_id); bt_bitfield_write(&id_time, uint32_t, LTTNG_COMPACT_EVENT_BITS, - LTTNG_COMPACT_TSC_BITS, - ctx->priv->tsc); + LTTNG_COMPACT_TIMESTAMP_BITS, + ctx->priv->timestamp); lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time)); break; } case 2: /* large */ { - uint32_t timestamp = (uint32_t) ctx->priv->tsc; + uint32_t timestamp = (uint32_t) ctx->priv->timestamp; uint16_t id = event_id; lib_ring_buffer_write(config, ctx, &id, sizeof(id)); @@ -275,7 +275,7 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co switch (lttng_chan->priv->header_type) { case 1: /* compact */ - if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { + if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) { uint32_t id_time = 0; bt_bitfield_write(&id_time, uint32_t, @@ -284,12 +284,12 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co event_id); bt_bitfield_write(&id_time, uint32_t, LTTNG_COMPACT_EVENT_BITS, - LTTNG_COMPACT_TSC_BITS, - ctx_private->tsc); + LTTNG_COMPACT_TIMESTAMP_BITS, + ctx_private->timestamp); lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time)); } else { uint8_t id = 0; - uint64_t timestamp = ctx_private->tsc; + uint64_t timestamp = ctx_private->timestamp; bt_bitfield_write(&id, uint8_t, 0, @@ -305,8 +305,8 
@@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co break; case 2: /* large */ { - if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { - uint32_t timestamp = (uint32_t) ctx_private->tsc; + if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) { + uint32_t timestamp = (uint32_t) ctx_private->timestamp; uint16_t id = event_id; lib_ring_buffer_write(config, ctx, &id, sizeof(id)); @@ -314,7 +314,7 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co lib_ring_buffer_write(config, ctx, ×tamp, sizeof(timestamp)); } else { uint16_t id = 65535; - uint64_t timestamp = ctx_private->tsc; + uint64_t timestamp = ctx_private->timestamp; lib_ring_buffer_write(config, ctx, &id, sizeof(id)); /* Align extended struct on largest member */ @@ -364,7 +364,7 @@ static size_t client_packet_header_size(void) return offsetof(struct packet_header, ctx.header_end); } -static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc, +static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, struct lttng_ust_shm_handle *handle) { @@ -384,7 +384,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc, memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid)); header->stream_id = lttng_chan->priv->id; header->stream_instance_id = buf->backend.cpu; - header->ctx.timestamp_begin = tsc; + header->ctx.timestamp_begin = timestamp; header->ctx.timestamp_end = 0; header->ctx.content_size = ~0ULL; /* for debugging */ header->ctx.packet_size = ~0ULL; @@ -397,7 +397,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc, * offset is assumed to never be 0 here : never deliver a completely empty * subbuffer. data_size is between 1 and subbuf_size. */ -static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc, +static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, unsigned long data_size, struct lttng_ust_shm_handle *handle, const struct lttng_ust_ring_buffer_ctx *ctx) @@ -413,7 +413,7 @@ static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc, assert(header); if (!header) return; - header->ctx.timestamp_end = tsc; + header->ctx.timestamp_end = timestamp; header->ctx.content_size = (uint64_t) data_size * CHAR_BIT; /* in bits */ header->ctx.packet_size = @@ -614,7 +614,7 @@ static const struct lttng_ust_ring_buffer_config client_config = { .cb.content_size_field = client_content_size_field, .cb.packet_size_field = client_packet_size_field, - .tsc_bits = LTTNG_COMPACT_TSC_BITS, + .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS, .alloc = RING_BUFFER_ALLOC_PER_CPU, .sync = RING_BUFFER_SYNC_GLOBAL, .mode = RING_BUFFER_MODE_TEMPLATE, diff --git a/src/common/ringbuffer/backend_types.h b/src/common/ringbuffer/backend_types.h index a4e207f4..c9cc4025 100644 --- a/src/common/ringbuffer/backend_types.h +++ b/src/common/ringbuffer/backend_types.h @@ -87,7 +87,7 @@ struct channel_backend { unsigned int buf_size_order; /* Order of buffer size */ unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? 
*/ unsigned long num_subbuf; /* Number of sub-buffers for writer */ - uint64_t start_tsc; /* Channel creation TSC value */ + uint64_t start_timestamp; /* Channel creation timestamp value */ DECLARE_SHMP(void *, priv_data);/* Client-specific information */ struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */ char name[NAME_MAX]; /* Channel name */ diff --git a/src/common/ringbuffer/frontend_api.h b/src/common/ringbuffer/frontend_api.h index e3507073..030169ff 100644 --- a/src/common/ringbuffer/frontend_api.h +++ b/src/common/ringbuffer/frontend_api.h @@ -82,8 +82,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi *o_begin = v_read(config, &buf->offset); *o_old = *o_begin; - ctx_private->tsc = lib_ring_buffer_clock_read(chan); - if ((int64_t) ctx_private->tsc == -EIO) + ctx_private->timestamp = lib_ring_buffer_clock_read(chan); + if ((int64_t) ctx_private->timestamp == -EIO) return 1; /* @@ -93,8 +93,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi */ //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]); - if (last_tsc_overflow(config, buf, ctx_private->tsc)) - ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC; + if (last_timestamp_overflow(config, buf, ctx_private->timestamp)) + ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP; if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0)) return 1; @@ -130,7 +130,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi * @ctx: ring buffer context. (input and output) Must be already initialized. * * Atomic wait-free slot reservation. The reserved space starts at the context - * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc". + * "pre_offset". Its length is "slot_size". The associated time-stamp is + * "timestamp". * * Return : * 0 on success. @@ -179,12 +180,12 @@ int lib_ring_buffer_reserve(const struct lttng_ust_ring_buffer_config *config, goto slow_path; /* - * Atomically update last_tsc. This update races against concurrent - * atomic updates, but the race will always cause supplementary full TSC - * record headers, never the opposite (missing a full TSC record header - * when it would be needed). + * Atomically update last_timestamp. This update races against concurrent + * atomic updates, but the race will always cause supplementary full + * timestamp record headers, never the opposite (missing a full + * timestamp record header when it would be needed). */ - save_last_tsc(config, buf, ctx_private->tsc); + save_last_timestamp(config, buf, ctx_private->timestamp); /* * Push the reader if necessary @@ -317,17 +318,17 @@ int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_ring_buffer_confi /* * We need to ensure that if the cmpxchg succeeds and discards the - * record, the next record will record a full TSC, because it cannot - * rely on the last_tsc associated with the discarded record to detect - * overflows. The only way to ensure this is to set the last_tsc to 0 - * (assuming no 64-bit TSC overflow), which forces to write a 64-bit + * record, the next record will record a full timestamp, because it cannot + * rely on the last_timestamp associated with the discarded record to detect + * overflows. The only way to ensure this is to set the last_timestamp to 0 + * (assuming no 64-bit timestamp overflow), which forces to write a 64-bit * timestamp in the next record. * - * Note: if discard fails, we must leave the TSC in the record header. 
- * It is needed to keep track of TSC overflows for the following + * Note: if discard fails, we must leave the timestamp in the record header. + * It is needed to keep track of timestamp overflows for the following * records. */ - save_last_tsc(config, buf, 0ULL); + save_last_timestamp(config, buf, 0ULL); if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset) != end_offset)) diff --git a/src/common/ringbuffer/frontend_internal.h b/src/common/ringbuffer/frontend_internal.h index 1dc816a3..d9f16a51 100644 --- a/src/common/ringbuffer/frontend_internal.h +++ b/src/common/ringbuffer/frontend_internal.h @@ -85,62 +85,62 @@ unsigned long subbuf_index(unsigned long offset, } /* - * Last TSC comparison functions. Check if the current TSC overflows tsc_bits - * bits from the last TSC read. When overflows are detected, the full 64-bit - * timestamp counter should be written in the record header. Reads and writes - * last_tsc atomically. + * Last timestamp comparison functions. Check if the current timestamp overflows + * timestamp_bits bits from the last timestamp read. When overflows are + * detected, the full 64-bit timestamp counter should be written in the record + * header. Reads and writes last_timestamp atomically. */ #if (CAA_BITS_PER_LONG == 32) static inline -void save_last_tsc(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t timestamp) { - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return; /* * Ensure the compiler performs this update in a single instruction. */ - v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits)); + v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits)); } static inline -int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t timestamp) { - unsigned long tsc_shifted; + unsigned long timestamp_shifted; - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return 0; - tsc_shifted = (unsigned long)(tsc >> config->tsc_bits); - if (caa_unlikely(tsc_shifted - - (unsigned long)v_read(config, &buf->last_tsc))) + timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits); + if (caa_unlikely(timestamp_shifted + - (unsigned long)v_read(config, &buf->last_timestamp))) return 1; else return 0; } #else static inline -void save_last_tsc(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t timestamp) { - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return; - v_set(config, &buf->last_tsc, (unsigned long)tsc); + v_set(config, &buf->last_timestamp, (unsigned long)timestamp); } static inline -int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t 
timestamp) { - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return 0; - if (caa_unlikely((tsc - v_read(config, &buf->last_tsc)) - >> config->tsc_bits)) + if (caa_unlikely((timestamp - v_read(config, &buf->last_timestamp)) + >> config->timestamp_bits)) return 1; else return 0; @@ -287,7 +287,7 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_ring_buffer_config } /* - * Receive end of subbuffer TSC as parameter. It has been read in the + * Receive end of subbuffer timestamp as parameter. It has been read in the * space reservation loop of either reserve or switch, which ensures it * progresses monotonically with event records in the buffer. Therefore, * it ensures that the end timestamp of a subbuffer is <= begin diff --git a/src/common/ringbuffer/frontend_types.h b/src/common/ringbuffer/frontend_types.h index 1b0e1a08..3be7ec1b 100644 --- a/src/common/ringbuffer/frontend_types.h +++ b/src/common/ringbuffer/frontend_types.h @@ -181,7 +181,7 @@ struct lttng_ust_ring_buffer { int record_disabled; /* End of cache-hot 32 bytes cacheline */ - union v_atomic last_tsc; /* + union v_atomic last_timestamp; /* * Last timestamp written in the buffer. */ @@ -251,7 +251,7 @@ struct lttng_ust_ring_buffer_ctx_private { * prior to record header alignment * padding. */ - uint64_t tsc; /* time-stamp counter value */ + uint64_t timestamp; /* time-stamp counter value */ unsigned int rflags; /* reservation flags */ struct lttng_ust_ring_buffer *buf; /* * buffer corresponding to processor id diff --git a/src/common/ringbuffer/ring_buffer_backend.c b/src/common/ringbuffer/ring_buffer_backend.c index 82dc247a..d2fadb77 100644 --- a/src/common/ringbuffer/ring_buffer_backend.c +++ b/src/common/ringbuffer/ring_buffer_backend.c @@ -235,7 +235,7 @@ void channel_backend_reset(struct channel_backend *chanb) * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf, * priv, notifiers, config, cpumask and name. 
*/ - chanb->start_tsc = config->cb.ring_buffer_clock_read(chan); + chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan); } /** @@ -382,7 +382,7 @@ int channel_backend_init(struct channel_backend *chanb, if (ret) goto free_bufs; } - chanb->start_tsc = config->cb.ring_buffer_clock_read(chan); + chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan); return 0; diff --git a/src/common/ringbuffer/ring_buffer_frontend.c b/src/common/ringbuffer/ring_buffer_frontend.c index f3f82e82..ab1fc0ff 100644 --- a/src/common/ringbuffer/ring_buffer_frontend.c +++ b/src/common/ringbuffer/ring_buffer_frontend.c @@ -203,7 +203,7 @@ void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf, } uatomic_set(&buf->consumed, 0); uatomic_set(&buf->record_disabled, 0); - v_set(config, &buf->last_tsc, 0); + v_set(config, &buf->last_timestamp, 0); lib_ring_buffer_backend_reset(&buf->backend, handle); /* Don't reset number of active readers */ v_set(config, &buf->records_lost_full, 0); @@ -341,7 +341,7 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf, struct commit_counters_hot *cc_hot; void *priv = channel_get_private_config(chan); size_t subbuf_header_size; - uint64_t tsc; + uint64_t timestamp; int ret; /* Test for cpu hotplug */ @@ -398,8 +398,8 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf, ret = -EPERM; goto free_chanbuf; } - tsc = config->cb.ring_buffer_clock_read(shmp_chan); - config->cb.buffer_begin(buf, tsc, 0, handle); + timestamp = config->cb.ring_buffer_clock_read(shmp_chan); + config->cb.buffer_begin(buf, timestamp, 0, handle); cc_hot = shmp_index(handle, buf->commit_hot, 0); if (!cc_hot) { ret = -EPERM; @@ -1774,7 +1774,7 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf, unsigned long commit_count; struct commit_counters_hot *cc_hot; - config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle); + config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle); /* * Order all writes to buffer before the commit count update that will @@ -1832,7 +1832,7 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf, * postponed until the commit counter is incremented for the * current space reservation. */ - *ts_end = ctx->priv->tsc; + *ts_end = ctx->priv->timestamp; /* * Order all writes to buffer and store to ts_end before the commit @@ -1870,7 +1870,7 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf, unsigned long commit_count; struct commit_counters_hot *cc_hot; - config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle); + config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle); /* * Order all writes to buffer before the commit count update that will @@ -1924,7 +1924,7 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf, * postponed until the commit counter is incremented for the * current space reservation. */ - *ts_end = ctx->priv->tsc; + *ts_end = ctx->priv->timestamp; } /* @@ -1948,7 +1948,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode, offsets->switch_old_start = 0; off = subbuf_offset(offsets->begin, chan); - ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan); + ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan); /* * Ensure we flush the header of an empty subbuffer when doing the @@ -2084,12 +2084,12 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_ != offsets.old); /* - * Atomically update last_tsc. 
This update races against concurrent - * atomic updates, but the race will always cause supplementary full TSC - * records, never the opposite (missing a full TSC record when it would - * be needed). + * Atomically update last_timestamp. This update races against concurrent + * atomic updates, but the race will always cause supplementary full + * timestamp records, never the opposite (missing a full timestamp + * record when it would be needed). */ - save_last_tsc(config, buf, ctx.priv->tsc); + save_last_timestamp(config, buf, ctx.priv->timestamp); /* * Push the reader if necessary @@ -2158,12 +2158,12 @@ retry: offsets->switch_old_end = 0; offsets->pre_header_padding = 0; - ctx_private->tsc = config->cb.ring_buffer_clock_read(chan); - if ((int64_t) ctx_private->tsc == -EIO) + ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan); + if ((int64_t) ctx_private->timestamp == -EIO) return -EIO; - if (last_tsc_overflow(config, buf, ctx_private->tsc)) - ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC; + if (last_timestamp_overflow(config, buf, ctx_private->timestamp)) + ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP; if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) { offsets->switch_new_start = 1; /* For offsets->begin */ @@ -2371,12 +2371,12 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx, != offsets.old)); /* - * Atomically update last_tsc. This update races against concurrent - * atomic updates, but the race will always cause supplementary full TSC - * records, never the opposite (missing a full TSC record when it would - * be needed). + * Atomically update last_timestamp. This update races against concurrent + * atomic updates, but the race will always cause supplementary full + * timestamp records, never the opposite (missing a full timestamp + * record when it would be needed). */ - save_last_tsc(config, buf, ctx_private->tsc); + save_last_timestamp(config, buf, ctx_private->timestamp); /* * Push the reader if necessary diff --git a/src/common/ringbuffer/ringbuffer-config.h b/src/common/ringbuffer/ringbuffer-config.h index 61386174..83efea9e 100644 --- a/src/common/ringbuffer/ringbuffer-config.h +++ b/src/common/ringbuffer/ringbuffer-config.h @@ -46,10 +46,10 @@ struct lttng_ust_ring_buffer_client_cb { /* Slow path only, at subbuffer switch */ size_t (*subbuffer_header_size) (void); - void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t tsc, + void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, struct lttng_ust_shm_handle *handle); - void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t tsc, + void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, unsigned long data_size, struct lttng_ust_shm_handle *handle, const struct lttng_ust_ring_buffer_ctx *ctx); @@ -185,10 +185,10 @@ struct lttng_ust_ring_buffer_config { enum lttng_ust_ring_buffer_ipi_types ipi; enum lttng_ust_ring_buffer_wakeup_types wakeup; /* - * tsc_bits: timestamp bits saved at each record. + * timestamp_bits: timestamp bits saved at each record. * 0 and 64 disable the timestamp compression scheme. */ - unsigned int tsc_bits; + unsigned int timestamp_bits; struct lttng_ust_ring_buffer_client_cb cb; /* * client_type is used by the consumer process (which is in a @@ -204,18 +204,18 @@ struct lttng_ust_ring_buffer_config { /* * Reservation flags. 
  *
- * RING_BUFFER_RFLAG_FULL_TSC
+ * RING_BUFFER_RFLAG_FULL_TIMESTAMP
  *
  * This flag is passed to record_header_size() and to the primitive used to
  * write the record header. It indicates that the full 64-bit time value is
  * needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
+ * only to contain "timestamp_bits" bits of time value.
  *
  * Reservation flags can be added by the client, starting from
  * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
  * record_header_size() to lib_ring_buffer_write_record_header().
  */
-#define RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
+#define RING_BUFFER_RFLAG_FULL_TIMESTAMP	(1U << 0)
 #define RING_BUFFER_RFLAG_END		(1U << 1)
 
 /*
-- 
2.34.1

From aa56331b81e9f1199e29af0fbb6df041659b58ba Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 9 May 2024 15:09:17 -0400
Subject: [PATCH 9/9] ust-fd: Add close_range declaration

Old libc headers do not contain a declaration of close_range(). Emit our
own declaration to prevent compiler warnings.

Signed-off-by: Mathieu Desnoyers
Change-Id: If6ca8193895efbb6ce1ba46e092939b8099bcff6
---
 src/lib/lttng-ust-fd/lttng-ust-fd.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/lib/lttng-ust-fd/lttng-ust-fd.c b/src/lib/lttng-ust-fd/lttng-ust-fd.c
index 0360b6f2..01decd1e 100644
--- a/src/lib/lttng-ust-fd/lttng-ust-fd.c
+++ b/src/lib/lttng-ust-fd/lttng-ust-fd.c
@@ -148,6 +148,9 @@ int fclose(FILE *stream)
 			__lttng_ust_fd_plibc_fclose);
 }
 
+/* Old libc headers don't contain a close_range() declaration. */
+int close_range(unsigned int first, unsigned int last, int flags);
+
 /*
  * Override the libc close_range() symbol with our own, allowing
  * applications to close arbitrary file descriptors. If the fd is owned
-- 
2.34.1
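
The timestamp-compression scheme that the rename patch above touches can be
restated briefly: with timestamp_bits strictly between 0 and 64, each record
header carries only the low-order timestamp_bits bits of the clock, and
last_timestamp_overflow() detects when the bits above that changed since the
previous record, in which case the header must carry the full 64-bit value.
The following single-threaded sketch mirrors the 32-bit variant shown in the
diff; TIMESTAMP_BITS and needs_full_timestamp() are illustrative names, not
LTTng-UST API, and the sketch fuses the save_last_timestamp() /
last_timestamp_overflow() pair into one helper, where the real code uses
atomic v_read()/v_set() accesses so that races only ever produce extra full
timestamps.

#include <stdint.h>
#include <stdio.h>

#define TIMESTAMP_BITS	27	/* low-order bits stored in each record header */

static uint64_t last_timestamp;	/* high-order bits of the previous record */

/*
 * Return 1 when the bits above TIMESTAMP_BITS changed since the last
 * record, i.e. when the next record header needs the full 64-bit value.
 */
static int needs_full_timestamp(uint64_t timestamp)
{
	uint64_t shifted = timestamp >> TIMESTAMP_BITS;

	if (shifted != last_timestamp) {
		last_timestamp = shifted;
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", needs_full_timestamp(1000));		/* 0: high bits still 0 */
	printf("%d\n", needs_full_timestamp(2000));		/* 0: same epoch */
	printf("%d\n", needs_full_timestamp(1ULL << 40));	/* 1: high bits changed */
	printf("%d\n", needs_full_timestamp((1ULL << 40) + 1));/* 0: epoch recorded */
	return 0;
}

This also shows why the discard path in the patch resets last_timestamp to
0: the next record's high-order bits will then differ (barring 64-bit
wraparound), forcing a full timestamp into its header.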
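
The RING_BUFFER_RFLAG_FULL_TIMESTAMP flag is how that overflow decision
reaches the client callbacks named in the ringbuffer-config.h comment: when
the flag is set, record_header_size() and lib_ring_buffer_write_record_header()
must account for a full 64-bit timestamp field rather than a
timestamp_bits-wide one. A hypothetical sizing helper illustrating that
contract; header_timestamp_bits() is a sketch, not the actual client code:

#define RING_BUFFER_RFLAG_FULL_TIMESTAMP	(1U << 0)

/* Width, in bits, of the timestamp field in a record header. */
unsigned int header_timestamp_bits(unsigned int rflags,
		unsigned int timestamp_bits)
{
	if (timestamp_bits == 0 || timestamp_bits == 64)
		return 64;		/* compression scheme disabled */
	if (rflags & RING_BUFFER_RFLAG_FULL_TIMESTAMP)
		return 64;		/* overflow detected: full value */
	return timestamp_bits;		/* compressed timestamp */
}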
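
The close_range() declaration added by patch 9/9 supports the override
described in its last hunk. The general shape of such an override is the
classic LD_PRELOAD interposition idiom: define the symbol yourself, resolve
the next definition in link order with dlsym(RTLD_NEXT, ...), and forward to
it after filtering. A minimal sketch of the idiom, assuming nothing about the
actual liblttng-ust-fd internals; the lazy, non-thread-safe lookup and the
names are illustrative only:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <errno.h>

/* Declare it ourselves in case old libc headers lack the declaration. */
int close_range(unsigned int first, unsigned int last, int flags);

static int (*plibc_close_range)(unsigned int, unsigned int, int);

int close_range(unsigned int first, unsigned int last, int flags)
{
	if (!plibc_close_range) {
		plibc_close_range = (int (*)(unsigned int, unsigned int, int))
				dlsym(RTLD_NEXT, "close_range");
		if (!plibc_close_range) {
			errno = ENOSYS;
			return -1;
		}
	}
	/* A fd tracker would exclude its own descriptors from [first, last] here. */
	return plibc_close_range(first, last, flags);
}

Compiled into a shared object (for example, cc -shared -fPIC wrap.c -o
wrap.so -ldl, where wrap.c is a hypothetical file name) and loaded through
LD_PRELOAD, such a wrapper sees every close_range() call the application
makes, which is the same interception point the liblttng-ust-fd override
relies on.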