ust-fd: Add close_range declaration
author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thu, 9 May 2024 19:09:17 +0000 (15:09 -0400)
committer: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thu, 9 May 2024 19:09:17 +0000 (15:09 -0400)
Old libc headers do not contain a declaration of close_range(). Emit our
own declaration to prevent compiler warnings.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: If6ca8193895efbb6ce1ba46e092939b8099bcff6

29 files changed:
README.md
configure.ac
doc/man/Makefile.am
doc/man/lttng-ust.3.txt
include/lttng/ust-tracepoint-event.h
src/common/Makefile.am
src/common/counter/counter.c
src/common/counter/shm.c
src/common/counter/shm.h
src/common/getenv.c
src/common/macros.h
src/common/populate.c [new file with mode: 0644]
src/common/populate.h [new file with mode: 0644]
src/common/ringbuffer-clients/metadata-template.h
src/common/ringbuffer-clients/template.h
src/common/ringbuffer/backend_types.h
src/common/ringbuffer/frontend_api.h
src/common/ringbuffer/frontend_internal.h
src/common/ringbuffer/frontend_types.h
src/common/ringbuffer/ring_buffer_backend.c
src/common/ringbuffer/ring_buffer_frontend.c
src/common/ringbuffer/ringbuffer-config.h
src/common/ringbuffer/shm.c
src/common/ringbuffer/shm.h
src/common/smp.c
src/common/ust-fd.h
src/lib/lttng-ust-common/fd-tracker.c
src/lib/lttng-ust-fd/lttng-ust-fd.c
tests/unit/libringbuffer/shm.c

index f71649b4155d9f02978d3f40b12b5cf832cf9cd9..657cba85e45edbf031a6576fe5284ce0f2bcbf22 100644 (file)
--- a/README.md
+++ b/README.md
@@ -203,6 +203,23 @@ compiled in C++. To compile tracepoint probes in C++, you need
 G++ >= 4.7 or Clang >= 4.0. The C++ compilers need to support C++11.
 
 
 G++ >= 4.7 or Clang >= 4.0. The C++ compilers need to support C++11.
 
 
+Supported versions
+------------------
+
+The LTTng project supports the last two released stable versions
+(e.g. stable-2.13 and stable-2.12).
+
+Fixes are backported from the master branch to the last stable version
+unless those fixes would break the ABI or API. Those fixes may be backported
+to the second-last stable version, depending on complexity and ABI/API
+compatibility.
+
+Security fixes are backported from the master branch to both the last stable
+version and the second-last stable version.
+
+New features are integrated into the master branch and not backported to the
+last stable branch.
+
 Contact
 -------
 
 Contact
 -------
 
index a1c12412e7445c6cd8699a527488cb7fad9203d5..05c343b7aeb7114f1830bd88b23b701ac43af27c 100644 (file)
@@ -609,6 +609,9 @@ AC_SUBST(AM_CPPFLAGS)
 
 AC_SUBST(JNI_CPPFLAGS)
 
 
 AC_SUBST(JNI_CPPFLAGS)
 
+# Used in man pages
+AC_SUBST([LTTNG_UST_MAJOR_VERSION], ust_version_major)
+AC_SUBST([LTTNG_UST_MINOR_VERSION], ust_version_minor)
 
 ##                                     ##
 ## Output files generated by configure ##
 
 ##                                     ##
 ## Output files generated by configure ##
index 29c06739657ec6d5ed4c7fb7e6122468494f4e1a..94510d4eff61a73360f5fc64fd5e6090f33e18e4 100644 (file)
@@ -76,13 +76,14 @@ xmlto_verbose_out_ = $(xmlto_verbose_out_@AM_DEFAULT_V@)
 xmlto_verbose_out_0 = 2>/dev/null
 
 # Tools to execute:
 xmlto_verbose_out_0 = 2>/dev/null
 
 # Tools to execute:
-ADOC = $(asciidoc_verbose)$(ASCIIDOC) -f $(ASCIIDOC_CONF) -d manpage \
+ADOC = $(asciidoc_verbose)$(ASCIIDOC) -v -f $(ASCIIDOC_CONF) -d manpage \
        -a mansource="LTTng" \
        -a manmanual="LTTng Manual" \
        -a mansource="LTTng" \
        -a manmanual="LTTng Manual" \
-       -a manversion="$(PACKAGE_VERSION)"
+       -a manversion="$(PACKAGE_VERSION)" \
+       -a lttng_version="$(LTTNG_UST_MAJOR_VERSION).$(LTTNG_UST_MINOR_VERSION)"
 
 ADOC_DOCBOOK = $(ADOC) -b docbook
 
 ADOC_DOCBOOK = $(ADOC) -b docbook
-XTO = $(xmlto_verbose)$(XMLTO) -m $(XSL_FILE) man
+XTO = $(xmlto_verbose)$(XMLTO) -v -m $(XSL_FILE) man
 
 # Recipes:
 %.1.xml: $(srcdir)/%.1.txt $(COMMON_DEPS)
 
 # Recipes:
 %.1.xml: $(srcdir)/%.1.txt $(COMMON_DEPS)
index 23d31cacaeb681c119e6df75441d636165fa6775..601ebec86dc1a358004d985d25fdc3205bd09f55 100644 (file)
@@ -688,6 +688,27 @@ NOTE: Neither `lttng_ust_tracepoint_enabled()` nor
 `lttng_ust_do_tracepoint()` have a `STAP_PROBEV()` call, so if you need
 it, you should emit this call yourself.
 
 `lttng_ust_do_tracepoint()` have a `STAP_PROBEV()` call, so if you need
 it, you should emit this call yourself.
 
+Tracing in C/C++ constructors and destructors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As of LTTng-UST{nbsp}2.13, tracepoint definitions are implemented using
+compound literals. In the following cases, those compound literals are
+allocated on the heap:
+
+* g++{nbsp}<={nbsp}4.8 is used as the compiler or,
+* `LTTNG_UST_ALLOCATE_COMPOUND_LITERAL_ON_HEAP` is defined in the C pre-processor flags
+and the application is compiled with a C++ compiler
+
+When the compound literals are heap-allocated, there are some cases in which
+both C-style and C++ constructors and destructors will not be traced.
+
+1. C-style constructors and destructors in statically linked archives
+2. C-style constructors and destructors in the application itself
+3. Some C++-style constructors and destructors in the application and statically linked archives
+
+In the 3rd case above, which C++-style constructors and destructors will not be traced depends
+on the initialization order within each translation unit and across the entire program when
+all translation units are linked together.
 
 [[build-static]]
 Statically linking the tracepoint provider
 
 [[build-static]]
 Statically linking the tracepoint provider
@@ -1209,8 +1230,9 @@ if (lttng_ust_loaded) {
 [[example]]
 EXAMPLE
 -------
 [[example]]
 EXAMPLE
 -------
+
 NOTE: A few examples are available in the
 NOTE: A few examples are available in the
-https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples[`doc/examples`]
+https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples[`doc/examples`]
 directory of LTTng-UST's source tree.
 
 This example shows all the features documented in the previous
 directory of LTTng-UST's source tree.
 
 This example shows all the features documented in the previous
@@ -1524,7 +1546,7 @@ affect application timings.
     Path to the shared object which acts as the clock override plugin.
     An example of such a plugin can be found in the LTTng-UST
     documentation under
     Path to the shared object which acts as the clock override plugin.
     An example of such a plugin can be found in the LTTng-UST
     documentation under
-    https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
+    https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
 
 `LTTNG_UST_DEBUG`::
     If set, enable `liblttng-ust`'s debug and error output.
 
 `LTTNG_UST_DEBUG`::
     If set, enable `liblttng-ust`'s debug and error output.
@@ -1533,7 +1555,24 @@ affect application timings.
     Path to the shared object which acts as the `getcpu()` override
     plugin. An example of such a plugin can be found in the LTTng-UST
     documentation under
     Path to the shared object which acts as the `getcpu()` override
     plugin. An example of such a plugin can be found in the LTTng-UST
     documentation under
-    https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+    https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+
+`LTTNG_UST_MAP_POPULATE_POLICY`::
++
+--
+If set, override the policy used to populate shared memory pages
+within the application. The expected values are:
+
+`none`:::
+  Do not pre-populate any pages, take minor faults on first access
+  while tracing.
+
+`cpu_possible`:::
+  Pre-populate pages for all possible CPUs in the system, as
+  listed by `/sys/devices/system/cpu/possible`.
+--
++
+Default: `none`. If the policy is unknown, use the default.
 
 `LTTNG_UST_REGISTER_TIMEOUT`::
     Waiting time for the _registration done_ session daemon command
 
 `LTTNG_UST_REGISTER_TIMEOUT`::
     Waiting time for the _registration done_ session daemon command
index baf9879371898289ff5d9cef25763403f9448be1..5d845ae1e4bb0246cef8cda5495b89e04aa89c17 100644 (file)
@@ -255,6 +255,38 @@ void lttng_ust__event_template_proto___##_provider##___##_name(LTTNG_UST__TP_ARG
        };
 #include LTTNG_UST_TRACEPOINT_INCLUDE
 
        };
 #include LTTNG_UST_TRACEPOINT_INCLUDE
 
+
+/*
+ * Stage 0.9.0
+ * Verifying sequence length types are of an unsigned type.
+ */
+
+/* Reset all macros within LTTNG_UST_TRACEPOINT_EVENT */
+#include <lttng/ust-tracepoint-event-reset.h>
+#include <lttng/ust-tracepoint-event-write.h>
+#include <lttng/ust-tracepoint-event-nowrite.h>
+
+/*
+ * Note that it is not possible to encode the length type as a C identifier,
+ * since it can be multiple tokens.
+ */
+#undef lttng_ust__field_sequence_encoded
+#define lttng_ust__field_sequence_encoded(_type, _item, _src, _byte_order,     \
+                       _length_type, _src_length, _encoding, _nowrite, \
+                       _elem_type_base)                        \
+       lttng_ust_static_assert(!lttng_ust_is_signed_type(_length_type), \
+                               "Length type " #_length_type " is not a unsigned integer type", \
+                               Length_type_is_not_a_unsigned_integer_type);
+
+#undef LTTNG_UST_TP_FIELDS
+#define LTTNG_UST_TP_FIELDS(...) __VA_ARGS__   /* Only one used in this phase */
+
+#undef LTTNG_UST__TRACEPOINT_EVENT_CLASS
+#define LTTNG_UST__TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields)    \
+               _fields
+
+#include LTTNG_UST_TRACEPOINT_INCLUDE
+
 #if defined(__cplusplus)
 
 /*
 #if defined(__cplusplus)
 
 /*
index 05d08ade7dab2d5137e2d430f8ef7acdee5f96dd..ad889d165d784582d93aff771ae5a9ed2fdd7bbc 100644 (file)
@@ -171,6 +171,8 @@ libcommon_la_SOURCES = \
        logging.h \
        smp.c \
        smp.h \
        logging.h \
        smp.c \
        smp.h \
+       populate.c \
+       populate.h \
        strutils.c \
        strutils.h \
        utils.c \
        strutils.c \
        strutils.h \
        utils.c \
index 60edad0c5b63b9ea2a1f3f92e9ca42e5ab400217..99a46af6d12549416cba4fb4575db9cf97791fe6 100644 (file)
@@ -17,6 +17,7 @@
 #include "common/bitmap.h"
 
 #include "common/smp.h"
 #include "common/bitmap.h"
 
 #include "common/smp.h"
+#include "common/populate.h"
 #include "shm.h"
 
 static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
 #include "shm.h"
 
 static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
@@ -84,13 +85,14 @@ static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int s
        if (counter->is_daemon) {
                /* Allocate and clear shared memory. */
                shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
        if (counter->is_daemon) {
                /* Allocate and clear shared memory. */
                shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
-                       shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+                       shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu,
+                       lttng_ust_map_populate_cpu_is_enabled(cpu));
                if (!shm_object)
                        return -ENOMEM;
        } else {
                /* Map pre-existing shared memory. */
                shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
                if (!shm_object)
                        return -ENOMEM;
        } else {
                /* Map pre-existing shared memory. */
                shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
-                       shm_fd, shm_length);
+                       shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu));
                if (!shm_object)
                        return -ENOMEM;
        }
                if (!shm_object)
                        return -ENOMEM;
        }
@@ -211,12 +213,13 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config
        int cpu, ret;
        int nr_handles = 0;
        int nr_cpus = get_possible_cpus_array_len();
        int cpu, ret;
        int nr_handles = 0;
        int nr_cpus = get_possible_cpus_array_len();
+       bool populate = lttng_ust_map_populate_is_enabled();
 
        if (validate_args(config, nr_dimensions, max_nr_elem,
                        global_sum_step, global_counter_fd, nr_counter_cpu_fds,
                        counter_cpu_fds))
                return NULL;
 
        if (validate_args(config, nr_dimensions, max_nr_elem,
                        global_sum_step, global_counter_fd, nr_counter_cpu_fds,
                        counter_cpu_fds))
                return NULL;
-       counter = zmalloc(sizeof(struct lib_counter));
+       counter = zmalloc_populate(sizeof(struct lib_counter), populate);
        if (!counter)
                return NULL;
        counter->global_counters.shm_fd = -1;
        if (!counter)
                return NULL;
        counter->global_counters.shm_fd = -1;
@@ -225,13 +228,13 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config
        if (lttng_counter_set_global_sum_step(counter, global_sum_step))
                goto error_sum_step;
        counter->nr_dimensions = nr_dimensions;
        if (lttng_counter_set_global_sum_step(counter, global_sum_step))
                goto error_sum_step;
        counter->nr_dimensions = nr_dimensions;
-       counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+       counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate);
        if (!counter->dimensions)
                goto error_dimensions;
        for (dimension = 0; dimension < nr_dimensions; dimension++)
                counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
        if (config->alloc & COUNTER_ALLOC_PER_CPU) {
        if (!counter->dimensions)
                goto error_dimensions;
        for (dimension = 0; dimension < nr_dimensions; dimension++)
                counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
        if (config->alloc & COUNTER_ALLOC_PER_CPU) {
-               counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+               counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate);
                if (!counter->percpu_counters)
                        goto error_alloc_percpu;
                for_each_possible_cpu(cpu)
                if (!counter->percpu_counters)
                        goto error_alloc_percpu;
                for_each_possible_cpu(cpu)
@@ -250,7 +253,7 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config
        if (config->alloc & COUNTER_ALLOC_PER_CPU)
                nr_handles += nr_cpus;
        /* Allocate table for global and per-cpu counters. */
        if (config->alloc & COUNTER_ALLOC_PER_CPU)
                nr_handles += nr_cpus;
        /* Allocate table for global and per-cpu counters. */
-       counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+       counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate);
        if (!counter->object_table)
                goto error_alloc_object_table;
 
        if (!counter->object_table)
                goto error_alloc_object_table;
 
index 8b65d1fc6137dcb80dafbe5c39689ed8c5010c29..6f7ae37abd2c410483125cf4fa4e8d78710529b1 100644 (file)
@@ -69,12 +69,12 @@ error:
        return ret;
 }
 
        return ret;
 }
 
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
 {
        struct lttng_counter_shm_object_table *table;
 
 {
        struct lttng_counter_shm_object_table *table;
 
-       table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
-                       max_nb_obj * sizeof(table->objects[0]));
+       table = zmalloc_populate(sizeof(struct lttng_counter_shm_object_table) +
+                       max_nb_obj * sizeof(table->objects[0]), populate);
        if (!table)
                return NULL;
        table->size = max_nb_obj;
        if (!table)
                return NULL;
        table->size = max_nb_obj;
@@ -84,10 +84,11 @@ struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(siz
 static
 struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
                                           size_t memory_map_size,
 static
 struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
                                           size_t memory_map_size,
-                                          int cpu_fd)
+                                          int cpu_fd, bool populate)
 {
 {
-       int shmfd, ret;
        struct lttng_counter_shm_object *obj;
        struct lttng_counter_shm_object *obj;
+       int flags = MAP_SHARED;
+       int shmfd, ret;
        char *memory_map;
 
        if (cpu_fd < 0)
        char *memory_map;
 
        if (cpu_fd < 0)
@@ -121,9 +122,11 @@ struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struc
        obj->shm_fd_ownership = 0;
        obj->shm_fd = shmfd;
 
        obj->shm_fd_ownership = 0;
        obj->shm_fd = shmfd;
 
+       if (populate)
+               flags |= LTTNG_MAP_POPULATE;
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
-                         MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+                         flags, shmfd, 0);
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
@@ -145,7 +148,7 @@ error_zero_file:
 
 static
 struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
 
 static
 struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
-                                          size_t memory_map_size)
+                                          size_t memory_map_size, bool populate)
 {
        struct lttng_counter_shm_object *obj;
        void *memory_map;
 {
        struct lttng_counter_shm_object *obj;
        void *memory_map;
@@ -154,7 +157,7 @@ struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struc
                return NULL;
        obj = &table->objects[table->allocated_len];
 
                return NULL;
        obj = &table->objects[table->allocated_len];
 
-       memory_map = zmalloc(memory_map_size);
+       memory_map = zmalloc_populate(memory_map_size, populate);
        if (!memory_map)
                goto alloc_error;
 
        if (!memory_map)
                goto alloc_error;
 
@@ -197,13 +200,15 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct ltt
                        size_t memory_map_size,
                        enum lttng_counter_shm_object_type type,
                        int cpu_fd,
                        size_t memory_map_size,
                        enum lttng_counter_shm_object_type type,
                        int cpu_fd,
-                       int cpu)
+                       int cpu,
+                       bool populate)
 #else
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
                        size_t memory_map_size,
                        enum lttng_counter_shm_object_type type,
                        int cpu_fd,
 #else
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
                        size_t memory_map_size,
                        enum lttng_counter_shm_object_type type,
                        int cpu_fd,
-                       int cpu __attribute__((unused)))
+                       int cpu __attribute__((unused)),
+                       bool populate)
 #endif
 {
        struct lttng_counter_shm_object *shm_object;
 #endif
 {
        struct lttng_counter_shm_object *shm_object;
@@ -226,10 +231,11 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct ltt
        switch (type) {
        case LTTNG_COUNTER_SHM_OBJECT_SHM:
                shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
        switch (type) {
        case LTTNG_COUNTER_SHM_OBJECT_SHM:
                shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
-                               cpu_fd);
+                               cpu_fd, populate);
                break;
        case LTTNG_COUNTER_SHM_OBJECT_MEM:
                break;
        case LTTNG_COUNTER_SHM_OBJECT_MEM:
-               shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
+               shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size,
+                               populate);
                break;
        default:
                assert(0);
                break;
        default:
                assert(0);
@@ -242,10 +248,10 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct ltt
 }
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
 }
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
-                       int shm_fd,
-                       size_t memory_map_size)
+                       int shm_fd, size_t memory_map_size, bool populate)
 {
        struct lttng_counter_shm_object *obj;
 {
        struct lttng_counter_shm_object *obj;
+       int flags = MAP_SHARED;
        char *memory_map;
 
        if (table->allocated_len >= table->size)
        char *memory_map;
 
        if (table->allocated_len >= table->size)
@@ -256,9 +262,11 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struc
        obj->shm_fd = shm_fd;
        obj->shm_fd_ownership = 1;
 
        obj->shm_fd = shm_fd;
        obj->shm_fd_ownership = 1;
 
+       if (populate)
+               flags |= LTTNG_MAP_POPULATE;
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
-                         MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+                         flags, shm_fd, 0);
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
index 689edb0a5a1917a9bc6591af81688442e52affbb..1293a7b09410484ecb4774d7a3da749ed44a710c 100644 (file)
@@ -10,6 +10,7 @@
 #include <stddef.h>
 #include <stdint.h>
 #include <unistd.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <unistd.h>
+#include <stdbool.h>
 #include "common/logging.h"
 #include <urcu/compiler.h>
 #include "shm_types.h"
 #include "common/logging.h"
 #include <urcu/compiler.h>
 #include "shm_types.h"
@@ -73,18 +74,18 @@ void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_cou
 
 #define lttng_counter_set_shmp(ref, src)       _lttng_counter_set_shmp(&(ref)._ref, src)
 
 
 #define lttng_counter_set_shmp(ref, src)       _lttng_counter_set_shmp(&(ref)._ref, src)
 
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
        __attribute__((visibility("hidden")));
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
                        size_t memory_map_size,
                        enum lttng_counter_shm_object_type type,
                        const int cpu_fd,
        __attribute__((visibility("hidden")));
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
                        size_t memory_map_size,
                        enum lttng_counter_shm_object_type type,
                        const int cpu_fd,
-                       int cpu)
+                       int cpu, bool populate)
        __attribute__((visibility("hidden")));
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
        __attribute__((visibility("hidden")));
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
-                       int shm_fd, size_t memory_map_size)
+                       int shm_fd, size_t memory_map_size, bool populate)
        __attribute__((visibility("hidden")));
 
 /* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
        __attribute__((visibility("hidden")));
 
 /* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
index 7f7b85349fe597654b4a2c4700a0e392478275ed..120225e6bc888104262856849ea9a51fcb71151d 100644 (file)
@@ -42,6 +42,7 @@ static struct lttng_env lttng_env[] = {
        /* Env. var. which can be used in setuid/setgid executables. */
        { "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
        { "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
        /* Env. var. which can be used in setuid/setgid executables. */
        { "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
        { "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
+       { "LTTNG_UST_MAP_POPULATE_POLICY", LTTNG_ENV_NOT_SECURE, NULL, },
 
        /* Env. var. which are not fetched in setuid/setgid executables. */
        { "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
 
        /* Env. var. which are not fetched in setuid/setgid executables. */
        { "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
index 308a1dfca696349832af3df107c22d808155f4bb..e8965b383cae3b25d0228d0778f79ca3487c6483 100644 (file)
@@ -8,9 +8,32 @@
 #define _UST_COMMON_MACROS_H
 
 #include <stdlib.h>
 #define _UST_COMMON_MACROS_H
 
 #include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
 
 #include <lttng/ust-arch.h>
 
 
 #include <lttng/ust-arch.h>
 
+/*
+ * calloc() does not always populate the page table for the allocated
+ * memory. Optionally enforce page table populate.
+ */
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+       __attribute__((always_inline));
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+{
+       if (populate) {
+               void *ret = malloc(len);
+               if (ret == NULL)
+                       return ret;
+               bzero(ret, len);
+               return ret;
+       } else {
+               return calloc(len, 1);
+       }
+}
+
 /*
  * Memory allocation zeroed
  */
 /*
  * Memory allocation zeroed
  */
@@ -20,7 +43,7 @@ void *zmalloc(size_t len)
 static inline
 void *zmalloc(size_t len)
 {
 static inline
 void *zmalloc(size_t len)
 {
-       return calloc(len, 1);
+       return zmalloc_populate(len, false);
 }
 
 #define max_t(type, x, y)                              \
 }
 
 #define max_t(type, x, y)                              \
diff --git a/src/common/populate.c b/src/common/populate.c
new file mode 100644 (file)
index 0000000..b7f6bcc
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "common/getenv.h"
+#include "common/logging.h"
+#include "common/populate.h"
+
+enum populate_policy {
+       POPULATE_UNSET,
+
+       POPULATE_NONE,
+       POPULATE_CPU_POSSIBLE,
+
+       POPULATE_UNKNOWN,
+};
+
+static enum populate_policy map_populate_policy = POPULATE_UNSET;
+
+static void init_map_populate_policy(void)
+{
+       const char *populate_env_str;
+
+       if (map_populate_policy != POPULATE_UNSET)
+               return;
+
+       populate_env_str = lttng_ust_getenv("LTTNG_UST_MAP_POPULATE_POLICY");
+       if (!populate_env_str) {
+               map_populate_policy = POPULATE_NONE;
+               return;
+       }
+       if (!strcmp(populate_env_str, "none")) {
+               map_populate_policy = POPULATE_NONE;
+       } else if (!strcmp(populate_env_str, "cpu_possible")) {
+               map_populate_policy = POPULATE_CPU_POSSIBLE;
+       } else {
+               /*
+                * populate_env_str is an untrusted environment variable
+                * input (can be provided to setuid/setgid binaries), so
+                * don't even try to print it.
+                */
+               WARN("Unknown policy for LTTNG_UST_MAP_POPULATE_POLICY environment variable.");
+               map_populate_policy = POPULATE_UNKNOWN;
+       }
+}
+
+/*
+ * Return the shared page populate policy for global pages. Returns true
+ * if shared memory pages should be pre-populated, false otherwise.
+ */
+bool lttng_ust_map_populate_is_enabled(void)
+{
+       init_map_populate_policy();
+
+       switch (map_populate_policy) {
+       case POPULATE_UNKNOWN:  /* Fall-through */
+       case POPULATE_NONE:
+               return false;
+       case POPULATE_CPU_POSSIBLE:
+               return true;
+       default:
+               abort();
+       }
+       return false;
+}
+
+/*
+ * Return the shared page populate policy based on the @cpu number
+ * provided as input. Returns true if shared memory pages should be
+ * pre-populated, false otherwise.
+ *
+ * The @cpu argument is currently unused except for negative value
+ * validation. It is present to eventually match cpu affinity or cpu
+ * online masks if those features are added in the future.
+ */
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+{
+       /* Reject invalid cpu number. */
+       if (cpu < 0)
+               return false;
+
+       return lttng_ust_map_populate_is_enabled();
+}
diff --git a/src/common/populate.h b/src/common/populate.h
new file mode 100644 (file)
index 0000000..f65c485
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_COMMON_POPULATE_H
+#define _UST_COMMON_POPULATE_H
+
+#include <stdbool.h>
+
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+       __attribute__((visibility("hidden")));
+
+bool lttng_ust_map_populate_is_enabled(void)
+       __attribute__((visibility("hidden")));
+
+#endif /* _UST_COMMON_POPULATE_H */
index 56d955161af74ced9d8158ab04fb9b69cfc16844..080288d5241ccb2c60a47d4e2601150de0af4b34 100644 (file)
@@ -93,7 +93,7 @@ static size_t client_packet_header_size(void)
 }
 
 static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
 }
 
 static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
-               uint64_t tsc __attribute__((unused)),
+               uint64_t timestamp __attribute__((unused)),
                unsigned int subbuf_idx,
                struct lttng_ust_shm_handle *handle)
 {
                unsigned int subbuf_idx,
                struct lttng_ust_shm_handle *handle)
 {
@@ -125,7 +125,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
  * subbuffer. data_size is between 1 and subbuf_size.
  */
 static void client_buffer_end(struct lttng_ust_ring_buffer *buf,
  * subbuffer. data_size is between 1 and subbuf_size.
  */
 static void client_buffer_end(struct lttng_ust_ring_buffer *buf,
-               uint64_t tsc  __attribute__((unused)),
+               uint64_t timestamp  __attribute__((unused)),
                unsigned int subbuf_idx, unsigned long data_size,
                struct lttng_ust_shm_handle *handle,
                const struct lttng_ust_ring_buffer_ctx *ctx)
                unsigned int subbuf_idx, unsigned long data_size,
                struct lttng_ust_shm_handle *handle,
                const struct lttng_ust_ring_buffer_ctx *ctx)
@@ -193,7 +193,7 @@ static const struct lttng_ust_ring_buffer_config client_config = {
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,
 
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,
 
-       .tsc_bits = 0,
+       .timestamp_bits = 0,
        .alloc = RING_BUFFER_ALLOC_GLOBAL,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
        .alloc = RING_BUFFER_ALLOC_GLOBAL,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
index fe8f8e020d22af883bd9daecd1f156620eb4308c..58a8400d08f808b54db74f1d13f41394e6d60fe7 100644 (file)
@@ -19,8 +19,8 @@
 #include "common/clock.h"
 #include "common/ringbuffer/frontend_types.h"
 
 #include "common/clock.h"
 #include "common/ringbuffer/frontend_types.h"
 
-#define LTTNG_COMPACT_EVENT_BITS       5
-#define LTTNG_COMPACT_TSC_BITS         27
+#define LTTNG_COMPACT_EVENT_BITS       5
+#define LTTNG_COMPACT_TIMESTAMP_BITS   27
 
 /*
  * Keep the natural field alignment for _each field_ within this structure if
 
 /*
  * Keep the natural field alignment for _each field_ within this structure if
@@ -156,7 +156,7 @@ size_t record_header_size(
        case 1: /* compact */
                padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
                offset += padding;
        case 1: /* compact */
                padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
                offset += padding;
-               if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
@@ -172,7 +172,7 @@ size_t record_header_size(
                padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
                padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
-               if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
                        offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
                        offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
@@ -235,14 +235,14 @@ void lttng_write_event_header(const struct lttng_ust_ring_buffer_config *config,
                                event_id);
                bt_bitfield_write(&id_time, uint32_t,
                                LTTNG_COMPACT_EVENT_BITS,
                                event_id);
                bt_bitfield_write(&id_time, uint32_t,
                                LTTNG_COMPACT_EVENT_BITS,
-                               LTTNG_COMPACT_TSC_BITS,
-                               ctx->priv->tsc);
+                               LTTNG_COMPACT_TIMESTAMP_BITS,
+                               ctx->priv->timestamp);
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
-               uint32_t timestamp = (uint32_t) ctx->priv->tsc;
+               uint32_t timestamp = (uint32_t) ctx->priv->timestamp;
                uint16_t id = event_id;
 
                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                uint16_t id = event_id;
 
                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -275,7 +275,7 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co
 
        switch (lttng_chan->priv->header_type) {
        case 1: /* compact */
 
        switch (lttng_chan->priv->header_type) {
        case 1: /* compact */
-               if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+               if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
                        uint32_t id_time = 0;
 
                        bt_bitfield_write(&id_time, uint32_t,
                        uint32_t id_time = 0;
 
                        bt_bitfield_write(&id_time, uint32_t,
@@ -284,12 +284,12 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co
                                        event_id);
                        bt_bitfield_write(&id_time, uint32_t,
                                        LTTNG_COMPACT_EVENT_BITS,
                                        event_id);
                        bt_bitfield_write(&id_time, uint32_t,
                                        LTTNG_COMPACT_EVENT_BITS,
-                                       LTTNG_COMPACT_TSC_BITS,
-                                       ctx_private->tsc);
+                                       LTTNG_COMPACT_TIMESTAMP_BITS,
+                                       ctx_private->timestamp);
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
-                       uint64_t timestamp = ctx_private->tsc;
+                       uint64_t timestamp = ctx_private->timestamp;
 
                        bt_bitfield_write(&id, uint8_t,
                                        0,
 
                        bt_bitfield_write(&id, uint8_t,
                                        0,
@@ -305,8 +305,8 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co
                break;
        case 2: /* large */
        {
                break;
        case 2: /* large */
        {
-               if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-                       uint32_t timestamp = (uint32_t) ctx_private->tsc;
+               if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
+                       uint32_t timestamp = (uint32_t) ctx_private->timestamp;
                        uint16_t id = event_id;
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        uint16_t id = event_id;
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -314,7 +314,7 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
-                       uint64_t timestamp = ctx_private->tsc;
+                       uint64_t timestamp = ctx_private->timestamp;
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
 
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
@@ -364,7 +364,7 @@ static size_t client_packet_header_size(void)
        return offsetof(struct packet_header, ctx.header_end);
 }
 
        return offsetof(struct packet_header, ctx.header_end);
 }
 
-static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
                                unsigned int subbuf_idx,
                                struct lttng_ust_shm_handle *handle)
 {
                                unsigned int subbuf_idx,
                                struct lttng_ust_shm_handle *handle)
 {
@@ -384,7 +384,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
        memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
        header->stream_id = lttng_chan->priv->id;
        header->stream_instance_id = buf->backend.cpu;
        memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
        header->stream_id = lttng_chan->priv->id;
        header->stream_instance_id = buf->backend.cpu;
-       header->ctx.timestamp_begin = tsc;
+       header->ctx.timestamp_begin = timestamp;
        header->ctx.timestamp_end = 0;
        header->ctx.content_size = ~0ULL; /* for debugging */
        header->ctx.packet_size = ~0ULL;
        header->ctx.timestamp_end = 0;
        header->ctx.content_size = ~0ULL; /* for debugging */
        header->ctx.packet_size = ~0ULL;
@@ -397,7 +397,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. data_size is between 1 and subbuf_size.
  */
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. data_size is between 1 and subbuf_size.
  */
-static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
                              unsigned int subbuf_idx, unsigned long data_size,
                              struct lttng_ust_shm_handle *handle,
                              const struct lttng_ust_ring_buffer_ctx *ctx)
                              unsigned int subbuf_idx, unsigned long data_size,
                              struct lttng_ust_shm_handle *handle,
                              const struct lttng_ust_ring_buffer_ctx *ctx)
@@ -413,7 +413,7 @@ static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
        assert(header);
        if (!header)
                return;
        assert(header);
        if (!header)
                return;
-       header->ctx.timestamp_end = tsc;
+       header->ctx.timestamp_end = timestamp;
        header->ctx.content_size =
                (uint64_t) data_size * CHAR_BIT;                /* in bits */
        header->ctx.packet_size =
        header->ctx.content_size =
                (uint64_t) data_size * CHAR_BIT;                /* in bits */
        header->ctx.packet_size =
@@ -614,7 +614,7 @@ static const struct lttng_ust_ring_buffer_config client_config = {
        .cb.content_size_field = client_content_size_field,
        .cb.packet_size_field = client_packet_size_field,
 
        .cb.content_size_field = client_content_size_field,
        .cb.packet_size_field = client_packet_size_field,
 
-       .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+       .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
index a4e207f460fabe47a4f84388df649ee08d14a817..c9cc4025aaf926becb1566ffbfb8f0964bf1103a 100644 (file)
@@ -87,7 +87,7 @@ struct channel_backend {
        unsigned int buf_size_order;    /* Order of buffer size */
        unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
        unsigned long num_subbuf;       /* Number of sub-buffers for writer */
        unsigned int buf_size_order;    /* Order of buffer size */
        unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
        unsigned long num_subbuf;       /* Number of sub-buffers for writer */
-       uint64_t start_tsc;             /* Channel creation TSC value */
+       uint64_t start_timestamp;       /* Channel creation timestamp value */
        DECLARE_SHMP(void *, priv_data);/* Client-specific information */
        struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */
        char name[NAME_MAX];            /* Channel name */
        DECLARE_SHMP(void *, priv_data);/* Client-specific information */
        struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */
        char name[NAME_MAX];            /* Channel name */
index e35070736e36f32875e9512921c941a585147cbe..030169ff4004cbd0def9cf813d02097bdf370b6e 100644 (file)
@@ -82,8 +82,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi
        *o_begin = v_read(config, &buf->offset);
        *o_old = *o_begin;
 
        *o_begin = v_read(config, &buf->offset);
        *o_old = *o_begin;
 
-       ctx_private->tsc = lib_ring_buffer_clock_read(chan);
-       if ((int64_t) ctx_private->tsc == -EIO)
+       ctx_private->timestamp = lib_ring_buffer_clock_read(chan);
+       if ((int64_t) ctx_private->timestamp == -EIO)
                return 1;
 
        /*
                return 1;
 
        /*
@@ -93,8 +93,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi
         */
        //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
         */
        //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
-       if (last_tsc_overflow(config, buf, ctx_private->tsc))
-               ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+               ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
 
        if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
 
        if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
@@ -130,7 +130,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi
  * @ctx: ring buffer context. (input and output) Must be already initialized.
  *
  * Atomic wait-free slot reservation. The reserved space starts at the context
  * @ctx: ring buffer context. (input and output) Must be already initialized.
  *
  * Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is
+ * "timestamp".
  *
  * Return :
  *  0 on success.
  *
  * Return :
  *  0 on success.
@@ -179,12 +180,12 @@ int lib_ring_buffer_reserve(const struct lttng_ust_ring_buffer_config *config,
                goto slow_path;
 
        /*
                goto slow_path;
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * record headers, never the opposite (missing a full TSC record header
-        * when it would be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full
+        * timestamp record headers, never the opposite (missing a full
+        * timestamp record header when it would be needed).
         */
         */
-       save_last_tsc(config, buf, ctx_private->tsc);
+       save_last_timestamp(config, buf, ctx_private->timestamp);
 
        /*
         * Push the reader if necessary
 
        /*
         * Push the reader if necessary
@@ -317,17 +318,17 @@ int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_ring_buffer_confi
 
        /*
         * We need to ensure that if the cmpxchg succeeds and discards the
 
        /*
         * We need to ensure that if the cmpxchg succeeds and discards the
-        * record, the next record will record a full TSC, because it cannot
-        * rely on the last_tsc associated with the discarded record to detect
-        * overflows. The only way to ensure this is to set the last_tsc to 0
-        * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
+        * record, the next record will record a full timestamp, because it cannot
+        * rely on the last_timestamp associated with the discarded record to detect
+        * overflows. The only way to ensure this is to set the last_timestamp to 0
+        * (assuming no 64-bit timestamp overflow), which forces to write a 64-bit
         * timestamp in the next record.
         *
         * timestamp in the next record.
         *
-        * Note: if discard fails, we must leave the TSC in the record header.
-        * It is needed to keep track of TSC overflows for the following
+        * Note: if discard fails, we must leave the timestamp in the record header.
+        * It is needed to keep track of timestamp overflows for the following
         * records.
         */
         * records.
         */
-       save_last_tsc(config, buf, 0ULL);
+       save_last_timestamp(config, buf, 0ULL);
 
        if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
                   != end_offset))
 
        if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
                   != end_offset))
index 1dc816a3c221670a692f82bbea7bf555d062166e..d9f16a515129859d1888f3c88bd89e84366bd5a9 100644 (file)
@@ -85,62 +85,62 @@ unsigned long subbuf_index(unsigned long offset,
 }
 
 /*
 }
 
 /*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
+ * Last timestamp comparison functions. Check if the current timestamp overflows
+ * timestamp_bits bits from the last timestamp read. When overflows are
+ * detected, the full 64-bit timestamp counter should be written in the record
+ * header. Reads and writes last_timestamp atomically.
  */
 
 #if (CAA_BITS_PER_LONG == 32)
 static inline
  */
 
 #if (CAA_BITS_PER_LONG == 32)
 static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
-                  struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+                  struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
 {
 {
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return;
 
        /*
         * Ensure the compiler performs this update in a single instruction.
         */
                return;
 
        /*
         * Ensure the compiler performs this update in a single instruction.
         */
-       v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+       v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits));
 }
 
 static inline
 }
 
 static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
-                     struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+                     struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
 {
 {
-       unsigned long tsc_shifted;
+       unsigned long timestamp_shifted;
 
 
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return 0;
 
                return 0;
 
-       tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
-       if (caa_unlikely(tsc_shifted
-                    - (unsigned long)v_read(config, &buf->last_tsc)))
+       timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits);
+       if (caa_unlikely(timestamp_shifted
+                    - (unsigned long)v_read(config, &buf->last_timestamp)))
                return 1;
        else
                return 0;
 }
 #else
 static inline
                return 1;
        else
                return 0;
 }
 #else
 static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
-                  struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+                  struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
 {
 {
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return;
 
                return;
 
-       v_set(config, &buf->last_tsc, (unsigned long)tsc);
+       v_set(config, &buf->last_timestamp, (unsigned long)timestamp);
 }
 
 static inline
 }
 
 static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
-                     struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+                     struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
 {
 {
-       if (config->tsc_bits == 0 || config->tsc_bits == 64)
+       if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
                return 0;
 
                return 0;
 
-       if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
-                    >> config->tsc_bits))
+       if (caa_unlikely((timestamp - v_read(config, &buf->last_timestamp))
+                    >> config->timestamp_bits))
                return 1;
        else
                return 0;
                return 1;
        else
                return 0;
@@ -287,7 +287,7 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_ring_buffer_config
 }
 
 /*
 }
 
 /*
- * Receive end of subbuffer TSC as parameter. It has been read in the
+ * Receive end of subbuffer timestamp as parameter. It has been read in the
  * space reservation loop of either reserve or switch, which ensures it
  * progresses monotonically with event records in the buffer. Therefore,
  * it ensures that the end timestamp of a subbuffer is <= begin
  * space reservation loop of either reserve or switch, which ensures it
  * progresses monotonically with event records in the buffer. Therefore,
  * it ensures that the end timestamp of a subbuffer is <= begin
index 1b0e1a080e6bb15d79459d516c6b1eb56a81bb7e..3be7ec1bf63bd054a7180d44dcce3329b0a34d5c 100644 (file)
@@ -181,7 +181,7 @@ struct lttng_ust_ring_buffer {
        int record_disabled;
        /* End of cache-hot 32 bytes cacheline */
 
        int record_disabled;
        /* End of cache-hot 32 bytes cacheline */
 
-       union v_atomic last_tsc;        /*
+       union v_atomic last_timestamp;  /*
                                         * Last timestamp written in the buffer.
                                         */
 
                                         * Last timestamp written in the buffer.
                                         */
 
@@ -251,7 +251,7 @@ struct lttng_ust_ring_buffer_ctx_private {
                                                 * prior to record header alignment
                                                 * padding.
                                                 */
                                                 * prior to record header alignment
                                                 * padding.
                                                 */
-       uint64_t tsc;                           /* time-stamp counter value */
+       uint64_t timestamp;                     /* time-stamp counter value */
        unsigned int rflags;                    /* reservation flags */
        struct lttng_ust_ring_buffer *buf;      /*
                                                 * buffer corresponding to processor id
        unsigned int rflags;                    /* reservation flags */
        struct lttng_ust_ring_buffer *buf;      /*
                                                 * buffer corresponding to processor id
index 27c335d50129aa3594897d1957fe4a237fc6078d..d2fadb771b9aae9878d02afc2499848244c57870 100644 (file)
@@ -21,6 +21,7 @@
 #include "common/smp.h"
 #include "shm.h"
 #include "common/align.h"
 #include "common/smp.h"
 #include "shm.h"
 #include "common/align.h"
+#include "common/populate.h"
 
 /**
  * lib_ring_buffer_backend_allocate - allocate a channel buffer
 
 /**
  * lib_ring_buffer_backend_allocate - allocate a channel buffer
@@ -234,7 +235,7 @@ void channel_backend_reset(struct channel_backend *chanb)
         * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
         * priv, notifiers, config, cpumask and name.
         */
         * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
         * priv, notifiers, config, cpumask and name.
         */
-       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+       chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
 }
 
 /**
 }
 
 /**
@@ -346,7 +347,8 @@ int channel_backend_init(struct channel_backend *chanb,
                        struct shm_object *shmobj;
 
                        shmobj = shm_object_table_alloc(handle->table, shmsize,
                        struct shm_object *shmobj;
 
                        shmobj = shm_object_table_alloc(handle->table, shmsize,
-                                       SHM_OBJECT_SHM, stream_fds[i], i);
+                                       SHM_OBJECT_SHM, stream_fds[i], i,
+                                       lttng_ust_map_populate_cpu_is_enabled(i));
                        if (!shmobj)
                                goto end;
                        align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
                        if (!shmobj)
                                goto end;
                        align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
@@ -365,7 +367,8 @@ int channel_backend_init(struct channel_backend *chanb,
                struct lttng_ust_ring_buffer *buf;
 
                shmobj = shm_object_table_alloc(handle->table, shmsize,
                struct lttng_ust_ring_buffer *buf;
 
                shmobj = shm_object_table_alloc(handle->table, shmsize,
-                                       SHM_OBJECT_SHM, stream_fds[0], -1);
+                                       SHM_OBJECT_SHM, stream_fds[0], -1,
+                                       lttng_ust_map_populate_is_enabled());
                if (!shmobj)
                        goto end;
                align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
                if (!shmobj)
                        goto end;
                align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
@@ -379,7 +382,7 @@ int channel_backend_init(struct channel_backend *chanb,
                if (ret)
                        goto free_bufs;
        }
                if (ret)
                        goto free_bufs;
        }
-       chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+       chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
 
        return 0;
 
 
        return 0;
 
index 5dcc0be7263940a8e54973474b89d59f648a7a0d..ab1fc0ff878b749779d9ce3e09d1d46ed70fc9ea 100644 (file)
@@ -63,6 +63,7 @@
 #include "shm.h"
 #include "rb-init.h"
 #include "common/compat/errno.h"       /* For ENODATA */
 #include "shm.h"
 #include "rb-init.h"
 #include "common/compat/errno.h"       /* For ENODATA */
+#include "common/populate.h"
 
 /* Print DBG() messages about events lost only every 1048576 hits */
 #define DBG_PRINT_NR_LOST      (1UL << 20)
 
 /* Print DBG() messages about events lost only every 1048576 hits */
 #define DBG_PRINT_NR_LOST      (1UL << 20)
@@ -202,7 +203,7 @@ void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf,
        }
        uatomic_set(&buf->consumed, 0);
        uatomic_set(&buf->record_disabled, 0);
        }
        uatomic_set(&buf->consumed, 0);
        uatomic_set(&buf->record_disabled, 0);
-       v_set(config, &buf->last_tsc, 0);
+       v_set(config, &buf->last_timestamp, 0);
        lib_ring_buffer_backend_reset(&buf->backend, handle);
        /* Don't reset number of active readers */
        v_set(config, &buf->records_lost_full, 0);
        lib_ring_buffer_backend_reset(&buf->backend, handle);
        /* Don't reset number of active readers */
        v_set(config, &buf->records_lost_full, 0);
@@ -340,7 +341,7 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
        struct commit_counters_hot *cc_hot;
        void *priv = channel_get_private_config(chan);
        size_t subbuf_header_size;
        struct commit_counters_hot *cc_hot;
        void *priv = channel_get_private_config(chan);
        size_t subbuf_header_size;
-       uint64_t tsc;
+       uint64_t timestamp;
        int ret;
 
        /* Test for cpu hotplug */
        int ret;
 
        /* Test for cpu hotplug */
@@ -397,8 +398,8 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
                ret = -EPERM;
                goto free_chanbuf;
        }
                ret = -EPERM;
                goto free_chanbuf;
        }
-       tsc = config->cb.ring_buffer_clock_read(shmp_chan);
-       config->cb.buffer_begin(buf, tsc, 0, handle);
+       timestamp = config->cb.ring_buffer_clock_read(shmp_chan);
+       config->cb.buffer_begin(buf, timestamp, 0, handle);
        cc_hot = shmp_index(handle, buf->commit_hot, 0);
        if (!cc_hot) {
                ret = -EPERM;
        cc_hot = shmp_index(handle, buf->commit_hot, 0);
        if (!cc_hot) {
                ret = -EPERM;
@@ -980,6 +981,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c
        struct shm_object *shmobj;
        unsigned int nr_streams;
        int64_t blocking_timeout_ms;
        struct shm_object *shmobj;
        unsigned int nr_streams;
        int64_t blocking_timeout_ms;
+       bool populate = lttng_ust_map_populate_is_enabled();
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                nr_streams = get_possible_cpus_array_len();
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                nr_streams = get_possible_cpus_array_len();
@@ -1006,12 +1008,12 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c
                                         read_timer_interval))
                return NULL;
 
                                         read_timer_interval))
                return NULL;
 
-       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+       handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
        if (!handle)
                return NULL;
 
        /* Allocate table for channel + per-cpu buffers */
        if (!handle)
                return NULL;
 
        /* Allocate table for channel + per-cpu buffers */
-       handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+       handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
        if (!handle->table)
                goto error_table_alloc;
 
        if (!handle->table)
                goto error_table_alloc;
 
@@ -1026,7 +1028,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c
 
        /* Allocate normal memory for channel (not shared) */
        shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
 
        /* Allocate normal memory for channel (not shared) */
        shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
-                       -1, -1);
+                       -1, -1, populate);
        if (!shmobj)
                goto error_append;
        /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
        if (!shmobj)
                goto error_append;
        /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
@@ -1089,13 +1091,14 @@ struct lttng_ust_shm_handle *channel_handle_create(void *data,
 {
        struct lttng_ust_shm_handle *handle;
        struct shm_object *object;
 {
        struct lttng_ust_shm_handle *handle;
        struct shm_object *object;
+       bool populate = lttng_ust_map_populate_is_enabled();
 
 
-       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+       handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
        if (!handle)
                return NULL;
 
        /* Allocate table for channel + per-cpu buffers */
        if (!handle)
                return NULL;
 
        /* Allocate table for channel + per-cpu buffers */
-       handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+       handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
        if (!handle->table)
                goto error_table_alloc;
        /* Add channel object */
        if (!handle->table)
                goto error_table_alloc;
        /* Add channel object */
@@ -1124,7 +1127,7 @@ int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
        /* Add stream object */
        object = shm_object_table_append_shm(handle->table,
                        shm_fd, wakeup_fd, stream_nr,
        /* Add stream object */
        object = shm_object_table_append_shm(handle->table,
                        shm_fd, wakeup_fd, stream_nr,
-                       memory_map_size);
+                       memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr));
        if (!object)
                return -EINVAL;
        return 0;
        if (!object)
                return -EINVAL;
        return 0;
@@ -1771,7 +1774,7 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf,
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
-       config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle);
+       config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -1829,7 +1832,7 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf,
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
-       *ts_end = ctx->priv->tsc;
+       *ts_end = ctx->priv->timestamp;
 
        /*
         * Order all writes to buffer and store to ts_end before the commit
 
        /*
         * Order all writes to buffer and store to ts_end before the commit
@@ -1867,7 +1870,7 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf,
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
-       config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle);
+       config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -1921,7 +1924,7 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf,
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
-       *ts_end = ctx->priv->tsc;
+       *ts_end = ctx->priv->timestamp;
 }
 
 /*
 }
 
 /*
@@ -1945,7 +1948,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
        offsets->switch_old_start = 0;
        off = subbuf_offset(offsets->begin, chan);
 
        offsets->switch_old_start = 0;
        off = subbuf_offset(offsets->begin, chan);
 
-       ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan);
+       ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan);
 
        /*
         * Ensure we flush the header of an empty subbuffer when doing the
 
        /*
         * Ensure we flush the header of an empty subbuffer when doing the
@@ -2081,12 +2084,12 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_
                 != offsets.old);
 
        /*
                 != offsets.old);
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full
+        * timestamp records, never the opposite (missing a full timestamp
+        * record when it would be needed).
         */
         */
-       save_last_tsc(config, buf, ctx.priv->tsc);
+       save_last_timestamp(config, buf, ctx.priv->timestamp);
 
        /*
         * Push the reader if necessary
 
        /*
         * Push the reader if necessary
@@ -2155,12 +2158,12 @@ retry:
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
-       ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
-       if ((int64_t) ctx_private->tsc == -EIO)
+       ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan);
+       if ((int64_t) ctx_private->timestamp == -EIO)
                return -EIO;
 
                return -EIO;
 
-       if (last_tsc_overflow(config, buf, ctx_private->tsc))
-               ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+               ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
 
        if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
 
        if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
@@ -2368,12 +2371,12 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx,
                          != offsets.old));
 
        /*
                          != offsets.old));
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full
+        * timestamp records, never the opposite (missing a full timestamp
+        * record when it would be needed).
         */
         */
-       save_last_tsc(config, buf, ctx_private->tsc);
+       save_last_timestamp(config, buf, ctx_private->timestamp);
 
        /*
         * Push the reader if necessary
 
        /*
         * Push the reader if necessary
index 61386174c4f7e27ad019650ca16bf432e7b14bf6..83efea9efa13828528fff8b2e4fd6e2f336872da 100644 (file)
@@ -46,10 +46,10 @@ struct lttng_ust_ring_buffer_client_cb {
 
        /* Slow path only, at subbuffer switch */
        size_t (*subbuffer_header_size) (void);
 
        /* Slow path only, at subbuffer switch */
        size_t (*subbuffer_header_size) (void);
-       void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+       void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
                              unsigned int subbuf_idx,
                              struct lttng_ust_shm_handle *handle);
                              unsigned int subbuf_idx,
                              struct lttng_ust_shm_handle *handle);
-       void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+       void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
                            unsigned int subbuf_idx, unsigned long data_size,
                            struct lttng_ust_shm_handle *handle,
                            const struct lttng_ust_ring_buffer_ctx *ctx);
                            unsigned int subbuf_idx, unsigned long data_size,
                            struct lttng_ust_shm_handle *handle,
                            const struct lttng_ust_ring_buffer_ctx *ctx);
@@ -185,10 +185,10 @@ struct lttng_ust_ring_buffer_config {
        enum lttng_ust_ring_buffer_ipi_types ipi;
        enum lttng_ust_ring_buffer_wakeup_types wakeup;
        /*
        enum lttng_ust_ring_buffer_ipi_types ipi;
        enum lttng_ust_ring_buffer_wakeup_types wakeup;
        /*
-        * tsc_bits: timestamp bits saved at each record.
+        * timestamp_bits: timestamp bits saved at each record.
         *   0 and 64 disable the timestamp compression scheme.
         */
         *   0 and 64 disable the timestamp compression scheme.
         */
-       unsigned int tsc_bits;
+       unsigned int timestamp_bits;
        struct lttng_ust_ring_buffer_client_cb cb;
        /*
         * client_type is used by the consumer process (which is in a
        struct lttng_ust_ring_buffer_client_cb cb;
        /*
         * client_type is used by the consumer process (which is in a
@@ -204,18 +204,18 @@ struct lttng_ust_ring_buffer_config {
 /*
  * Reservation flags.
  *
 /*
  * Reservation flags.
  *
- * RING_BUFFER_RFLAG_FULL_TSC
+ * RING_BUFFER_RFLAG_FULL_TIMESTAMP
  *
  * This flag is passed to record_header_size() and to the primitive used to
  * write the record header. It indicates that the full 64-bit time value is
  * needed in the record header. If this flag is not set, the record header needs
  *
  * This flag is passed to record_header_size() and to the primitive used to
  * write the record header. It indicates that the full 64-bit time value is
  * needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
+ * only to contain "timestamp_bits" bit of time value.
  *
  * Reservation flags can be added by the client, starting from
  * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
  * record_header_size() to lib_ring_buffer_write_record_header().
  */
  *
  * Reservation flags can be added by the client, starting from
  * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
  * record_header_size() to lib_ring_buffer_write_record_header().
  */
-#define        RING_BUFFER_RFLAG_FULL_TSC              (1U << 0)
+#define        RING_BUFFER_RFLAG_FULL_TIMESTAMP        (1U << 0)
 #define RING_BUFFER_RFLAG_END                  (1U << 1)
 
 /*
 #define RING_BUFFER_RFLAG_END                  (1U << 1)
 
 /*
index a1ef3d69f0602544f9335a9f1d59843e14208c55..347f9af0ab1d7aa6ca8353b0641daece84afd461 100644 (file)
@@ -69,12 +69,12 @@ error:
        return ret;
 }
 
        return ret;
 }
 
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate)
 {
        struct shm_object_table *table;
 
 {
        struct shm_object_table *table;
 
-       table = zmalloc(sizeof(struct shm_object_table) +
-                       max_nb_obj * sizeof(table->objects[0]));
+       table = zmalloc_populate(sizeof(struct shm_object_table) +
+                       max_nb_obj * sizeof(table->objects[0]), populate);
        if (!table)
                return NULL;
        table->size = max_nb_obj;
        if (!table)
                return NULL;
        table->size = max_nb_obj;
@@ -84,9 +84,11 @@ struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
 static
 struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
                                           size_t memory_map_size,
 static
 struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
                                           size_t memory_map_size,
-                                          int stream_fd)
+                                          int stream_fd,
+                                          bool populate)
 {
        int shmfd, waitfd[2], ret, i;
 {
        int shmfd, waitfd[2], ret, i;
+       int flags = MAP_SHARED;
        struct shm_object *obj;
        char *memory_map;
 
        struct shm_object *obj;
        char *memory_map;
 
@@ -145,9 +147,11 @@ struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
        obj->shm_fd_ownership = 0;
        obj->shm_fd = shmfd;
 
        obj->shm_fd_ownership = 0;
        obj->shm_fd = shmfd;
 
+       if (populate)
+               flags |= LTTNG_MAP_POPULATE;
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
-                         MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+                         flags, shmfd, 0);
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
@@ -178,7 +182,7 @@ error_pipe:
 
 static
 struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
 
 static
 struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
-                                          size_t memory_map_size)
+                                          size_t memory_map_size, bool populate)
 {
        struct shm_object *obj;
        void *memory_map;
 {
        struct shm_object *obj;
        void *memory_map;
@@ -188,7 +192,7 @@ struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
                return NULL;
        obj = &table->objects[table->allocated_len];
 
                return NULL;
        obj = &table->objects[table->allocated_len];
 
-       memory_map = zmalloc(memory_map_size);
+       memory_map = zmalloc_populate(memory_map_size, populate);
        if (!memory_map)
                goto alloc_error;
 
        if (!memory_map)
                goto alloc_error;
 
@@ -255,13 +259,15 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
                        size_t memory_map_size,
                        enum shm_object_type type,
                        int stream_fd,
                        size_t memory_map_size,
                        enum shm_object_type type,
                        int stream_fd,
-                       int cpu)
+                       int cpu,
+                       bool populate)
 #else
 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
                        size_t memory_map_size,
                        enum shm_object_type type,
                        int stream_fd,
 #else
 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
                        size_t memory_map_size,
                        enum shm_object_type type,
                        int stream_fd,
-                       int cpu __attribute__((unused)))
+                       int cpu __attribute__((unused)),
+                       bool populate)
 #endif
 {
        struct shm_object *shm_object;
 #endif
 {
        struct shm_object *shm_object;
@@ -284,10 +290,11 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
        switch (type) {
        case SHM_OBJECT_SHM:
                shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
        switch (type) {
        case SHM_OBJECT_SHM:
                shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
-                               stream_fd);
+                               stream_fd, populate);
                break;
        case SHM_OBJECT_MEM:
                break;
        case SHM_OBJECT_MEM:
-               shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
+               shm_object = _shm_object_table_alloc_mem(table, memory_map_size,
+                               populate);
                break;
        default:
                assert(0);
                break;
        default:
                assert(0);
@@ -301,8 +308,9 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
 
 struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
                        int shm_fd, int wakeup_fd, uint32_t stream_nr,
 
 struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
                        int shm_fd, int wakeup_fd, uint32_t stream_nr,
-                       size_t memory_map_size)
+                       size_t memory_map_size, bool populate)
 {
 {
+       int flags = MAP_SHARED;
        struct shm_object *obj;
        char *memory_map;
        int ret;
        struct shm_object *obj;
        char *memory_map;
        int ret;
@@ -328,9 +336,11 @@ struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
                goto error_fcntl;
        }
 
                goto error_fcntl;
        }
 
+       if (populate)
+               flags |= LTTNG_MAP_POPULATE;
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
-                         MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+                         flags, shm_fd, 0);
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
index 6e4f7f7b85221a9029a38514b03eeaf36944a801..944410d8845c2a2a2e637fde839121754c9386d1 100644 (file)
@@ -71,19 +71,19 @@ void _set_shmp(struct shm_ref *ref, struct shm_ref src)
 
 #define set_shmp(ref, src)     _set_shmp(&(ref)._ref, src)
 
 
 #define set_shmp(ref, src)     _set_shmp(&(ref)._ref, src)
 
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate)
        __attribute__((visibility("hidden")));
 
 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
                        size_t memory_map_size,
                        enum shm_object_type type,
                        const int stream_fd,
        __attribute__((visibility("hidden")));
 
 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
                        size_t memory_map_size,
                        enum shm_object_type type,
                        const int stream_fd,
-                       int cpu)
+                       int cpu, bool populate)
        __attribute__((visibility("hidden")));
 
 struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
                        int shm_fd, int wakeup_fd, uint32_t stream_nr,
        __attribute__((visibility("hidden")));
 
 struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
                        int shm_fd, int wakeup_fd, uint32_t stream_nr,
-                       size_t memory_map_size)
+                       size_t memory_map_size, bool populate)
        __attribute__((visibility("hidden")));
 
 /* mem ownership is passed to shm_object_table_append_mem(). */
        __attribute__((visibility("hidden")));
 
 /* mem ownership is passed to shm_object_table_append_mem(). */
index 36967ccc5df829f0160b1cd659544fa4fa65851f..10b9954a5880d29e6ce10a0dc7d4095fdfcd7e69 100644 (file)
@@ -167,7 +167,7 @@ int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const char *path)
 
                total_bytes_read += bytes_read;
                assert(total_bytes_read <= max_bytes);
 
                total_bytes_read += bytes_read;
                assert(total_bytes_read <= max_bytes);
-       } while (max_bytes > total_bytes_read && bytes_read > 0);
+       } while (max_bytes > total_bytes_read && bytes_read != 0);
 
        /*
         * Make sure the mask read is a null terminated string.
 
        /*
         * Make sure the mask read is a null terminated string.
index 4e6c4a5d2bd6e729bf1209d49f29a05a79ba99f5..85038231588d4a3207d265635f9dafa8256649b7 100644 (file)
@@ -25,5 +25,7 @@ void lttng_ust_unlock_fd_tracker(void);
 int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int));
 int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream));
 int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int));
 int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int));
 int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream));
 int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int));
+int lttng_ust_safe_close_range_fd(unsigned int first, unsigned int last, int flags,
+               int (*close_range_cb)(unsigned int, unsigned int, int));
 
 #endif /* _LTTNG_UST_FD_H */
 
 #endif /* _LTTNG_UST_FD_H */
index f5e6ed21448db3b0bf4003505664b07b6b333085..a39869ef90f39c460ddcfd5532ade69a3a19f994 100644 (file)
@@ -466,3 +466,62 @@ int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
 end:
        return ret;
 }
 end:
        return ret;
 }
+
+/*
+ * Implement helper for close_range() override.
+ */
+int lttng_ust_safe_close_range_fd(unsigned int first, unsigned int last, int flags,
+               int (*close_range_cb)(unsigned int first, unsigned int last, int flags))
+{
+       int ret = 0, i;
+
+       lttng_ust_fd_tracker_alloc_tls();
+
+       /*
+        * Ensure the tracker is initialized when called from
+        * constructors.
+        */
+       lttng_ust_fd_tracker_init();
+
+       if (first > last || last > INT_MAX) {
+               ret = -1;
+               errno = EINVAL;
+               goto end;
+       }
+       /*
+        * If called from lttng-ust, we directly call close_range
+        * without validating whether the FD is part of the tracked set.
+        */
+       if (URCU_TLS(ust_fd_mutex_nest)) {
+               if (close_range_cb(first, last, flags) < 0) {
+                       ret = -1;
+                       goto end;
+               }
+       } else {
+               int last_check = last;
+
+               if (last > lttng_ust_max_fd)
+                       last_check = lttng_ust_max_fd;
+               lttng_ust_lock_fd_tracker();
+               for (i = first; i <= last_check; i++) {
+                       if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set))
+                               continue;
+                       if (close_range_cb(i, i, flags) < 0) {
+                               ret = -1;
+                               /* propagate errno from close_range_cb. */
+                               lttng_ust_unlock_fd_tracker();
+                               goto end;
+                       }
+               }
+               if (last > lttng_ust_max_fd) {
+                       if (close_range_cb(lttng_ust_max_fd + 1, last, flags) < 0) {
+                               ret = -1;
+                               lttng_ust_unlock_fd_tracker();
+                               goto end;
+                       }
+               }
+               lttng_ust_unlock_fd_tracker();
+       }
+end:
+       return ret;
+}
index d2ebcfbc38be862b6effffdc490f7a7d0ce968cb..01decd1e0e2ef68444bb62dd452aabc92ee25548 100644 (file)
@@ -21,6 +21,8 @@
 
 static int (*__lttng_ust_fd_plibc_close)(int fd) = NULL;
 static int (*__lttng_ust_fd_plibc_fclose)(FILE *stream) = NULL;
 
 static int (*__lttng_ust_fd_plibc_close)(int fd) = NULL;
 static int (*__lttng_ust_fd_plibc_fclose)(FILE *stream) = NULL;
+static int (*__lttng_ust_fd_plibc_close_range)(unsigned int first,
+               unsigned int last, int flags) = NULL;
 
 /*
  * Use dlsym to find the original libc close() symbol and store it in
 
 /*
  * Use dlsym to find the original libc close() symbol and store it in
@@ -60,6 +62,24 @@ void *_lttng_ust_fd_init_plibc_fclose(void)
        return __lttng_ust_fd_plibc_fclose;
 }
 
        return __lttng_ust_fd_plibc_fclose;
 }
 
+/*
+ * Use dlsym to find the original libc close_range() symbol and store it
+ * in __lttng_ust_fd_plibc_close_range. The close_range symbol only
+ * appears in glibc 2.34, so it is considered optional.
+ */
+static
+void *_lttng_ust_fd_init_plibc_close_range(void)
+{
+       if (__lttng_ust_fd_plibc_close_range == NULL) {
+               __lttng_ust_fd_plibc_close_range = dlsym(RTLD_NEXT, "close_range");
+
+               if (__lttng_ust_fd_plibc_close_range == NULL)
+                       __lttng_ust_fd_plibc_close_range = (void *) LTTNG_UST_DLSYM_FAILED_PTR;
+       }
+
+       return __lttng_ust_fd_plibc_close_range;
+}
+
 static
 void _lttng_ust_fd_ctor(void)
        __attribute__((constructor));
 static
 void _lttng_ust_fd_ctor(void)
        __attribute__((constructor));
@@ -75,6 +95,7 @@ void _lttng_ust_fd_ctor(void)
         */
        (void) _lttng_ust_fd_init_plibc_close();
        (void) _lttng_ust_fd_init_plibc_fclose();
         */
        (void) _lttng_ust_fd_init_plibc_close();
        (void) _lttng_ust_fd_init_plibc_fclose();
+       (void) _lttng_ust_fd_init_plibc_close_range();
 }
 
 /*
 }
 
 /*
@@ -127,6 +148,33 @@ int fclose(FILE *stream)
                        __lttng_ust_fd_plibc_fclose);
 }
 
                        __lttng_ust_fd_plibc_fclose);
 }
 
+/* Old libc headers don't contain a close_range() declaration. */
+int close_range(unsigned int first, unsigned int last, int flags);
+
+/*
+ * Override the libc close_range() symbol with our own, allowing
+ * applications to close arbitrary file descriptors. If the fd is owned
+ * by lttng-ust, return -1, errno=EBADF instead of closing it.
+ *
+ * If dlsym failed to find the original libc close_range() symbol,
+ * return -1, errno=ENOSYS.
+ *
+ * There is a short window before the library constructor has executed where
+ * this wrapper could call dlsym() and thus not be async-signal-safe.
+ */
+int close_range(unsigned int first, unsigned int last, int flags)
+{
+       /*
+        * We can't retry dlsym here since close is async-signal-safe.
+        */
+       if (_lttng_ust_fd_init_plibc_close_range() == (void *) LTTNG_UST_DLSYM_FAILED_PTR) {
+               errno = ENOSYS;
+               return -1;
+       }
+
+       return lttng_ust_safe_close_range_fd(first, last, flags, __lttng_ust_fd_plibc_close_range);
+}
+
 #if defined(__sun__) || defined(__FreeBSD__)
 /* Solaris and FreeBSD. */
 void closefrom(int lowfd)
 #if defined(__sun__) || defined(__FreeBSD__)
 /* Solaris and FreeBSD. */
 void closefrom(int lowfd)
index 13c74c3dc9dabd518342a0c8ffefaa5665d4883a..8b78da254672781b2dfcb1f689376c162230b346 100644 (file)
@@ -34,12 +34,12 @@ int main(void)
        ok(shmfd > 0, "Open a POSIX shm fd");
 
        /* Create a dummy shm object table to test the allocation function */
        ok(shmfd > 0, "Open a POSIX shm fd");
 
        /* Create a dummy shm object table to test the allocation function */
-       table = shm_object_table_create(1);
+       table = shm_object_table_create(1, false);
        ok(table, "Create a shm object table");
        assert(table);
 
        /* This function sets the initial size of the shm with ftruncate and zeros it */
        ok(table, "Create a shm object table");
        assert(table);
 
        /* This function sets the initial size of the shm with ftruncate and zeros it */
-       shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1);
+       shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1, false);
        ok(shmobj, "Allocate the shm object table");
        assert(shmobj);
 
        ok(shmobj, "Allocate the shm object table");
        assert(shmobj);
 
This page took 0.056224 seconds and 4 git commands to generate.