AC_SUBST(JNI_CPPFLAGS)
+# Used in man pages
+AC_SUBST([LTTNG_UST_MAJOR_VERSION], ust_version_major)
+AC_SUBST([LTTNG_UST_MINOR_VERSION], ust_version_minor)
## ##
## Output files generated by configure ##
xmlto_verbose_out_0 = 2>/dev/null
# Tools to execute:
ADOC = $(asciidoc_verbose)$(ASCIIDOC) -f $(ASCIIDOC_CONF) -d manpage \
-a mansource="LTTng" \
-a manmanual="LTTng Manual" \
- -a manversion="$(PACKAGE_VERSION)"
+ -a manversion="$(PACKAGE_VERSION)" \
+ -a lttng_version="$(LTTNG_UST_MAJOR_VERSION).$(LTTNG_UST_MINOR_VERSION)"
ADOC_DOCBOOK = $(ADOC) -b docbook
XTO = $(xmlto_verbose)$(XMLTO) -m $(XSL_FILE) man
# Recipes:
%.1.xml: $(srcdir)/%.1.txt $(COMMON_DEPS)
[[example]]
EXAMPLE
-------
+
NOTE: A few examples are available in the
-https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples[`doc/examples`]
+https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples[`doc/examples`]
directory of LTTng-UST's source tree.
This example shows all the features documented in the previous
Path to the shared object which acts as the clock override plugin.
An example of such a plugin can be found in the LTTng-UST
documentation under
- https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
+ https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
`LTTNG_UST_DEBUG`::
If set, enable `liblttng-ust`'s debug and error output.
Path to the shared object which acts as the `getcpu()` override
plugin. An example of such a plugin can be found in the LTTng-UST
documentation under
- https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+ https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+
+`LTTNG_UST_MAP_POPULATE_POLICY`::
++
+--
+If set, override the policy used to populate shared memory pages
+within the application. The expected values are:
+
+`none`:::
+ Do not pre-populate any pages; take minor faults on first access
+ while tracing.
+
+`cpu_possible`:::
+ Pre-populate pages for all possible CPUs in the system, as
+ listed by `/sys/devices/system/cpu/possible`.
+--
++
+Default: `none`. An unknown policy value falls back to the default.
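
For the `cpu_possible` policy, the pre-population work scales with the
possible-CPU range exposed by sysfs. A minimal sketch of reading that
range (assuming the common single-range `0-N` format of the file; some
configurations use comma-separated lists):

----
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/possible", "r");
	int first = 0, last = 0, n;

	if (!f)
		return 1;
	n = fscanf(f, "%d-%d", &first, &last);
	fclose(f);
	if (n < 1)
		return 1;
	if (n == 1)
		last = first;	/* single-CPU form, e.g. "0" */
	printf("%d possible CPUs\n", last - first + 1);
	return 0;
}
----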
`LTTNG_UST_REGISTER_TIMEOUT`::
Waiting time for the _registration done_ session daemon command
logging.h \
smp.c \
smp.h \
+ populate.c \
+ populate.h \
strutils.c \
strutils.h \
utils.c \
#include "common/bitmap.h"
#include "common/smp.h"
+#include "common/populate.h"
#include "shm.h"
static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
if (counter->is_daemon) {
/* Allocate and clear shared memory. */
shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
- shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+ shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu,
+ lttng_ust_map_populate_cpu_is_enabled(cpu));
if (!shm_object)
return -ENOMEM;
} else {
/* Map pre-existing shared memory. */
shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
- shm_fd, shm_length);
+ shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu));
if (!shm_object)
return -ENOMEM;
}
int cpu, ret;
int nr_handles = 0;
int nr_cpus = get_possible_cpus_array_len();
+ bool populate = lttng_ust_map_populate_is_enabled();
if (validate_args(config, nr_dimensions, max_nr_elem,
global_sum_step, global_counter_fd, nr_counter_cpu_fds,
counter_cpu_fds))
return NULL;
- counter = zmalloc(sizeof(struct lib_counter));
+ counter = zmalloc_populate(sizeof(struct lib_counter), populate);
if (!counter)
return NULL;
counter->global_counters.shm_fd = -1;
if (lttng_counter_set_global_sum_step(counter, global_sum_step))
goto error_sum_step;
counter->nr_dimensions = nr_dimensions;
- counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+ counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate);
if (!counter->dimensions)
goto error_dimensions;
for (dimension = 0; dimension < nr_dimensions; dimension++)
counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
if (config->alloc & COUNTER_ALLOC_PER_CPU) {
- counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+ counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate);
if (!counter->percpu_counters)
goto error_alloc_percpu;
for_each_possible_cpu(cpu)
if (config->alloc & COUNTER_ALLOC_PER_CPU)
nr_handles += nr_cpus;
/* Allocate table for global and per-cpu counters. */
- counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+ counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate);
if (!counter->object_table)
goto error_alloc_object_table;
return ret;
}
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
{
struct lttng_counter_shm_object_table *table;
- table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
- max_nb_obj * sizeof(table->objects[0]));
+ table = zmalloc_populate(sizeof(struct lttng_counter_shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]), populate);
if (!table)
return NULL;
table->size = max_nb_obj;
static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
size_t memory_map_size,
- int cpu_fd)
+ int cpu_fd, bool populate)
{
- int shmfd, ret;
struct lttng_counter_shm_object *obj;
+ int flags = MAP_SHARED;
+ int shmfd, ret;
char *memory_map;
if (cpu_fd < 0)
obj->shm_fd_ownership = 0;
obj->shm_fd = shmfd;
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ flags, shmfd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
{
struct lttng_counter_shm_object *obj;
void *memory_map;
return NULL;
obj = &table->objects[table->allocated_len];
- memory_map = zmalloc(memory_map_size);
+ memory_map = zmalloc_populate(memory_map_size, populate);
if (!memory_map)
goto alloc_error;
size_t memory_map_size,
enum lttng_counter_shm_object_type type,
int cpu_fd,
- int cpu)
+ int cpu,
+ bool populate)
#else
struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
size_t memory_map_size,
enum lttng_counter_shm_object_type type,
int cpu_fd,
- int cpu __attribute__((unused)))
+ int cpu __attribute__((unused)),
+ bool populate)
#endif
{
struct lttng_counter_shm_object *shm_object;
switch (type) {
case LTTNG_COUNTER_SHM_OBJECT_SHM:
shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
- cpu_fd);
+ cpu_fd, populate);
break;
case LTTNG_COUNTER_SHM_OBJECT_MEM:
- shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
+ shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size,
+ populate);
break;
default:
assert(0);
}
struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd,
- size_t memory_map_size)
+ int shm_fd, size_t memory_map_size, bool populate)
{
struct lttng_counter_shm_object *obj;
+ int flags = MAP_SHARED;
char *memory_map;
if (table->allocated_len >= table->size)
obj->shm_fd = shm_fd;
obj->shm_fd_ownership = 1;
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ flags, shm_fd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
+#include <stdbool.h>
#include "common/logging.h"
#include <urcu/compiler.h>
#include "shm_types.h"
#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
__attribute__((visibility("hidden")));
struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
size_t memory_map_size,
enum lttng_counter_shm_object_type type,
const int cpu_fd,
- int cpu)
+ int cpu, bool populate)
__attribute__((visibility("hidden")));
struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd, size_t memory_map_size)
+ int shm_fd, size_t memory_map_size, bool populate)
__attribute__((visibility("hidden")));
/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
/* Env. var. which can be used in setuid/setgid executables. */
{ "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
{ "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
+ { "LTTNG_UST_MAP_POPULATE_POLICY", LTTNG_ENV_NOT_SECURE, NULL, },
/* Env. var. which are not fetched in setuid/setgid executables. */
{ "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
#define _UST_COMMON_MACROS_H
#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
#include <lttng/ust-arch.h>
+/*
+ * calloc() does not always populate the page table for the allocated
+ * memory. Optionally enforce page table population.
+ */
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+ __attribute__((always_inline));
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+{
+ if (populate) {
+ void *ret = malloc(len);
+ if (ret == NULL)
+ return ret;
+ memset(ret, 0, len);
+ return ret;
+ } else {
+ return calloc(len, 1);
+ }
+}
+
/*
* Memory allocation zeroed
*/
static inline
void *zmalloc(size_t len)
{
- return calloc(len, 1);
+ return zmalloc_populate(len, false);
}
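/*
 * Usage sketch for the helpers above: with populate=true, the memset()
 * store to every byte forces the kernel to back each page of the
 * allocation before the pointer is returned, e.g.:
 *
 *	void *buf = zmalloc_populate(16 * 4096, true);
 *	// all 16 pages are now present in the page table
 *
 * whereas populate=false keeps calloc()'s lazily-mapped zero pages and
 * defers minor faults to first access.
 */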
#define max_t(type, x, y) \
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "common/getenv.h"
+#include "common/logging.h"
+#include "common/populate.h"
+
+enum populate_policy {
+ POPULATE_UNSET,
+
+ POPULATE_NONE,
+ POPULATE_CPU_POSSIBLE,
+
+ POPULATE_UNKNOWN,
+};
+
+static enum populate_policy map_populate_policy = POPULATE_UNSET;
+
+static void init_map_populate_policy(void)
+{
+ const char *populate_env_str;
+
+ if (map_populate_policy != POPULATE_UNSET)
+ return;
+
+ populate_env_str = lttng_ust_getenv("LTTNG_UST_MAP_POPULATE_POLICY");
+ if (!populate_env_str) {
+ map_populate_policy = POPULATE_NONE;
+ return;
+ }
+ if (!strcmp(populate_env_str, "none")) {
+ map_populate_policy = POPULATE_NONE;
+ } else if (!strcmp(populate_env_str, "cpu_possible")) {
+ map_populate_policy = POPULATE_CPU_POSSIBLE;
+ } else {
+ /*
+ * populate_env_str is an untrusted environment variable
+ * input (can be provided to setuid/setgid binaries), so
+ * don't even try to print it.
+ */
+ WARN("Unknown policy for LTTNG_UST_MAP_POPULATE_POLICY environment variable.");
+ map_populate_policy = POPULATE_UNKNOWN;
+ }
+}
+
+/*
+ * Return the shared page populate policy for global pages. Returns true
+ * if shared memory pages should be pre-populated, false otherwise.
+ */
+bool lttng_ust_map_populate_is_enabled(void)
+{
+ init_map_populate_policy();
+
+ switch (map_populate_policy) {
+ case POPULATE_UNKNOWN: /* Fall-through */
+ case POPULATE_NONE:
+ return false;
+ case POPULATE_CPU_POSSIBLE:
+ return true;
+ default:
+ abort();
+ }
+ return false;
+}
+
+/*
+ * Return the shared page populate policy based on the @cpu number
+ * provided as input. Returns true if shared memory pages should be
+ * pre-populated, false otherwise.
+ *
+ * The @cpu argument is currently unused except for negative value
+ * validation. It is kept so that the policy can later take CPU
+ * affinity or CPU online masks into account if those features are
+ * added.
+ */
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+{
+ /* Reject invalid cpu number. */
+ if (cpu < 0)
+ return false;
+
+ return lttng_ust_map_populate_is_enabled();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_COMMON_POPULATE_H
+#define _UST_COMMON_POPULATE_H
+
+#include <stdbool.h>
+
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+ __attribute__((visibility("hidden")));
+
+bool lttng_ust_map_populate_is_enabled(void)
+ __attribute__((visibility("hidden")));
+
+#endif /* _UST_COMMON_POPULATE_H */
}
static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
+ uint64_t timestamp __attribute__((unused)),
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
* subbuffer. data_size is between 1 and subbuf_size.
*/
static void client_buffer_end(struct lttng_ust_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
+ uint64_t timestamp __attribute__((unused)),
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx)
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
- .tsc_bits = 0,
+ .timestamp_bits = 0,
.alloc = RING_BUFFER_ALLOC_GLOBAL,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
#include "common/clock.h"
#include "common/ringbuffer/frontend_types.h"
-#define LTTNG_COMPACT_EVENT_BITS 5
-#define LTTNG_COMPACT_TSC_BITS 27
+#define LTTNG_COMPACT_EVENT_BITS 5
+#define LTTNG_COMPACT_TIMESTAMP_BITS 27
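/*
 * Worked illustration: the 5-bit event id and the low 27 bits of the
 * timestamp share a single uint32_t (5 + 27 = 32). A sketch of the
 * packing that bt_bitfield_write() performs below, assuming the
 * little-endian bit allocation:
 *
 *	uint32_t id_time = (event_id & 0x1f)
 *		| (((uint32_t) timestamp & ((1U << 27) - 1)) << 5);
 */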
/*
* Keep the natural field alignment for _each field_ within this structure if
case 1: /* compact */
padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += padding;
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx->priv->tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS,
+ ctx->priv->timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
- uint32_t timestamp = (uint32_t) ctx->priv->tsc;
+ uint32_t timestamp = (uint32_t) ctx->priv->timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t,
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx_private->tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS,
+ ctx_private->timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint64_t timestamp = ctx_private->tsc;
+ uint64_t timestamp = ctx_private->timestamp;
bt_bitfield_write(&id, uint8_t,
0,
break;
case 2: /* large */
{
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx_private->tsc;
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx_private->timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
- uint64_t timestamp = ctx_private->tsc;
+ uint64_t timestamp = ctx_private->timestamp;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
return offsetof(struct packet_header, ctx.header_end);
}
-static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
header->stream_id = lttng_chan->priv->id;
header->stream_instance_id = buf->backend.cpu;
- header->ctx.timestamp_begin = tsc;
+ header->ctx.timestamp_begin = timestamp;
header->ctx.timestamp_end = 0;
header->ctx.content_size = ~0ULL; /* for debugging */
header->ctx.packet_size = ~0ULL;
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx)
assert(header);
if (!header)
return;
- header->ctx.timestamp_end = tsc;
+ header->ctx.timestamp_end = timestamp;
header->ctx.content_size =
(uint64_t) data_size * CHAR_BIT; /* in bits */
header->ctx.packet_size =
.cb.content_size_field = client_content_size_field,
.cb.packet_size_field = client_packet_size_field,
- .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+ .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
unsigned int buf_size_order; /* Order of buffer size */
unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
unsigned long num_subbuf; /* Number of sub-buffers for writer */
- uint64_t start_tsc; /* Channel creation TSC value */
+ uint64_t start_timestamp; /* Channel creation timestamp value */
DECLARE_SHMP(void *, priv_data);/* Client-specific information */
struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */
char name[NAME_MAX]; /* Channel name */
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx_private->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return 1;
/*
*/
//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
* @ctx: ring buffer context. (input and output) Must be already initialized.
*
* Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is
+ * "timestamp".
*
* Return :
* 0 on success.
goto slow_path;
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * record headers, never the opposite (missing a full TSC record header
- * when it would be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp record headers, never the opposite (missing a full
+ * timestamp record header when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
/*
* We need to ensure that if the cmpxchg succeeds and discards the
- * record, the next record will record a full TSC, because it cannot
- * rely on the last_tsc associated with the discarded record to detect
- * overflows. The only way to ensure this is to set the last_tsc to 0
- * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
+ * record, the next record will record a full timestamp, because it cannot
+ * rely on the last_timestamp associated with the discarded record to detect
+ * overflows. The only way to ensure this is to set the last_timestamp to 0
+ * (assuming no 64-bit timestamp overflow), which forces to write a 64-bit
* timestamp in the next record.
*
- * Note: if discard fails, we must leave the TSC in the record header.
- * It is needed to keep track of TSC overflows for the following
+ * Note: if discard fails, we must leave the timestamp in the record header.
+ * It is needed to keep track of timestamp overflows for the following
* records.
*/
- save_last_tsc(config, buf, 0ULL);
+ save_last_timestamp(config, buf, 0ULL);
if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
!= end_offset))
}
/*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
+ * Last timestamp comparison functions. Check if the current timestamp overflows
+ * timestamp_bits bits from the last timestamp read. When overflows are
+ * detected, the full 64-bit timestamp counter should be written in the record
+ * header. Reads and writes last_timestamp atomically.
*/
#if (CAA_BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
/*
* Ensure the compiler performs this update in a single instruction.
*/
- v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+ v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits));
}
static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- unsigned long tsc_shifted;
+ unsigned long timestamp_shifted;
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
- if (caa_unlikely(tsc_shifted
- - (unsigned long)v_read(config, &buf->last_tsc)))
+ timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits);
+ if (caa_unlikely(timestamp_shifted
+ - (unsigned long)v_read(config, &buf->last_timestamp)))
return 1;
else
return 0;
}
#else
static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
- v_set(config, &buf->last_tsc, (unsigned long)tsc);
+ v_set(config, &buf->last_timestamp, (unsigned long)timestamp);
}
static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
- >> config->tsc_bits))
+ if (caa_unlikely((timestamp - v_read(config, &buf->last_timestamp))
+ >> config->timestamp_bits))
return 1;
else
return 0;
}
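/*
 * Worked example of the overflow test above with timestamp_bits = 27
 * (the compact client): a delta of at least 2^27 between consecutive
 * records makes the shifted difference non-zero, forcing a full 64-bit
 * timestamp header.
 *
 *	last = 0x10000000, now = 0x18000000
 *	(now - last) >> 27 = 0x08000000 >> 27 = 1  -> full timestamp
 *	last = 0x10000000, now = 0x10000100
 *	(now - last) >> 27 = 0x00000100 >> 27 = 0  -> compact header
 */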
/*
- * Receive end of subbuffer TSC as parameter. It has been read in the
+ * Receive end of subbuffer timestamp as parameter. It has been read in the
* space reservation loop of either reserve or switch, which ensures it
* progresses monotonically with event records in the buffer. Therefore,
* it ensures that the end timestamp of a subbuffer is <= begin
int record_disabled;
/* End of cache-hot 32 bytes cacheline */
- union v_atomic last_tsc; /*
+ union v_atomic last_timestamp; /*
* Last timestamp written in the buffer.
*/
* prior to record header alignment
* padding.
*/
- uint64_t tsc; /* time-stamp counter value */
+ uint64_t timestamp; /* time-stamp counter value */
unsigned int rflags; /* reservation flags */
struct lttng_ust_ring_buffer *buf; /*
* buffer corresponding to processor id
#include "common/smp.h"
#include "shm.h"
#include "common/align.h"
+#include "common/populate.h"
/**
* lib_ring_buffer_backend_allocate - allocate a channel buffer
* num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
* priv, notifiers, config, cpumask and name.
*/
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
}
/**
struct shm_object *shmobj;
shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM, stream_fds[i], i);
+ SHM_OBJECT_SHM, stream_fds[i], i,
+ lttng_ust_map_populate_cpu_is_enabled(i));
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
struct lttng_ust_ring_buffer *buf;
shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM, stream_fds[0], -1);
+ SHM_OBJECT_SHM, stream_fds[0], -1,
+ lttng_ust_map_populate_is_enabled());
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
if (ret)
goto free_bufs;
}
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
return 0;
#include "shm.h"
#include "rb-init.h"
#include "common/compat/errno.h" /* For ENODATA */
+#include "common/populate.h"
/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST (1UL << 20)
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
- v_set(config, &buf->last_tsc, 0);
+ v_set(config, &buf->last_timestamp, 0);
lib_ring_buffer_backend_reset(&buf->backend, handle);
/* Don't reset number of active readers */
v_set(config, &buf->records_lost_full, 0);
struct commit_counters_hot *cc_hot;
void *priv = channel_get_private_config(chan);
size_t subbuf_header_size;
- uint64_t tsc;
+ uint64_t timestamp;
int ret;
/* Test for cpu hotplug */
ret = -EPERM;
goto free_chanbuf;
}
- tsc = config->cb.ring_buffer_clock_read(shmp_chan);
- config->cb.buffer_begin(buf, tsc, 0, handle);
+ timestamp = config->cb.ring_buffer_clock_read(shmp_chan);
+ config->cb.buffer_begin(buf, timestamp, 0, handle);
cc_hot = shmp_index(handle, buf->commit_hot, 0);
if (!cc_hot) {
ret = -EPERM;
struct shm_object *shmobj;
unsigned int nr_streams;
int64_t blocking_timeout_ms;
+ bool populate = lttng_ust_map_populate_is_enabled();
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
nr_streams = get_possible_cpus_array_len();
read_timer_interval))
return NULL;
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
if (!handle)
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
if (!handle->table)
goto error_table_alloc;
/* Allocate normal memory for channel (not shared) */
shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
- -1, -1);
+ -1, -1, populate);
if (!shmobj)
goto error_append;
/* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
{
struct lttng_ust_shm_handle *handle;
struct shm_object *object;
+ bool populate = lttng_ust_map_populate_is_enabled();
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
if (!handle)
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
if (!handle->table)
goto error_table_alloc;
/* Add channel object */
/* Add stream object */
object = shm_object_table_append_shm(handle->table,
shm_fd, wakeup_fd, stream_nr,
- memory_map_size);
+ memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr));
if (!object)
return -EINVAL;
return 0;
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv->tsc;
+ *ts_end = ctx->priv->timestamp;
/*
* Order all writes to buffer and store to ts_end before the commit
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv->tsc;
+ *ts_end = ctx->priv->timestamp;
}
/*
offsets->switch_old_start = 0;
off = subbuf_offset(offsets->begin, chan);
- ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan);
+ ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan);
/*
* Ensure we flush the header of an empty subbuffer when doing the
!= offsets.old);
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, ctx.priv->tsc);
+ save_last_timestamp(config, buf, ctx.priv->timestamp);
/*
* Push the reader if necessary
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
!= offsets.old));
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle);
- void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx);
enum lttng_ust_ring_buffer_ipi_types ipi;
enum lttng_ust_ring_buffer_wakeup_types wakeup;
/*
- * tsc_bits: timestamp bits saved at each record.
+ * timestamp_bits: timestamp bits saved at each record.
* 0 and 64 disable the timestamp compression scheme.
*/
- unsigned int tsc_bits;
+ unsigned int timestamp_bits;
struct lttng_ust_ring_buffer_client_cb cb;
/*
* client_type is used by the consumer process (which is in a
/*
* Reservation flags.
*
- * RING_BUFFER_RFLAG_FULL_TSC
+ * RING_BUFFER_RFLAG_FULL_TIMESTAMP
*
* This flag is passed to record_header_size() and to the primitive used to
* write the record header. It indicates that the full 64-bit time value is
* needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
+ * only to contain "timestamp_bits" bits of time value.
*
* Reservation flags can be added by the client, starting from
* "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
* record_header_size() to lib_ring_buffer_write_record_header().
*/
-#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
+#define RING_BUFFER_RFLAG_FULL_TIMESTAMP (1U << 0)
#define RING_BUFFER_RFLAG_END (1U << 1)
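/*
 * Client-side sketch (assuming a client that mirrors the compact
 * layout documented above) of how the flag steers header sizing:
 *
 *	if (rflags & RING_BUFFER_RFLAG_FULL_TIMESTAMP)
 *		len += sizeof(uint64_t);	// full 64-bit timestamp
 *	else
 *		len += sizeof(uint32_t);	// id + timestamp_bits bits
 */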
/*
return ret;
}
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate)
{
struct shm_object_table *table;
- table = zmalloc(sizeof(struct shm_object_table) +
- max_nb_obj * sizeof(table->objects[0]));
+ table = zmalloc_populate(sizeof(struct shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]), populate);
if (!table)
return NULL;
table->size = max_nb_obj;
static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
size_t memory_map_size,
- int stream_fd)
+ int stream_fd,
+ bool populate)
{
int shmfd, waitfd[2], ret, i;
+ int flags = MAP_SHARED;
struct shm_object *obj;
char *memory_map;
obj->shm_fd_ownership = 0;
obj->shm_fd = shmfd;
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ flags, shmfd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
static
struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
{
struct shm_object *obj;
void *memory_map;
return NULL;
obj = &table->objects[table->allocated_len];
- memory_map = zmalloc(memory_map_size);
+ memory_map = zmalloc_populate(memory_map_size, populate);
if (!memory_map)
goto alloc_error;
size_t memory_map_size,
enum shm_object_type type,
int stream_fd,
- int cpu)
+ int cpu,
+ bool populate)
#else
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
size_t memory_map_size,
enum shm_object_type type,
int stream_fd,
- int cpu __attribute__((unused)))
+ int cpu __attribute__((unused)),
+ bool populate)
#endif
{
struct shm_object *shm_object;
switch (type) {
case SHM_OBJECT_SHM:
shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
- stream_fd);
+ stream_fd, populate);
break;
case SHM_OBJECT_MEM:
- shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
+ shm_object = _shm_object_table_alloc_mem(table, memory_map_size,
+ populate);
break;
default:
assert(0);
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
int shm_fd, int wakeup_fd, uint32_t stream_nr,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
{
+ int flags = MAP_SHARED;
struct shm_object *obj;
char *memory_map;
int ret;
goto error_fcntl;
}
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ flags, shm_fd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
#define set_shmp(ref, src) _set_shmp(&(ref)._ref, src)
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate)
__attribute__((visibility("hidden")));
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
size_t memory_map_size,
enum shm_object_type type,
const int stream_fd,
- int cpu)
+ int cpu, bool populate)
__attribute__((visibility("hidden")));
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
int shm_fd, int wakeup_fd, uint32_t stream_nr,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
__attribute__((visibility("hidden")));
/* mem ownership is passed to shm_object_table_append_mem(). */
total_bytes_read += bytes_read;
assert(total_bytes_read <= max_bytes);
- } while (max_bytes > total_bytes_read && bytes_read > 0);
+ } while (max_bytes > total_bytes_read && bytes_read != 0);
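/*
 * Why "!= 0": in a do/while loop, `continue` jumps straight to the
 * condition check, so a read() interrupted by a signal re-evaluates the
 * condition with bytes_read == -1. Testing `bytes_read != 0` keeps
 * looping in that case, whereas `bytes_read > 0` would end the read
 * early on EINTR. A minimal sketch of the assumed loop shape:
 *
 *	do {
 *		bytes_read = read(fd, buf + total_bytes_read,
 *				max_bytes - total_bytes_read);
 *		if (bytes_read < 0) {
 *			if (errno == EINTR)
 *				continue;	// re-checks the condition
 *			goto end;
 *		}
 *		total_bytes_read += bytes_read;
 *	} while (max_bytes > total_bytes_read && bytes_read != 0);
 */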
/*
* Make sure the mask read is a null terminated string.
ok(shmfd > 0, "Open a POSIX shm fd");
/* Create a dummy shm object table to test the allocation function */
- table = shm_object_table_create(1);
+ table = shm_object_table_create(1, false);
ok(table, "Create a shm object table");
assert(table);
/* This function sets the initial size of the shm with ftruncate and zeros it */
- shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1);
+ shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1, false);
ok(shmobj, "Allocate the shm object table");
assert(shmobj);