# Library version information of "liblttng-ust-ctl"
# Following the numbering scheme proposed by libtool for the library version
# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
-m4_define([ust_ctl_lib_version_current], [5])
+m4_define([ust_ctl_lib_version_current], [6])
m4_define([ust_ctl_lib_version_revision], [0])
m4_define([ust_ctl_lib_version_age], [0])
m4_define([ust_ctl_lib_version], ust_ctl_lib_version_current[:]ust_ctl_lib_version_revision[:]ust_ctl_lib_version_age)
AC_SUBST(JNI_CPPFLAGS)
+# Used in man pages
+AC_SUBST([LTTNG_UST_MAJOR_VERSION], ust_version_major)
+AC_SUBST([LTTNG_UST_MINOR_VERSION], ust_version_minor)
## ##
## Output files generated by configure ##
CFLAGS='$(CFLAGS)' \
AM_CFLAGS='$(AM_CFLAGS)' \
LDFLAGS="$(LDFLAGS)" \
- AM_LDFLAGS='$(AM_LDFLAGS) -L../../../src/lib/lttng-ust/.libs -Wl,-rpath="$(PWD)/../../src/lib/lttng-ust/.libs/" -Wl,-rpath-link="$(PWD)/../../src/lib/lttng-ust/.libs/"' \
+ AM_LDFLAGS='$(AM_LDFLAGS) -L../../../src/lib/lttng-ust/.libs -L../../../src/lib/lttng-ust-common/.libs -L../../../src/lib/lttng-ust-tracepoint/.libs \
+ -Wl,-rpath="$(abs_top_builddir)/src/lib/lttng-ust/.libs/" \
+ -Wl,-rpath-link="$(abs_top_builddir)/src/lib/lttng-ust-common/.libs/" \
+ -Wl,-rpath-link="$(abs_top_builddir)/src/lib/lttng-ust-tracepoint/.libs/"' \
LTTNG_GEN_TP_PATH="$$rel_src_subdir$(top_srcdir)/tools/" \
AM_V_P="$(AM_V_P)" \
AM_V_at="$(AM_V_at)" \
CXX="$(CXX)" \
$(CMAKE) \
-DCMAKE_INCLUDE_PATH="$(abs_top_srcdir)/include;$(abs_top_builddir)/include" \
- -DCMAKE_LIBRARY_PATH="$(abs_top_builddir)/src/lib/lttng-ust/.libs" \
+ -DCMAKE_LIBRARY_PATH="$(abs_top_builddir)/src/lib/lttng-ust/.libs;$(abs_top_builddir)/src/lib/lttng-ust-common/.libs;$(abs_top_builddir)/src/lib/lttng-ust-tracepoint/.libs" \
-DCMAKE_C_FLAGS="$(AM_CFLAGS) $(CPPFLAGS) $(CFLAGS)" \
-DCMAKE_CXX_FLAGS="$(AM_CXXFLAGS) $(CXXFLAGS) $(CPPFLAGS)" \
- -DCMAKE_EXE_LINKER_FLAGS="$(AM_LDFLAGS) $(LDFLAGS)" \
+ -DCMAKE_EXE_LINKER_FLAGS="$(AM_LDFLAGS) $(LDFLAGS) \
+ -L../../../src/lib/lttng-ust/.libs -L../../../src/lib/lttng-ust-common/.libs -L../../../src/lib/lttng-ust-tracepoint/.libs \
+ -Wl,-rpath=$(abs_top_builddir)/src/lib/lttng-ust/.libs/ \
+ -Wl,-rpath-link=$(abs_top_builddir)/src/lib/lttng-ust-common/.libs/ \
+ -Wl,-rpath-link=$(abs_top_builddir)/src/lib/lttng-ust-tracepoint/.libs/" \
.. && \
$(MAKE) \
) || exit 1; \
xmlto_verbose_out_0 = 2>/dev/null
# Tools to execute:
-ADOC = $(asciidoc_verbose)$(ASCIIDOC) -f $(ASCIIDOC_CONF) -d manpage \
+ADOC = $(asciidoc_verbose)$(ASCIIDOC) -v -f $(ASCIIDOC_CONF) -d manpage \
-a mansource="LTTng" \
-a manmanual="LTTng Manual" \
- -a manversion="$(PACKAGE_VERSION)"
+ -a manversion="$(PACKAGE_VERSION)" \
+ -a lttng_version="$(LTTNG_UST_MAJOR_VERSION).$(LTTNG_UST_MINOR_VERSION)"
ADOC_DOCBOOK = $(ADOC) -b docbook
-XTO = $(xmlto_verbose)$(XMLTO) -m $(XSL_FILE) man
+XTO = $(xmlto_verbose)$(XMLTO) -v -m $(XSL_FILE) man
# Recipes:
%.1.xml: $(srcdir)/%.1.txt $(COMMON_DEPS)
[[example]]
EXAMPLE
-------
+
NOTE: A few examples are available in the
-https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples[`doc/examples`]
+https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples[`doc/examples`]
directory of LTTng-UST's source tree.
This example shows all the features documented in the previous
Path to the shared object which acts as the clock override plugin.
An example of such a plugin can be found in the LTTng-UST
documentation under
- https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
+ https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
`LTTNG_UST_DEBUG`::
If set, enable `liblttng-ust`'s debug and error output.
Path to the shared object which acts as the `getcpu()` override
plugin. An example of such a plugin can be found in the LTTng-UST
documentation under
- https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+ https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+
+`LTTNG_UST_MAP_POPULATE_POLICY`::
++
+--
+If set, override the policy used to populate shared memory pages
+within the application. The expected values are:
+
+`none`:::
+ Do not pre-populate any pages, take minor faults on first access
+ while tracing.
+
+`cpu_possible`:::
+ Pre-populate pages for all possible CPUs in the system, as
+ listed by `/sys/devices/system/cpu/possible`.
+--
++
+Default: `none`. If the policy is unknown, use the default.
`LTTNG_UST_REGISTER_TIMEOUT`::
Waiting time for the _registration done_ session daemon command
lttng/ust-common.h \
lttng/ust-ctl.h \
lttng/ust-abi.h \
+ lttng/ust-abi-old.h \
lttng/ust-tracer.h \
lttng/ust-compiler.h \
lttng/ust-fork.h \
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng-UST old ABI header, kept for backward compatibility
+ */
+
+#ifndef _LTTNG_UST_ABI_OLD_H
+#define _LTTNG_UST_ABI_OLD_H
+
+#include <stdint.h>
+#include <lttng/ust-abi.h>
+
+#define LTTNG_UST_ABI_OLD_SYM_NAME_LEN 256
+#define LTTNG_UST_ABI_OLD_COUNTER_DIMENSION_MAX 4
+
+struct lttng_ust_abi_old_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+} __attribute__((packed));
+
+#define LTTNG_UST_ABI_OLD_COUNTER_CONF_PADDING1 67
+struct lttng_ust_abi_old_counter_conf {
+ uint32_t arithmetic; /* enum lttng_ust_abi_counter_arithmetic */
+ uint32_t bitness; /* enum lttng_ust_abi_counter_bitness */
+ uint32_t number_dimensions;
+ int64_t global_sum_step;
+ struct lttng_ust_abi_old_counter_dimension dimensions[LTTNG_UST_ABI_OLD_COUNTER_DIMENSION_MAX];
+ uint8_t coalesce_hits;
+ char padding[LTTNG_UST_ABI_OLD_COUNTER_CONF_PADDING1];
+} __attribute__((packed));
+
+#define LTTNG_UST_ABI_OLD_COUNTER_PADDING1 (LTTNG_UST_ABI_OLD_SYM_NAME_LEN + 32)
+#define LTTNG_UST_ABI_OLD_COUNTER_DATA_MAX_LEN 4096U
+struct lttng_ust_abi_old_counter {
+ uint64_t len;
+ char padding[LTTNG_UST_ABI_OLD_COUNTER_PADDING1];
+ char data[]; /* variable sized data */
+} __attribute__((packed));
+
+#define LTTNG_UST_ABI_OLD_COUNTER_GLOBAL_PADDING1 (LTTNG_UST_ABI_OLD_SYM_NAME_LEN + 32)
+struct lttng_ust_abi_old_counter_global {
+ uint64_t len; /* shm len */
+ char padding[LTTNG_UST_ABI_OLD_COUNTER_GLOBAL_PADDING1];
+} __attribute__((packed));
+
+#define LTTNG_UST_ABI_OLD_COUNTER_CPU_PADDING1 (LTTNG_UST_ABI_OLD_SYM_NAME_LEN + 32)
+struct lttng_ust_abi_old_counter_cpu {
+ uint64_t len; /* shm len */
+ uint32_t cpu_nr;
+ char padding[LTTNG_UST_ABI_OLD_COUNTER_CPU_PADDING1];
+} __attribute__((packed));
+
+/* Event notifier group commands */
+#define LTTNG_UST_ABI_OLD_COUNTER \
+ LTTNG_UST_ABI_CMDW(0xC0, struct lttng_ust_abi_old_counter)
+
+/* Counter commands */
+#define LTTNG_UST_ABI_OLD_COUNTER_GLOBAL \
+ LTTNG_UST_ABI_CMDW(0xD0, struct lttng_ust_abi_old_counter_global)
+#define LTTNG_UST_ABI_OLD_COUNTER_CPU \
+ LTTNG_UST_ABI_CMDW(0xD1, struct lttng_ust_abi_old_counter_cpu)
+
+#endif /* _LTTNG_UST_ABI_OLD_H */
#define LTTNG_UST_ABI_MAJOR_VERSION_OLDEST_COMPATIBLE 8
#define LTTNG_UST_ABI_MINOR_VERSION 0
+#define LTTNG_UST_ABI_CMD_MAX_LEN 4096U
+
enum lttng_ust_abi_instrumentation {
LTTNG_UST_ABI_TRACEPOINT = 0,
LTTNG_UST_ABI_PROBE = 1,
*/
} __attribute__((packed));
-#define LTTNG_UST_ABI_COUNTER_DIMENSION_MAX 4
-
-enum lttng_ust_abi_counter_arithmetic {
- LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR = 0,
- LTTNG_UST_ABI_COUNTER_ARITHMETIC_SATURATION = 1,
-};
-
-enum lttng_ust_abi_counter_bitness {
- LTTNG_UST_ABI_COUNTER_BITNESS_32 = 0,
- LTTNG_UST_ABI_COUNTER_BITNESS_64 = 1,
-};
-
-struct lttng_ust_abi_counter_dimension {
- uint64_t size;
- uint64_t underflow_index;
- uint64_t overflow_index;
- uint8_t has_underflow;
- uint8_t has_overflow;
-} __attribute__((packed));
-
-#define LTTNG_UST_ABI_COUNTER_CONF_PADDING1 67
-struct lttng_ust_abi_counter_conf {
- uint32_t arithmetic; /* enum lttng_ust_abi_counter_arithmetic */
- uint32_t bitness; /* enum lttng_ust_abi_counter_bitness */
- uint32_t number_dimensions;
- int64_t global_sum_step;
- struct lttng_ust_abi_counter_dimension dimensions[LTTNG_UST_ABI_COUNTER_DIMENSION_MAX];
- uint8_t coalesce_hits;
- char padding[LTTNG_UST_ABI_COUNTER_CONF_PADDING1];
-} __attribute__((packed));
-
-struct lttng_ust_abi_counter_value {
- uint32_t number_dimensions;
- uint64_t dimension_indexes[LTTNG_UST_ABI_COUNTER_DIMENSION_MAX];
- int64_t value;
-} __attribute__((packed));
-
#define LTTNG_UST_ABI_EVENT_PADDING1 8
#define LTTNG_UST_ABI_EVENT_PADDING2 (LTTNG_UST_ABI_SYM_NAME_LEN + 32)
struct lttng_ust_abi_event {
char padding[LTTNG_UST_ABI_EVENT_NOTIFIER_NOTIFICATION_PADDING];
} __attribute__((packed));
-#define LTTNG_UST_ABI_COUNTER_PADDING1 (LTTNG_UST_ABI_SYM_NAME_LEN + 32)
-#define LTTNG_UST_ABI_COUNTER_DATA_MAX_LEN 4096U
-struct lttng_ust_abi_counter {
- uint64_t len;
- char padding[LTTNG_UST_ABI_COUNTER_PADDING1];
- char data[]; /* variable sized data */
+enum lttng_ust_abi_key_token_type {
+ LTTNG_UST_ABI_KEY_TOKEN_STRING = 0, /* arg: strtab_offset. */
+ LTTNG_UST_ABI_KEY_TOKEN_EVENT_NAME = 1, /* no arg. */
+ LTTNG_UST_ABI_KEY_TOKEN_PROVIDER_NAME = 2, /* no arg. */
+};
+
+enum lttng_ust_abi_counter_arithmetic {
+ LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR = 0,
+ LTTNG_UST_ABI_COUNTER_ARITHMETIC_SATURATION = 1,
+};
+
+enum lttng_ust_abi_counter_bitness {
+ LTTNG_UST_ABI_COUNTER_BITNESS_32 = 0,
+ LTTNG_UST_ABI_COUNTER_BITNESS_64 = 1,
+};
+
+struct lttng_ust_abi_key_token {
+ uint32_t len; /* length of child structure. */
+ uint32_t type; /* enum lttng_ust_abi_key_token_type */
+ /*
+ * The size of this structure is fixed because it is embedded into
+ * children structures.
+ */
+} __attribute__((packed));
+
+/* Length of this structure excludes the following string. */
+struct lttng_ust_abi_key_token_string {
+ struct lttng_ust_abi_key_token parent;
+ uint32_t string_len; /* string length (includes \0) */
+
+ char str[]; /* Null-terminated string following this structure. */
+} __attribute__((packed));
+
+/*
+ * token types event_name and provider_name don't have specific fields,
+ * so they do not need to derive their own specific child structure.
+ */
+
+/*
+ * Dimension indexing: All events should use the same key type to index
+ * a given map dimension.
+ */
+enum lttng_ust_abi_key_type {
+ LTTNG_UST_ABI_KEY_TYPE_TOKENS = 0, /* Dimension key is a set of tokens. */
+ LTTNG_UST_ABI_KEY_TYPE_INTEGER = 1, /* Dimension key is an integer value. */
+};
+
+struct lttng_ust_abi_counter_key_dimension {
+ uint32_t len; /* length of child structure */
+ uint32_t key_type; /* enum lttng_ust_abi_key_type */
+ /*
+ * The size of this structure is fixed because it is embedded into
+ * children structures.
+ */
+} __attribute__((packed));
+
+struct lttng_ust_abi_counter_key_dimension_tokens {
+ struct lttng_ust_abi_counter_key_dimension parent;
+ uint32_t nr_key_tokens;
+
+ /* Followed by an array of nr_key_tokens struct lttng_ust_abi_key_token elements. */
+} __attribute__((packed));
+
+/*
+ * The "integer" key type is not implemented yet, but when it will be
+ * introduced in the future, its specific key dimension will allow
+ * defining the function to apply over input argument, bytecode to run
+ * and so on.
+ */
+
+enum lttng_ust_abi_counter_action {
+ LTTNG_UST_ABI_COUNTER_ACTION_INCREMENT = 0,
+
+ /*
+ * Can be extended with additional actions, such as decrement,
+ * set value, run bytecode, and so on.
+ */
+};
+
+struct lttng_ust_abi_counter_event {
+ uint32_t len; /* length of this structure */
+ uint32_t action; /* enum lttng_ust_abi_counter_action */
+
+ struct lttng_ust_abi_event event;
+ uint32_t number_key_dimensions; /* array of dimensions is an array of var. len. elements. */
+
+ /*
+ * Followed by additional data specific to the action, and by a
+ * variable-length array of key dimensions.
+ */
+} __attribute__((packed));
+
+enum lttng_ust_abi_counter_dimension_flags {
+ LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_UNDERFLOW = (1 << 0),
+ LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_OVERFLOW = (1 << 1),
+};
+
+struct lttng_ust_abi_counter_dimension {
+ uint32_t key_type; /* enum lttng_ust_abi_key_type */
+ uint32_t flags; /* enum lttng_ust_abi_counter_dimension_flags */
+ uint64_t size; /* dimension size (count of entries) */
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+} __attribute__((packed));
+
+enum lttng_ust_abi_counter_conf_flags {
+ LTTNG_UST_ABI_COUNTER_CONF_FLAG_COALESCE_HITS = (1 << 0),
+};
+
+struct lttng_ust_abi_counter_conf {
+ uint32_t len; /* Length of fields before var. len. data. */
+ uint32_t flags; /* enum lttng_ust_abi_counter_conf_flags */
+ uint32_t arithmetic; /* enum lttng_ust_abi_counter_arithmetic */
+ uint32_t bitness; /* enum lttng_ust_abi_counter_bitness */
+ int64_t global_sum_step;
+ uint32_t number_dimensions;
+ uint32_t elem_len; /* array stride (size of lttng_ust_abi_counter_dimension) */
} __attribute__((packed));
-#define LTTNG_UST_ABI_COUNTER_GLOBAL_PADDING1 (LTTNG_UST_ABI_SYM_NAME_LEN + 32)
struct lttng_ust_abi_counter_global {
- uint64_t len; /* shm len */
- char padding[LTTNG_UST_ABI_COUNTER_GLOBAL_PADDING1];
+ uint32_t len; /* Length of this structure */
+ uint64_t shm_len; /* shm len */
} __attribute__((packed));
-#define LTTNG_UST_ABI_COUNTER_CPU_PADDING1 (LTTNG_UST_ABI_SYM_NAME_LEN + 32)
struct lttng_ust_abi_counter_cpu {
- uint64_t len; /* shm len */
+ uint32_t len; /* Length of this structure */
+ uint64_t shm_len; /* shm len */
uint32_t cpu_nr;
- char padding[LTTNG_UST_ABI_COUNTER_CPU_PADDING1];
} __attribute__((packed));
enum lttng_ust_abi_field_type {
LTTNG_UST_ABI_OBJECT_TYPE_COUNTER = 6,
LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_GLOBAL = 7,
LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_CPU = 8,
+ LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_EVENT = 9,
};
#define LTTNG_UST_ABI_OBJECT_DATA_PADDING1 32
char names[LTTNG_UST_ABI_SYM_NAME_LEN][0];
} __attribute__((packed));
-#define LTTNG_UST_ABI_CMD(minor) (minor)
-#define LTTNG_UST_ABI_CMDR(minor, type) (minor)
-#define LTTNG_UST_ABI_CMDW(minor, type) (minor)
+#define LTTNG_UST_ABI_CMD(minor) (minor)
+#define LTTNG_UST_ABI_CMDR(minor, type) (minor)
+#define LTTNG_UST_ABI_CMDW(minor, type) (minor)
+#define LTTNG_UST_ABI_CMDV(minor, var_len_cmd_type) (minor)
/* Handled by object descriptor */
#define LTTNG_UST_ABI_RELEASE LTTNG_UST_ABI_CMD(0x1)
/* Event notifier group commands */
#define LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE \
- LTTNG_UST_ABI_CMDW(0xB0, struct lttng_ust_abi_event_notifier)
+ LTTNG_UST_ABI_CMDV(0xB0, struct lttng_ust_abi_event_notifier)
/* Event notifier commands */
#define LTTNG_UST_ABI_CAPTURE LTTNG_UST_ABI_CMD(0xB6)
/* Session and event notifier group commands */
+/* (0xC0) reserved for old ABI. */
#define LTTNG_UST_ABI_COUNTER \
- LTTNG_UST_ABI_CMDW(0xC0, struct lttng_ust_abi_counter)
+ LTTNG_UST_ABI_CMDV(0xC1, struct lttng_ust_abi_counter_conf)
/* Counter commands */
+/* (0xD0, 0xD1) reserved for old ABI. */
#define LTTNG_UST_ABI_COUNTER_GLOBAL \
- LTTNG_UST_ABI_CMDW(0xD0, struct lttng_ust_abi_counter_global)
+ LTTNG_UST_ABI_CMDV(0xD2, struct lttng_ust_abi_counter_global)
#define LTTNG_UST_ABI_COUNTER_CPU \
- LTTNG_UST_ABI_CMDW(0xD1, struct lttng_ust_abi_counter_cpu)
+ LTTNG_UST_ABI_CMDV(0xD3, struct lttng_ust_abi_counter_cpu)
+#define LTTNG_UST_ABI_COUNTER_EVENT \
+ LTTNG_UST_ABI_CMDV(0xD4, struct lttng_ust_abi_counter_event)
#define LTTNG_UST_ABI_ROOT_HANDLE 0
LTTNG_UST_CTL_NOTIFY_CMD_EVENT = 0,
LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL = 1,
LTTNG_UST_CTL_NOTIFY_CMD_ENUM = 2,
+ LTTNG_UST_CTL_NOTIFY_CMD_KEY = 3,
};
enum lttng_ust_ctl_channel_header {
*/
size_t *nr_fields,
struct lttng_ust_ctl_field **fields,
- char **model_emf_uri);
+ char **model_emf_uri,
+ uint64_t *user_token);
/*
* Returns 0 on success, negative error value on error.
*/
int lttng_ust_ctl_reply_register_event(int sock,
- uint32_t id, /* event id (input) */
+ uint32_t id, /* id (input) */
+ int ret_code); /* return code. 0 ok, negative error */
+
+/*
+ * Returns 0 on success, negative UST or system error value on error.
+ */
+int lttng_ust_ctl_recv_register_key(int sock,
+ int *session_objd, /* session descriptor (output) */
+ int *map_objd, /* map descriptor (output) */
+ uint32_t *dimension, /*
+ * Against which dimension is
+ * this key expressed. (output)
+ */
+ uint64_t **dimension_indexes, /*
+ * Indexes (output,
+ * dynamically
+ * allocated, must be
+ * free(3)'d by the
+ * caller if function
+ * returns success.)
+ * Contains @dimension
+ * elements.
+ */
+ char **key_string, /*
+ * key string (output,
+ * dynamically allocated, must
+ * be free(3)'d by the caller if
+ * function returns success.)
+ */
+ uint64_t *user_token);
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int lttng_ust_ctl_reply_register_key(int sock,
+ uint64_t index, /* Index within dimension (input) */
int ret_code); /* return code. 0 ok, negative error */
/*
LTTNG_UST_CTL_COUNTER_ARITHMETIC_SATURATION = 1,
};
+enum lttng_ust_ctl_key_type {
+ LTTNG_UST_CTL_KEY_TYPE_TOKENS = 0,
+ LTTNG_UST_CTL_KEY_TYPE_INTEGER = 1,
+};
+
/* Used as alloc flags. */
enum lttng_ust_ctl_counter_alloc {
LTTNG_UST_CTL_COUNTER_ALLOC_PER_CPU = (1 << 0),
uint64_t size;
uint64_t underflow_index;
uint64_t overflow_index;
+ enum lttng_ust_ctl_key_type key_type;
uint8_t has_underflow;
uint8_t has_overflow;
};
int lttng_ust_ctl_counter_clear(struct lttng_ust_ctl_daemon_counter *counter,
const size_t *dimension_indexes);
+int lttng_ust_ctl_counter_create_event(int sock,
+ struct lttng_ust_abi_counter_event *counter_event,
+ size_t counter_event_len,
+ struct lttng_ust_abi_object_data *counter_data,
+ struct lttng_ust_abi_object_data **counter_event_data);
+
void lttng_ust_ctl_sigbus_handle(void *addr);
int lttng_ust_ctl_get_version(uint32_t *major, uint32_t *minor, uint32_t *patchlevel);
enum lttng_ust_event_type {
LTTNG_UST_EVENT_TYPE_RECORDER = 0,
LTTNG_UST_EVENT_TYPE_NOTIFIER = 1,
+ LTTNG_UST_EVENT_TYPE_COUNTER = 2,
};
/*
/* End of base ABI. Fields below should be used after checking struct_size. */
};
+struct lttng_ust_event_counter_private;
+
+/*
+ * IMPORTANT: this structure is part of the ABI between the probe and
+ * UST. Fields need to be only added at the end, never reordered, never
+ * removed.
+ *
+ * The field @struct_size should be used to determine the size of the
+ * structure. It should be queried before using additional fields added
+ * at the end of the structure.
+ */
+struct lttng_ust_event_counter_ctx {
+ uint32_t struct_size; /* Size of this structure. */
+ int args_available; /* Input arguments are available. */
+
+ /* End of base ABI. Fields below should be used after checking struct_size. */
+};
+
+/*
+ * IMPORTANT: this structure is part of the ABI between the probe and
+ * UST. Fields need to be only added at the end, never reordered, never
+ * removed.
+ *
+ * struct lttng_ust_event_counter is the action for counting event
+ * occurrences into a counter. It inherits from struct lttng_ust_event_common
+ * by composition to ensure both parent and child structure are
+ * extensible.
+ *
+ * The field @struct_size should be used to determine the size of the
+ * structure. It should be queried before using additional fields added
+ * at the end of the structure.
+ */
+struct lttng_ust_event_counter {
+ uint32_t struct_size; /* Size of this structure. */
+
+ struct lttng_ust_event_common *parent; /* Inheritance by aggregation. */
+ struct lttng_ust_event_counter_private *priv; /* Private event counter interface */
+
+ struct lttng_ust_channel_counter *chan;
+
+ int use_args; /* Use input arguments. */
+
+ /* End of base ABI. Fields below should be used after checking struct_size. */
+};
+
/*
* IMPORTANT: this structure is part of the ABI between the probe and
* UST. Fields need to be only added at the end, never reordered, never
enum lttng_ust_channel_type {
LTTNG_UST_CHANNEL_TYPE_BUFFER = 0,
+ LTTNG_UST_CHANNEL_TYPE_COUNTER = 1,
};
struct lttng_ust_channel_common_private;
/* End of base ABI. Fields below should be used after checking struct_size. */
};
+struct lttng_ust_channel_counter;
+struct lttng_ust_channel_counter_ops_private;
+
+/*
+ * IMPORTANT: this structure is part of the ABI between the probe and
+ * UST. Fields need to be only added at the end, never reordered, never
+ * removed.
+ *
+ * The field @struct_size should be used to determine the size of the
+ * structure. It should be queried before using additional fields added
+ * at the end of the structure.
+ */
+struct lttng_ust_channel_counter_ops {
+ uint32_t struct_size;
+
+ struct lttng_ust_channel_counter_ops_private *priv; /* Private channel counter ops interface */
+
+ int (*counter_hit)(struct lttng_ust_event_counter *event_counter,
+ const char *stack_data,
+ struct lttng_ust_probe_ctx *probe_ctx,
+ struct lttng_ust_event_counter_ctx *event_counter_ctx);
+
+ /* End of base ABI. Fields below should be used after checking struct_size. */
+};
+
+/*
+ * IMPORTANT: this structure is part of the ABI between the probe and
+ * UST. Fields need to be only added at the end, never reordered, never
+ * removed.
+ *
+ * The field @struct_size should be used to determine the size of the
+ * structure. It should be queried before using additional fields added
+ * at the end of the structure.
+ */
+struct lttng_ust_channel_counter {
+ uint32_t struct_size; /* Size of this structure. */
+
+ struct lttng_ust_channel_common *parent; /* Inheritance by aggregation. */
+ struct lttng_ust_channel_counter_private *priv; /* Private channel counter interface */
+
+ struct lttng_ust_channel_counter_ops *ops;
+
+ /* End of base ABI. Fields below should be used after checking struct_size. */
+};
+
/*
* IMPORTANT: this structure is part of the ABI between the probe and
* UST. Fields need to be only added at the end, never reordered, never
*/
void lttng_ust_context_procname_reset(void);
+static inline
+struct lttng_ust_channel_common *lttng_ust_get_chan_common_from_event_common(
+ struct lttng_ust_event_common *event)
+{
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER:
+ {
+ struct lttng_ust_event_recorder *event_recorder = (struct lttng_ust_event_recorder *) event->child;
+ struct lttng_ust_channel_buffer *chan_buf = event_recorder->chan;
+
+ return chan_buf->parent;
+ }
+ case LTTNG_UST_EVENT_TYPE_COUNTER:
+ {
+ struct lttng_ust_event_counter *event_counter = (struct lttng_ust_event_counter *) event->child;
+ struct lttng_ust_channel_counter *chan_counter = event_counter->chan;
+
+ return chan_counter->parent;
+ }
+ default:
+ return NULL;
+ }
+}
+
#ifdef __cplusplus
}
#endif
void lttng_ust__event_probe__##_provider##___##_name(LTTNG_UST__TP_ARGS_DATA_PROTO(_args)) \
{ \
struct lttng_ust_event_common *__event = (struct lttng_ust_event_common *) __tp_data; \
+ struct lttng_ust_channel_common *__chan_common; \
size_t __dynamic_len_idx = 0; \
const size_t __num_fields = LTTNG_UST__TP_ARRAY_SIZE(lttng_ust__event_fields___##_provider##___##_name) - 1; \
struct lttng_ust_probe_ctx __probe_ctx; \
\
if (0) \
(void) __dynamic_len_idx; /* don't warn if unused */ \
- switch (__event->type) { \
- case LTTNG_UST_EVENT_TYPE_RECORDER: \
- { \
- struct lttng_ust_event_recorder *__event_recorder = (struct lttng_ust_event_recorder *) __event->child; \
- struct lttng_ust_channel_buffer *__chan = __event_recorder->chan; \
- struct lttng_ust_channel_common *__chan_common = __chan->parent; \
- \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
+ return; \
+ if (caa_unlikely(!LTTNG_UST_TP_RCU_LINK_TEST())) \
+ return; \
+ __chan_common = lttng_ust_get_chan_common_from_event_common(__event); \
+ if (__chan_common) { \
if (!LTTNG_UST__TP_SESSION_CHECK(session, __chan_common->session)) \
return; \
if (caa_unlikely(!CMM_ACCESS_ONCE(__chan_common->session->active))) \
return; \
if (caa_unlikely(!CMM_ACCESS_ONCE(__chan_common->enabled))) \
return; \
- break; \
- } \
- case LTTNG_UST_EVENT_TYPE_NOTIFIER: \
- break; \
} \
- if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
- return; \
- if (caa_unlikely(!LTTNG_UST_TP_RCU_LINK_TEST())) \
- return; \
__probe_ctx.struct_size = sizeof(struct lttng_ust_probe_ctx); \
__probe_ctx.ip = LTTNG_UST__TP_IP_PARAM(LTTNG_UST_TP_IP_PARAM); \
if (caa_unlikely(CMM_ACCESS_ONCE(__event->eval_filter))) { \
&__notif_ctx); \
break; \
} \
+ case LTTNG_UST_EVENT_TYPE_COUNTER: \
+ { \
+ struct lttng_ust_event_counter *__event_counter = (struct lttng_ust_event_counter *) __event->child; \
+ struct lttng_ust_event_counter_ctx __event_counter_ctx; \
+ \
+ __event_counter_ctx.struct_size = sizeof(struct lttng_ust_event_counter_ctx); \
+ __event_counter_ctx.args_available = CMM_ACCESS_ONCE(__event_counter->use_args); \
+ \
+ if (caa_unlikely(!__interpreter_stack_prepared && __event_counter_ctx.args_available)) \
+ lttng_ust__event_prepare_interpreter_stack__##_provider##___##_name(__stackvar.__interpreter_stack_data, \
+ LTTNG_UST__TP_ARGS_DATA_VAR(_args)); \
+ \
+ (void) __event_counter->chan->ops->counter_hit(__event_counter, \
+ __stackvar.__interpreter_stack_data, \
+ &__probe_ctx, \
+ &__event_counter_ctx); \
+ break; \
+ } \
} \
}
logging.h \
smp.c \
smp.h \
+ populate.c \
+ populate.h \
strutils.c \
strutils.h \
utils.c \
return NULL;
}
+struct lttng_ust_channel_counter *lttng_ust_alloc_channel_counter(void)
+{
+ struct lttng_ust_channel_counter *lttng_chan_counter;
+ struct lttng_ust_channel_common *lttng_chan_common;
+ struct lttng_ust_channel_counter_private *lttng_chan_counter_priv;
+
+ lttng_chan_counter = zmalloc(sizeof(struct lttng_ust_channel_counter));
+ if (!lttng_chan_counter)
+ goto lttng_chan_counter_error;
+ lttng_chan_counter->struct_size = sizeof(struct lttng_ust_channel_counter);
+ lttng_chan_common = zmalloc(sizeof(struct lttng_ust_channel_common));
+ if (!lttng_chan_common)
+ goto lttng_chan_common_error;
+ lttng_chan_common->struct_size = sizeof(struct lttng_ust_channel_common);
+ lttng_chan_counter_priv = zmalloc(sizeof(struct lttng_ust_channel_counter_private));
+ if (!lttng_chan_counter_priv)
+ goto lttng_chan_counter_priv_error;
+ lttng_chan_counter->parent = lttng_chan_common;
+ lttng_chan_common->type = LTTNG_UST_CHANNEL_TYPE_COUNTER;
+ lttng_chan_common->child = lttng_chan_counter;
+ lttng_chan_counter->priv = lttng_chan_counter_priv;
+ lttng_chan_common->priv = &lttng_chan_counter_priv->parent;
+ lttng_chan_counter_priv->pub = lttng_chan_counter;
+ lttng_chan_counter_priv->parent.pub = lttng_chan_common;
+
+ return lttng_chan_counter;
+
+lttng_chan_counter_priv_error:
+ free(lttng_chan_common);
+lttng_chan_common_error:
+ free(lttng_chan_counter);
+lttng_chan_counter_error:
+ return NULL;
+}
+
void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
{
switch (chan->type) {
free(chan_buf);
break;
}
+ case LTTNG_UST_CHANNEL_TYPE_COUNTER:
+ {
+ struct lttng_ust_channel_counter *chan_counter;
+
+ chan_counter = (struct lttng_ust_channel_counter *)chan->child;
+ free(chan_counter->parent);
+ free(chan_counter->priv);
+ free(chan_counter);
+ break;
+ }
default:
abort();
}
.counter_size = COUNTER_SIZE_32_BIT,
};
-static struct lib_counter *counter_create(size_t nr_dimensions,
+static struct lttng_ust_channel_counter *counter_create(size_t nr_dimensions,
const struct lttng_counter_dimension *dimensions,
int64_t global_sum_step,
int global_counter_fd,
bool is_daemon)
{
size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+ struct lttng_ust_channel_counter *lttng_chan_counter;
+ struct lib_counter *counter;
if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
return NULL;
return NULL;
max_nr_elem[i] = dimensions[i].size;
}
- return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ lttng_chan_counter = lttng_ust_alloc_channel_counter();
+ if (!lttng_chan_counter)
+ return NULL;
+ counter = lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
global_sum_step, global_counter_fd, nr_counter_cpu_fds,
counter_cpu_fds, is_daemon);
+ if (!counter)
+ goto error;
+ lttng_chan_counter->priv->counter = counter;
+ for (i = 0; i < nr_dimensions; i++)
+ lttng_chan_counter->priv->dimension_key_types[i] = dimensions[i].key_type;
+ return lttng_chan_counter;
+
+error:
+ lttng_ust_free_channel_common(lttng_chan_counter->parent);
+ return NULL;
+}
+
+static void counter_destroy(struct lttng_ust_channel_counter *counter)
+{
+ lttng_counter_destroy(counter->priv->counter);
+ lttng_ust_free_channel_common(counter->parent);
}
-static void counter_destroy(struct lib_counter *counter)
+static int counter_add(struct lttng_ust_channel_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
{
- lttng_counter_destroy(counter);
+ return lttng_counter_add(&client_config, counter->priv->counter, dimension_indexes, v);
}
-static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+static int counter_hit(struct lttng_ust_event_counter *event_counter,
+ const char *stack_data __attribute__((unused)),
+ struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
+ struct lttng_ust_event_counter_ctx *event_counter_ctx __attribute__((unused)))
{
- return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+ struct lttng_ust_channel_counter *counter = event_counter->chan;
+
+ switch (event_counter->priv->action) {
+ case LTTNG_EVENT_COUNTER_ACTION_INCREMENT:
+ {
+ size_t index = event_counter->priv->parent.id;
+ return counter_add(counter, &index, 1);
+ }
+ default:
+ return -ENOSYS;
+ }
}
-static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+static int counter_read(struct lttng_ust_channel_counter *counter, const size_t *dimension_indexes, int cpu,
int64_t *value, bool *overflow, bool *underflow)
{
- return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ return lttng_counter_read(&client_config, counter->priv->counter, dimension_indexes, cpu, value,
overflow, underflow);
}
-static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+static int counter_aggregate(struct lttng_ust_channel_counter *counter, const size_t *dimension_indexes,
int64_t *value, bool *overflow, bool *underflow)
{
- return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ return lttng_counter_aggregate(&client_config, counter->priv->counter, dimension_indexes, value,
overflow, underflow);
}
-static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+static int counter_clear(struct lttng_ust_channel_counter *counter, const size_t *dimension_indexes)
{
- return lttng_counter_clear(&client_config, counter, dimension_indexes);
+ return lttng_counter_clear(&client_config, counter->priv->counter, dimension_indexes);
}
static struct lttng_counter_transport lttng_counter_transport = {
.name = "counter-per-cpu-32-modular",
.ops = {
- .counter_create = counter_create,
- .counter_destroy = counter_destroy,
- .counter_add = counter_add,
- .counter_read = counter_read,
- .counter_aggregate = counter_aggregate,
- .counter_clear = counter_clear,
+ .struct_size = sizeof(struct lttng_ust_channel_counter_ops),
+ .priv = LTTNG_UST_COMPOUND_LITERAL(struct lttng_ust_channel_counter_ops_private, {
+ .pub = &lttng_counter_transport.ops,
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ }),
+ .counter_hit = counter_hit,
},
.client_config = &client_config,
};
.counter_size = COUNTER_SIZE_64_BIT,
};
-static struct lib_counter *counter_create(size_t nr_dimensions,
+static struct lttng_ust_channel_counter *counter_create(size_t nr_dimensions,
const struct lttng_counter_dimension *dimensions,
int64_t global_sum_step,
int global_counter_fd,
bool is_daemon)
{
size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+ struct lttng_ust_channel_counter *lttng_chan_counter;
+ struct lib_counter *counter;
if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
return NULL;
return NULL;
max_nr_elem[i] = dimensions[i].size;
}
- return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ lttng_chan_counter = lttng_ust_alloc_channel_counter();
+ if (!lttng_chan_counter)
+ return NULL;
+ counter = lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
global_sum_step, global_counter_fd, nr_counter_cpu_fds,
counter_cpu_fds, is_daemon);
+ if (!counter)
+ goto error;
+ lttng_chan_counter->priv->counter = counter;
+ for (i = 0; i < nr_dimensions; i++)
+ lttng_chan_counter->priv->dimension_key_types[i] = dimensions[i].key_type;
+ return lttng_chan_counter;
+
+error:
+ lttng_ust_free_channel_common(lttng_chan_counter->parent);
+ return NULL;
+}
+
+static void counter_destroy(struct lttng_ust_channel_counter *counter)
+{
+ lttng_counter_destroy(counter->priv->counter);
+ lttng_ust_free_channel_common(counter->parent);
}
-static void counter_destroy(struct lib_counter *counter)
+static int counter_add(struct lttng_ust_channel_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
{
- lttng_counter_destroy(counter);
+ return lttng_counter_add(&client_config, counter->priv->counter, dimension_indexes, v);
}
-static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+static int counter_hit(struct lttng_ust_event_counter *event_counter,
+ const char *stack_data __attribute__((unused)),
+ struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
+ struct lttng_ust_event_counter_ctx *event_counter_ctx __attribute__((unused)))
{
- return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+ struct lttng_ust_channel_counter *counter = event_counter->chan;
+
+ switch (event_counter->priv->action) {
+ case LTTNG_EVENT_COUNTER_ACTION_INCREMENT:
+ {
+ size_t index = event_counter->priv->parent.id;
+ return counter_add(counter, &index, 1);
+ }
+ default:
+ return -ENOSYS;
+ }
}
-static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+static int counter_read(struct lttng_ust_channel_counter *counter, const size_t *dimension_indexes, int cpu,
int64_t *value, bool *overflow, bool *underflow)
{
- return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ return lttng_counter_read(&client_config, counter->priv->counter, dimension_indexes, cpu, value,
overflow, underflow);
}
-static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+static int counter_aggregate(struct lttng_ust_channel_counter *counter, const size_t *dimension_indexes,
int64_t *value, bool *overflow, bool *underflow)
{
- return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ return lttng_counter_aggregate(&client_config, counter->priv->counter, dimension_indexes, value,
overflow, underflow);
}
-static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+static int counter_clear(struct lttng_ust_channel_counter *counter, const size_t *dimension_indexes)
{
- return lttng_counter_clear(&client_config, counter, dimension_indexes);
+ return lttng_counter_clear(&client_config, counter->priv->counter, dimension_indexes);
}
static struct lttng_counter_transport lttng_counter_transport = {
.name = "counter-per-cpu-64-modular",
.ops = {
- .counter_create = counter_create,
- .counter_destroy = counter_destroy,
- .counter_add = counter_add,
- .counter_read = counter_read,
- .counter_aggregate = counter_aggregate,
- .counter_clear = counter_clear,
+ .struct_size = sizeof(struct lttng_ust_channel_counter_ops),
+ .priv = LTTNG_UST_COMPOUND_LITERAL(struct lttng_ust_channel_counter_ops_private, {
+ .pub = &lttng_counter_transport.ops,
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ }),
+ .counter_hit = counter_hit,
},
.client_config = &client_config,
};
*/
static inline int __lttng_counter_add(const struct lib_counter_config *config,
enum lib_counter_config_alloc alloc,
- enum lib_counter_config_sync sync,
+ enum lib_counter_config_sync sync __attribute__((unused)),
struct lib_counter *counter,
const size_t *dimension_indexes, int64_t v,
int64_t *remainder)
int8_t global_sum_step = counter->global_sum_step.s8;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
do {
move_sum = 0;
old = res;
n = (int8_t) ((uint8_t) old + (uint8_t) v);
- if (caa_unlikely(n > (int8_t) global_sum_step))
- move_sum = (int8_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int8_t) global_sum_step))
- move_sum = -((int8_t) global_sum_step / 2);
- n -= move_sum;
+ if (caa_unlikely(global_sum_step)) {
+ if (caa_unlikely(n > (int8_t) global_sum_step))
+ move_sum = (int8_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int8_t) global_sum_step))
+ move_sum = -((int8_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = uatomic_cmpxchg(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
int16_t global_sum_step = counter->global_sum_step.s16;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
do {
move_sum = 0;
old = res;
n = (int16_t) ((uint16_t) old + (uint16_t) v);
- if (caa_unlikely(n > (int16_t) global_sum_step))
- move_sum = (int16_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int16_t) global_sum_step))
- move_sum = -((int16_t) global_sum_step / 2);
- n -= move_sum;
+ if (caa_unlikely(global_sum_step)) {
+ if (caa_unlikely(n > (int16_t) global_sum_step))
+ move_sum = (int16_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int16_t) global_sum_step))
+ move_sum = -((int16_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = uatomic_cmpxchg(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
int32_t global_sum_step = counter->global_sum_step.s32;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
do {
move_sum = 0;
old = res;
n = (int32_t) ((uint32_t) old + (uint32_t) v);
- if (caa_unlikely(n > (int32_t) global_sum_step))
- move_sum = (int32_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int32_t) global_sum_step))
- move_sum = -((int32_t) global_sum_step / 2);
- n -= move_sum;
+ if (caa_unlikely(global_sum_step)) {
+ if (caa_unlikely(n > (int32_t) global_sum_step))
+ move_sum = (int32_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int32_t) global_sum_step))
+ move_sum = -((int32_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = uatomic_cmpxchg(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
int64_t global_sum_step = counter->global_sum_step.s64;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
do {
move_sum = 0;
old = res;
n = (int64_t) ((uint64_t) old + (uint64_t) v);
- if (caa_unlikely(n > (int64_t) global_sum_step))
- move_sum = (int64_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int64_t) global_sum_step))
- move_sum = -((int64_t) global_sum_step / 2);
- n -= move_sum;
+ if (caa_unlikely(global_sum_step)) {
+ if (caa_unlikely(n > (int64_t) global_sum_step))
+ move_sum = (int64_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int64_t) global_sum_step))
+ move_sum = -((int64_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = uatomic_cmpxchg(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
struct lib_counter_layout global_counters;
struct lib_counter_layout *percpu_counters;
+ size_t expected_shm;
+ size_t received_shm;
+
bool is_daemon;
struct lttng_counter_shm_object_table *object_table;
};
#include "common/bitmap.h"
#include "common/smp.h"
+#include "common/populate.h"
#include "shm.h"
static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
if (counter->is_daemon) {
/* Allocate and clear shared memory. */
shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
- shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+ shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu,
+ lttng_ust_map_populate_cpu_is_enabled(cpu));
if (!shm_object)
return -ENOMEM;
} else {
/* Map pre-existing shared memory. */
shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
- shm_fd, shm_length);
+ shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu));
if (!shm_object)
return -ENOMEM;
}
{
struct lib_counter_config *config = &counter->config;
struct lib_counter_layout *layout;
+ int ret;
if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
return -EINVAL;
layout = &counter->global_counters;
if (layout->shm_fd >= 0)
return -EBUSY;
- return lttng_counter_layout_init(counter, -1, fd);
+ ret = lttng_counter_layout_init(counter, -1, fd);
+ if (!ret)
+ counter->received_shm++;
+ return ret;
}
int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
struct lib_counter_config *config = &counter->config;
struct lib_counter_layout *layout;
+ int ret;
if (cpu < 0 || cpu >= get_possible_cpus_array_len())
return -EINVAL;
layout = &counter->percpu_counters[cpu];
if (layout->shm_fd >= 0)
return -EBUSY;
- return lttng_counter_layout_init(counter, cpu, fd);
+ ret = lttng_counter_layout_init(counter, cpu, fd);
+ if (!ret)
+ counter->received_shm++;
+ return ret;
}
static
int cpu, ret;
int nr_handles = 0;
int nr_cpus = get_possible_cpus_array_len();
+ bool populate = lttng_ust_map_populate_is_enabled();
if (validate_args(config, nr_dimensions, max_nr_elem,
global_sum_step, global_counter_fd, nr_counter_cpu_fds,
counter_cpu_fds))
return NULL;
- counter = zmalloc(sizeof(struct lib_counter));
+ counter = zmalloc_populate(sizeof(struct lib_counter), populate);
if (!counter)
return NULL;
counter->global_counters.shm_fd = -1;
if (lttng_counter_set_global_sum_step(counter, global_sum_step))
goto error_sum_step;
counter->nr_dimensions = nr_dimensions;
- counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+ counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate);
if (!counter->dimensions)
goto error_dimensions;
for (dimension = 0; dimension < nr_dimensions; dimension++)
counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
if (config->alloc & COUNTER_ALLOC_PER_CPU) {
- counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+ counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate);
if (!counter->percpu_counters)
goto error_alloc_percpu;
for_each_possible_cpu(cpu)
nr_handles++;
if (config->alloc & COUNTER_ALLOC_PER_CPU)
nr_handles += nr_cpus;
+ counter->expected_shm = nr_handles;
/* Allocate table for global and per-cpu counters. */
- counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+ counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate);
if (!counter->object_table)
goto error_alloc_object_table;
return 0;
}
+bool lttng_counter_ready(struct lib_counter *counter)
+{
+ if (counter->received_shm == counter->expected_shm)
+ return true;
+ return false;
+}
+
int lttng_counter_read(const struct lib_counter_config *config,
struct lib_counter *counter,
const size_t *dimension_indexes,
default:
return -EINVAL;
}
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ if (sum > INT8_MAX)
+ *overflow = true;
+ if (sum < INT8_MIN)
+ *underflow = true;
+ sum = (int8_t) sum; /* Truncate sum. */
+ break;
+ case COUNTER_SIZE_16_BIT:
+ if (sum > INT16_MAX)
+ *overflow = true;
+ if (sum < INT16_MIN)
+ *underflow = true;
+ sum = (int16_t) sum; /* Truncate sum. */
+ break;
+ case COUNTER_SIZE_32_BIT:
+ if (sum > INT32_MAX)
+ *overflow = true;
+ if (sum < INT32_MIN)
+ *underflow = true;
+ sum = (int32_t) sum; /* Truncate sum. */
+ break;
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
*value = sum;
return 0;
}
int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
__attribute__((visibility("hidden")));
+/*
+ * Has counter received all expected shm ?
+ */
+bool lttng_counter_ready(struct lib_counter *counter)
+ __attribute__((visibility("hidden")));
+
int lttng_counter_read(const struct lib_counter_config *config,
struct lib_counter *counter,
const size_t *dimension_indexes,
return ret;
}
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
{
struct lttng_counter_shm_object_table *table;
- table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
- max_nb_obj * sizeof(table->objects[0]));
+ table = zmalloc_populate(sizeof(struct lttng_counter_shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]), populate);
if (!table)
return NULL;
table->size = max_nb_obj;
static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
size_t memory_map_size,
- int cpu_fd)
+ int cpu_fd, bool populate)
{
- int shmfd, ret;
struct lttng_counter_shm_object *obj;
+ int flags = MAP_SHARED;
+ int shmfd, ret;
char *memory_map;
if (cpu_fd < 0)
obj->shm_fd_ownership = 0;
obj->shm_fd = shmfd;
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ flags, shmfd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
{
struct lttng_counter_shm_object *obj;
void *memory_map;
return NULL;
obj = &table->objects[table->allocated_len];
- memory_map = zmalloc(memory_map_size);
+ memory_map = zmalloc_populate(memory_map_size, populate);
if (!memory_map)
goto alloc_error;
size_t memory_map_size,
enum lttng_counter_shm_object_type type,
int cpu_fd,
- int cpu)
+ int cpu,
+ bool populate)
#else
struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
size_t memory_map_size,
enum lttng_counter_shm_object_type type,
int cpu_fd,
- int cpu __attribute__((unused)))
+ int cpu __attribute__((unused)),
+ bool populate)
#endif
{
struct lttng_counter_shm_object *shm_object;
switch (type) {
case LTTNG_COUNTER_SHM_OBJECT_SHM:
shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
- cpu_fd);
+ cpu_fd, populate);
break;
case LTTNG_COUNTER_SHM_OBJECT_MEM:
- shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
+ shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size,
+ populate);
break;
default:
assert(0);
}
struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd,
- size_t memory_map_size)
+ int shm_fd, size_t memory_map_size, bool populate)
{
struct lttng_counter_shm_object *obj;
+ int flags = MAP_SHARED;
char *memory_map;
if (table->allocated_len >= table->size)
obj->shm_fd = shm_fd;
obj->shm_fd_ownership = 1;
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ flags, shm_fd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
+#include <stdbool.h>
#include "common/logging.h"
#include <urcu/compiler.h>
#include "shm_types.h"
#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
__attribute__((visibility("hidden")));
struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
size_t memory_map_size,
enum lttng_counter_shm_object_type type,
const int cpu_fd,
- int cpu)
+ int cpu, bool populate)
__attribute__((visibility("hidden")));
struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd, size_t memory_map_size)
+ int shm_fd, size_t memory_map_size, bool populate)
__attribute__((visibility("hidden")));
/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
int event_notifier_notif_fd;
} event_notifier_handle;
struct {
- void *counter_data;
+ uint32_t len;
+ } event_notifier;
+ struct {
+ uint32_t len;
} counter;
struct {
+ uint32_t len;
int shm_fd;
} counter_shm;
+ struct {
+ uint32_t len;
+ } counter_event;
};
struct lttng_ust_abi_objd_ops {
LTTNG_ENABLER_FORMAT_EVENT,
};
+enum lttng_key_token_type {
+ LTTNG_KEY_TOKEN_STRING = 0,
+ LTTNG_KEY_TOKEN_EVENT_NAME = 1,
+ LTTNG_KEY_TOKEN_PROVIDER_NAME = 2,
+};
+
+#define LTTNG_KEY_TOKEN_STRING_LEN_MAX 4096
+struct lttng_key_token {
+ enum lttng_key_token_type type;
+ union {
+ char string[LTTNG_KEY_TOKEN_STRING_LEN_MAX];
+ } arg;
+};
+
+enum lttng_key_type {
+ LTTNG_KEY_TYPE_TOKENS = 0,
+ LTTNG_KEY_TYPE_INTEGER = 1,
+};
+
+#define LTTNG_NR_KEY_TOKEN 8
+struct lttng_counter_key_dimension {
+ enum lttng_key_type key_type;
+
+ union {
+ struct {
+ size_t nr_key_tokens;
+ struct lttng_key_token key_tokens[LTTNG_NR_KEY_TOKEN];
+ } tokens;
+ } u;
+};
+
+#define LTTNG_COUNTER_DIMENSION_MAX 4
+struct lttng_counter_key {
+ size_t nr_dimensions;
+ struct lttng_counter_key_dimension key_dimensions[LTTNG_COUNTER_DIMENSION_MAX];
+};
+
+struct lttng_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ enum lttng_key_type key_type;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+};
+
+enum lttng_event_enabler_type {
+ LTTNG_EVENT_ENABLER_TYPE_RECORDER,
+ LTTNG_EVENT_ENABLER_TYPE_NOTIFIER,
+ LTTNG_EVENT_ENABLER_TYPE_COUNTER,
+};
+
/*
* Enabler field, within whatever object is enabling an event. Target of
* backward reference.
*/
-struct lttng_enabler {
+struct lttng_event_enabler_common {
+ enum lttng_event_enabler_type enabler_type;
+
enum lttng_enabler_format_type format_type;
/* head list of struct lttng_ust_filter_bytecode_node */
struct lttng_ust_abi_event event_param;
unsigned int enabled:1;
+
+ struct cds_list_head node; /* list of enablers */
+ uint64_t user_token; /* User-provided token */
};
-struct lttng_event_enabler {
- struct lttng_enabler base;
- struct cds_list_head node; /* per-session list of enablers */
+struct lttng_event_enabler_session_common {
+ struct lttng_event_enabler_common parent;
+ struct lttng_ust_channel_common *chan;
+};
+
+struct lttng_event_recorder_enabler {
+ struct lttng_event_enabler_session_common parent;
struct lttng_ust_channel_buffer *chan;
- /*
- * Unused, but kept around to make it explicit that the tracer can do
- * it.
- */
- struct lttng_ust_ctx *ctx;
+};
+
+enum lttng_event_counter_action {
+ LTTNG_EVENT_COUNTER_ACTION_INCREMENT = 0,
+};
+
+struct lttng_event_counter_enabler {
+ struct lttng_event_enabler_session_common parent;
+ struct lttng_ust_channel_counter *chan;
+ struct lttng_counter_key key;
+
+ enum lttng_event_counter_action action;
};
struct lttng_event_notifier_enabler {
- struct lttng_enabler base;
+ struct lttng_event_enabler_common parent;
uint64_t error_counter_index;
- struct cds_list_head node; /* per-app list of event_notifier enablers */
struct cds_list_head capture_bytecode_head;
struct lttng_event_notifier_group *group; /* weak ref */
- uint64_t user_token; /* User-provided token */
uint64_t num_captures;
};
struct lttng_ust_bytecode_node {
enum lttng_ust_bytecode_type type;
struct cds_list_head node;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler_common *enabler;
struct {
uint32_t len;
uint32_t reloc_offset;
struct lttng_ust_excluder_node {
struct cds_list_head node;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler_common *enabler;
/*
* struct lttng_ust_event_exclusion had variable sized array,
* must be last field.
*/
struct lttng_enabler_ref {
struct cds_list_head node; /* enabler ref list */
- struct lttng_enabler *ref; /* backward ref */
-};
-
-#define LTTNG_COUNTER_DIMENSION_MAX 8
-struct lttng_counter_dimension {
- uint64_t size;
- uint64_t underflow_index;
- uint64_t overflow_index;
- uint8_t has_underflow;
- uint8_t has_overflow;
-};
-
-struct lttng_counter_ops {
- struct lib_counter *(*counter_create)(size_t nr_dimensions,
- const struct lttng_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon);
- void (*counter_destroy)(struct lib_counter *counter);
- int (*counter_add)(struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v);
- int (*counter_read)(struct lib_counter *counter,
- const size_t *dimension_indexes, int cpu,
- int64_t *value, bool *overflow, bool *underflow);
- int (*counter_aggregate)(struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t *value,
- bool *overflow, bool *underflow);
- int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
-};
-
-struct lttng_counter {
- int objd;
- struct lttng_event_notifier_group *event_notifier_group; /* owner */
- struct lttng_counter_transport *transport;
- struct lib_counter *counter;
- struct lttng_counter_ops *ops;
+ struct lttng_event_enabler_common *ref; /* backward ref */
};
#define LTTNG_UST_EVENT_HT_BITS 12
struct cds_hlist_head table[LTTNG_UST_EVENT_HT_SIZE];
};
-#define LTTNG_UST_EVENT_NOTIFIER_HT_BITS 12
-#define LTTNG_UST_EVENT_NOTIFIER_HT_SIZE (1U << LTTNG_UST_EVENT_NOTIFIER_HT_BITS)
-struct lttng_ust_event_notifier_ht {
- struct cds_hlist_head table[LTTNG_UST_EVENT_NOTIFIER_HT_SIZE];
-};
-
#define LTTNG_UST_ENUM_HT_BITS 12
#define LTTNG_UST_ENUM_HT_SIZE (1U << LTTNG_UST_ENUM_HT_BITS)
struct cds_list_head sync_enablers_head;
struct cds_list_head event_notifiers_head; /* list of event_notifiers */
- struct lttng_ust_event_notifier_ht event_notifiers_ht; /* hashtable of event_notifiers */
+ struct lttng_ust_event_ht event_notifiers_ht; /* hashtable of event notifiers */
struct lttng_ust_ctx *ctx; /* contexts for filters. */
- struct lttng_counter *error_counter;
+ struct lttng_ust_channel_counter *error_counter;
size_t error_counter_len;
};
struct lttng_counter_transport {
const char *name;
struct cds_list_head node;
- struct lttng_counter_ops ops;
+ struct lttng_ust_channel_counter_ops ops;
const struct lib_counter_config *client_config;
};
int has_enablers_without_filter_bytecode;
/* list of struct lttng_ust_bytecode_runtime, sorted by seqnum */
struct cds_list_head filter_bytecode_runtime_head;
+
+ struct cds_hlist_node name_hlist_node; /* node in events-by-name hash table */
+ struct cds_list_head node; /* node in event list */
};
-struct lttng_ust_event_recorder_private {
+struct lttng_ust_event_session_common_private {
struct lttng_ust_event_common_private parent;
+ struct lttng_ust_channel_common *chan;
+
+ uint64_t id; /* Event id */
+};
+
+struct lttng_ust_event_recorder_private {
+ struct lttng_ust_event_session_common_private parent;
+
struct lttng_ust_event_recorder *pub; /* Public event interface */
- struct cds_list_head node; /* Event recorder list */
- struct cds_hlist_node hlist; /* Hash table of event recorders */
- struct lttng_ust_ctx *ctx;
- unsigned int id;
+};
+
+struct lttng_ust_event_counter_private {
+ struct lttng_ust_event_session_common_private parent;
+
+ struct lttng_ust_event_counter *pub; /* Public event interface */
+ enum lttng_event_counter_action action;
+ char key[LTTNG_KEY_TOKEN_STRING_LEN_MAX];
};
struct lttng_ust_event_notifier_private {
struct lttng_event_notifier_group *group; /* weak ref */
size_t num_captures; /* Needed to allocate the msgpack array. */
uint64_t error_counter_index;
- struct cds_list_head node; /* Event notifier list */
- struct cds_hlist_node hlist; /* Hash table of event notifiers */
struct cds_list_head capture_bytecode_runtime_head;
};
int been_active; /* Been active ? */
int objd; /* Object associated */
struct cds_list_head chan_head; /* Channel list head */
+ struct cds_list_head counters_head; /* Counter list head */
struct cds_list_head events_head; /* list of events */
struct cds_list_head node; /* Session list */
/* List of synchronized enablers */
struct cds_list_head sync_enablers_head;
- struct lttng_ust_event_ht events_ht; /* ht of events */
+ struct cds_list_head enums_head;
+
+ struct lttng_ust_event_ht events_name_ht; /* ht of events, indexed by name */
+ struct lttng_ust_enum_ht enums_ht; /* ht of enumerations */
+
void *owner; /* object owner */
unsigned int tstate:1; /* Transient enable state */
-
unsigned int statedump_pending:1;
-
- struct lttng_ust_enum_ht enums_ht; /* ht of enumerations */
- struct cds_list_head enums_head;
struct lttng_ust_ctx *ctx; /* contexts for filters. */
-
unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
bool uuid_set; /* Is uuid set ? */
};
int objd; /* Object associated with channel. */
unsigned int tstate:1; /* Transient enable state */
+ bool coalesce_hits;
};
struct lttng_ust_channel_buffer_private {
unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
};
+struct lttng_ust_channel_counter_ops_private {
+ struct lttng_ust_channel_counter_ops *pub; /* Public channel counter ops interface */
+
+ struct lttng_ust_channel_counter *(*counter_create)(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon);
+ void (*counter_destroy)(struct lttng_ust_channel_counter *counter);
+ int (*counter_add)(struct lttng_ust_channel_counter *counter,
+ const size_t *dimension_indexes, int64_t v);
+ int (*counter_read)(struct lttng_ust_channel_counter *counter,
+ const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow);
+ int (*counter_aggregate)(struct lttng_ust_channel_counter *counter,
+ const size_t *dimension_indexes, int64_t *value,
+ bool *overflow, bool *underflow);
+ int (*counter_clear)(struct lttng_ust_channel_counter *counter,
+ const size_t *dimension_indexes);
+};
+
+struct lttng_ust_channel_counter_private {
+ struct lttng_ust_channel_common_private parent;
+
+ struct lttng_ust_channel_counter *pub; /* Public channel counter interface */
+ struct lib_counter *counter;
+ struct lttng_ust_channel_counter_ops *ops;
+
+ /* Event notifier group owner. */
+ struct lttng_event_notifier_group *event_notifier_group;
+
+ /* Session owner. */
+ struct lttng_session *session;
+ struct cds_list_head node; /* Counter list (in session) */
+ size_t free_index; /* Next index to allocate */
+ enum lttng_key_type dimension_key_types[LTTNG_COUNTER_DIMENSION_MAX];
+};
+
/*
* IMPORTANT: this structure is part of the ABI between the consumer
* daemon and the UST library within traced applications. Changing it
})
static inline
-struct lttng_enabler *lttng_event_enabler_as_enabler(
- struct lttng_event_enabler *event_enabler)
+struct lttng_event_enabler_common *lttng_event_notifier_enabler_as_enabler(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
{
- return &event_enabler->base;
+ return &event_notifier_enabler->parent;
}
static inline
-struct lttng_enabler *lttng_event_notifier_enabler_as_enabler(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
+struct lttng_ust_event_ht *lttng_get_event_ht_from_enabler(struct lttng_event_enabler_common *event_enabler)
{
- return &event_notifier_enabler->base;
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ return &event_enabler_session->chan->session->priv->events_name_ht;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
+ return &event_notifier_enabler->group->event_notifiers_ht;
+ }
+ default:
+ return NULL;
+ }
}
-
+static inline
+struct cds_list_head *lttng_get_event_list_head_from_enabler(struct lttng_event_enabler_common *event_enabler)
+{
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ return &event_enabler_session->chan->session->priv->events_head;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
+ return &event_notifier_enabler->group->event_notifiers_head;
+ }
+ default:
+ return NULL;
+ }
+}
/* This is ABI between liblttng-ust and liblttng-ust-dl */
void lttng_ust_dl_update(void *ip);
/* Env. var. which can be used in setuid/setgid executables. */
{ "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
{ "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
+ { "LTTNG_UST_MAP_POPULATE_POLICY", LTTNG_ENV_NOT_SECURE, NULL, },
/* Env. var. which are not fetched in setuid/setgid executables. */
{ "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
#define _UST_COMMON_MACROS_H
#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
#include <lttng/ust-arch.h>
+/*
+ * calloc() does not always populate the page table for the allocated
+ * memory. Optionally enforce page table populate.
+ */
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+ __attribute__((always_inline));
+static inline
+void *zmalloc_populate(size_t len, bool populate)
+{
+ if (populate) {
+ void *ret = malloc(len);
+ if (ret == NULL)
+ return ret;
+ bzero(ret, len);
+ return ret;
+ } else {
+ return calloc(len, 1);
+ }
+}
+
/*
* Memory allocation zeroed
*/
static inline
void *zmalloc(size_t len)
{
- return calloc(len, 1);
+ return zmalloc_populate(len, false);
}
#define max_t(type, x, y) \
#define LTTNG_UST_CALLER_IP() __builtin_return_address(0)
#endif
+#define lttng_ust_offsetofend(type, field) \
+ (offsetof(type, field) + sizeof(((type *)NULL)->field))
+
#endif /* _UST_COMMON_MACROS_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2012-2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "common/getenv.h"
+#include "common/logging.h"
+#include "common/populate.h"
+
+/*
+ * Shared memory page table pre-populate policy, selected through the
+ * LTTNG_UST_MAP_POPULATE_POLICY environment variable.
+ */
+enum populate_policy {
+	POPULATE_UNSET,		/* Environment variable not parsed yet. */
+
+	POPULATE_NONE,		/* Do not pre-populate page tables. */
+	POPULATE_CPU_POSSIBLE,	/* Pre-populate pages ("cpu_possible" policy). */
+
+	POPULATE_UNKNOWN,	/* Unrecognized policy string. */
+};
+
+/* Cached policy, lazily initialized by init_map_populate_policy(). */
+static enum populate_policy map_populate_policy = POPULATE_UNSET;
+
+/*
+ * Parse LTTNG_UST_MAP_POPULATE_POLICY once and cache the result in
+ * map_populate_policy. Subsequent calls are no-ops.
+ */
+static void init_map_populate_policy(void)
+{
+	const char *env_val;
+
+	/* Already parsed: nothing to do. */
+	if (map_populate_policy != POPULATE_UNSET)
+		return;
+
+	env_val = lttng_ust_getenv("LTTNG_UST_MAP_POPULATE_POLICY");
+	if (env_val == NULL) {
+		/* Unset environment variable: default to no populate. */
+		map_populate_policy = POPULATE_NONE;
+	} else if (strcmp(env_val, "none") == 0) {
+		map_populate_policy = POPULATE_NONE;
+	} else if (strcmp(env_val, "cpu_possible") == 0) {
+		map_populate_policy = POPULATE_CPU_POSSIBLE;
+	} else {
+		/*
+		 * env_val is an untrusted environment variable input
+		 * (can be provided to setuid/setgid binaries), so
+		 * don't even try to print it.
+		 */
+		WARN("Unknown policy for LTTNG_UST_MAP_POPULATE_POLICY environment variable.");
+		map_populate_policy = POPULATE_UNKNOWN;
+	}
+}
+
+/*
+ * Return the shared page populate policy for global pages. Returns true
+ * if shared memory pages should be pre-populated, false otherwise.
+ */
+bool lttng_ust_map_populate_is_enabled(void)
+{
+	init_map_populate_policy();
+
+	switch (map_populate_policy) {
+	case POPULATE_CPU_POSSIBLE:
+		return true;
+	case POPULATE_NONE:		/* Fall-through */
+	case POPULATE_UNKNOWN:
+		return false;
+	default:
+		/* POPULATE_UNSET cannot remain after initialization. */
+		abort();
+	}
+	/* Not reached. */
+	return false;
+}
+
+/*
+ * Return the shared page populate policy based on the @cpu number
+ * provided as input. Returns true if shared memory pages should be
+ * pre-populated, false otherwise.
+ *
+ * The @cpu argument is currently unused except for negative value
+ * validation. It is present to eventually match cpu affinity or cpu
+ * online masks if those features are added in the future.
+ */
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+{
+	if (cpu < 0) {
+		/* Reject invalid cpu number. */
+		return false;
+	}
+	/* Per-cpu pages currently follow the global policy. */
+	return lttng_ust_map_populate_is_enabled();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_COMMON_POPULATE_H
+#define _UST_COMMON_POPULATE_H
+
+#include <stdbool.h>
+
+/*
+ * Returns true if shared memory pages associated with @cpu should be
+ * pre-populated, false otherwise (always false for negative @cpu).
+ */
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+	__attribute__((visibility("hidden")));
+
+/*
+ * Returns true if globally shared memory pages should be
+ * pre-populated, false otherwise.
+ */
+bool lttng_ust_map_populate_is_enabled(void)
+	__attribute__((visibility("hidden")));
+
+#endif /* _UST_COMMON_POPULATE_H */
}
static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
+ uint64_t timestamp __attribute__((unused)),
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
* subbuffer. data_size is between 1 and subbuf_size.
*/
static void client_buffer_end(struct lttng_ust_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
+ uint64_t timestamp __attribute__((unused)),
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx)
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
- .tsc_bits = 0,
+ .timestamp_bits = 0,
.alloc = RING_BUFFER_ALLOC_GLOBAL,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
#include "common/clock.h"
#include "common/ringbuffer/frontend_types.h"
-#define LTTNG_COMPACT_EVENT_BITS 5
-#define LTTNG_COMPACT_TSC_BITS 27
+#define LTTNG_COMPACT_EVENT_BITS 5
+#define LTTNG_COMPACT_TIMESTAMP_BITS 27
/*
* Keep the natural field alignment for _each field_ within this structure if
size_t packet_context_len;
size_t event_context_len;
struct lttng_ust_ctx *chan_ctx;
- struct lttng_ust_ctx *event_ctx;
};
/*
case 1: /* compact */
padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += padding;
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
}
offset += ctx_get_aligned_size(offset, client_ctx->chan_ctx,
client_ctx->packet_context_len);
- offset += ctx_get_aligned_size(offset, client_ctx->event_ctx,
- client_ctx->event_context_len);
*pre_header_padding = padding;
return offset - orig_offset;
}
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx->priv->tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS,
+ ctx->priv->timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
- uint32_t timestamp = (uint32_t) ctx->priv->tsc;
+ uint32_t timestamp = (uint32_t) ctx->priv->timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
}
ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
lttng_ust_ring_buffer_align_ctx(ctx, ctx->largest_align);
return;
switch (lttng_chan->priv->header_type) {
case 1: /* compact */
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t,
event_id);
bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx_private->tsc);
+ LTTNG_COMPACT_TIMESTAMP_BITS,
+ ctx_private->timestamp);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint64_t timestamp = ctx_private->tsc;
+ uint64_t timestamp = ctx_private->timestamp;
bt_bitfield_write(&id, uint8_t,
0,
break;
case 2: /* large */
{
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx_private->tsc;
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx_private->timestamp;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_write(config, ctx, ×tamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
- uint64_t timestamp = ctx_private->tsc;
+ uint64_t timestamp = ctx_private->timestamp;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
WARN_ON_ONCE(1);
}
ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
lttng_ust_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
return offsetof(struct packet_header, ctx.header_end);
}
-static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
header->stream_id = lttng_chan->priv->id;
header->stream_instance_id = buf->backend.cpu;
- header->ctx.timestamp_begin = tsc;
+ header->ctx.timestamp_begin = timestamp;
header->ctx.timestamp_end = 0;
header->ctx.content_size = ~0ULL; /* for debugging */
header->ctx.packet_size = ~0ULL;
* offset is assumed to never be 0 here : never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
-static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx)
assert(header);
if (!header)
return;
- header->ctx.timestamp_end = tsc;
+ header->ctx.timestamp_end = timestamp;
header->ctx.content_size =
(uint64_t) data_size * CHAR_BIT; /* in bits */
header->ctx.packet_size =
.cb.content_size_field = client_content_size_field,
.cb.packet_size_field = client_packet_size_field,
- .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+ .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
struct lttng_ust_ring_buffer_ctx_private *private_ctx;
uint32_t event_id;
- event_id = event_recorder->priv->id;
+ event_id = (uint32_t) event_recorder->priv->parent.id;
client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
- client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
/* Compute internal size of context structures. */
ctx_get_struct_size(ctx, client_ctx.chan_ctx, &client_ctx.packet_context_len);
- ctx_get_struct_size(ctx, client_ctx.event_ctx, &client_ctx.event_context_len);
nesting = lib_ring_buffer_nesting_inc(&client_config);
if (nesting < 0)
unsigned int buf_size_order; /* Order of buffer size */
unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
unsigned long num_subbuf; /* Number of sub-buffers for writer */
- uint64_t start_tsc; /* Channel creation TSC value */
+ uint64_t start_timestamp; /* Channel creation timestamp value */
DECLARE_SHMP(void *, priv_data);/* Client-specific information */
struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */
char name[NAME_MAX]; /* Channel name */
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx_private->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return 1;
/*
*/
//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
* @ctx: ring buffer context. (input and output) Must be already initialized.
*
* Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is
+ * "timestamp".
*
* Return :
* 0 on success.
goto slow_path;
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * record headers, never the opposite (missing a full TSC record header
- * when it would be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp record headers, never the opposite (missing a full
+ * timestamp record header when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
/*
* We need to ensure that if the cmpxchg succeeds and discards the
- * record, the next record will record a full TSC, because it cannot
- * rely on the last_tsc associated with the discarded record to detect
- * overflows. The only way to ensure this is to set the last_tsc to 0
- * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
+ * record, the next record will record a full timestamp, because it cannot
+ * rely on the last_timestamp associated with the discarded record to detect
+ * overflows. The only way to ensure this is to set the last_timestamp to 0
+ * (assuming no 64-bit timestamp overflow), which forces to write a 64-bit
* timestamp in the next record.
*
- * Note: if discard fails, we must leave the TSC in the record header.
- * It is needed to keep track of TSC overflows for the following
+ * Note: if discard fails, we must leave the timestamp in the record header.
+ * It is needed to keep track of timestamp overflows for the following
* records.
*/
- save_last_tsc(config, buf, 0ULL);
+ save_last_timestamp(config, buf, 0ULL);
if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
!= end_offset))
}
/*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
+ * Last timestamp comparison functions. Check if the current timestamp overflows
+ * timestamp_bits bits from the last timestamp read. When overflows are
+ * detected, the full 64-bit timestamp counter should be written in the record
+ * header. Reads and writes last_timestamp atomically.
*/
#if (CAA_BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
/*
* Ensure the compiler performs this update in a single instruction.
*/
- v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+ v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits));
}
static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- unsigned long tsc_shifted;
+ unsigned long timestamp_shifted;
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
- if (caa_unlikely(tsc_shifted
- - (unsigned long)v_read(config, &buf->last_tsc)))
+ timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits);
+ if (caa_unlikely(timestamp_shifted
+ - (unsigned long)v_read(config, &buf->last_timestamp)))
return 1;
else
return 0;
}
#else
static inline
-void save_last_tsc(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return;
- v_set(config, &buf->last_tsc, (unsigned long)tsc);
+ v_set(config, &buf->last_timestamp, (unsigned long)timestamp);
}
static inline
-int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config,
- struct lttng_ust_ring_buffer *buf, uint64_t tsc)
+int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf, uint64_t timestamp)
{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
return 0;
- if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
- >> config->tsc_bits))
+ if (caa_unlikely((timestamp - v_read(config, &buf->last_timestamp))
+ >> config->timestamp_bits))
return 1;
else
return 0;
}
/*
- * Receive end of subbuffer TSC as parameter. It has been read in the
+ * Receive end of subbuffer timestamp as parameter. It has been read in the
* space reservation loop of either reserve or switch, which ensures it
* progresses monotonically with event records in the buffer. Therefore,
* it ensures that the end timestamp of a subbuffer is <= begin
int record_disabled;
/* End of cache-hot 32 bytes cacheline */
- union v_atomic last_tsc; /*
+ union v_atomic last_timestamp; /*
* Last timestamp written in the buffer.
*/
* prior to record header alignment
* padding.
*/
- uint64_t tsc; /* time-stamp counter value */
+ uint64_t timestamp; /* time-stamp counter value */
unsigned int rflags; /* reservation flags */
struct lttng_ust_ring_buffer *buf; /*
* buffer corresponding to processor id
#include "common/smp.h"
#include "shm.h"
#include "common/align.h"
+#include "common/populate.h"
/**
* lib_ring_buffer_backend_allocate - allocate a channel buffer
* num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
* priv, notifiers, config, cpumask and name.
*/
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
}
/**
struct shm_object *shmobj;
shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM, stream_fds[i], i);
+ SHM_OBJECT_SHM, stream_fds[i], i,
+ lttng_ust_map_populate_cpu_is_enabled(i));
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
struct lttng_ust_ring_buffer *buf;
shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM, stream_fds[0], -1);
+ SHM_OBJECT_SHM, stream_fds[0], -1,
+ lttng_ust_map_populate_is_enabled());
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer));
if (ret)
goto free_bufs;
}
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+ chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan);
return 0;
#include "shm.h"
#include "rb-init.h"
#include "common/compat/errno.h" /* For ENODATA */
+#include "common/populate.h"
/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST (1UL << 20)
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
- v_set(config, &buf->last_tsc, 0);
+ v_set(config, &buf->last_timestamp, 0);
lib_ring_buffer_backend_reset(&buf->backend, handle);
/* Don't reset number of active readers */
v_set(config, &buf->records_lost_full, 0);
struct commit_counters_hot *cc_hot;
void *priv = channel_get_private_config(chan);
size_t subbuf_header_size;
- uint64_t tsc;
+ uint64_t timestamp;
int ret;
/* Test for cpu hotplug */
ret = -EPERM;
goto free_chanbuf;
}
- tsc = config->cb.ring_buffer_clock_read(shmp_chan);
- config->cb.buffer_begin(buf, tsc, 0, handle);
+ timestamp = config->cb.ring_buffer_clock_read(shmp_chan);
+ config->cb.buffer_begin(buf, timestamp, 0, handle);
cc_hot = shmp_index(handle, buf->commit_hot, 0);
if (!cc_hot) {
ret = -EPERM;
struct shm_object *shmobj;
unsigned int nr_streams;
int64_t blocking_timeout_ms;
+ bool populate = lttng_ust_map_populate_is_enabled();
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
nr_streams = get_possible_cpus_array_len();
read_timer_interval))
return NULL;
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
if (!handle)
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
if (!handle->table)
goto error_table_alloc;
/* Allocate normal memory for channel (not shared) */
shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
- -1, -1);
+ -1, -1, populate);
if (!shmobj)
goto error_append;
/* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
{
struct lttng_ust_shm_handle *handle;
struct shm_object *object;
+ bool populate = lttng_ust_map_populate_is_enabled();
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
if (!handle)
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
if (!handle->table)
goto error_table_alloc;
/* Add channel object */
/* Add stream object */
object = shm_object_table_append_shm(handle->table,
shm_fd, wakeup_fd, stream_nr,
- memory_map_size);
+ memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr));
if (!object)
return -EINVAL;
return 0;
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv->tsc;
+ *ts_end = ctx->priv->timestamp;
/*
* Order all writes to buffer and store to ts_end before the commit
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = ctx->priv->tsc;
+ *ts_end = ctx->priv->timestamp;
}
/*
offsets->switch_old_start = 0;
off = subbuf_offset(offsets->begin, chan);
- ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan);
+ ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan);
/*
* Ensure we flush the header of an empty subbuffer when doing the
!= offsets.old);
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, ctx.priv->tsc);
+ save_last_timestamp(config, buf, ctx.priv->timestamp);
/*
* Push the reader if necessary
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
!= offsets.old));
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle);
- void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t tsc,
+ void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle,
const struct lttng_ust_ring_buffer_ctx *ctx);
enum lttng_ust_ring_buffer_ipi_types ipi;
enum lttng_ust_ring_buffer_wakeup_types wakeup;
/*
- * tsc_bits: timestamp bits saved at each record.
+ * timestamp_bits: timestamp bits saved at each record.
* 0 and 64 disable the timestamp compression scheme.
*/
- unsigned int tsc_bits;
+ unsigned int timestamp_bits;
struct lttng_ust_ring_buffer_client_cb cb;
/*
* client_type is used by the consumer process (which is in a
/*
* Reservation flags.
*
- * RING_BUFFER_RFLAG_FULL_TSC
+ * RING_BUFFER_RFLAG_FULL_TIMESTAMP
*
* This flag is passed to record_header_size() and to the primitive used to
* write the record header. It indicates that the full 64-bit time value is
* needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bit of time value.
+ * only to contain "timestamp_bits" bit of time value.
*
* Reservation flags can be added by the client, starting from
* "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
* record_header_size() to lib_ring_buffer_write_record_header().
*/
-#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
+#define RING_BUFFER_RFLAG_FULL_TIMESTAMP (1U << 0)
#define RING_BUFFER_RFLAG_END (1U << 1)
/*
return ret;
}
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate)
{
struct shm_object_table *table;
- table = zmalloc(sizeof(struct shm_object_table) +
- max_nb_obj * sizeof(table->objects[0]));
+ table = zmalloc_populate(sizeof(struct shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]), populate);
if (!table)
return NULL;
table->size = max_nb_obj;
static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
size_t memory_map_size,
- int stream_fd)
+ int stream_fd,
+ bool populate)
{
int shmfd, waitfd[2], ret, i;
+ int flags = MAP_SHARED;
struct shm_object *obj;
char *memory_map;
obj->shm_fd_ownership = 0;
obj->shm_fd = shmfd;
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ flags, shmfd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
static
struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
{
struct shm_object *obj;
void *memory_map;
return NULL;
obj = &table->objects[table->allocated_len];
- memory_map = zmalloc(memory_map_size);
+ memory_map = zmalloc_populate(memory_map_size, populate);
if (!memory_map)
goto alloc_error;
size_t memory_map_size,
enum shm_object_type type,
int stream_fd,
- int cpu)
+ int cpu,
+ bool populate)
#else
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
size_t memory_map_size,
enum shm_object_type type,
int stream_fd,
- int cpu __attribute__((unused)))
+ int cpu __attribute__((unused)),
+ bool populate)
#endif
{
struct shm_object *shm_object;
switch (type) {
case SHM_OBJECT_SHM:
shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
- stream_fd);
+ stream_fd, populate);
break;
case SHM_OBJECT_MEM:
- shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
+ shm_object = _shm_object_table_alloc_mem(table, memory_map_size,
+ populate);
break;
default:
assert(0);
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
int shm_fd, int wakeup_fd, uint32_t stream_nr,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
{
+ int flags = MAP_SHARED;
struct shm_object *obj;
char *memory_map;
int ret;
goto error_fcntl;
}
+ if (populate)
+ flags |= LTTNG_MAP_POPULATE;
/* memory_map: mmap */
memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ flags, shm_fd, 0);
if (memory_map == MAP_FAILED) {
PERROR("mmap");
goto error_mmap;
#define set_shmp(ref, src) _set_shmp(&(ref)._ref, src)
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate)
__attribute__((visibility("hidden")));
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
size_t memory_map_size,
enum shm_object_type type,
const int stream_fd,
- int cpu)
+ int cpu, bool populate)
__attribute__((visibility("hidden")));
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
int shm_fd, int wakeup_fd, uint32_t stream_nr,
- size_t memory_map_size)
+ size_t memory_map_size, bool populate)
__attribute__((visibility("hidden")));
/* mem ownership is passed to shm_object_table_append_mem(). */
total_bytes_read += bytes_read;
assert(total_bytes_read <= max_bytes);
- } while (max_bytes > total_bytes_read && bytes_read > 0);
+ } while (max_bytes > total_bytes_read && bytes_read != 0);
/*
* Make sure the mask read is a null terminated string.
struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
__attribute__((visibility("hidden")));
+struct lttng_ust_channel_counter *lttng_ust_alloc_channel_counter(void)
+ __attribute__((visibility("hidden")));
+
void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
__attribute__((visibility("hidden")));
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <lttng/ust-ctl.h>
#include "common/ustcomm.h"
return ret;
}
-ssize_t ustcomm_recv_counter_from_sessiond(int sock,
- void **_counter_data, uint64_t var_len)
+ssize_t ustcomm_recv_var_len_cmd_from_sessiond(int sock,
+ void **_data, uint32_t var_len)
{
- void *counter_data;
+ void *data;
ssize_t len;
- if (var_len > LTTNG_UST_ABI_COUNTER_DATA_MAX_LEN) {
+ if (var_len > LTTNG_UST_ABI_CMD_MAX_LEN) {
len = -EINVAL;
goto error_check;
}
/* Receive variable length data */
- counter_data = zmalloc(var_len);
- if (!counter_data) {
+ data = zmalloc(var_len);
+ if (!data) {
len = -ENOMEM;
goto error_alloc;
}
- len = ustcomm_recv_unix_sock(sock, counter_data, var_len);
+ len = ustcomm_recv_unix_sock(sock, data, var_len);
if (len != var_len) {
goto error_recv;
}
- *_counter_data = counter_data;
+ *_data = data;
return len;
error_recv:
- free(counter_data);
+ free(data);
error_alloc:
error_check:
return len;
size_t nr_fields, /* fields */
const struct lttng_ust_event_field * const *lttng_fields,
const char *model_emf_uri,
+ uint64_t user_token,
uint32_t *id) /* event id (output) */
{
ssize_t len;
strncpy(msg.m.event_name, event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
msg.m.event_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
msg.m.loglevel = loglevel;
+ msg.m.user_token = user_token;
signature_len = strlen(signature) + 1;
msg.m.signature_len = signature_len;
return -EINVAL;
if (reply.r.ret_code < 0)
return reply.r.ret_code;
- *id = reply.r.event_id;
- DBG("Sent register event notification for name \"%s\": ret_code %d, event_id %u\n",
- event_name, reply.r.ret_code, reply.r.event_id);
+ *id = reply.r.id;
+ DBG("Sent register event notification for name \"%s\": ret_code %d, id %" PRIu32 "\n",
+ event_name, reply.r.ret_code, reply.r.id);
return 0;
default:
if (len < 0) {
return ret;
}
+/*
+ * Register a counter key with the session daemon.
+ *
+ * Wire protocol: send header+msg, then the dimension indexes array
+ * (@dimension uint64_t items, optional), then the NUL-terminated key
+ * string, then receive a single reply.
+ *
+ * Returns 0 on success, negative error value on error.
+ * Returns -EPIPE or -ECONNRESET if other end has hung up.
+ */
+int ustcomm_register_key(int sock,
+		int session_objd,		/* session descriptor */
+		int map_objd,			/* map descriptor */
+		uint32_t dimension,
+		const uint64_t *dimension_indexes,
+		const char *key_string,		/* key string (input) */
+		uint64_t user_token,
+		uint64_t *index)		/* (output) */
+{
+	ssize_t len;
+	struct {
+		struct ustcomm_notify_hdr header;
+		struct ustcomm_notify_key_msg m;
+	} msg;
+	struct {
+		struct ustcomm_notify_hdr header;
+		struct ustcomm_notify_key_reply r;
+	} reply;
+	size_t dimension_indexes_len;
+	int ret;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.header.notify_cmd = LTTNG_UST_CTL_NOTIFY_CMD_KEY;
+	msg.m.session_objd = session_objd;
+	msg.m.map_objd = map_objd;
+	msg.m.dimension = dimension;
+	dimension_indexes_len = sizeof(uint64_t) * dimension;
+	msg.m.key_string_len = strlen(key_string) + 1;	/* Includes terminating NUL. */
+	msg.m.user_token = user_token;
+
+	len = ustcomm_send_unix_sock(sock, &msg, sizeof(msg));
+	if (len > 0 && len != sizeof(msg)) {
+		ret = -EIO;
+		goto error_send;
+	}
+	if (len < 0) {
+		ret = len;
+		goto error_send;
+	}
+
+	/* send dimension_indexes */
+	if (dimension_indexes) {
+		len = ustcomm_send_unix_sock(sock, dimension_indexes, dimension_indexes_len);
+		if (len > 0 && len != dimension_indexes_len) {
+			ret = -EIO;
+			goto error_dimension_indexes;
+		}
+		if (len < 0) {
+			ret = len;
+			goto error_dimension_indexes;
+		}
+	}
+
+	/* send key_string */
+	len = ustcomm_send_unix_sock(sock, key_string, msg.m.key_string_len);
+	/* Validate against key_string_len, not dimension_indexes_len. */
+	if (len > 0 && len != msg.m.key_string_len) {
+		ret = -EIO;
+		goto error_key_string;
+	}
+	if (len < 0) {
+		ret = len;
+		goto error_key_string;
+	}
+
+	/* receive reply */
+	len = ustcomm_recv_unix_sock(sock, &reply, sizeof(reply));
+	switch (len) {
+	case 0:	/* orderly shutdown */
+		return -EPIPE;
+	case sizeof(reply):
+		if (reply.header.notify_cmd != msg.header.notify_cmd) {
+			ERR("Unexpected result message command "
+				"expected: %u vs received: %u\n",
+				msg.header.notify_cmd, reply.header.notify_cmd);
+			return -EINVAL;
+		}
+		if (reply.r.ret_code > 0)
+			return -EINVAL;
+		if (reply.r.ret_code < 0)
+			return reply.r.ret_code;
+		*index = reply.r.index;
+		DBG("Sent register key notification for key \"%s\": ret_code %d, index %" PRIu64 "\n",
+			key_string, reply.r.ret_code, reply.r.index);
+		return 0;
+	default:
+		if (len < 0) {
+			/* Transport level error */
+			if (errno == EPIPE || errno == ECONNRESET)
+				len = -errno;
+			return len;
+		} else {
+			ERR("incorrect message size: %zd\n", len);
+			return len;
+		}
+	}
+	/* Unreached. */
+
+	/* Error path only. */
+error_key_string:
+error_dimension_indexes:
+error_send:
+	return ret;
+}
+
/*
* Returns 0 on success, negative error value on error.
* Returns -EPIPE or -ECONNRESET if other end has hung up.
#include <limits.h>
#include <unistd.h>
#include <lttng/ust-abi.h>
+#include <lttng/ust-abi-old.h>
#include <lttng/ust-error.h>
#include <lttng/ust-compiler.h>
#include <lttng/ust-ctl.h>
uint32_t reloc_offset;
uint64_t seqnum;
} __attribute__((packed)) capture;
- struct lttng_ust_abi_counter counter;
- struct lttng_ust_abi_counter_global counter_global;
- struct lttng_ust_abi_counter_cpu counter_cpu;
- /*
- * For lttng_ust_abi_EVENT_NOTIFIER_CREATE, a struct
- * lttng_ust_abi_event_notifier implicitly follows struct
- * ustcomm_ust_msg.
- */
+ struct lttng_ust_abi_old_counter counter_old;
+ struct lttng_ust_abi_old_counter_global counter_global_old;
+ struct lttng_ust_abi_old_counter_cpu counter_cpu_old;
struct {
- /* Length of struct lttng_ust_abi_event_notifier */
- uint32_t len;
- } event_notifier;
+ uint32_t cmd_len;
+ } __attribute__((packed)) var_len_cmd;
char padding[USTCOMM_MSG_PADDING2];
} u;
} __attribute__((packed));
uint32_t notify_cmd;
} __attribute__((packed));
-#define USTCOMM_NOTIFY_EVENT_MSG_PADDING 32
+#define USTCOMM_NOTIFY_EVENT_MSG_PADDING 24
struct ustcomm_notify_event_msg {
uint32_t session_objd;
uint32_t channel_objd;
uint32_t signature_len;
uint32_t fields_len;
uint32_t model_emf_uri_len;
+ uint64_t user_token;
char padding[USTCOMM_NOTIFY_EVENT_MSG_PADDING];
/* followed by signature, fields, and model_emf_uri */
} __attribute__((packed));
#define USTCOMM_NOTIFY_EVENT_REPLY_PADDING 32
struct ustcomm_notify_event_reply {
int32_t ret_code; /* 0: ok, negative: error code */
- uint32_t event_id;
+ uint32_t id; /* 32-bit event id. */
char padding[USTCOMM_NOTIFY_EVENT_REPLY_PADDING];
} __attribute__((packed));
+/* App -> session daemon notification to register a counter key. */
+#define USTCOMM_NOTIFY_KEY_MSG_PADDING 24
+struct ustcomm_notify_key_msg {
+	uint32_t session_objd;
+	uint32_t map_objd;
+	uint32_t dimension;		/* number of uint64_t indexes that follow */
+	uint32_t key_string_len;	/* includes terminating NUL */
+	uint64_t user_token;
+	char padding[USTCOMM_NOTIFY_KEY_MSG_PADDING];	/* reserved for extension */
+	/* followed by dimension_indexes (array of @dimension uint64_t items) and key_string. */
+} __attribute__((packed));
+
+/* Session daemon -> app reply carrying the allocated key index. */
+#define USTCOMM_NOTIFY_KEY_REPLY_PADDING 32
+struct ustcomm_notify_key_reply {
+	int32_t ret_code;	/* 0: ok, negative: error code */
+	uint64_t index;		/* 64-bit key index. */
+	char padding[USTCOMM_NOTIFY_KEY_REPLY_PADDING];	/* reserved for extension */
+} __attribute__((packed));
+
#define USTCOMM_NOTIFY_ENUM_MSG_PADDING 32
struct ustcomm_notify_enum_msg {
uint32_t session_objd;
/* followed by enum entries */
} __attribute__((packed));
-#define USTCOMM_NOTIFY_EVENT_REPLY_PADDING 32
+#define USTCOMM_NOTIFY_ENUM_REPLY_PADDING 32
struct ustcomm_notify_enum_reply {
int32_t ret_code; /* 0: ok, negative: error code */
uint64_t enum_id;
- char padding[USTCOMM_NOTIFY_EVENT_REPLY_PADDING];
+ char padding[USTCOMM_NOTIFY_ENUM_REPLY_PADDING];
} __attribute__((packed));
#define USTCOMM_NOTIFY_CHANNEL_MSG_PADDING 32
int *event_notifier_notif_fd)
__attribute__((visibility("hidden")));
-ssize_t ustcomm_recv_counter_from_sessiond(int sock,
- void **counter_data, uint64_t len)
+ssize_t ustcomm_recv_var_len_cmd_from_sessiond(int sock,
+ void **data, uint32_t len)
__attribute__((visibility("hidden")));
int ustcomm_recv_counter_shm_from_sessiond(int sock,
size_t nr_fields, /* fields */
const struct lttng_ust_event_field * const *fields,
const char *model_emf_uri,
- uint32_t *id) /* event id (output) */
+ uint64_t user_token,
+ uint32_t *id) /* (output) */
+ __attribute__((visibility("hidden")));
+
+/*
+ * Returns 0 on success, negative error value on error.
+ * Returns -EPIPE or -ECONNRESET if other end has hung up.
+ */
+int ustcomm_register_key(int sock,
+ int session_objd, /* session descriptor */
+ int map_objd, /* map descriptor */
+ uint32_t dimension,
+ const uint64_t *dimension_indexes,
+ const char *key_string, /* key string (input) */
+ uint64_t user_token,
+ uint64_t *index) /* (output) */
__attribute__((visibility("hidden")));
/*
* Counter representation within daemon.
*/
struct lttng_ust_ctl_daemon_counter {
- struct lib_counter *counter;
- const struct lttng_counter_ops *ops;
+ struct lttng_ust_channel_counter *counter;
+ const struct lttng_ust_channel_counter_ops *ops;
struct lttng_ust_ctl_counter_attr *attr; /* initial attributes */
};
case LTTNG_UST_ABI_OBJECT_TYPE_CONTEXT:
case LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER_GROUP:
case LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER:
+ case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_EVENT:
break;
case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER:
free(data->u.counter.data);
struct lttng_ust_abi_object_data *event_notifier_group,
struct lttng_ust_abi_object_data **_event_notifier_data)
{
- struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_msg lum = {};
struct ustcomm_ust_reply lur;
struct lttng_ust_abi_object_data *event_notifier_data;
ssize_t len;
event_notifier_data->type = LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER;
- memset(&lum, 0, sizeof(lum));
lum.handle = event_notifier_group->handle;
lum.cmd = LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE;
- lum.u.event_notifier.len = sizeof(*event_notifier);
+ lum.u.var_len_cmd.cmd_len = sizeof(*event_notifier);
ret = ustcomm_send_app_cmd(sock, &lum, &lur);
if (ret) {
case 2:
*notify_cmd = LTTNG_UST_CTL_NOTIFY_CMD_ENUM;
break;
+ case 3:
+ *notify_cmd = LTTNG_UST_CTL_NOTIFY_CMD_KEY;
+ break;
default:
return -EINVAL;
}
char **signature,
size_t *nr_fields,
struct lttng_ust_ctl_field **fields,
- char **model_emf_uri)
+ char **model_emf_uri,
+ uint64_t *user_token)
{
ssize_t len;
struct ustcomm_notify_event_msg msg;
*loglevel = msg.loglevel;
signature_len = msg.signature_len;
fields_len = msg.fields_len;
+ *user_token = msg.user_token;
if (fields_len % sizeof(*a_fields) != 0) {
return -EINVAL;
memset(&reply, 0, sizeof(reply));
reply.header.notify_cmd = LTTNG_UST_CTL_NOTIFY_CMD_EVENT;
reply.r.ret_code = ret_code;
- reply.r.event_id = id;
+ reply.r.id = id;
+ len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
+ if (len > 0 && len != sizeof(reply))
+ return -EIO;
+ if (len < 0)
+ return len;
+ return 0;
+}
+
+/*
+ * Receive a key registration request from an application.
+ *
+ * Reads the fixed-size message, then the variable-length dimension
+ * indexes array and key string. On success, ownership of the two
+ * allocated output buffers is transferred to the caller.
+ *
+ * Returns 0 on success, negative UST or system error value on error.
+ */
+int lttng_ust_ctl_recv_register_key(int sock,
+	int *session_objd,		/* session descriptor (output) */
+	int *map_objd,			/* map descriptor (output) */
+	uint32_t *dimension,		/*
+					 * Against which dimension is
+					 * this key expressed. (output)
+					 */
+	uint64_t **dimension_indexes,	/*
+					 * Indexes (output,
+					 * dynamically
+					 * allocated, must be
+					 * free(3)'d by the
+					 * caller if function
+					 * returns success.)
+					 * Contains @dimension
+					 * elements.
+					 */
+	char **key_string,		/*
+					 * key string (output,
+					 * dynamically allocated, must
+					 * be free(3)'d by the caller if
+					 * function returns success.)
+					 */
+	uint64_t *user_token)
+{
+	ssize_t len;
+	struct ustcomm_notify_key_msg msg;
+	size_t dimension_indexes_len, key_string_len;
+	uint64_t *a_dimension_indexes = NULL;
+	char *a_key_string = NULL;
+
+	len = ustcomm_recv_unix_sock(sock, &msg, sizeof(msg));
+	if (len > 0 && len != sizeof(msg))
+		return -EIO;
+	if (len == 0)
+		return -EPIPE;
+	if (len < 0)
+		return len;
+
+	*session_objd = msg.session_objd;
+	*map_objd = msg.map_objd;
+	*dimension = msg.dimension;
+	/*
+	 * msg.dimension is app-provided and unvalidated: reject values
+	 * that would overflow the size_t multiplication below (possible
+	 * on 32-bit), which would desynchronize the protocol stream.
+	 */
+	if (msg.dimension > SIZE_MAX / sizeof(uint64_t))
+		return -EINVAL;
+	dimension_indexes_len = msg.dimension * sizeof(uint64_t);
+	key_string_len = msg.key_string_len;
+	*user_token = msg.user_token;
+
+	if (dimension_indexes_len) {
+		/* recv dimension_indexes */
+		a_dimension_indexes = zmalloc(dimension_indexes_len);
+		if (!a_dimension_indexes) {
+			len = -ENOMEM;
+			goto error;
+		}
+		len = ustcomm_recv_unix_sock(sock, a_dimension_indexes, dimension_indexes_len);
+		if (len > 0 && len != dimension_indexes_len) {
+			len = -EIO;
+			goto error;
+		}
+		if (len == 0) {
+			len = -EPIPE;
+			goto error;
+		}
+		if (len < 0) {
+			goto error;
+		}
+	}
+
+	if (key_string_len) {
+		/* recv key_string */
+		a_key_string = zmalloc(key_string_len);
+		if (!a_key_string) {
+			len = -ENOMEM;
+			goto error;
+		}
+		len = ustcomm_recv_unix_sock(sock, a_key_string, key_string_len);
+		if (len > 0 && len != key_string_len) {
+			len = -EIO;
+			goto error;
+		}
+		if (len == 0) {
+			len = -EPIPE;
+			goto error;
+		}
+		if (len < 0) {
+			goto error;
+		}
+		/* Enforce end of string */
+		a_key_string[key_string_len - 1] = '\0';
+	}
+
+	*dimension_indexes = a_dimension_indexes;
+	*key_string = a_key_string;
+	return 0;
+
+error:
+	free(a_key_string);
+	free(a_dimension_indexes);
+	return len;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int lttng_ust_ctl_reply_register_key(int sock,
+ uint64_t index, /* Index within dimension (input) */
+ int ret_code) /* return code. 0 ok, negative error */
+{
+ ssize_t len;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_key_reply r;
+ } reply;
+
+ memset(&reply, 0, sizeof(reply));
+ reply.header.notify_cmd = LTTNG_UST_CTL_NOTIFY_CMD_KEY;
+ reply.r.ret_code = ret_code;
+ reply.r.index = index;
len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
if (len > 0 && len != sizeof(reply))
return -EIO;
ust_dim[i].overflow_index = dimensions[i].overflow_index;
ust_dim[i].has_underflow = dimensions[i].has_underflow;
ust_dim[i].has_overflow = dimensions[i].has_overflow;
+ switch (dimensions[i].key_type) {
+ case LTTNG_UST_CTL_KEY_TYPE_TOKENS:
+ ust_dim[i].key_type = LTTNG_KEY_TYPE_TOKENS;
+ break;
+ case LTTNG_UST_CTL_KEY_TYPE_INTEGER: /* Fall-through */
+ default:
+ goto free_attr;
+ }
}
- counter->counter = transport->ops.counter_create(nr_dimensions,
+ counter->counter = transport->ops.priv->counter_create(nr_dimensions,
ust_dim, global_sum_step, global_counter_fd,
nr_counter_cpu_fds, counter_cpu_fds, true);
if (!counter->counter)
int lttng_ust_ctl_create_counter_data(struct lttng_ust_ctl_daemon_counter *counter,
struct lttng_ust_abi_object_data **_counter_data)
{
+ struct lttng_ust_abi_counter_conf *counter_conf = NULL;
+ struct lttng_ust_abi_counter_dimension *dimension;
+ uint32_t conf_len = sizeof(struct lttng_ust_abi_counter_conf) +
+ sizeof(struct lttng_ust_abi_counter_dimension);
struct lttng_ust_abi_object_data *counter_data;
- struct lttng_ust_abi_counter_conf counter_conf = {0};
- size_t i;
int ret;
+ if (counter->attr->nr_dimensions != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ counter_conf = zmalloc(conf_len);
+ if (!counter_conf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ counter_conf->len = sizeof(struct lttng_ust_abi_counter_conf);
+ counter_conf->flags |= counter->attr->coalesce_hits ? LTTNG_UST_ABI_COUNTER_CONF_FLAG_COALESCE_HITS : 0;
switch (counter->attr->arithmetic) {
case LTTNG_UST_CTL_COUNTER_ARITHMETIC_MODULAR:
- counter_conf.arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR;
+ counter_conf->arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR;
break;
case LTTNG_UST_CTL_COUNTER_ARITHMETIC_SATURATION:
- counter_conf.arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_SATURATION;
+ counter_conf->arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_SATURATION;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
switch (counter->attr->bitness) {
case LTTNG_UST_CTL_COUNTER_BITNESS_32:
- counter_conf.bitness = LTTNG_UST_ABI_COUNTER_BITNESS_32;
+ counter_conf->bitness = LTTNG_UST_ABI_COUNTER_BITNESS_32;
break;
case LTTNG_UST_CTL_COUNTER_BITNESS_64:
- counter_conf.bitness = LTTNG_UST_ABI_COUNTER_BITNESS_64;
+ counter_conf->bitness = LTTNG_UST_ABI_COUNTER_BITNESS_64;
break;
default:
return -EINVAL;
}
- counter_conf.number_dimensions = counter->attr->nr_dimensions;
- counter_conf.global_sum_step = counter->attr->global_sum_step;
- counter_conf.coalesce_hits = counter->attr->coalesce_hits;
- for (i = 0; i < counter->attr->nr_dimensions; i++) {
- counter_conf.dimensions[i].size = counter->attr->dimensions[i].size;
- counter_conf.dimensions[i].underflow_index = counter->attr->dimensions[i].underflow_index;
- counter_conf.dimensions[i].overflow_index = counter->attr->dimensions[i].overflow_index;
- counter_conf.dimensions[i].has_underflow = counter->attr->dimensions[i].has_underflow;
- counter_conf.dimensions[i].has_overflow = counter->attr->dimensions[i].has_overflow;
+ counter_conf->global_sum_step = counter->attr->global_sum_step;
+
+ counter_conf->number_dimensions = 1;
+ counter_conf->elem_len = sizeof(struct lttng_ust_abi_counter_dimension);
+
+ dimension = (struct lttng_ust_abi_counter_dimension *)((char *)counter_conf + sizeof(struct lttng_ust_abi_counter_conf));
+ dimension->flags |= counter->attr->dimensions[0].has_underflow ? LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_UNDERFLOW : 0;
+ dimension->flags |= counter->attr->dimensions[0].has_overflow ? LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_OVERFLOW : 0;
+ dimension->size = counter->attr->dimensions[0].size;
+ dimension->underflow_index = counter->attr->dimensions[0].underflow_index;
+ dimension->overflow_index = counter->attr->dimensions[0].overflow_index;
+ switch (counter->attr->dimensions[0].key_type) {
+ case LTTNG_UST_CTL_KEY_TYPE_TOKENS:
+ dimension->key_type = LTTNG_UST_ABI_KEY_TYPE_TOKENS;
+ break;
+ case LTTNG_UST_CTL_KEY_TYPE_INTEGER: /* Fall-through */
+ default:
+ ret = -EINVAL;
+ goto error;
}
counter_data = zmalloc(sizeof(*counter_data));
if (!counter_data) {
ret = -ENOMEM;
- goto error_alloc;
+ goto error;
}
counter_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER;
counter_data->handle = -1;
-
- counter_data->size = sizeof(counter_conf);
- counter_data->u.counter.data = zmalloc(sizeof(counter_conf));
- if (!counter_data->u.counter.data) {
- ret = -ENOMEM;
- goto error_alloc_data;
- }
-
- memcpy(counter_data->u.counter.data, &counter_conf, sizeof(counter_conf));
+ counter_data->size = conf_len;
+ counter_data->u.counter.data = counter_conf;
*_counter_data = counter_data;
return 0;
-error_alloc_data:
- free(counter_data);
-error_alloc:
+error:
+ free(counter_conf);
return ret;
}
int ret, fd;
size_t len;
- if (lttng_counter_get_global_shm(counter->counter, &fd, &len))
+ if (lttng_counter_get_global_shm(counter->counter->priv->counter, &fd, &len))
return -EINVAL;
counter_global_data = zmalloc(sizeof(*counter_global_data));
if (!counter_global_data) {
int ret, fd;
size_t len;
- if (lttng_counter_get_cpu_shm(counter->counter, cpu, &fd, &len))
+ if (lttng_counter_get_cpu_shm(counter->counter->priv->counter, cpu, &fd, &len))
return -EINVAL;
counter_cpu_data = zmalloc(sizeof(*counter_cpu_data));
if (!counter_cpu_data) {
 void lttng_ust_ctl_destroy_counter(struct lttng_ust_ctl_daemon_counter *counter)
 {
-	counter->ops->counter_destroy(counter->counter);
+	/* Destroy the counter through its ops vtable, then free daemon-side state. */
+	counter->ops->priv->counter_destroy(counter->counter);
 	free(counter->attr);
 	free(counter);
 }
+/*
+ * Protocol for LTTNG_UST_ABI_OLD_COUNTER command:
+ *
+ * - send: struct ustcomm_ust_msg
+ * - receive: struct ustcomm_ust_reply
+ * - send: counter data
+ * - receive: struct ustcomm_ust_reply (actual command return code)
+ *
+ * Fallback path translating the new counter configuration layout into
+ * the old fixed-size wire format for applications linked against an
+ * older ABI. Only single-dimension token-keyed counters are supported.
+ */
+static
+int lttng_ust_ctl_send_old_counter_data_to_ust(int sock, int parent_handle,
+		struct lttng_ust_abi_object_data *counter_data)
+{
+	const struct lttng_ust_abi_counter_conf *counter_conf;
+	const struct lttng_ust_abi_counter_dimension *dimension;
+	struct lttng_ust_abi_old_counter_conf old_counter_conf = {};
+	struct ustcomm_ust_msg lum = {};
+	struct ustcomm_ust_reply lur;
+	int ret;
+	size_t size;
+	ssize_t len;
+
+	if (!counter_data)
+		return -EINVAL;
+	/* Only dereference counter_data after the NULL check above. */
+	counter_conf = counter_data->u.counter.data;
+
+	if (counter_conf->number_dimensions != 1)
+		return -EINVAL;
+	old_counter_conf.coalesce_hits = (counter_conf->flags & LTTNG_UST_ABI_COUNTER_CONF_FLAG_COALESCE_HITS) ? 1 : 0;
+	old_counter_conf.arithmetic = counter_conf->arithmetic;
+	old_counter_conf.bitness = counter_conf->bitness;
+	old_counter_conf.global_sum_step = counter_conf->global_sum_step;
+
+	/* The single dimension descriptor immediately follows the conf header. */
+	dimension = (struct lttng_ust_abi_counter_dimension *)((char *)counter_conf + sizeof(struct lttng_ust_abi_counter_conf));
+	old_counter_conf.number_dimensions = 1;
+	old_counter_conf.dimensions[0].size = dimension->size;
+	old_counter_conf.dimensions[0].has_underflow = (dimension->flags & LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_UNDERFLOW) ? 1 : 0;
+	old_counter_conf.dimensions[0].has_overflow = (dimension->flags & LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_OVERFLOW) ? 1 : 0;
+	old_counter_conf.dimensions[0].underflow_index = dimension->underflow_index;
+	old_counter_conf.dimensions[0].overflow_index = dimension->overflow_index;
+	if (dimension->key_type != LTTNG_UST_ABI_KEY_TYPE_TOKENS)
+		return -EINVAL;
+
+	size = sizeof(old_counter_conf);
+	lum.handle = parent_handle;
+	lum.cmd = LTTNG_UST_ABI_OLD_COUNTER;
+	lum.u.counter_old.len = size;
+	ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+	if (ret)
+		return ret;
+
+	/* Send counter data */
+	len = ustcomm_send_unix_sock(sock, &old_counter_conf, size);
+	if (len != size) {
+		if (len < 0)
+			return len;
+		else
+			return -EIO;
+	}
+
+	ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+	if (!ret) {
+		counter_data->handle = lur.ret_val;
+	}
+	return ret;
+}
+
+/*
+ * Protocol for LTTNG_UST_ABI_OLD_COUNTER_GLOBAL command:
+ *
+ * - send: struct ustcomm_ust_msg
+ * - receive: struct ustcomm_ust_reply
+ * - send: file descriptor
+ * - receive: struct ustcomm_ust_reply (actual command return code)
+ *
+ * Fallback for applications implementing the old counter ABI: ship the
+ * global counter shared memory fd under the old command number.
+ */
+static
+int lttng_ust_ctl_send_old_counter_global_data_to_ust(int sock,
+		struct lttng_ust_abi_object_data *counter_data,
+		struct lttng_ust_abi_object_data *counter_global_data)
+{
+	struct ustcomm_ust_msg msg = {};
+	struct ustcomm_ust_reply reply;
+	int fds[1];
+	int ret;
+	ssize_t len;
+
+	if (!counter_data || !counter_global_data)
+		return -EINVAL;
+
+	msg.handle = counter_data->handle;	/* parent handle */
+	msg.cmd = LTTNG_UST_ABI_OLD_COUNTER_GLOBAL;
+	msg.u.counter_global_old.len = counter_global_data->size;
+	ret = ustcomm_send_app_cmd(sock, &msg, &reply);
+	if (ret)
+		return ret;
+
+	/* Pass the shared memory file descriptor over the socket. */
+	fds[0] = counter_global_data->u.counter_global.shm_fd;
+	len = ustcomm_send_fds_unix_sock(sock, fds, 1);
+	if (len < 0)
+		return len;
+	if (len == 0)
+		return -EIO;
+
+	ret = ustcomm_recv_app_reply(sock, &reply, msg.handle, msg.cmd);
+	if (ret == 0)
+		counter_global_data->handle = reply.ret_val;
+	return ret;
+}
+
+/*
+ * Protocol for LTTNG_UST_ABI_OLD_COUNTER_CPU command:
+ *
+ * - send: struct ustcomm_ust_msg
+ * - receive: struct ustcomm_ust_reply
+ * - send: file descriptor
+ * - receive: struct ustcomm_ust_reply (actual command return code)
+ *
+ * Fallback for applications implementing the old counter ABI: ship the
+ * per-cpu counter shared memory fd under the old command number.
+ */
+static
+int lttng_ust_ctl_send_old_counter_cpu_data_to_ust(int sock,
+		struct lttng_ust_abi_object_data *counter_data,
+		struct lttng_ust_abi_object_data *counter_cpu_data)
+{
+	struct ustcomm_ust_msg lum = {};
+	struct ustcomm_ust_reply lur;
+	int ret, shm_fd[1];
+	size_t size;
+	ssize_t len;
+
+	if (!counter_data || !counter_cpu_data)
+		return -EINVAL;
+
+	size = counter_cpu_data->size;
+	lum.handle = counter_data->handle;	/* parent handle */
+	lum.cmd = LTTNG_UST_ABI_OLD_COUNTER_CPU;
+	lum.u.counter_cpu_old.len = size;
+	lum.u.counter_cpu_old.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr;
+	ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+	if (ret)
+		return ret;
+
+	/*
+	 * This is a counter_cpu object (cpu_nr is read above), so read the
+	 * shm fd through the matching counter_cpu union member rather than
+	 * relying on counter_global aliasing it at the same offset.
+	 */
+	shm_fd[0] = counter_cpu_data->u.counter_cpu.shm_fd;
+	len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
+	if (len <= 0) {
+		if (len < 0)
+			return len;
+		else
+			return -EIO;
+	}
+
+	ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+	if (!ret) {
+		counter_cpu_data->handle = lur.ret_val;
+	}
+	return ret;
+}
+
/*
* Protocol for LTTNG_UST_ABI_COUNTER command:
*
int lttng_ust_ctl_send_counter_data_to_ust(int sock, int parent_handle,
struct lttng_ust_abi_object_data *counter_data)
{
- struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_msg lum = {};
struct ustcomm_ust_reply lur;
int ret;
size_t size;
return -EINVAL;
size = counter_data->size;
- memset(&lum, 0, sizeof(lum));
lum.handle = parent_handle;
lum.cmd = LTTNG_UST_ABI_COUNTER;
- lum.u.counter.len = size;
+ lum.u.var_len_cmd.cmd_len = size;
ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
+ if (ret == -LTTNG_UST_ERR_INVAL) {
+ return lttng_ust_ctl_send_old_counter_data_to_ust(sock, parent_handle, counter_data);
+ }
+ if (ret) {
return ret;
+ }
- /* Send counter data */
+ /* Send var len cmd */
len = ustcomm_send_unix_sock(sock, counter_data->u.counter.data, size);
if (len != size) {
if (len < 0)
struct lttng_ust_abi_object_data *counter_data,
struct lttng_ust_abi_object_data *counter_global_data)
{
- struct ustcomm_ust_msg lum;
+ struct lttng_ust_abi_counter_global counter_global = {};
+ struct ustcomm_ust_msg lum = {};
struct ustcomm_ust_reply lur;
int ret, shm_fd[1];
size_t size;
return -EINVAL;
size = counter_global_data->size;
- memset(&lum, 0, sizeof(lum));
lum.handle = counter_data->handle; /* parent handle */
lum.cmd = LTTNG_UST_ABI_COUNTER_GLOBAL;
- lum.u.counter_global.len = size;
+ lum.u.var_len_cmd.cmd_len = sizeof(struct lttng_ust_abi_counter_global);
ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
+ if (ret == -LTTNG_UST_ERR_INVAL) {
+ return lttng_ust_ctl_send_old_counter_global_data_to_ust(sock, counter_data, counter_global_data);
+ }
+ if (ret) {
return ret;
+ }
+
+ counter_global.len = sizeof(struct lttng_ust_abi_counter_global);
+ counter_global.shm_len = size;
+
+ /* Send var len cmd */
+ len = ustcomm_send_unix_sock(sock, &counter_global, sizeof(struct lttng_ust_abi_counter_global));
+ if (len != sizeof(struct lttng_ust_abi_counter_global)) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
shm_fd[0] = counter_global_data->u.counter_global.shm_fd;
len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
struct lttng_ust_abi_object_data *counter_data,
struct lttng_ust_abi_object_data *counter_cpu_data)
{
- struct ustcomm_ust_msg lum;
+ struct lttng_ust_abi_counter_cpu counter_cpu = {};
+ struct ustcomm_ust_msg lum = {};
struct ustcomm_ust_reply lur;
int ret, shm_fd[1];
size_t size;
return -EINVAL;
size = counter_cpu_data->size;
- memset(&lum, 0, sizeof(lum));
lum.handle = counter_data->handle; /* parent handle */
lum.cmd = LTTNG_UST_ABI_COUNTER_CPU;
- lum.u.counter_cpu.len = size;
- lum.u.counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr;
+ lum.u.var_len_cmd.cmd_len = sizeof(struct lttng_ust_abi_counter_cpu);
ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
+ if (ret == -LTTNG_UST_ERR_INVAL) {
+ return lttng_ust_ctl_send_old_counter_cpu_data_to_ust(sock, counter_data, counter_cpu_data);
+ }
+ if (ret) {
return ret;
+ }
+
+ counter_cpu.len = sizeof(struct lttng_ust_abi_counter_cpu);
+ counter_cpu.shm_len = size;
+ counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr;
+
+ /* Send var len cmd */
+ len = ustcomm_send_unix_sock(sock, &counter_cpu, sizeof(struct lttng_ust_abi_counter_cpu));
+ if (len != sizeof(struct lttng_ust_abi_counter_cpu)) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
shm_fd[0] = counter_cpu_data->u.counter_global.shm_fd;
len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
int cpu, int64_t *value,
bool *overflow, bool *underflow)
{
- return counter->ops->counter_read(counter->counter, dimension_indexes, cpu,
+ return counter->ops->priv->counter_read(counter->counter, dimension_indexes, cpu,
value, overflow, underflow);
}
int64_t *value,
bool *overflow, bool *underflow)
{
- return counter->ops->counter_aggregate(counter->counter, dimension_indexes,
+ return counter->ops->priv->counter_aggregate(counter->counter, dimension_indexes,
value, overflow, underflow);
}
int lttng_ust_ctl_counter_clear(struct lttng_ust_ctl_daemon_counter *counter,
const size_t *dimension_indexes)
{
- return counter->ops->counter_clear(counter->counter, dimension_indexes);
+ return counter->ops->priv->counter_clear(counter->counter, dimension_indexes);
+}
+
+/*
+ * Protocol for LTTNG_UST_ABI_COUNTER_EVENT command:
+ *
+ * - send: struct ustcomm_ust_msg
+ * - receive: struct ustcomm_ust_reply
+ * - send: struct lttng_ust_abi_counter_event
+ * - receive: struct ustcomm_ust_reply (actual command return code)
+ *
+ * On success, *_counter_event_data is a newly allocated object holding
+ * the handle returned by the application; the caller owns it.
+ */
+int lttng_ust_ctl_counter_create_event(int sock,
+		struct lttng_ust_abi_counter_event *counter_event,
+		size_t counter_event_len,
+		struct lttng_ust_abi_object_data *counter_data,
+		struct lttng_ust_abi_object_data **_counter_event_data)
+{
+	struct ustcomm_ust_msg lum = {};
+	struct ustcomm_ust_reply lur;
+	struct lttng_ust_abi_object_data *counter_event_data;
+	ssize_t len;
+	int ret;
+
+	if (!counter_event || !counter_data || !_counter_event_data)
+		return -EINVAL;
+
+	counter_event_data = zmalloc(sizeof(*counter_event_data));
+	if (!counter_event_data)
+		return -ENOMEM;
+	counter_event_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_EVENT;
+	lum.handle = counter_data->handle;
+	lum.cmd = LTTNG_UST_ABI_COUNTER_EVENT;
+	lum.u.var_len_cmd.cmd_len = counter_event_len;
+	ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+	if (ret)
+		goto error;
+
+	/* Send var len cmd */
+	len = ustcomm_send_unix_sock(sock, counter_event, counter_event_len);
+	if (len != counter_event_len) {
+		if (len < 0)
+			ret = len;
+		else
+			ret = -EIO;
+		goto error;
+	}
+	ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+	if (ret)
+		goto error;
+	counter_event_data->handle = lur.ret_val;
+	DBG("received counter event handle %u", counter_event_data->handle);
+	*_counter_event_data = counter_event_data;
+	return 0;
+
+error:
+	/* Single cleanup path: release the not-yet-published object. */
+	free(counter_event_data);
+	return ret;
+}
int lttng_ust_ctl_get_version(uint32_t *major, uint32_t *minor,
__lttng_ust_fd_plibc_fclose);
}
+/* Old libc headers don't contain a close_range() declaration. */
+int close_range(unsigned int first, unsigned int last, int flags);
+
/*
* Override the libc close_range() symbol with our own, allowing
* applications to close arbitrary file descriptors. If the fd is owned
{
struct lttng_event_notifier_group *event_notifier_group =
event_notifier->priv->group;
- struct lttng_counter *error_counter;
+ struct lttng_ust_channel_counter *error_counter;
size_t dimension_index[1];
int ret;
return;
dimension_index[0] = event_notifier->priv->error_counter_index;
- ret = event_notifier_group->error_counter->ops->counter_add(
- error_counter->counter, dimension_index, 1);
+ ret = event_notifier_group->error_counter->ops->priv->counter_add(
+ error_counter, dimension_index, 1);
if (ret)
WARN_ON_ONCE(1);
}
#include "common/events.h"
/*
- * Allocate and initialize a `struct lttng_event_enabler` object.
+ * Allocate and initialize a `struct lttng_event_recorder_enabler` object.
*
- * On success, returns a `struct lttng_event_enabler`,
+ * On success, returns a `struct lttng_event_recorder_enabler`,
* On memory error, returns NULL.
*/
-struct lttng_event_enabler *lttng_event_enabler_create(
+struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event *event_param,
+ const struct lttng_ust_abi_event *event_param,
struct lttng_ust_channel_buffer *chan)
__attribute__((visibility("hidden")));
/*
- * Destroy a `struct lttng_event_enabler` object.
+ * Allocate and initialize a `struct lttng_event_counter_enabler` object.
+ *
+ * On success, returns a `struct lttng_event_counter_enabler`,
+ * On memory error, returns NULL.
+ */
+struct lttng_event_counter_enabler *lttng_event_counter_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ const struct lttng_ust_abi_counter_event *counter_event,
+ const struct lttng_counter_key *key,
+ struct lttng_ust_channel_counter *chan)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Destroy a `struct lttng_event_enabler_common` object.
*/
-void lttng_event_enabler_destroy(struct lttng_event_enabler *enabler)
+void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
__attribute__((visibility("hidden")));
/*
- * Enable a `struct lttng_event_enabler` object and all events related to this
+ * Enable a `struct lttng_event_enabler_common` object and all events related to this
* enabler.
*/
-int lttng_event_enabler_enable(struct lttng_event_enabler *enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler_common *enabler)
__attribute__((visibility("hidden")));
/*
- * Disable a `struct lttng_event_enabler` object and all events related to this
+ * Disable a `struct lttng_event_enabler_common` object and all events related to this
* enabler.
*/
-int lttng_event_enabler_disable(struct lttng_event_enabler *enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler_common *enabler)
__attribute__((visibility("hidden")));
/*
- * Attach filter bytecode program to `struct lttng_event_enabler` and all
+ * Attach filter bytecode program to `struct lttng_event_enabler_common` and all
* events related to this enabler.
*/
int lttng_event_enabler_attach_filter_bytecode(
- struct lttng_event_enabler *enabler,
+ struct lttng_event_enabler_common *enabler,
struct lttng_ust_bytecode_node **bytecode)
__attribute__((visibility("hidden")));
*
* Not implemented.
*/
-int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
+int lttng_event_enabler_attach_context(struct lttng_event_enabler_session_common *enabler,
struct lttng_ust_abi_context *ctx)
__attribute__((visibility("hidden")));
/*
- * Attach exclusion list to `struct lttng_event_enabler` and all
+ * Attach exclusion list to `struct lttng_event_enabler_common` and all
* events related to this enabler.
*/
-int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *enabler,
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler_common *enabler,
struct lttng_ust_excluder_node **excluder)
__attribute__((visibility("hidden")));
struct lttng_ust_abi_event_notifier *event_notifier_param)
__attribute__((visibility("hidden")));
-/*
- * Destroy a `struct lttng_event_notifier_enabler` object.
- */
-void lttng_event_notifier_enabler_destroy(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Enable a `struct lttng_event_notifier_enabler` object and all event
- * notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_enable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Disable a `struct lttng_event_notifier_enabler` object and all event
- * notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_disable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach filter bytecode program to `struct lttng_event_notifier_enabler` and
- * all event notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_filter_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
- __attribute__((visibility("hidden")));
-
/*
* Attach capture bytecode program to `struct lttng_event_notifier_enabler` and
* all event_notifiers related to this enabler.
struct lttng_ust_bytecode_node **bytecode)
__attribute__((visibility("hidden")));
-/*
- * Attach exclusion list to `struct lttng_event_notifier_enabler` and all
- * event notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_exclusion(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_excluder_node **excluder)
- __attribute__((visibility("hidden")));
-
void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
__attribute__((visibility("hidden")));
int lttng_fix_pending_event_notifiers(void)
__attribute__((visibility("hidden")));
-struct lttng_counter *lttng_ust_counter_create(
+struct lttng_ust_channel_counter *lttng_ust_counter_create(
const char *counter_transport_name,
- size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+ size_t number_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ bool coalesce_hits)
__attribute__((visibility("hidden")));
#ifdef HAVE_LINUX_PERF_EVENT_H
static
void lttng_session_sync_event_enablers(struct lttng_ust_session *session);
static
-void lttng_event_notifier_group_sync_enablers(
- struct lttng_event_notifier_group *event_notifier_group);
+void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
static
-void lttng_enabler_destroy(struct lttng_enabler *enabler);
+void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
{
}
static
-void lttng_event_enabler_unsync(struct lttng_event_enabler *event_enabler)
+void lttng_event_enabler_unsync(struct lttng_event_enabler_common *event_enabler)
{
- cds_list_move(&event_enabler->node,
- &event_enabler->chan->parent->session->priv->unsync_enablers_head);
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ cds_list_move(&event_enabler->node,
+ &event_enabler_session->chan->session->priv->unsync_enablers_head);
+ break;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
+ cds_list_move(&event_enabler->node,
+ &event_notifier_enabler->group->unsync_enablers_head);
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
}
static
CDS_INIT_LIST_HEAD(&session->priv->sync_enablers_head);
}
-static
-void lttng_event_notifier_enabler_unsync(struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- cds_list_move(&event_notifier_enabler->node,
- &event_notifier_enabler->group->unsync_enablers_head);
-}
-
static
void lttng_event_notifier_group_unsync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
CDS_INIT_LIST_HEAD(&session->priv->enums_head);
CDS_INIT_LIST_HEAD(&session->priv->unsync_enablers_head);
CDS_INIT_LIST_HEAD(&session->priv->sync_enablers_head);
+ CDS_INIT_LIST_HEAD(&session->priv->counters_head);
for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
- CDS_INIT_HLIST_HEAD(&session->priv->events_ht.table[i]);
+ CDS_INIT_HLIST_HEAD(&session->priv->events_name_ht.table[i]);
for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
CDS_INIT_HLIST_HEAD(&session->priv->enums_ht.table[i]);
cds_list_add(&session->priv->node, &sessions);
return session;
}
-struct lttng_counter *lttng_ust_counter_create(
+struct lttng_ust_channel_counter *lttng_ust_counter_create(
const char *counter_transport_name,
- size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+ size_t number_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ bool coalesce_hits)
{
struct lttng_counter_transport *counter_transport = NULL;
- struct lttng_counter *counter = NULL;
+ struct lttng_ust_channel_counter *counter = NULL;
counter_transport = lttng_counter_transport_find(counter_transport_name);
- if (!counter_transport)
+ if (!counter_transport) {
goto notransport;
- counter = zmalloc(sizeof(struct lttng_counter));
- if (!counter)
- goto nomem;
-
- counter->ops = &counter_transport->ops;
- counter->transport = counter_transport;
-
- counter->counter = counter->ops->counter_create(
- number_dimensions, dimensions, 0,
- -1, 0, NULL, false);
- if (!counter->counter) {
+ }
+ counter = counter_transport->ops.priv->counter_create(number_dimensions, dimensions,
+ global_sum_step, -1, 0, NULL, false);
+ if (!counter) {
goto create_error;
}
+ counter->ops = &counter_transport->ops;
+ counter->priv->parent.coalesce_hits = coalesce_hits;
return counter;
create_error:
- free(counter);
-nomem:
notransport:
return NULL;
}
static
-void lttng_ust_counter_destroy(struct lttng_counter *counter)
+void lttng_ust_counter_destroy(struct lttng_ust_channel_counter *counter)
{
- counter->ops->counter_destroy(counter->counter);
- free(counter);
+ counter->ops->priv->counter_destroy(counter);
}
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
CDS_INIT_LIST_HEAD(&event_notifier_group->sync_enablers_head);
CDS_INIT_LIST_HEAD(&event_notifier_group->unsync_enablers_head);
CDS_INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
- for (i = 0; i < LTTNG_UST_EVENT_NOTIFIER_HT_SIZE; i++)
+ for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
CDS_INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
cds_list_add(&event_notifier_group->node, &event_notifier_groups);
void lttng_session_destroy(struct lttng_ust_session *session)
{
- struct lttng_ust_channel_buffer_private *chan, *tmpchan;
- struct lttng_ust_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
+ struct lttng_ust_channel_buffer_private *chan_buffer, *tmpchan_buffer;
+ struct lttng_ust_channel_counter_private *chan_counter, *tmpchan_counter;
+ struct lttng_ust_event_common_private *event_priv, *tmpevent_priv;
struct lttng_enum *_enum, *tmp_enum;
- struct lttng_event_enabler *event_enabler, *event_tmpenabler;
+ struct lttng_event_enabler_common *event_enabler, *event_tmpenabler;
CMM_ACCESS_ONCE(session->active) = 0;
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- _lttng_event_unregister(event_recorder_priv->parent.pub);
- }
+ cds_list_for_each_entry(event_priv, &session->priv->events_head, node)
+ _lttng_event_unregister(event_priv->pub);
lttng_ust_urcu_synchronize_rcu(); /* Wait for in-flight events to complete */
lttng_ust_tp_probe_prune_release_queue();
- cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
- &session->priv->unsync_enablers_head, node)
+ cds_list_for_each_entry_safe(event_enabler, event_tmpenabler, &session->priv->unsync_enablers_head, node)
lttng_event_enabler_destroy(event_enabler);
- cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
- &session->priv->sync_enablers_head, node)
+ cds_list_for_each_entry_safe(event_enabler, event_tmpenabler, &session->priv->sync_enablers_head, node)
lttng_event_enabler_destroy(event_enabler);
- cds_list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv,
- &session->priv->events_head, node)
- _lttng_event_destroy(event_recorder_priv->parent.pub);
- cds_list_for_each_entry_safe(_enum, tmp_enum,
- &session->priv->enums_head, node)
+ cds_list_for_each_entry_safe(event_priv, tmpevent_priv, &session->priv->events_head, node)
+ _lttng_event_destroy(event_priv->pub);
+ cds_list_for_each_entry_safe(_enum, tmp_enum, &session->priv->enums_head, node)
_lttng_enum_destroy(_enum);
- cds_list_for_each_entry_safe(chan, tmpchan, &session->priv->chan_head, node)
- _lttng_channel_unmap(chan->pub);
+ cds_list_for_each_entry_safe(chan_buffer, tmpchan_buffer, &session->priv->chan_head, node)
+ _lttng_channel_unmap(chan_buffer->pub);
+ cds_list_for_each_entry_safe(chan_counter, tmpchan_counter, &session->priv->counters_head, node) {
+ cds_list_del(&chan_counter->node);
+ lttng_ust_counter_destroy(chan_counter->pub);
+ }
cds_list_del(&session->priv->node);
lttng_destroy_context(session->priv->ctx);
free(session->priv);
struct lttng_event_notifier_group *event_notifier_group)
{
int close_ret;
- struct lttng_event_notifier_enabler *notifier_enabler, *tmpnotifier_enabler;
- struct lttng_ust_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
+ struct lttng_event_enabler_common *event_enabler, *tmpevent_enabler;
+ struct lttng_ust_event_common_private *event_priv, *tmpevent_priv;
if (!event_notifier_group) {
return;
}
- cds_list_for_each_entry(event_notifier_priv,
- &event_notifier_group->event_notifiers_head, node)
- _lttng_event_unregister(event_notifier_priv->parent.pub);
+ cds_list_for_each_entry(event_priv, &event_notifier_group->event_notifiers_head, node)
+ _lttng_event_unregister(event_priv->pub);
lttng_ust_urcu_synchronize_rcu();
- cds_list_for_each_entry_safe(notifier_enabler, tmpnotifier_enabler,
- &event_notifier_group->sync_enablers_head, node)
- lttng_event_notifier_enabler_destroy(notifier_enabler);
- cds_list_for_each_entry_safe(notifier_enabler, tmpnotifier_enabler,
- &event_notifier_group->unsync_enablers_head, node)
- lttng_event_notifier_enabler_destroy(notifier_enabler);
-
- cds_list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
- &event_notifier_group->event_notifiers_head, node)
- _lttng_event_destroy(event_notifier_priv->parent.pub);
+ cds_list_for_each_entry_safe(event_enabler, tmpevent_enabler, &event_notifier_group->sync_enablers_head, node)
+ lttng_event_enabler_destroy(event_enabler);
+ cds_list_for_each_entry_safe(event_enabler, tmpevent_enabler, &event_notifier_group->unsync_enablers_head, node)
+ lttng_event_enabler_destroy(event_enabler);
+ cds_list_for_each_entry_safe(event_priv, tmpevent_priv, &event_notifier_group->event_notifiers_head, node)
+ _lttng_event_destroy(event_priv->pub);
if (event_notifier_group->error_counter)
lttng_ust_counter_destroy(event_notifier_group->error_counter);
free(event_notifier_group);
}
-static
-void lttng_enabler_destroy(struct lttng_enabler *enabler)
-{
- struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
- struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
-
- if (!enabler) {
- return;
- }
-
- /* Destroy filter bytecode */
- cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
- &enabler->filter_bytecode_head, node) {
- free(filter_node);
- }
-
- /* Destroy excluders */
- cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
- &enabler->excluder_head, node) {
- free(excluder_node);
- }
-}
-
- void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- if (!event_notifier_enabler) {
- return;
- }
-
- cds_list_del(&event_notifier_enabler->node);
-
- lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
-
- free(event_notifier_enabler);
-}
-
static
int lttng_enum_create(const struct lttng_ust_enum_desc *desc,
struct lttng_ust_session *session)
}
static
-int lttng_create_all_event_enums(size_t nr_fields,
- const struct lttng_ust_event_field * const *event_fields,
- struct lttng_ust_session *session)
+int lttng_create_all_event_enums(struct lttng_event_enabler_common *event_enabler,
+ const struct lttng_ust_event_desc *desc)
{
+ size_t nr_fields = desc->tp_class->nr_fields;
+ const struct lttng_ust_event_field * const *event_fields = desc->tp_class->fields;
+ struct lttng_ust_session *session;
size_t i;
int ret;
- /* For each field, ensure enum is part of the session. */
- for (i = 0; i < nr_fields; i++) {
- const struct lttng_ust_type_common *type = event_fields[i]->type;
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
- ret = lttng_create_enum_check(type, session);
- if (ret)
- return ret;
+ session = event_enabler_session->chan->session;
+ /* For each field, ensure enum is part of the session. */
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_ust_type_common *type = event_fields[i]->type;
+
+ ret = lttng_create_enum_check(type, session);
+ if (ret)
+ return ret;
+ }
+ return 0;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ return 0;
+ default:
+ return -EINVAL;
}
- return 0;
}
static
struct cds_hlist_head *borrow_hash_table_bucket(
struct cds_hlist_head *hash_table,
unsigned int hash_table_size,
- const struct lttng_ust_event_desc *desc)
+ const char *name)
{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
size_t name_len;
uint32_t hash;
- lttng_ust_format_event_name(desc, name);
name_len = strlen(name);
hash = jhash(name, name_len, 0);
return &hash_table[hash & (hash_table_size - 1)];
}
-/*
- * Supports event creation while tracing session is active.
- */
static
-int lttng_event_recorder_create(const struct lttng_ust_event_desc *desc,
- struct lttng_ust_channel_buffer *chan)
+int format_event_key(struct lttng_event_enabler_common *event_enabler, char *key_string,
+ const char *provider_name, const char *event_name)
{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- struct lttng_ust_event_recorder *event_recorder;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- struct lttng_ust_session *session = chan->parent->session;
- struct cds_hlist_head *head;
- int ret = 0;
- int notify_socket, loglevel;
- const char *uri;
+ struct lttng_event_counter_enabler *event_counter_enabler;
+ const struct lttng_counter_key_dimension *dim;
+ size_t i, left = LTTNG_KEY_TOKEN_STRING_LEN_MAX;
+ const struct lttng_counter_key *key;
- head = borrow_hash_table_bucket(chan->parent->session->priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, desc);
+ if (event_enabler->enabler_type != LTTNG_EVENT_ENABLER_TYPE_COUNTER) {
+ return 0;
+ }
+ event_counter_enabler = caa_container_of(event_enabler, struct lttng_event_counter_enabler, parent.parent);
+ key = &event_counter_enabler->key;
+ if (!key->nr_dimensions)
+ return 0;
+ /* Currently event keys can only be specified on a single dimension. */
+ if (key->nr_dimensions != 1)
+ return -EINVAL;
+ dim = &key->key_dimensions[0];
+ /* Currently only tokens keys are supported. */
+ if (dim->key_type != LTTNG_KEY_TYPE_TOKENS)
+ return -EINVAL;
+ for (i = 0; i < dim->u.tokens.nr_key_tokens; i++) {
+ const struct lttng_key_token *token = &dim->u.tokens.key_tokens[i];
+ size_t token_len;
+ const char *str;
+
+ switch (token->type) {
+ case LTTNG_KEY_TOKEN_STRING:
+ str = token->arg.string;
+ break;
+ case LTTNG_KEY_TOKEN_EVENT_NAME:
+ str = event_name;
+ break;
+ case LTTNG_KEY_TOKEN_PROVIDER_NAME:
+ str = provider_name;
+ break;
+ default:
+ return -EINVAL;
+ }
+ token_len = strlen(str);
+ if (token_len >= left)
+ return -EINVAL;
+ strcat(key_string, str);
+ left -= token_len;
+ }
+ return 0;
+}
- notify_socket = lttng_get_notify_socket(session->priv->owner);
- if (notify_socket < 0) {
- ret = notify_socket;
- goto socket_error;
+static
+bool match_event_key(struct lttng_ust_event_common *event, const char *key_string)
+{
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ return true;
+
+ case LTTNG_UST_EVENT_TYPE_COUNTER:
+ {
+ struct lttng_ust_event_counter_private *event_counter_priv =
+ caa_container_of(event->priv, struct lttng_ust_event_counter_private, parent.parent);
+
+ if (key_string[0] == '\0')
+ return true;
+ return !strcmp(key_string, event_counter_priv->key);
}
- ret = lttng_create_all_event_enums(desc->tp_class->nr_fields, desc->tp_class->fields,
- session);
- if (ret < 0) {
- DBG("Error (%d) adding enum to session", ret);
- goto create_enum_error;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
}
+}
- /*
- * Check if loglevel match. Refuse to connect event if not.
- */
- event_recorder = zmalloc(sizeof(struct lttng_ust_event_recorder));
- if (!event_recorder) {
- ret = -ENOMEM;
- goto cache_error;
+static
+bool match_event_session_token(struct lttng_ust_event_session_common_private *event_session_priv,
+ uint64_t token)
+{
+ if (event_session_priv->chan->priv->coalesce_hits)
+ return true;
+ if (event_session_priv->parent.user_token == token)
+ return true;
+ return false;
+}
+
+static
+struct lttng_ust_event_common *lttng_ust_event_alloc(struct lttng_event_enabler_common *event_enabler,
+ const struct lttng_ust_event_desc *desc,
+ const char *key_string)
+{
+
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
+ {
+ struct lttng_event_recorder_enabler *event_recorder_enabler =
+ caa_container_of(event_enabler, struct lttng_event_recorder_enabler, parent.parent);
+ struct lttng_ust_event_recorder *event_recorder;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+
+ event_recorder = zmalloc(sizeof(struct lttng_ust_event_recorder));
+ if (!event_recorder)
+ return NULL;
+ event_recorder->struct_size = sizeof(struct lttng_ust_event_recorder);
+
+ event_recorder->parent = zmalloc(sizeof(struct lttng_ust_event_common));
+ if (!event_recorder->parent) {
+ free(event_recorder);
+ return NULL;
+ }
+ event_recorder->parent->struct_size = sizeof(struct lttng_ust_event_common);
+ event_recorder->parent->type = LTTNG_UST_EVENT_TYPE_RECORDER;
+ event_recorder->parent->child = event_recorder;
+
+ event_recorder_priv = zmalloc(sizeof(struct lttng_ust_event_recorder_private));
+ if (!event_recorder_priv) {
+ free(event_recorder->parent);
+ free(event_recorder);
+ return NULL;
+ }
+ event_recorder->priv = event_recorder_priv;
+ event_recorder_priv->pub = event_recorder;
+ event_recorder->parent->priv = &event_recorder_priv->parent.parent;
+ event_recorder_priv->parent.parent.pub = event_recorder->parent;
+ event_recorder->chan = event_recorder_enabler->chan;
+
+ /* Event will be enabled by enabler sync. */
+ event_recorder->parent->run_filter = lttng_ust_interpret_event_filter;
+ event_recorder->parent->enabled = 0;
+ event_recorder->parent->priv->registered = 0;
+ CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->enablers_ref_head);
+ event_recorder_priv->parent.chan = event_recorder_enabler->chan->parent;
+ event_recorder->priv->parent.parent.desc = desc;
+
+ return event_recorder->parent;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
+ struct lttng_ust_event_notifier *event_notifier;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+ uint64_t token = event_notifier_enabler->parent.user_token;
+ uint64_t error_counter_index = event_notifier_enabler->error_counter_index;
+
+ event_notifier = zmalloc(sizeof(struct lttng_ust_event_notifier));
+ if (!event_notifier)
+ return NULL;
+ event_notifier->struct_size = sizeof(struct lttng_ust_event_notifier);
+
+ event_notifier->parent = zmalloc(sizeof(struct lttng_ust_event_common));
+ if (!event_notifier->parent) {
+ free(event_notifier);
+ return NULL;
+ }
+ event_notifier->parent->struct_size = sizeof(struct lttng_ust_event_common);
+ event_notifier->parent->type = LTTNG_UST_EVENT_TYPE_NOTIFIER;
+ event_notifier->parent->child = event_notifier;
+
+ event_notifier_priv = zmalloc(sizeof(struct lttng_ust_event_notifier_private));
+ if (!event_notifier_priv) {
+ free(event_notifier->parent);
+ free(event_notifier);
+ return NULL;
+ }
+ event_notifier->priv = event_notifier_priv;
+ event_notifier_priv->pub = event_notifier;
+ event_notifier->parent->priv = &event_notifier_priv->parent;
+ event_notifier_priv->parent.pub = event_notifier->parent;
+
+ event_notifier_priv->group = event_notifier_group;
+ event_notifier_priv->parent.user_token = token;
+ event_notifier_priv->error_counter_index = error_counter_index;
+
+ /* Event notifier will be enabled by enabler sync. */
+ event_notifier->parent->run_filter = lttng_ust_interpret_event_filter;
+ event_notifier->parent->enabled = 0;
+ event_notifier_priv->parent.registered = 0;
+
+ CDS_INIT_LIST_HEAD(&event_notifier->parent->priv->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_priv->parent.enablers_ref_head);
+ event_notifier->notification_send = lttng_event_notifier_notification_send;
+ event_notifier_priv->parent.desc = desc;
+ return event_notifier->parent;
}
- event_recorder->struct_size = sizeof(struct lttng_ust_event_recorder);
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_counter_enabler *event_counter_enabler =
+ caa_container_of(event_enabler, struct lttng_event_counter_enabler, parent.parent);
+ struct lttng_ust_event_counter *event_counter;
+ struct lttng_ust_event_counter_private *event_counter_priv;
+
+ event_counter = zmalloc(sizeof(struct lttng_ust_event_counter));
+ if (!event_counter)
+ return NULL;
+ event_counter->struct_size = sizeof(struct lttng_ust_event_counter);
+
+ event_counter->parent = zmalloc(sizeof(struct lttng_ust_event_common));
+ if (!event_counter->parent) {
+ free(event_counter);
+ return NULL;
+ }
+ event_counter->parent->struct_size = sizeof(struct lttng_ust_event_common);
+ event_counter->parent->type = LTTNG_UST_EVENT_TYPE_COUNTER;
+ event_counter->parent->child = event_counter;
+
+ event_counter_priv = zmalloc(sizeof(struct lttng_ust_event_counter_private));
+ if (!event_counter_priv) {
+ free(event_counter->parent);
+ free(event_counter);
+ return NULL;
+ }
+ event_counter->priv = event_counter_priv;
+ event_counter_priv->pub = event_counter;
+ event_counter->parent->priv = &event_counter_priv->parent.parent;
+ event_counter_priv->parent.parent.pub = event_counter->parent;
+ event_counter->chan = event_counter_enabler->chan;
+
+ /* Event will be enabled by enabler sync. */
+ event_counter->parent->run_filter = lttng_ust_interpret_event_filter;
+ event_counter->parent->enabled = 0;
+ event_counter->parent->priv->registered = 0;
+ CDS_INIT_LIST_HEAD(&event_counter->parent->priv->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_counter->parent->priv->enablers_ref_head);
+ event_counter_priv->parent.chan = event_counter_enabler->chan->parent;
+ if (!event_counter->chan->priv->parent.coalesce_hits)
+ event_counter->priv->parent.parent.user_token = event_counter_enabler->parent.parent.user_token;
+ event_counter->priv->parent.parent.desc = desc;
+ strcpy(event_counter_priv->key, key_string);
+ event_counter_priv->action = event_counter_enabler->action;
+ return event_counter->parent;
+ }
+ default:
+ return NULL;
+ }
+}
- event_recorder->parent = zmalloc(sizeof(struct lttng_ust_event_common));
- if (!event_recorder->parent) {
- ret = -ENOMEM;
- goto parent_error;
+static
+void lttng_ust_event_free(struct lttng_ust_event_common *event)
+{
+ struct lttng_ust_event_common_private *event_priv = event->priv;
+
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER:
+ {
+ struct lttng_ust_event_recorder_private *event_recorder_priv =
+ caa_container_of(event_priv, struct lttng_ust_event_recorder_private, parent.parent);
+
+ free(event_recorder_priv->pub->parent);
+ free(event_recorder_priv->pub);
+ free(event_recorder_priv);
+ break;
}
- event_recorder->parent->struct_size = sizeof(struct lttng_ust_event_common);
- event_recorder->parent->type = LTTNG_UST_EVENT_TYPE_RECORDER;
- event_recorder->parent->child = event_recorder;
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ {
+ struct lttng_ust_event_notifier_private *event_notifier_priv =
+ caa_container_of(event_priv, struct lttng_ust_event_notifier_private, parent);
- event_recorder_priv = zmalloc(sizeof(struct lttng_ust_event_recorder_private));
- if (!event_recorder_priv) {
- ret = -ENOMEM;
- goto priv_error;
+ free(event_notifier_priv->pub->parent);
+ free(event_notifier_priv->pub);
+ free(event_notifier_priv);
+ break;
}
- event_recorder->priv = event_recorder_priv;
- event_recorder_priv->pub = event_recorder;
- event_recorder->parent->priv = &event_recorder_priv->parent;
- event_recorder_priv->parent.pub = event_recorder->parent;
+ case LTTNG_UST_EVENT_TYPE_COUNTER:
+ {
+ struct lttng_ust_event_counter_private *event_counter_priv =
+ caa_container_of(event_priv, struct lttng_ust_event_counter_private, parent.parent);
- event_recorder->chan = chan;
+ free(event_counter_priv->pub->parent);
+ free(event_counter_priv->pub);
+ free(event_counter_priv);
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
- /* Event will be enabled by enabler sync. */
- event_recorder->parent->run_filter = lttng_ust_interpret_event_filter;
- event_recorder->parent->enabled = 0;
- event_recorder->parent->priv->registered = 0;
- CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->filter_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->enablers_ref_head);
- event_recorder->parent->priv->desc = desc;
+static
+int lttng_event_register_to_sessiond(struct lttng_event_enabler_common *event_enabler,
+ struct lttng_ust_event_common *event,
+ const char *name)
+{
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ struct lttng_ust_event_session_common_private *event_session_priv =
+ caa_container_of(event->priv, struct lttng_ust_event_session_common_private, parent);
+ struct lttng_ust_session *session = event_enabler_session->chan->session;
+ const struct lttng_ust_event_desc *desc = event->priv->desc;
+ int notify_socket, loglevel, ret;
+ const char *uri;
+ uint32_t id;
+
+ if (desc->loglevel)
+ loglevel = *(*desc->loglevel);
+ else
+ loglevel = LTTNG_UST_TRACEPOINT_LOGLEVEL_DEFAULT;
+ if (desc->model_emf_uri)
+ uri = *(desc->model_emf_uri);
+ else
+ uri = NULL;
- if (desc->loglevel)
- loglevel = *(*desc->loglevel);
- else
- loglevel = LTTNG_UST_TRACEPOINT_LOGLEVEL_DEFAULT;
- if (desc->model_emf_uri)
- uri = *(desc->model_emf_uri);
- else
- uri = NULL;
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0)
+ return notify_socket;
- lttng_ust_format_event_name(desc, name);
+ /* Fetch event ID from sessiond */
+ ret = ustcomm_register_event(notify_socket,
+ session,
+ session->priv->objd,
+ event_enabler_session->chan->priv->objd,
+ name,
+ loglevel,
+ desc->tp_class->signature,
+ desc->tp_class->nr_fields,
+ desc->tp_class->fields,
+ uri,
+ event_enabler_session->parent.user_token,
+ &id);
+ if (ret)
+ return ret;
+ event_session_priv->id = id;
+ return 0;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ struct lttng_ust_event_session_common_private *event_session_priv =
+ caa_container_of(event->priv, struct lttng_ust_event_session_common_private, parent);
+ struct lttng_ust_session *session = event_enabler_session->chan->session;
+ uint64_t dimension_index[LTTNG_COUNTER_DIMENSION_MAX];
+ int notify_socket, ret;
+
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0)
+ return notify_socket;
+
+ /* Fetch key index from sessiond */
+ ret = ustcomm_register_key(notify_socket,
+ session->priv->objd,
+ event_enabler_session->chan->priv->objd,
+ 0, /* target dimension */
+ NULL,
+ name,
+ event_enabler_session->parent.user_token,
+ dimension_index); /* Filled up to target dimension. */
+ if (ret)
+ return ret;
+ event_session_priv->id = dimension_index[0];
+ return 0;
+ }
- /* Fetch event ID from sessiond */
- ret = ustcomm_register_event(notify_socket,
- session,
- session->priv->objd,
- chan->priv->parent.objd,
- name,
- loglevel,
- desc->tp_class->signature,
- desc->tp_class->nr_fields,
- desc->tp_class->fields,
- uri,
- &event_recorder->priv->id);
- if (ret < 0) {
- DBG("Error (%d) registering event to sessiond", ret);
- goto sessiond_register_error;
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ return 0;
+ default:
+ return -EINVAL;
}
+}
- cds_list_add(&event_recorder_priv->node, &chan->parent->session->priv->events_head);
- cds_hlist_add_head(&event_recorder_priv->hlist, head);
- return 0;
+static
+bool lttng_event_enabler_event_desc_key_match_event(struct lttng_event_enabler_common *event_enabler,
+ const struct lttng_ust_event_desc *desc,
+ const char *key_string,
+ struct lttng_ust_event_common *event)
+{
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_session_enabler =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ struct lttng_ust_event_session_common_private *event_session_priv =
+ caa_container_of(event->priv, struct lttng_ust_event_session_common_private, parent);
+ bool same_event = false, same_channel = false, same_key = false,
+ same_token = false;
+
+ WARN_ON_ONCE(!event->priv->desc);
+ if (event->priv->desc == desc)
+ same_event = true;
+ if (event_session_enabler->chan == event_session_priv->chan) {
+ same_channel = true;
+ if (match_event_session_token(event_session_priv, event_enabler->user_token))
+ same_token = true;
+ }
+ if (match_event_key(event, key_string))
+ same_key = true;
+ return same_event && same_channel && same_key && same_token;
+ }
-sessiond_register_error:
- free(event_recorder_priv);
-priv_error:
- free(event_recorder->parent);
-parent_error:
- free(event_recorder);
-cache_error:
-create_enum_error:
-socket_error:
- return ret;
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ /*
+ * Check if event_notifier already exists by checking
+ * if the event_notifier and enabler share the same
+ * description and user token.
+ */
+ return event->priv->desc == desc && event->priv->user_token == event_enabler->user_token;
+ }
+
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
}
static
-int lttng_event_notifier_create(const struct lttng_ust_event_desc *desc,
- uint64_t token, uint64_t error_counter_index,
- struct lttng_event_notifier_group *event_notifier_group)
-{
- struct lttng_ust_event_notifier *event_notifier;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct cds_hlist_head *head;
+int lttng_ust_event_create(struct lttng_event_enabler_common *event_enabler, const struct lttng_ust_event_desc *desc)
+{
+ char key_string[LTTNG_KEY_TOKEN_STRING_LEN_MAX] = { 0 };
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN] = { 0 };
+ struct cds_list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
+ struct lttng_ust_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
+ struct lttng_ust_event_common_private *event_priv_iter;
+ struct lttng_ust_event_common *event;
+ struct cds_hlist_head *name_head;
int ret = 0;
- /*
- * Get the hashtable bucket the created lttng_event_notifier object
- * should be inserted.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
+ if (format_event_key(event_enabler, key_string, desc->probe_desc->provider_name, desc->event_name)) {
+ ret = -EINVAL;
+ goto type_error;
+ }
- event_notifier = zmalloc(sizeof(struct lttng_ust_event_notifier));
- if (!event_notifier) {
- ret = -ENOMEM;
- goto error;
+ lttng_ust_format_event_name(desc, name);
+ name_head = borrow_hash_table_bucket(events_ht->table, LTTNG_UST_EVENT_HT_SIZE, name);
+ cds_hlist_for_each_entry_2(event_priv_iter, name_head, name_hlist_node) {
+ if (lttng_event_enabler_event_desc_key_match_event(event_enabler,
+ desc, key_string, event_priv_iter->pub)) {
+ ret = -EEXIST;
+ goto exist;
+ }
}
- event_notifier->struct_size = sizeof(struct lttng_ust_event_notifier);
- event_notifier->parent = zmalloc(sizeof(struct lttng_ust_event_common));
- if (!event_notifier->parent) {
- ret = -ENOMEM;
- goto parent_error;
+ ret = lttng_create_all_event_enums(event_enabler, desc);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ goto create_enum_error;
}
- event_notifier->parent->struct_size = sizeof(struct lttng_ust_event_common);
- event_notifier->parent->type = LTTNG_UST_EVENT_TYPE_NOTIFIER;
- event_notifier->parent->child = event_notifier;
- event_notifier_priv = zmalloc(sizeof(struct lttng_ust_event_notifier_private));
- if (!event_notifier_priv) {
+ event = lttng_ust_event_alloc(event_enabler, desc, key_string);
+ if (!event) {
ret = -ENOMEM;
- goto priv_error;
+ goto alloc_error;
}
- event_notifier->priv = event_notifier_priv;
- event_notifier_priv->pub = event_notifier;
- event_notifier->parent->priv = &event_notifier_priv->parent;
- event_notifier_priv->parent.pub = event_notifier->parent;
- event_notifier_priv->group = event_notifier_group;
- event_notifier_priv->parent.user_token = token;
- event_notifier_priv->error_counter_index = error_counter_index;
-
- /* Event notifier will be enabled by enabler sync. */
- event_notifier->parent->run_filter = lttng_ust_interpret_event_filter;
- event_notifier->parent->enabled = 0;
- event_notifier_priv->parent.registered = 0;
-
- CDS_INIT_LIST_HEAD(&event_notifier->parent->priv->filter_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_notifier_priv->parent.enablers_ref_head);
- event_notifier_priv->parent.desc = desc;
- event_notifier->notification_send = lttng_event_notifier_notification_send;
-
- cds_list_add(&event_notifier_priv->node,
- &event_notifier_group->event_notifiers_head);
- cds_hlist_add_head(&event_notifier_priv->hlist, head);
+ ret = lttng_event_register_to_sessiond(event_enabler, event, name);
+ if (ret < 0) {
+ DBG("Error (%d) registering event '%s' key '%s' to sessiond", ret, name, key_string);
+ goto sessiond_register_error;
+ }
+ cds_list_add(&event->priv->node, event_list_head);
+ cds_hlist_add_head(&event->priv->name_hlist_node, name_head);
return 0;
-priv_error:
- free(event_notifier->parent);
-parent_error:
- free(event_notifier);
-error:
+sessiond_register_error:
+ lttng_ust_event_free(event);
+alloc_error:
+create_enum_error:
+exist:
+type_error:
return ret;
}
static
int lttng_desc_match_star_glob_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
+ struct lttng_event_enabler_common *enabler)
{
char name[LTTNG_UST_ABI_SYM_NAME_LEN];
int loglevel = 0;
static
int lttng_desc_match_event_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
+ struct lttng_event_enabler_common *enabler)
{
char name[LTTNG_UST_ABI_SYM_NAME_LEN];
int loglevel = 0;
static
int lttng_desc_match_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
+ struct lttng_event_enabler_common *enabler)
{
switch (enabler->format_type) {
case LTTNG_ENABLER_FORMAT_STAR_GLOB:
}
static
-int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_event_recorder *event_recorder)
+bool lttng_event_session_enabler_match_event_session(struct lttng_event_enabler_session_common *event_enabler_session,
+ struct lttng_ust_event_session_common_private *event_session_priv)
{
- if (lttng_desc_match_enabler(event_recorder->parent->priv->desc,
- lttng_event_enabler_as_enabler(event_enabler))
- && event_recorder->chan == event_enabler->chan)
- return 1;
+ if (lttng_desc_match_enabler(event_session_priv->parent.desc, &event_enabler_session->parent)
+ && event_session_priv->chan == event_enabler_session->chan
+ && match_event_session_token(event_session_priv, event_enabler_session->parent.user_token))
+ return true;
else
- return 0;
+ return false;
}
static
int lttng_event_notifier_enabler_match_event_notifier(
struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_event_notifier *event_notifier)
+ struct lttng_ust_event_notifier_private *event_notifier_priv)
{
- int desc_matches = lttng_desc_match_enabler(event_notifier->priv->parent.desc,
+ int desc_matches = lttng_desc_match_enabler(event_notifier_priv->parent.desc,
lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
- if (desc_matches && event_notifier->priv->group == event_notifier_enabler->group &&
- event_notifier->priv->parent.user_token == event_notifier_enabler->user_token)
+ if (desc_matches && event_notifier_priv->group == event_notifier_enabler->group &&
+ event_notifier_priv->parent.user_token == event_notifier_enabler->parent.user_token)
return 1;
else
return 0;
}
+static
+bool lttng_event_enabler_match_event(
+ struct lttng_event_enabler_common *event_enabler,
+ struct lttng_ust_event_common *event)
+{
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ struct lttng_ust_event_session_common_private *event_session_priv =
+ caa_container_of(event->priv, struct lttng_ust_event_session_common_private, parent);
+ return lttng_event_session_enabler_match_event_session(event_enabler_session, event_session_priv);
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
+ struct lttng_ust_event_notifier_private *event_notifier_priv =
+ caa_container_of(event->priv, struct lttng_ust_event_notifier_private, parent);
+ return lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier_priv);
+ }
+ }
+ return false;
+}
+
static
struct lttng_enabler_ref *lttng_enabler_ref(
struct cds_list_head *enabler_ref_list,
- struct lttng_enabler *enabler)
+ struct lttng_event_enabler_common *enabler)
{
struct lttng_enabler_ref *enabler_ref;
}
/*
- * Create struct lttng_event if it is missing and present in the list of
+ * Create struct lttng_ust_event_common if it is missing and present in the list of
* tracepoint probes.
*/
static
-void lttng_create_event_recorder_if_missing(struct lttng_event_enabler *event_enabler)
+void lttng_create_event_if_missing(struct lttng_event_enabler_common *event_enabler)
{
- struct lttng_ust_session *session = event_enabler->chan->parent->session;
struct lttng_ust_registered_probe *reg_probe;
const struct lttng_ust_event_desc *desc;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- int i;
struct cds_list_head *probe_list;
+ int i;
probe_list = lttng_get_probe_list_head();
/*
* For each probe event, if we find that a probe event matches
- * our enabler, create an associated lttng_event if not
+ * our enabler, create an associated lttng_ust_event_common if not
* already present.
*/
cds_list_for_each_entry(reg_probe, probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
int ret;
- bool found = false;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc,
- lttng_event_enabler_as_enabler(event_enabler)))
- continue;
-
- head = borrow_hash_table_bucket(
- session->priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, desc);
-
- cds_hlist_for_each_entry(event_recorder_priv, node, head, hlist) {
- if (event_recorder_priv->parent.desc == desc
- && event_recorder_priv->pub->chan == event_enabler->chan) {
- found = true;
- break;
- }
- }
- if (found)
+ if (!lttng_desc_match_enabler(desc, event_enabler))
continue;
-
/*
- * We need to create an event for this
- * event probe.
+ * We need to create an event for this event probe.
*/
- ret = lttng_event_recorder_create(probe_desc->event_desc[i],
- event_enabler->chan);
+ ret = lttng_ust_event_create(event_enabler, probe_desc->event_desc[i]);
+ /* Skip if already found. */
+ if (ret == -EEXIST)
+ continue;
if (ret) {
DBG("Unable to create event \"%s:%s\", error %d\n",
probe_desc->provider_name,
for (i = 0; i < provider_desc->nr_events; i++) {
const struct lttng_ust_event_desc *event_desc;
struct lttng_event_notifier_group *event_notifier_group;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
struct lttng_ust_session_private *session_priv;
struct cds_hlist_head *head;
* description.
*/
cds_list_for_each_entry(session_priv, sessionsp, node) {
+ struct lttng_ust_event_common_private *event_priv;
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
/*
* Get the list of events in the hashtable bucket and
* iterate to find the event matching this descriptor.
*/
+ lttng_ust_format_event_name(event_desc, name);
head = borrow_hash_table_bucket(
- session_priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, event_desc);
+ session_priv->events_name_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, name);
- cds_hlist_for_each_entry_safe(event_recorder_priv, node, tmp_node, head, hlist) {
- if (event_desc == event_recorder_priv->parent.desc) {
- event_func(event_recorder_priv->parent.pub);
+ cds_hlist_for_each_entry_safe(event_priv, node, tmp_node, head, name_hlist_node) {
+ if (event_desc == event_priv->desc) {
+ event_func(event_priv->pub);
break;
}
}
* description.
*/
cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ struct lttng_ust_event_common_private *event_priv;
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
/*
* Get the list of event_notifiers in the hashtable bucket and
* iterate to find the event_notifier matching this
* descriptor.
*/
+ lttng_ust_format_event_name(event_desc, name);
head = borrow_hash_table_bucket(
event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, event_desc);
+ LTTNG_UST_EVENT_HT_SIZE, name);
- cds_hlist_for_each_entry_safe(event_notifier_priv, node, tmp_node, head, hlist) {
- if (event_desc == event_notifier_priv->parent.desc) {
- event_func(event_notifier_priv->parent.pub);
+ cds_hlist_for_each_entry_safe(event_priv, node, tmp_node, head, name_hlist_node) {
+ if (event_desc == event_priv->desc) {
+ event_func(event_priv->pub);
break;
}
}
{
switch (event->type) {
- case LTTNG_UST_EVENT_TYPE_RECORDER:
+ case LTTNG_UST_EVENT_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_UST_EVENT_TYPE_COUNTER:
{
- struct lttng_ust_event_recorder *event_recorder = event->child;
- struct lttng_ust_session *session = event_recorder->chan->parent->session;
+ struct lttng_ust_event_common_private *event_priv = event->priv;
+ struct lttng_ust_event_session_common_private *event_session_priv =
+ caa_container_of(event_priv, struct lttng_ust_event_session_common_private, parent);
+ struct lttng_ust_session *session = event_session_priv->chan->session;
unsigned int i;
/* Destroy enums of the current event. */
- for (i = 0; i < event_recorder->parent->priv->desc->tp_class->nr_fields; i++) {
+ for (i = 0; i < event_session_priv->parent.desc->tp_class->nr_fields; i++) {
const struct lttng_ust_enum_desc *enum_desc;
const struct lttng_ust_event_field *field;
struct lttng_enum *curr_enum;
- field = event_recorder->parent->priv->desc->tp_class->fields[i];
+ field = event_session_priv->parent.desc->tp_class->fields[i];
switch (field->type->type) {
case lttng_ust_type_enum:
enum_desc = lttng_ust_get_type_enum(field->type)->desc;
probe_provider_event_for_each(provider_desc, _event_enum_destroy);
}
-/*
- * Create events associated with an event enabler (if not already present),
- * and add backward reference from the event to the enabler.
- */
static
-int lttng_event_enabler_ref_event_recorders(struct lttng_event_enabler *event_enabler)
+void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
+ struct lttng_ust_event_common *event)
{
- struct lttng_ust_session *session = event_enabler->chan->parent->session;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
-
- if (!lttng_event_enabler_as_enabler(event_enabler)->enabled)
- goto end;
-
- /* First ensure that probe events are created for this enabler. */
- lttng_create_event_recorder_if_missing(event_enabler);
-
- /* For each event matching enabler in session event list. */
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- struct lttng_enabler_ref *enabler_ref;
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
- if (!lttng_event_enabler_match_event(event_enabler, event_recorder_priv->pub))
- continue;
+ lttng_enabler_link_bytecode(event->priv->desc, &event_enabler_session->chan->session->priv->ctx,
+ &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
+ break;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
- enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
- lttng_event_enabler_as_enabler(event_enabler));
- if (!enabler_ref) {
- /*
- * If no backward ref, create it.
- * Add backward ref from event to enabler.
- */
- enabler_ref = zmalloc(sizeof(*enabler_ref));
- if (!enabler_ref)
- return -ENOMEM;
- enabler_ref->ref = lttng_event_enabler_as_enabler(
- event_enabler);
- cds_list_add(&enabler_ref->node,
- &event_recorder_priv->parent.enablers_ref_head);
- }
+ lttng_enabler_link_bytecode(event->priv->desc, &event_notifier_enabler->group->ctx,
+ &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
- /*
- * Link filter bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
- &session->priv->ctx,
- &event_recorder_priv->parent.filter_bytecode_runtime_head,
- &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
+static
+void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
+ struct lttng_ust_event_common *event)
+{
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ break;
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
+ struct lttng_ust_event_notifier *event_notifier = event->child;
- /* TODO: merge event context. */
+ lttng_enabler_link_bytecode(event->priv->desc, &event_notifier_enabler->group->ctx,
+ &event_notifier->priv->capture_bytecode_runtime_head,
+ &event_notifier_enabler->capture_bytecode_head);
+ event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
}
-end:
- return 0;
}
/*
&event->priv->enablers_ref_head, node)
free(enabler_ref);
- switch (event->type) {
- case LTTNG_UST_EVENT_TYPE_RECORDER:
- {
- struct lttng_ust_event_recorder *event_recorder = event->child;
+ /* Remove from event list. */
+ cds_list_del(&event->priv->node);
+ /* Remove from event hash table. */
+ cds_hlist_del(&event->priv->name_hlist_node);
- /* Remove from event list. */
- cds_list_del(&event_recorder->priv->node);
- /* Remove from event hash table. */
- cds_hlist_del(&event_recorder->priv->hlist);
-
- lttng_destroy_context(event_recorder->priv->ctx);
- free(event_recorder->parent);
- free(event_recorder->priv);
- free(event_recorder);
- break;
- }
- case LTTNG_UST_EVENT_TYPE_NOTIFIER:
- {
- struct lttng_ust_event_notifier *event_notifier = event->child;
-
- /* Remove from event list. */
- cds_list_del(&event_notifier->priv->node);
- /* Remove from event hash table. */
- cds_hlist_del(&event_notifier->priv->hlist);
-
- free(event_notifier->priv);
- free(event_notifier->parent);
- free(event_notifier);
- break;
- }
- default:
- abort();
- }
+ lttng_ust_event_free(event);
}
static
/*
* Enabler management.
*/
-struct lttng_event_enabler *lttng_event_enabler_create(
+struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event *event_param,
+ const struct lttng_ust_abi_event *event_param,
struct lttng_ust_channel_buffer *chan)
{
- struct lttng_event_enabler *event_enabler;
+ struct lttng_event_recorder_enabler *event_enabler;
+
+ event_enabler = zmalloc(sizeof(*event_enabler));
+ if (!event_enabler)
+ return NULL;
+ event_enabler->parent.parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
+ event_enabler->parent.parent.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_enabler->parent.parent.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_enabler->parent.parent.excluder_head);
+ memcpy(&event_enabler->parent.parent.event_param, event_param,
+ sizeof(event_enabler->parent.parent.event_param));
+ event_enabler->chan = chan;
+ /* ctx left NULL */
+ event_enabler->parent.parent.enabled = 0;
+ event_enabler->parent.parent.user_token = event_param->token;
+ event_enabler->parent.chan = chan->parent;
+ cds_list_add(&event_enabler->parent.parent.node, &event_enabler->chan->parent->session->priv->unsync_enablers_head);
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+
+ return event_enabler;
+}
+
+struct lttng_event_counter_enabler *lttng_event_counter_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ const struct lttng_ust_abi_counter_event *counter_event,
+ const struct lttng_counter_key *key,
+ struct lttng_ust_channel_counter *chan)
+{
+ struct lttng_event_counter_enabler *event_enabler;
+ enum lttng_event_counter_action action;
event_enabler = zmalloc(sizeof(*event_enabler));
if (!event_enabler)
return NULL;
- event_enabler->base.format_type = format_type;
- CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
- memcpy(&event_enabler->base.event_param, event_param,
- sizeof(event_enabler->base.event_param));
+
+ switch (counter_event->action) {
+ case LTTNG_UST_ABI_COUNTER_ACTION_INCREMENT:
+ action = LTTNG_EVENT_COUNTER_ACTION_INCREMENT;
+ break;
+ default:
+ goto error;
+ }
+ event_enabler->action = action;
+ event_enabler->parent.parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_COUNTER;
+ event_enabler->parent.parent.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_enabler->parent.parent.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_enabler->parent.parent.excluder_head);
+ memcpy(&event_enabler->parent.parent.event_param, &counter_event->event,
+ sizeof(event_enabler->parent.parent.event_param));
event_enabler->chan = chan;
+ memcpy(&event_enabler->key, key, sizeof(struct lttng_counter_key));
/* ctx left NULL */
- event_enabler->base.enabled = 0;
- cds_list_add(&event_enabler->node, &event_enabler->chan->parent->session->priv->unsync_enablers_head);
+ event_enabler->parent.parent.enabled = 0;
+ event_enabler->parent.parent.user_token = counter_event->event.token;
+ event_enabler->parent.chan = chan->parent;
+ cds_list_add(&event_enabler->parent.parent.node, &event_enabler->chan->parent->session->priv->unsync_enablers_head);
lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
return event_enabler;
+
+error:
+ free(event_enabler);
+ return NULL;
}
struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
event_notifier_enabler = zmalloc(sizeof(*event_notifier_enabler));
if (!event_notifier_enabler)
return NULL;
- event_notifier_enabler->base.format_type = format_type;
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
+ event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
+ event_notifier_enabler->parent.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->parent.excluder_head);
CDS_INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.excluder_head);
- event_notifier_enabler->user_token = event_notifier_param->event.token;
+ event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
event_notifier_enabler->num_captures = 0;
- memcpy(&event_notifier_enabler->base.event_param.name,
+ memcpy(&event_notifier_enabler->parent.event_param.name,
event_notifier_param->event.name,
- sizeof(event_notifier_enabler->base.event_param.name));
- event_notifier_enabler->base.event_param.instrumentation =
+ sizeof(event_notifier_enabler->parent.event_param.name));
+ event_notifier_enabler->parent.event_param.instrumentation =
event_notifier_param->event.instrumentation;
- event_notifier_enabler->base.event_param.loglevel =
+ event_notifier_enabler->parent.event_param.loglevel =
event_notifier_param->event.loglevel;
- event_notifier_enabler->base.event_param.loglevel_type =
+ event_notifier_enabler->parent.event_param.loglevel_type =
event_notifier_param->event.loglevel_type;
- event_notifier_enabler->base.enabled = 0;
+ event_notifier_enabler->parent.enabled = 0;
event_notifier_enabler->group = event_notifier_group;
- cds_list_add(&event_notifier_enabler->node,
- &event_notifier_group->unsync_enablers_head);
+ cds_list_add(&event_notifier_enabler->parent.node, &event_notifier_group->unsync_enablers_head);
lttng_event_notifier_group_sync_enablers(event_notifier_group);
return event_notifier_enabler;
}
-int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
{
- lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+ event_enabler->enabled = 1;
lttng_event_enabler_unsync(event_enabler);
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
-
+ lttng_event_enabler_sync(event_enabler);
return 0;
}
-int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
{
- lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+ event_enabler->enabled = 0;
lttng_event_enabler_unsync(event_enabler);
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+ lttng_event_enabler_sync(event_enabler);
return 0;
}
-static
-void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
struct lttng_ust_bytecode_node **bytecode)
{
- (*bytecode)->enabler = enabler;
- cds_list_add_tail(&(*bytecode)->node, &enabler->filter_bytecode_head);
+ (*bytecode)->enabler = event_enabler;
+ cds_list_add_tail(&(*bytecode)->node, &event_enabler->filter_bytecode_head);
/* Take ownership of bytecode */
*bytecode = NULL;
-}
-
-int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- _lttng_enabler_attach_filter_bytecode(
- lttng_event_enabler_as_enabler(event_enabler), bytecode);
lttng_event_enabler_unsync(event_enabler);
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+ lttng_event_enabler_sync(event_enabler);
return 0;
}
-static
-void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler_common *event_enabler,
struct lttng_ust_excluder_node **excluder)
{
- (*excluder)->enabler = enabler;
- cds_list_add_tail(&(*excluder)->node, &enabler->excluder_head);
+ (*excluder)->enabler = event_enabler;
+ cds_list_add_tail(&(*excluder)->node, &event_enabler->excluder_head);
/* Take ownership of excluder */
*excluder = NULL;
-}
-
-int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- _lttng_enabler_attach_exclusion(
- lttng_event_enabler_as_enabler(event_enabler), excluder);
lttng_event_enabler_unsync(event_enabler);
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
- return 0;
-}
-
-int lttng_event_notifier_enabler_enable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
- lttng_event_notifier_enabler_unsync(event_notifier_enabler);
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
-
- return 0;
-}
-
-int lttng_event_notifier_enabler_disable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
- lttng_event_notifier_enabler_unsync(event_notifier_enabler);
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
-
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_filter_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- _lttng_enabler_attach_filter_bytecode(
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
- bytecode);
- lttng_event_notifier_enabler_unsync(event_notifier_enabler);
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ lttng_event_enabler_sync(event_enabler);
return 0;
}
/* Take ownership of bytecode */
*bytecode = NULL;
event_notifier_enabler->num_captures++;
- lttng_event_notifier_enabler_unsync(event_notifier_enabler);
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_exclusion(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- _lttng_enabler_attach_exclusion(
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
- excluder);
- lttng_event_notifier_enabler_unsync(event_notifier_enabler);
+ lttng_event_enabler_unsync(&event_notifier_enabler->parent);
lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
return 0;
}
}
int lttng_event_enabler_attach_context(
- struct lttng_event_enabler *enabler __attribute__((unused)),
+ struct lttng_event_enabler_session_common *enabler __attribute__((unused)),
struct lttng_ust_abi_context *context_param __attribute__((unused)))
{
return -ENOSYS;
}
-void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
{
+ struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
+
if (!event_enabler) {
return;
}
- cds_list_del(&event_enabler->node);
-
- lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
-
- lttng_destroy_context(event_enabler->ctx);
- free(event_enabler);
-}
-
-/*
- * lttng_session_sync_event_enablers should be called just before starting a
- * session.
- */
-static
-void lttng_session_sync_event_enablers(struct lttng_ust_session *session)
-{
- struct lttng_event_enabler *event_enabler;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- struct cds_list_head iter_list;
-
- /*
- * lttng_event_enabler_ref_event_recorders can cause lazy probes
- * to add items to the unsync_enablers_head list. Iterate on a
- * local copy of that list until it is stable (empty).
- */
- do {
- CDS_INIT_LIST_HEAD(&iter_list);
- cds_list_splice(&session->priv->unsync_enablers_head, &iter_list);
- CDS_INIT_LIST_HEAD(&session->priv->unsync_enablers_head);
- cds_list_for_each_entry(event_enabler, &iter_list, node)
- lttng_event_enabler_ref_event_recorders(event_enabler);
- cds_list_splice(&iter_list, &session->priv->sync_enablers_head);
- } while (!cds_list_empty(&session->priv->unsync_enablers_head));
-
- /*
- * For each event, if at least one of its enablers is enabled,
- * and its channel and session transient states are enabled, we
- * enable the event, else we disable it.
- */
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- struct lttng_enabler_ref *enabler_ref;
- struct lttng_ust_bytecode_runtime *runtime;
- int enabled = 0, has_enablers_without_filter_bytecode = 0;
- int nr_filters = 0;
-
- /* Enable events */
- cds_list_for_each_entry(enabler_ref,
- &event_recorder_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled) {
- enabled = 1;
- break;
- }
- }
- /*
- * Enabled state is based on union of enablers, with
- * intesection of session and channel transient enable
- * states.
- */
- enabled = enabled && session->priv->tstate && event_recorder_priv->pub->chan->priv->parent.tstate;
-
- CMM_STORE_SHARED(event_recorder_priv->pub->parent->enabled, enabled);
- /*
- * Sync tracepoint registration with event enabled
- * state.
- */
- if (enabled) {
- if (!event_recorder_priv->parent.registered)
- register_event(event_recorder_priv->parent.pub);
- } else {
- if (event_recorder_priv->parent.registered)
- unregister_event(event_recorder_priv->parent.pub);
- }
-
- /* Check if has enablers without bytecode enabled */
- cds_list_for_each_entry(enabler_ref,
- &event_recorder_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled
- && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
- has_enablers_without_filter_bytecode = 1;
- break;
- }
- }
- event_recorder_priv->parent.has_enablers_without_filter_bytecode =
- has_enablers_without_filter_bytecode;
- /* Enable filters */
- cds_list_for_each_entry(runtime,
- &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_filters++;
- }
- CMM_STORE_SHARED(event_recorder_priv->parent.pub->eval_filter,
- !(has_enablers_without_filter_bytecode || !nr_filters));
+ /* Destroy filter bytecode */
+ cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
+ &event_enabler->filter_bytecode_head, node) {
+ free(filter_node);
+ }
+ /* Destroy excluders */
+ cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
+ &event_enabler->excluder_head, node) {
+ free(excluder_node);
}
- lttng_ust_tp_probe_prune_release_queue();
-}
-
-static
-void lttng_create_event_notifier_if_missing(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
- struct lttng_ust_registered_probe *reg_probe;
- struct cds_list_head *probe_list;
- int i;
-
- probe_list = lttng_get_probe_list_head();
-
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- int ret;
- bool found = false;
- const struct lttng_ust_event_desc *desc;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
-
- desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
- continue;
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
- /*
- * Given the current event_notifier group, get the bucket that
- * the target event_notifier would be if it was already
- * created.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
-
- cds_hlist_for_each_entry(event_notifier_priv, node, head, hlist) {
- /*
- * Check if event_notifier already exists by checking
- * if the event_notifier and enabler share the same
- * description and id.
- */
- if (event_notifier_priv->parent.desc == desc &&
- event_notifier_priv->parent.user_token == event_notifier_enabler->user_token) {
- found = true;
- break;
- }
- }
+ cds_list_del(&enabler_session->parent.node);
+ break;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ break;
+ }
- if (found)
- continue;
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
+ {
+ struct lttng_event_recorder_enabler *recorder_enabler =
+ caa_container_of(event_enabler, struct lttng_event_recorder_enabler, parent.parent);
+ free(recorder_enabler);
+ break;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
- /*
- * We need to create a event_notifier for this event probe.
- */
- ret = lttng_event_notifier_create(desc,
- event_notifier_enabler->user_token,
- event_notifier_enabler->error_counter_index,
- event_notifier_group);
- if (ret) {
- DBG("Unable to create event_notifier \"%s:%s\", error %d\n",
- probe_desc->provider_name,
- probe_desc->event_desc[i]->event_name, ret);
- }
- }
+ cds_list_del(¬ifier_enabler->parent.node);
+ free(notifier_enabler);
+ break;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_counter_enabler *counter_enabler =
+ caa_container_of(event_enabler, struct lttng_event_counter_enabler, parent.parent);
+ free(counter_enabler);
+ break;
+ }
}
}
/*
- * Create event_notifiers associated with a event_notifier enabler (if not already present).
+ * Create events associated with an event enabler (if not already present),
+ * and add backward reference from the event to the enabler.
*/
static
-int lttng_event_notifier_enabler_ref_event_notifiers(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
+int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
{
- struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct cds_list_head *event_list = lttng_get_event_list_head_from_enabler(event_enabler);
+ struct lttng_ust_event_common_private *event_priv;
/*
- * Only try to create event_notifiers for enablers that are enabled, the user
- * might still be attaching filter or exclusion to the
- * event_notifier_enabler.
+ * Only try to create events for enablers that are enabled, the user
+ * might still be attaching filter or exclusion to the event enabler.
*/
- if (!lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled)
+ if (!event_enabler->enabled)
goto end;
/* First, ensure that probe event_notifiers are created for this enabler. */
- lttng_create_event_notifier_if_missing(event_notifier_enabler);
+ lttng_create_event_if_missing(event_enabler);
- /* Link the created event_notifier with its associated enabler. */
- cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
+ /* Link the created events with their associated enabler. */
+ cds_list_for_each_entry(event_priv, event_list, node) {
struct lttng_enabler_ref *enabler_ref;
- if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier_priv->pub))
+ if (!lttng_event_enabler_match_event(event_enabler, event_priv->pub))
continue;
- enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+ enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
if (!enabler_ref) {
/*
* If no backward ref, create it.
if (!enabler_ref)
return -ENOMEM;
- enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
- event_notifier_enabler);
- cds_list_add(&enabler_ref->node,
- &event_notifier_priv->parent.enablers_ref_head);
+ enabler_ref->ref = event_enabler;
+ cds_list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
}
- /*
- * Link filter bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
- &event_notifier_group->ctx,
- &event_notifier_priv->parent.filter_bytecode_runtime_head,
- <tng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
+ lttng_event_enabler_init_event_filter(event_enabler, event_priv->pub);
+ lttng_event_enabler_init_event_capture(event_enabler, event_priv->pub);
+ }
+end:
+ return 0;
+}
+
+static
+void lttng_event_sync_filter_state(struct lttng_ust_event_common *event)
+{
+ int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
+ struct lttng_ust_bytecode_runtime *runtime;
+ struct lttng_enabler_ref *enabler_ref;
+
+ /* Check if has enablers without bytecode enabled */
+ cds_list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_filter_bytecode = 1;
+ break;
+ }
+ }
+ event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
+
+ /* Enable filters */
+ cds_list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_filters++;
+ }
+ CMM_STORE_SHARED(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
+}
+
+static
+void lttng_event_sync_capture_state(struct lttng_ust_event_common *event)
+{
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_UST_EVENT_TYPE_COUNTER:
+ break;
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ {
+ struct lttng_ust_event_notifier *event_notifier = event->child;
+ struct lttng_ust_bytecode_runtime *runtime;
+ int nr_captures = 0;
+
+ /* Enable captures */
+ cds_list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_captures++;
+ }
+ CMM_STORE_SHARED(event_notifier->eval_capture, !!nr_captures);
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static
+bool lttng_get_event_enabled_state(struct lttng_ust_event_common *event)
+{
+ struct lttng_enabler_ref *enabler_ref;
+ bool enabled = false;
+
+ /* Enable events */
+ cds_list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = true;
+ break;
+ }
+ }
+
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_UST_EVENT_TYPE_COUNTER:
+ {
+ struct lttng_ust_event_common_private *event_priv = event->priv;
+ struct lttng_ust_event_session_common_private *event_session_priv =
+ caa_container_of(event_priv, struct lttng_ust_event_session_common_private, parent);
/*
- * Link capture bytecodes if not linked yet.
+ * Enabled state is based on union of enablers, with
+ * intersection of session and channel transient enable
+ * states.
*/
- lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
- &event_notifier_group->ctx, &event_notifier_priv->capture_bytecode_runtime_head,
- &event_notifier_enabler->capture_bytecode_head);
-
- event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
+ return enabled && event_session_priv->chan->session->priv->tstate && event_session_priv->chan->priv->tstate;
+ }
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ return enabled;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
}
-end:
- return 0;
}
static
-void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
+void lttng_sync_event_list(struct cds_list_head *sync_event_enabler_list,
+ struct cds_list_head *unsync_event_enabler_list,
+ struct cds_list_head *event_list)
{
- struct lttng_event_notifier_enabler *event_notifier_enabler;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct lttng_ust_event_common_private *event_priv;
+ struct lttng_event_enabler_common *event_enabler;
struct cds_list_head iter_list;
/*
- * lttng_event_notifier_enabler_ref_event_notifiers can cause
- * lazy probes to add items to the unsync_enablers_head list.
- * Iterate on a local copy of that list until it is stable
- * (empty).
+ * lttng_event_enabler_ref_events can cause lazy probes
+ * to add items to the unsync_enablers_head list. Iterate on a
+ * local copy of that list until it is stable (empty).
*/
do {
CDS_INIT_LIST_HEAD(&iter_list);
- cds_list_splice(&event_notifier_group->unsync_enablers_head, &iter_list);
- CDS_INIT_LIST_HEAD(&event_notifier_group->unsync_enablers_head);
- cds_list_for_each_entry(event_notifier_enabler, &iter_list, node)
- lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
- cds_list_splice(&iter_list, &event_notifier_group->sync_enablers_head);
- } while (!cds_list_empty(&event_notifier_group->unsync_enablers_head));
+ cds_list_splice(unsync_event_enabler_list, &iter_list);
+ CDS_INIT_LIST_HEAD(unsync_event_enabler_list);
+ cds_list_for_each_entry(event_enabler, &iter_list, node)
+ lttng_event_enabler_ref_events(event_enabler);
+ cds_list_splice(&iter_list, sync_event_enabler_list);
+ } while (!cds_list_empty(unsync_event_enabler_list));
/*
- * For each event_notifier, if at least one of its enablers is enabled,
- * we enable the event_notifier, else we disable it.
+ * For each event, if at least one of its enablers is enabled,
+ * and its channel and session transient states are enabled, we
+ * enable the event, else we disable it.
*/
- cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
- struct lttng_enabler_ref *enabler_ref;
- struct lttng_ust_bytecode_runtime *runtime;
- int enabled = 0, has_enablers_without_filter_bytecode = 0;
- int nr_filters = 0, nr_captures = 0;
-
- /* Enable event_notifiers */
- cds_list_for_each_entry(enabler_ref,
- &event_notifier_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled) {
- enabled = 1;
- break;
- }
- }
+ cds_list_for_each_entry(event_priv, event_list, node) {
+ bool enabled = lttng_get_event_enabled_state(event_priv->pub);
- CMM_STORE_SHARED(event_notifier_priv->pub->parent->enabled, enabled);
+ CMM_STORE_SHARED(event_priv->pub->enabled, enabled);
/*
- * Sync tracepoint registration with event_notifier enabled
+ * Sync tracepoint registration with event enabled
* state.
*/
if (enabled) {
- if (!event_notifier_priv->parent.registered)
- register_event(event_notifier_priv->parent.pub);
+ if (!event_priv->registered)
+ register_event(event_priv->pub);
} else {
- if (event_notifier_priv->parent.registered)
- unregister_event(event_notifier_priv->parent.pub);
+ if (event_priv->registered)
+ unregister_event(event_priv->pub);
}
- /* Check if has enablers without bytecode enabled */
- cds_list_for_each_entry(enabler_ref,
- &event_notifier_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled
- && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
- has_enablers_without_filter_bytecode = 1;
- break;
- }
- }
- event_notifier_priv->parent.has_enablers_without_filter_bytecode =
- has_enablers_without_filter_bytecode;
-
- /* Enable filters */
- cds_list_for_each_entry(runtime,
- &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_filters++;
- }
- CMM_STORE_SHARED(event_notifier_priv->parent.pub->eval_filter,
- !(has_enablers_without_filter_bytecode || !nr_filters));
-
- /* Enable captures. */
- cds_list_for_each_entry(runtime,
- &event_notifier_priv->capture_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_captures++;
- }
- CMM_STORE_SHARED(event_notifier_priv->pub->eval_capture,
- !!nr_captures);
+ lttng_event_sync_filter_state(event_priv->pub);
+ lttng_event_sync_capture_state(event_priv->pub);
}
lttng_ust_tp_probe_prune_release_queue();
}
+/*
+ * lttng_session_sync_event_enablers should be called just before starting a
+ * session.
+ */
+static
+void lttng_session_sync_event_enablers(struct lttng_ust_session *session)
+{
+ lttng_sync_event_list(&session->priv->sync_enablers_head,
+ &session->priv->unsync_enablers_head,
+ &session->priv->events_head);
+}
+
/*
* Apply enablers to session events, adding events to session if need
* be. It is required after each modification applied to an active
lttng_session_sync_event_enablers(session);
}
+static
+void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
+{
+ lttng_sync_event_list(&event_notifier_group->sync_enablers_head,
+ &event_notifier_group->unsync_enablers_head,
+ &event_notifier_group->event_notifiers_head);
+}
+
+static
+void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
+{
+ switch (event_enabler->enabler_type) {
+ case LTTNG_EVENT_ENABLER_TYPE_RECORDER: /* Fall-through */
+ case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
+ {
+ struct lttng_event_enabler_session_common *event_enabler_session =
+ caa_container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
+ lttng_session_sync_event_enablers(event_enabler_session->chan->session);
+ break;
+ }
+ case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
+ {
+ struct lttng_event_notifier_enabler *event_notifier_enabler =
+ caa_container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
/*
* Update all sessions with the given app context.
* Called with ust lock held.
cds_list_for_each_entry(session_priv, &sessions, node) {
struct lttng_ust_channel_buffer_private *chan;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
int ret;
ret = lttng_ust_context_set_provider_rcu(&session_priv->ctx,
if (ret)
abort();
}
- cds_list_for_each_entry(event_recorder_priv, &session_priv->events_head, node) {
- ret = lttng_ust_context_set_provider_rcu(&event_recorder_priv->ctx,
- name, get_size, record, get_value);
- if (ret)
- abort();
- }
}
}
#include "common/ust-fd.h"
#include "common/logging.h"
+#include "common/align.h"
#include "common/ringbuffer/frontend_types.h"
#include "common/ringbuffer/frontend.h"
static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops;
static const struct lttng_ust_abi_objd_ops lttng_session_ops;
static const struct lttng_ust_abi_objd_ops lttng_channel_ops;
+static const struct lttng_ust_abi_objd_ops lttng_counter_ops;
static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops;
static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops;
static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops;
return ret;
}
+static
+bool check_zero(const char *p, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (p[i] != 0)
+ return false;
+ }
+ return true;
+}
+
+static
+int copy_abi_struct(void *dst_struct, size_t dst_struct_len,
+ const void *src_struct, size_t src_struct_len)
+{
+ if (dst_struct_len >= src_struct_len) {
+ memcpy(dst_struct, src_struct, src_struct_len);
+ if (dst_struct_len > src_struct_len)
+ memset(dst_struct + src_struct_len, 0, dst_struct_len - src_struct_len);
+ } else { /* dst_struct_len < src_struct_len */
+ /* Validate zero-padding. */
+ if (!check_zero(src_struct + dst_struct_len, src_struct_len - dst_struct_len))
+ return -E2BIG;
+ memcpy(dst_struct, src_struct, dst_struct_len);
+ }
+ return 0;
+}
+
+static
+long lttng_session_create_counter(
+ int session_objd,
+ const struct lttng_ust_abi_counter_conf *abi_counter_conf,
+ union lttng_ust_abi_args *uargs,
+ void *owner)
+{
+ struct lttng_ust_session *session = objd_private(session_objd);
+ int counter_objd, ret;
+ const char *counter_transport_name;
+ struct lttng_ust_channel_counter *counter = NULL;
+ struct lttng_counter_dimension dimensions[1] = {};
+ size_t number_dimensions = 1;
+ struct lttng_ust_abi_counter_conf counter_conf;
+ uint32_t min_expected_len = lttng_ust_offsetofend(struct lttng_ust_abi_counter_conf, elem_len);
+ const struct lttng_ust_abi_counter_dimension *abi_dimension;
+ struct lttng_ust_abi_counter_dimension dimension;
+
+ if (uargs->counter.len < min_expected_len) {
+ ERR("LTTng: Map: Counter configuration of wrong size.");
+ return -EINVAL;
+ }
+ if (abi_counter_conf->len > uargs->counter.len || abi_counter_conf->len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_conf, elem_len)) {
+ return -EINVAL;
+ }
+ ret = copy_abi_struct(&counter_conf, sizeof(counter_conf), abi_counter_conf, abi_counter_conf->len);
+ if (ret) {
+ ERR("Unexpected counter configuration structure content");
+ return ret;
+ }
+ if (counter_conf.number_dimensions != 1) {
+		ERR("LTTng: Map: Unsupported number of dimensions %u.", counter_conf.number_dimensions);
+ return -EINVAL;
+ }
+ if (counter_conf.elem_len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_dimension, overflow_index)) {
+ ERR("Unexpected dimension array element length %u.", counter_conf.elem_len);
+ return -EINVAL;
+ }
+ if (counter_conf.len + counter_conf.elem_len > uargs->counter.len) {
+ return -EINVAL;
+ }
+ abi_dimension = (const struct lttng_ust_abi_counter_dimension *)(((char *)abi_counter_conf) + counter_conf.len);
+ ret = copy_abi_struct(&dimension, sizeof(dimension), abi_dimension, counter_conf.elem_len);
+ if (ret) {
+ ERR("Unexpected dimension structure content");
+ return ret;
+ }
+ if (counter_conf.arithmetic != LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR) {
+ ERR("LTTng: Map: Counter of the wrong type.");
+ return -EINVAL;
+ }
+ if (counter_conf.global_sum_step) {
+ /* Unsupported. */
+ return -EINVAL;
+ }
+ switch (counter_conf.bitness) {
+ case LTTNG_UST_ABI_COUNTER_BITNESS_64:
+ counter_transport_name = "counter-per-cpu-64-modular";
+ break;
+ case LTTNG_UST_ABI_COUNTER_BITNESS_32:
+ counter_transport_name = "counter-per-cpu-32-modular";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dimensions[0].size = dimension.size;
+ dimensions[0].underflow_index = dimension.underflow_index;
+ dimensions[0].overflow_index = dimension.overflow_index;
+ dimensions[0].has_underflow = dimension.flags & LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_UNDERFLOW;
+ dimensions[0].has_overflow = dimension.flags & LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_OVERFLOW;
+ switch (dimension.key_type) {
+ case LTTNG_UST_ABI_KEY_TYPE_TOKENS:
+ dimensions[0].key_type = LTTNG_KEY_TYPE_TOKENS;
+ break;
+ case LTTNG_UST_ABI_KEY_TYPE_INTEGER: /* Fall-through */
+ default:
+ return -EINVAL;
+ }
+
+ counter_objd = objd_alloc(NULL, <tng_counter_ops, owner, "counter");
+ if (counter_objd < 0) {
+ ret = counter_objd;
+ goto objd_error;
+ }
+
+ counter = lttng_ust_counter_create(counter_transport_name,
+ number_dimensions, dimensions,
+ 0, counter_conf.flags & LTTNG_UST_ABI_COUNTER_CONF_FLAG_COALESCE_HITS);
+ if (!counter) {
+ ret = -EINVAL;
+ goto counter_error;
+ }
+ counter->parent->session = session;
+ cds_list_add(&counter->priv->node, &session->priv->counters_head);
+ objd_set_private(counter_objd, counter);
+ counter->priv->parent.objd = counter_objd;
+ counter->priv->parent.tstate = 1;
+ counter->parent->enabled = 1;
+ /* The channel created holds a reference on the session */
+ objd_ref(session_objd);
+ return counter_objd;
+
+counter_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(counter_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
/**
* lttng_session_cmd - lttng session object command
*
case LTTNG_UST_ABI_SESSION_STATEDUMP:
return lttng_session_statedump(session);
case LTTNG_UST_ABI_COUNTER:
- case LTTNG_UST_ABI_COUNTER_GLOBAL:
- case LTTNG_UST_ABI_COUNTER_CPU:
- /* Not implemented yet. */
- return -EINVAL;
+ return lttng_session_create_counter(objd,
+ (struct lttng_ust_abi_counter_conf *)arg,
+ uargs, owner);
default:
return -EINVAL;
}
struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
switch (cmd) {
case LTTNG_UST_ABI_FILTER:
- return lttng_event_notifier_enabler_attach_filter_bytecode(
- event_notifier_enabler,
+ return lttng_event_enabler_attach_filter_bytecode(
+ &event_notifier_enabler->parent,
(struct lttng_ust_bytecode_node **) arg);
case LTTNG_UST_ABI_EXCLUSION:
- return lttng_event_notifier_enabler_attach_exclusion(event_notifier_enabler,
+ return lttng_event_enabler_attach_exclusion(&event_notifier_enabler->parent,
(struct lttng_ust_excluder_node **) arg);
case LTTNG_UST_ABI_CAPTURE:
return lttng_event_notifier_enabler_attach_capture_bytecode(
event_notifier_enabler,
(struct lttng_ust_bytecode_node **) arg);
case LTTNG_UST_ABI_ENABLE:
- return lttng_event_notifier_enabler_enable(event_notifier_enabler);
+ return lttng_event_enabler_enable(&event_notifier_enabler->parent);
case LTTNG_UST_ABI_DISABLE:
- return lttng_event_notifier_enabler_disable(event_notifier_enabler);
+ return lttng_event_enabler_disable(&event_notifier_enabler->parent);
default:
return -EINVAL;
}
union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
{
int ret;
- struct lttng_counter *counter = objd_private(objd);
+ struct lttng_ust_channel_counter *counter = objd_private(objd);
switch (cmd) {
case LTTNG_UST_ABI_COUNTER_GLOBAL:
break;
case LTTNG_UST_ABI_COUNTER_CPU:
{
- struct lttng_ust_abi_counter_cpu *counter_cpu =
- (struct lttng_ust_abi_counter_cpu *)arg;
+ struct lttng_ust_abi_counter_cpu *abi_counter_cpu =
+ (struct lttng_ust_abi_counter_cpu *) arg;
+ struct lttng_ust_abi_counter_cpu counter_cpu;
- ret = lttng_counter_set_cpu_shm(counter->counter,
- counter_cpu->cpu_nr, uargs->counter_shm.shm_fd);
+ if (abi_counter_cpu->len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_cpu, cpu_nr)) {
+ return -EINVAL;
+ }
+ ret = copy_abi_struct(&counter_cpu, sizeof(counter_cpu),
+ abi_counter_cpu, abi_counter_cpu->len);
+ if (ret)
+ return ret;
+ ret = lttng_counter_set_cpu_shm(counter->priv->counter,
+ counter_cpu.cpu_nr, uargs->counter_shm.shm_fd);
if (!ret) {
/* Take ownership of the shm_fd. */
uargs->counter_shm.shm_fd = -1;
__attribute__((visibility("hidden")));
int lttng_release_event_notifier_group_error_counter(int objd)
{
- struct lttng_counter *counter = objd_private(objd);
+ struct lttng_ust_channel_counter *counter = objd_private(objd);
if (counter) {
- return lttng_ust_abi_objd_unref(counter->event_notifier_group->objd, 0);
+ return lttng_ust_abi_objd_unref(counter->priv->event_notifier_group->objd, 0);
} else {
return -EINVAL;
}
};
static
-int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd, void *owner,
- struct lttng_ust_abi_counter_conf *error_counter_conf)
+int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd,
+ struct lttng_ust_abi_counter_conf *abi_counter_conf,
+ union lttng_ust_abi_args *uargs,
+ void *owner)
{
const char *counter_transport_name;
struct lttng_event_notifier_group *event_notifier_group =
objd_private(event_notifier_group_objd);
- struct lttng_counter *counter;
+ struct lttng_ust_channel_counter *counter;
int counter_objd, ret;
- struct lttng_counter_dimension dimensions[1];
size_t counter_len;
+ struct lttng_counter_dimension dimensions[1] = {};
+ struct lttng_ust_abi_counter_conf counter_conf;
+ uint32_t min_expected_len = lttng_ust_offsetofend(struct lttng_ust_abi_counter_conf, elem_len);
+ const struct lttng_ust_abi_counter_dimension *abi_dimension;
+ struct lttng_ust_abi_counter_dimension dimension;
if (event_notifier_group->error_counter)
return -EBUSY;
- if (error_counter_conf->arithmetic != LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR)
+ if (uargs->counter.len < min_expected_len) {
+ ERR("LTTng: Counter configuration of wrong size.");
return -EINVAL;
-
- if (error_counter_conf->number_dimensions != 1)
+ }
+ if (abi_counter_conf->len > uargs->counter.len || abi_counter_conf->len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_conf, elem_len)) {
return -EINVAL;
-
- switch (error_counter_conf->bitness) {
+ }
+ ret = copy_abi_struct(&counter_conf, sizeof(counter_conf), abi_counter_conf, abi_counter_conf->len);
+ if (ret) {
+ ERR("Unexpected counter configuration structure content");
+ return ret;
+ }
+ if (counter_conf.number_dimensions != 1) {
+		ERR("LTTng: Map: Unsupported number of dimensions %u.", counter_conf.number_dimensions);
+ return -EINVAL;
+ }
+ if (counter_conf.elem_len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_dimension, overflow_index)) {
+ ERR("Unexpected dimension array element length %u.", counter_conf.elem_len);
+ return -EINVAL;
+ }
+ if (counter_conf.len + counter_conf.elem_len > uargs->counter.len) {
+ return -EINVAL;
+ }
+ abi_dimension = (const struct lttng_ust_abi_counter_dimension *)(((char *)abi_counter_conf) + counter_conf.len);
+ ret = copy_abi_struct(&dimension, sizeof(dimension), abi_dimension, counter_conf.elem_len);
+ if (ret) {
+ ERR("Unexpected dimension structure content");
+ return ret;
+ }
+ if (counter_conf.arithmetic != LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR) {
+ ERR("LTTng: Counter of the wrong type.");
+ return -EINVAL;
+ }
+ if (counter_conf.global_sum_step) {
+ /* Unsupported. */
+ return -EINVAL;
+ }
+ switch (counter_conf.bitness) {
case LTTNG_UST_ABI_COUNTER_BITNESS_64:
counter_transport_name = "counter-per-cpu-64-modular";
break;
return -EINVAL;
}
+ counter_len = dimension.size;
+ dimensions[0].size = counter_len;
+ dimensions[0].underflow_index = dimension.underflow_index;
+ dimensions[0].overflow_index = dimension.overflow_index;
+ dimensions[0].has_underflow = dimension.flags & LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_UNDERFLOW;
+ dimensions[0].has_overflow = dimension.flags & LTTNG_UST_ABI_COUNTER_DIMENSION_FLAG_OVERFLOW;
+
counter_objd = objd_alloc(NULL, <tng_event_notifier_group_error_counter_ops, owner,
"event_notifier group error counter");
if (counter_objd < 0) {
goto objd_error;
}
- counter_len = error_counter_conf->dimensions[0].size;
- dimensions[0].size = counter_len;
- dimensions[0].underflow_index = 0;
- dimensions[0].overflow_index = 0;
- dimensions[0].has_underflow = 0;
- dimensions[0].has_overflow = 0;
-
- counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions);
+ counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions, 0, false);
if (!counter) {
ret = -EINVAL;
goto create_error;
cmm_smp_mb();
CMM_STORE_SHARED(event_notifier_group->error_counter, counter);
- counter->objd = counter_objd;
- counter->event_notifier_group = event_notifier_group; /* owner */
+ counter->priv->parent.objd = counter_objd;
+ counter->priv->event_notifier_group = event_notifier_group; /* owner */
objd_set_private(counter_objd, counter);
/* The error counter holds a reference on the event_notifier group. */
long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long arg,
union lttng_ust_abi_args *uargs, void *owner)
{
+ int ret;
+
switch (cmd) {
case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
{
- struct lttng_ust_abi_event_notifier *event_notifier_param =
+ struct lttng_ust_abi_event_notifier *abi_event_notifier =
(struct lttng_ust_abi_event_notifier *) arg;
- if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
+ struct lttng_ust_abi_event_notifier event_notifier = {};
+
+ if (uargs->event_notifier.len < lttng_ust_offsetofend(struct lttng_ust_abi_event_notifier, error_counter_index))
+ return -EINVAL;
+ ret = copy_abi_struct(&event_notifier, sizeof(event_notifier),
+ abi_event_notifier, uargs->event_notifier.len);
+ if (ret)
+ return ret;
+ event_notifier.event.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ if (strutils_is_star_glob_pattern(event_notifier.event.name)) {
/*
* If the event name is a star globbing pattern,
* we create the special star globbing enabler.
*/
return lttng_ust_event_notifier_enabler_create(objd,
- owner, event_notifier_param,
+ owner, &event_notifier,
LTTNG_ENABLER_FORMAT_STAR_GLOB);
} else {
return lttng_ust_event_notifier_enabler_create(objd,
- owner, event_notifier_param,
+ owner, &event_notifier,
LTTNG_ENABLER_FORMAT_EVENT);
}
}
case LTTNG_UST_ABI_COUNTER:
{
- struct lttng_ust_abi_counter_conf *counter_conf =
- (struct lttng_ust_abi_counter_conf *) uargs->counter.counter_data;
return lttng_ust_event_notifier_group_create_error_counter(
- objd, owner, counter_conf);
+ objd, (struct lttng_ust_abi_counter_conf *) arg, uargs, owner);
}
default:
return -EINVAL;
}
static
-int lttng_abi_create_event_enabler(int channel_objd,
- struct lttng_ust_abi_event *event_param,
- void *owner,
- enum lttng_enabler_format_type format_type)
+int lttng_abi_create_event_recorder_enabler(int channel_objd,
+ struct lttng_ust_channel_buffer *channel,
+ struct lttng_ust_abi_event *event_param,
+ void *owner,
+ enum lttng_enabler_format_type format_type)
{
- struct lttng_ust_channel_buffer *channel = objd_private(channel_objd);
- struct lttng_event_enabler *enabler;
+ struct lttng_event_recorder_enabler *enabler;
 int event_objd, ret;
 event_param->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ /* Reserve an object descriptor for the enabler before creating it. */
+ event_objd = objd_alloc(NULL, &lttng_event_enabler_ops, owner,
+ "event recorder enabler");
+ if (event_objd < 0) {
+ ret = event_objd;
+ goto objd_error;
+ }
+ /*
+ * We tolerate no failure path after event creation. It will stay
+ * invariant for the rest of the session.
+ */
+ enabler = lttng_event_recorder_enabler_create(format_type, event_param,
+ channel);
+ if (!enabler) {
+ ret = -ENOMEM;
+ goto event_error;
+ }
+ objd_set_private(event_objd, &enabler->parent);
+ /* The event holds a reference on the channel */
+ objd_ref(channel_objd);
+ return event_objd;
+
+event_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(event_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+/*
+ * Decode one "tokens" counter key dimension from the variable-length ABI
+ * payload received from the session daemon into the internal
+ * lttng_counter_key_dimension representation.
+ *
+ * addr/offset: base address and running offset inside the received payload;
+ * *offset is advanced past the dimension header and all of its key tokens
+ * on success.
+ * arg_len: total payload length; bounds every variable-length access.
+ *
+ * Returns 0 on success, negative errno on malformed input.
+ */
+static
+int copy_counter_key_dimension_tokens(const struct lttng_ust_abi_counter_key_dimension_tokens *abi_dim_tokens,
+ const char *addr, size_t *offset, size_t arg_len, struct lttng_counter_key_dimension *internal_dim)
+{
+ struct lttng_ust_abi_counter_key_dimension_tokens dim_tokens;
+ size_t nr_key_tokens, j;
+ int ret;
+
+ /* The dimension header must be at least as large as the ABI struct we know. */
+ if (abi_dim_tokens->parent.len < sizeof(struct lttng_ust_abi_counter_key_dimension_tokens))
+ return -EINVAL;
+ ret = copy_abi_struct(&dim_tokens, sizeof(dim_tokens), abi_dim_tokens, abi_dim_tokens->parent.len);
+ if (ret)
+ return ret;
+ nr_key_tokens = dim_tokens.nr_key_tokens;
+ if (!nr_key_tokens || nr_key_tokens > LTTNG_NR_KEY_TOKEN)
+ return -EINVAL;
+ internal_dim->key_type = LTTNG_KEY_TYPE_TOKENS;
+ internal_dim->u.tokens.nr_key_tokens = nr_key_tokens;
+ *offset += sizeof(struct lttng_ust_abi_counter_key_dimension_tokens);
+ for (j = 0; j < nr_key_tokens; j++) {
+ struct lttng_key_token *internal_token = &internal_dim->u.tokens.key_tokens[j];
+ const struct lttng_ust_abi_key_token *abi_token;
+
+ /* Each token header must fit entirely within the remaining payload. */
+ if (*offset + sizeof(struct lttng_ust_abi_key_token) > arg_len)
+ return -EINVAL;
+ abi_token = (const struct lttng_ust_abi_key_token *)(addr + *offset);
+ if (abi_token->len < sizeof(struct lttng_ust_abi_key_token))
+ return -EINVAL;
+ if (*offset + abi_token->len > arg_len)
+ return -EINVAL;
+ switch (abi_token->type) {
+ case LTTNG_UST_ABI_KEY_TOKEN_STRING:
+ {
+ const struct lttng_ust_abi_key_token_string *abi_key_string;
+ struct lttng_ust_abi_key_token_string token_string;
+
+ if (abi_token->len < sizeof(struct lttng_ust_abi_key_token_string))
+ return -EINVAL;
+ abi_key_string = (const struct lttng_ust_abi_key_token_string *)(addr + *offset);
+ ret = copy_abi_struct(&token_string, sizeof(token_string), abi_key_string, abi_key_string->parent.len);
+ if (ret)
+ return ret;
+ *offset += abi_key_string->parent.len;
+ internal_token->type = LTTNG_KEY_TOKEN_STRING;
+ /* The string payload immediately follows the token header. */
+ if (!abi_key_string->string_len || abi_key_string->string_len > LTTNG_KEY_TOKEN_STRING_LEN_MAX)
+ return -EINVAL;
+ *offset += abi_key_string->string_len;
+ if (*offset > arg_len)
+ return -EINVAL;
+ /* Require an explicit NUL terminator consistent with string_len. */
+ if (abi_key_string->str[abi_key_string->string_len - 1] != '\0' ||
+ strlen(abi_key_string->str) + 1 != abi_key_string->string_len)
+ return -EINVAL;
+ memcpy(internal_token->arg.string, abi_key_string->str, abi_key_string->string_len);
+ break;
+ }
+ case LTTNG_UST_ABI_KEY_TOKEN_EVENT_NAME:
+ internal_token->type = LTTNG_KEY_TOKEN_EVENT_NAME;
+ *offset += abi_token->len;
+ break;
+ case LTTNG_UST_ABI_KEY_TOKEN_PROVIDER_NAME:
+ internal_token->type = LTTNG_KEY_TOKEN_PROVIDER_NAME;
+ *offset += abi_token->len;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Decode the counter key dimensions that follow the fixed-size
+ * lttng_ust_abi_counter_event header (plus any action-specific fields)
+ * in the received payload into internal_key.
+ *
+ * Currently exactly one dimension is supported, and only the "tokens"
+ * key type is accepted.
+ *
+ * Returns 0 on success, negative errno on malformed input.
+ */
+static
+int copy_counter_key(struct lttng_counter_key *internal_key,
+ unsigned long arg, size_t action_fields_len, size_t arg_len,
+ const struct lttng_ust_abi_counter_event *counter_event)
+{
+ size_t i, nr_dimensions, offset = 0;
+ const char *addr = (const char *)arg;
+ int ret;
+
+ nr_dimensions = counter_event->number_key_dimensions;
+ /* Only single-dimension keys are supported for now. */
+ if (nr_dimensions != 1)
+ return -EINVAL;
+ internal_key->nr_dimensions = nr_dimensions;
+ /* Dimensions start right after the event header and action fields. */
+ offset += counter_event->len + action_fields_len;
+ for (i = 0; i < nr_dimensions; i++) {
+ struct lttng_counter_key_dimension *internal_dim = &internal_key->key_dimensions[i];
+ const struct lttng_ust_abi_counter_key_dimension *abi_dim;
+
+ abi_dim = (const struct lttng_ust_abi_counter_key_dimension *)(addr + offset);
+ if (offset + abi_dim->len > arg_len || abi_dim->len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_key_dimension, key_type))
+ return -EINVAL;
+ switch (abi_dim->key_type) {
+ case LTTNG_UST_ABI_KEY_TYPE_TOKENS:
+ {
+ struct lttng_ust_abi_counter_key_dimension_tokens *dim_tokens =
+ caa_container_of(abi_dim, struct lttng_ust_abi_counter_key_dimension_tokens, parent);
+ ret = copy_counter_key_dimension_tokens(dim_tokens, addr, &offset, arg_len,
+ internal_dim);
+ if (ret)
+ return ret;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static
+int lttng_abi_create_event_counter_enabler(int channel_objd,
+ struct lttng_ust_channel_counter *channel,
+ unsigned long arg, size_t arg_len, void *owner)
+{
+ struct lttng_ust_abi_counter_event *abi_counter_event = (struct lttng_ust_abi_counter_event *)arg;
+ struct lttng_ust_abi_counter_event counter_event = {};
+ struct lttng_counter_key counter_key = {};
+ struct lttng_event_counter_enabler *enabler;
+ enum lttng_enabler_format_type format_type;
+ size_t action_fields_len = 0;
+ int event_objd, ret;
+ size_t i;
+
+ if (arg_len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_event, number_key_dimensions)) {
+ return -EINVAL;
+ }
+ if (arg_len < abi_counter_event->len ||
+ abi_counter_event->len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_event, number_key_dimensions)) {
+ return -EINVAL;
+ }
+ ret = copy_abi_struct(&counter_event, sizeof(counter_event),
+ abi_counter_event, abi_counter_event->len);
+ if (ret) {
+ return ret;
+ }
+ switch (counter_event.action) {
+ case LTTNG_UST_ABI_COUNTER_ACTION_INCREMENT:
+ /* No additional fields specific to this action. */
+ break;
+ default:
+ return -EINVAL;
+ }
+ counter_event.event.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ if (strutils_is_star_glob_pattern(counter_event.event.name)) {
+ format_type = LTTNG_ENABLER_FORMAT_STAR_GLOB;
+ } else {
+ format_type = LTTNG_ENABLER_FORMAT_EVENT;
+ }
+ ret = copy_counter_key(&counter_key, arg, action_fields_len, arg_len, &counter_event);
+ if (ret) {
+ return ret;
+ }
+ /*
+ * Validate that each dimension counter key type match the map
+ * key type.
+ */
+ for (i = 0; i < counter_key.nr_dimensions; i++) {
+ if (channel->priv->dimension_key_types[i] != counter_key.key_dimensions[i].key_type)
+ return -EINVAL;
+ }
 event_objd = objd_alloc(NULL, &lttng_event_enabler_ops, owner,
"event enabler");
if (event_objd < 0) {
* We tolerate no failure path after event creation. It will stay
* invariant for the rest of the session.
*/
- enabler = lttng_event_enabler_create(format_type, event_param, channel);
+ enabler = lttng_event_counter_enabler_create(format_type, &counter_event, &counter_key, channel);
if (!enabler) {
ret = -ENOMEM;
goto event_error;
}
- objd_set_private(event_objd, enabler);
+ objd_set_private(event_objd, &enabler->parent);
/* The event holds a reference on the channel */
objd_ref(channel_objd);
return event_objd;
* If the event name is a star globbing pattern,
* we create the special star globbing enabler.
*/
- return lttng_abi_create_event_enabler(objd, event_param,
- owner, LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ return lttng_abi_create_event_recorder_enabler(objd, lttng_chan_buf,
+ event_param, owner,
+ LTTNG_ENABLER_FORMAT_STAR_GLOB);
} else {
- return lttng_abi_create_event_enabler(objd, event_param,
- owner, LTTNG_ENABLER_FORMAT_EVENT);
+ return lttng_abi_create_event_recorder_enabler(objd, lttng_chan_buf,
+ event_param, owner,
+ LTTNG_ENABLER_FORMAT_EVENT);
}
}
case LTTNG_UST_ABI_CONTEXT:
.cmd = lttng_channel_cmd,
};
+/**
+ * lttng_counter_cmd - lttng control through object descriptors
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This object descriptor implements lttng commands:
+ * LTTNG_UST_ABI_COUNTER_GLOBAL:
+ * Attaches the global counter shared memory, returns 0 or failure.
+ * LTTNG_UST_ABI_COUNTER_CPU:
+ * Attaches a per-cpu counter shared memory, returns 0 or failure.
+ * LTTNG_UST_ABI_COUNTER_EVENT
+ * Returns an event object descriptor or failure.
+ * LTTNG_UST_ABI_ENABLE
+ * Enable recording for events in this channel (weak enable)
+ * LTTNG_UST_ABI_DISABLE
+ * Disable recording for events in this channel (strong disable)
+ *
+ * Counter and event object descriptors also hold a reference on the session.
+ */
+static
+long lttng_counter_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ struct lttng_ust_channel_counter *counter = objd_private(objd);
+
+ if (cmd != LTTNG_UST_ABI_COUNTER_GLOBAL && cmd != LTTNG_UST_ABI_COUNTER_CPU) {
+ /*
+ * Check if counter received all global/per-cpu objects.
+ */
+ if (!lttng_counter_ready(counter->priv->counter))
+ return -EPERM;
+ }
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_COUNTER_GLOBAL:
+ {
+ struct lttng_ust_abi_counter_global *abi_counter_global =
+ (struct lttng_ust_abi_counter_global *) arg;
+ struct lttng_ust_abi_counter_global counter_global;
+ long ret;
+ int shm_fd;
+
+ /* Validate the declared ABI length against the received payload. */
+ if (uargs->counter_shm.len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_global, shm_len))
+ return -EINVAL;
+ if (abi_counter_global->len > uargs->counter_shm.len ||
+ abi_counter_global->len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_global, shm_len)) {
+ return -EINVAL;
+ }
+ ret = copy_abi_struct(&counter_global, sizeof(counter_global),
+ abi_counter_global, abi_counter_global->len);
+ if (ret)
+ return ret;
+ shm_fd = uargs->counter_shm.shm_fd;
+ ret = lttng_counter_set_global_shm(counter->priv->counter, shm_fd);
+ if (!ret) {
+ /* Take ownership of shm_fd. */
+ uargs->counter_shm.shm_fd = -1;
+ }
+ return ret;
+ }
+ case LTTNG_UST_ABI_COUNTER_CPU:
+ {
+ struct lttng_ust_abi_counter_cpu *abi_counter_cpu =
+ (struct lttng_ust_abi_counter_cpu *) arg;
+ struct lttng_ust_abi_counter_cpu counter_cpu;
+ long ret;
+ int shm_fd;
+
+ /* Validate the declared ABI length against the received payload. */
+ if (uargs->counter_shm.len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_cpu, cpu_nr))
+ return -EINVAL;
+ if (abi_counter_cpu->len > uargs->counter_shm.len ||
+ abi_counter_cpu->len < lttng_ust_offsetofend(struct lttng_ust_abi_counter_cpu, cpu_nr)) {
+ return -EINVAL;
+ }
+ ret = copy_abi_struct(&counter_cpu, sizeof(counter_cpu),
+ abi_counter_cpu, abi_counter_cpu->len);
+ if (ret)
+ return ret;
+ shm_fd = uargs->counter_shm.shm_fd;
+ ret = lttng_counter_set_cpu_shm(counter->priv->counter,
+ counter_cpu.cpu_nr, shm_fd);
+ if (!ret) {
+ /* Take ownership of shm_fd. */
+ uargs->counter_shm.shm_fd = -1;
+ }
+ return ret;
+ }
+ case LTTNG_UST_ABI_COUNTER_EVENT:
+ {
+ return lttng_abi_create_event_counter_enabler(objd, counter,
+ arg, uargs->counter_event.len, owner);
+ }
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_channel_enable(counter->parent);
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_channel_disable(counter->parent);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Release a counter channel object descriptor: drop the reference it
+ * holds on its session's object descriptor.
+ */
+static
+int lttng_counter_release(int objd)
+{
+ struct lttng_ust_channel_counter *counter = objd_private(objd);
+
+ if (counter) {
+ return lttng_ust_abi_objd_unref(counter->parent->session->priv->objd, 0);
+ }
+ return 0;
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_counter_ops = {
+ .release = lttng_counter_release,
+ .cmd = lttng_counter_cmd,
+};
+
/**
* lttng_enabler_cmd - lttng control through object descriptors
*
union lttng_ust_abi_args *uargs __attribute__((unused)),
void *owner __attribute__((unused)))
{
- struct lttng_event_enabler *enabler = objd_private(objd);
+ struct lttng_event_enabler_session_common *enabler = objd_private(objd);
switch (cmd) {
case LTTNG_UST_ABI_CONTEXT:
return lttng_event_enabler_attach_context(enabler,
(struct lttng_ust_abi_context *) arg);
case LTTNG_UST_ABI_ENABLE:
- return lttng_event_enabler_enable(enabler);
+ return lttng_event_enabler_enable(&enabler->parent);
case LTTNG_UST_ABI_DISABLE:
- return lttng_event_enabler_disable(enabler);
+ return lttng_event_enabler_disable(&enabler->parent);
case LTTNG_UST_ABI_FILTER:
{
int ret;
- ret = lttng_event_enabler_attach_filter_bytecode(enabler,
+ ret = lttng_event_enabler_attach_filter_bytecode(&enabler->parent,
(struct lttng_ust_bytecode_node **) arg);
if (ret)
return ret;
}
case LTTNG_UST_ABI_EXCLUSION:
{
- return lttng_event_enabler_attach_exclusion(enabler,
+ return lttng_event_enabler_attach_exclusion(&enabler->parent,
(struct lttng_ust_excluder_node **) arg);
}
default:
static
int lttng_event_enabler_release(int objd)
{
- struct lttng_event_enabler *event_enabler = objd_private(objd);
+ struct lttng_event_recorder_enabler *event_enabler = objd_private(objd);
if (event_enabler)
return lttng_ust_abi_objd_unref(event_enabler->chan->priv->parent.objd, 0);
/* Counter commands */
[ LTTNG_UST_ABI_COUNTER_GLOBAL ] = "Create Counter Global",
[ LTTNG_UST_ABI_COUNTER_CPU ] = "Create Counter CPU",
+ [ LTTNG_UST_ABI_COUNTER_EVENT ] = "Create Counter Event",
};
static const char *str_timeout;
{
switch (cmd) {
case LTTNG_UST_ABI_CAPTURE:
- return "capture";
+ return "capture bytecode";
case LTTNG_UST_ABI_FILTER:
- return "filter";
+ return "filter bytecode";
default:
abort();
}
}
+/* Tri-state result of validating a receive from the session daemon. */
+enum handle_message_error {
+ MSG_OK = 0,
+ MSG_ERROR = 1,
+ MSG_SHUTDOWN = 2,
+};
+
+/*
+ * Validate the result of a socket receive against the expected length.
+ * Returns MSG_OK when exactly expected_len bytes were received,
+ * MSG_SHUTDOWN on orderly shutdown (len == 0), MSG_ERROR otherwise.
+ * *error_code is set to 0 on MSG_OK/MSG_SHUTDOWN, to the negative errno
+ * on receive failure, or to -EINVAL on an unexpected message size.
+ */
+static
+enum handle_message_error handle_error(struct sock_info *sock_info, ssize_t len,
+ ssize_t expected_len, const char *str, int *error_code)
+{
+ if (!len) {
+ /* orderly shutdown */
+ *error_code = 0;
+ return MSG_SHUTDOWN;
+ }
+ if (len == expected_len) {
+ DBG("%s data received", str);
+ *error_code = 0;
+ return MSG_OK;
+ }
+ if (len < 0) {
+ /* A negative len carries the receive errno. */
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ }
+ *error_code = len;
+ return MSG_ERROR;
+ }
+ /* Short or over-long read: malformed message. */
+ DBG("incorrect %s data message size: %zd", str, len);
+ *error_code = -EINVAL;
+ return MSG_ERROR;
+}
+
static
int handle_bytecode_recv(struct sock_info *sock_info,
int sock, struct ustcomm_ust_msg *lum)
}
if (data_size > data_size_max) {
- ERR("Bytecode %s data size is too large: %u bytes",
+ ERR("%s data size is too large: %u bytes",
bytecode_type_str(lum->cmd), data_size);
ret = -EINVAL;
goto end;
}
if (reloc_offset > data_size) {
- ERR("Bytecode %s reloc offset %u is not within data",
+ ERR("%s reloc offset %u is not within data",
bytecode_type_str(lum->cmd), reloc_offset);
ret = -EINVAL;
goto end;
bytecode->type = type;
len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, bytecode->bc.len);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
+ switch (handle_error(sock_info, len, bytecode->bc.len, bytecode_type_str(lum->cmd), &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
goto end;
- default:
- if (len == bytecode->bc.len) {
- DBG("Bytecode %s data received",
- bytecode_type_str(lum->cmd));
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d",
- (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection",
- sock_info->name);
- ret = len;
- goto end;
- }
- ret = len;
- goto end;
- } else {
- DBG("Incorrect %s bytecode data message size: %zd",
- bytecode_type_str(lum->cmd), len);
- ret = -EINVAL;
- goto end;
- }
}
-
ops = lttng_ust_abi_objd_ops(lum->handle);
if (!ops) {
ret = -ENOENT;
{
int ret = 0;
const struct lttng_ust_abi_objd_ops *ops;
- struct ustcomm_ust_reply lur;
+ struct ustcomm_ust_reply lur = {};
union lttng_ust_abi_args args;
char ctxstr[LTTNG_UST_ABI_SYM_NAME_LEN]; /* App context string. */
ssize_t len;
-
- memset(&lur, 0, sizeof(lur));
+ void *var_len_cmd_data = NULL;
if (ust_lock()) {
ret = -LTTNG_UST_ERR_EXITING;
case LTTNG_UST_ABI_COUNTER:
case LTTNG_UST_ABI_COUNTER_GLOBAL:
case LTTNG_UST_ABI_COUNTER_CPU:
+ case LTTNG_UST_ABI_COUNTER_EVENT:
case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
/*
node->excluder.count = count;
len = ustcomm_recv_unix_sock(sock, node->excluder.names,
count * LTTNG_UST_ABI_SYM_NAME_LEN);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
+ switch (handle_error(sock_info, len, count * LTTNG_UST_ABI_SYM_NAME_LEN, "exclusion", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
free(node);
goto error;
- default:
- if (len == count * LTTNG_UST_ABI_SYM_NAME_LEN) {
- DBG("Exclusion data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- free(node);
- goto error;
- }
- ret = len;
- free(node);
- goto error;
- } else {
- DBG("Incorrect exclusion data message size: %zd", len);
- ret = -EINVAL;
- free(node);
- goto error;
- }
}
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
len = ustcomm_recv_event_notifier_notif_fd_from_sessiond(sock,
&event_notifier_notif_fd);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- case 1:
+ switch (handle_error(sock_info, len, 1, "event notifier group", &ret)) {
+ case MSG_OK:
break;
- default:
- if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d",
- (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection",
- sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("Incorrect event notifier fd message size: %zd",
- len);
- ret = -EINVAL;
- goto error;
- }
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
+ goto error;
}
- args.event_notifier_handle.event_notifier_notif_fd =
- event_notifier_notif_fd;
+ args.event_notifier_handle.event_notifier_notif_fd = event_notifier_notif_fd;
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
(unsigned long) &lum->u,
len = ustcomm_recv_channel_from_sessiond(sock,
&chan_data, lum->u.channel.len,
&wakeup_fd);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
+ switch (handle_error(sock_info, len, lum->u.channel.len, "channel", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
goto error;
- default:
- if (len == lum->u.channel.len) {
- DBG("channel data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect channel data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
}
args.channel.chan_data = chan_data;
args.channel.wakeup_fd = wakeup_fd;
p = &ctxstr[strlen("$app.")];
recvlen = ctxlen - strlen("$app.");
len = ustcomm_recv_unix_sock(sock, p, recvlen);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
+ switch (handle_error(sock_info, len, recvlen, "app context", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
goto error;
- default:
- if (len == recvlen) {
- DBG("app context data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect app context data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
}
/* Put : between provider and ctxname. */
p[lum->u.context.u.app_ctx.provider_name_len - 1] = ':';
break;
case LTTNG_UST_ABI_COUNTER:
{
- void *counter_data;
-
- len = ustcomm_recv_counter_from_sessiond(sock,
- &counter_data, lum->u.counter.len);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
+ len = ustcomm_recv_var_len_cmd_from_sessiond(sock,
+ &var_len_cmd_data, lum->u.var_len_cmd.cmd_len);
+ switch (handle_error(sock_info, len, lum->u.var_len_cmd.cmd_len, "counter", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
goto error;
- default:
- if (len == lum->u.counter.len) {
- DBG("counter data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect counter data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
}
- args.counter.counter_data = counter_data;
+ args.counter.len = lum->u.var_len_cmd.cmd_len;
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
+ (unsigned long) var_len_cmd_data,
&args, sock_info);
else
ret = -ENOSYS;
- free(args.counter.counter_data);
break;
}
case LTTNG_UST_ABI_COUNTER_GLOBAL:
{
+ len = ustcomm_recv_var_len_cmd_from_sessiond(sock,
+ &var_len_cmd_data, lum->u.var_len_cmd.cmd_len);
+ switch (handle_error(sock_info, len, lum->u.var_len_cmd.cmd_len, "counter global", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
+ goto error;
+ }
/* Receive shm_fd */
- ret = ustcomm_recv_counter_shm_from_sessiond(sock,
- &args.counter_shm.shm_fd);
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock, &args.counter_shm.shm_fd);
if (ret) {
goto error;
}
-
+ args.counter_shm.len = lum->u.var_len_cmd.cmd_len;
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
+ (unsigned long) var_len_cmd_data,
&args, sock_info);
else
ret = -ENOSYS;
}
case LTTNG_UST_ABI_COUNTER_CPU:
{
+ len = ustcomm_recv_var_len_cmd_from_sessiond(sock,
+ &var_len_cmd_data, lum->u.var_len_cmd.cmd_len);
+ switch (handle_error(sock_info, len, lum->u.var_len_cmd.cmd_len, "counter cpu", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
+ goto error;
+ }
/* Receive shm_fd */
- ret = ustcomm_recv_counter_shm_from_sessiond(sock,
- &args.counter_shm.shm_fd);
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock, &args.counter_shm.shm_fd);
if (ret) {
goto error;
}
-
+ args.counter_shm.len = lum->u.var_len_cmd.cmd_len;
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
+ (unsigned long) var_len_cmd_data,
&args, sock_info);
else
ret = -ENOSYS;
}
break;
}
- case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
+ case LTTNG_UST_ABI_COUNTER_EVENT:
{
- /* Receive struct lttng_ust_event_notifier */
- struct lttng_ust_abi_event_notifier event_notifier;
-
- if (sizeof(event_notifier) != lum->u.event_notifier.len) {
- DBG("incorrect event notifier data message size: %u", lum->u.event_notifier.len);
- ret = -EINVAL;
+ len = ustcomm_recv_var_len_cmd_from_sessiond(sock,
+ &var_len_cmd_data, lum->u.var_len_cmd.cmd_len);
+ switch (handle_error(sock_info, len, lum->u.var_len_cmd.cmd_len, "counter event", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
goto error;
}
- len = ustcomm_recv_unix_sock(sock, &event_notifier, sizeof(event_notifier));
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
+ args.counter_event.len = lum->u.var_len_cmd.cmd_len;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) var_len_cmd_data,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
+ {
+ len = ustcomm_recv_var_len_cmd_from_sessiond(sock,
+ &var_len_cmd_data, lum->u.var_len_cmd.cmd_len);
+ switch (handle_error(sock_info, len, lum->u.var_len_cmd.cmd_len, "event notifier", &ret)) {
+ case MSG_OK:
+ break;
+ case MSG_ERROR: /* Fallthrough */
+ case MSG_SHUTDOWN:
goto error;
- default:
- if (len == sizeof(event_notifier)) {
- DBG("event notifier data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect event notifier data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
}
+ args.event_notifier.len = lum->u.var_len_cmd.cmd_len;
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &event_notifier,
+ (unsigned long) var_len_cmd_data,
&args, sock_info);
else
ret = -ENOSYS;
error:
ust_unlock();
+ free(var_len_cmd_data);
return ret;
}
: ${ITERS:=10}
: ${DURATION:=2}
: ${NR_THREADS:=1}
-: ${NR_CPUS:=$(lscpu | grep "^CPU(s)" | sed 's/^.*:[ \t]*//g')}
+: ${NR_CPUS:=$(lscpu | grep "^CPU(s):" | sed 's/^.*:[ \t]*//g')}
: ${TIME:="./$CURDIR/ptime"}
#include "./fake-ust.h"
-void init_usterr(void)
+ __attribute__((noinline)) void init_usterr(void)
{
fprintf(stderr, "libfakeust0: init_usterr() called.\n");
}
ok(shmfd > 0, "Open a POSIX shm fd");
/* Create a dummy shm object table to test the allocation function */
- table = shm_object_table_create(1);
+ table = shm_object_table_create(1, false);
ok(table, "Create a shm object table");
assert(table);
/* This function sets the initial size of the shm with ftruncate and zeros it */
- shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1);
+ shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1, false);
ok(shmobj, "Allocate the shm object table");
assert(shmobj);