/*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 EfficiOS Inc.
* Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
*
* SPDX-License-Identifier: GPL-2.0-only
*/
#define _LGPL_SOURCE
+
+#include "buffer-registry.hpp"
+#include "condition-internal.hpp"
+#include "event-notifier-error-accounting.hpp"
+#include "event.hpp"
+#include "fd-limit.hpp"
+#include "field.hpp"
+#include "health-sessiond.hpp"
+#include "lttng-sessiond.hpp"
+#include "lttng-ust-ctl.hpp"
+#include "lttng-ust-error.hpp"
+#include "notification-thread-commands.hpp"
+#include "rotate.hpp"
+#include "session.hpp"
+#include "ust-app.hpp"
+#include "ust-consumer.hpp"
+#include "ust-field-convert.hpp"
+#include "utils.hpp"
+
+#include <common/bytecode/bytecode.hpp>
+#include <common/common.hpp>
+#include <common/compat/errno.hpp>
+#include <common/exception.hpp>
+#include <common/format.hpp>
+#include <common/hashtable/utils.hpp>
+#include <common/make-unique.hpp>
+#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
+
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-matches-internal.hpp>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/event-rule/event-rule-internal.hpp>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/user-tracepoint.h>
+#include <lttng/trigger/trigger-internal.hpp>
+
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
+#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
-#include <signal.h>
-
-#include <common/bytecode/bytecode.h>
-#include <common/compat/errno.h>
-#include <common/common.h>
-#include <common/hashtable/utils.h>
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/event-rule/user-tracepoint.h>
-#include <lttng/condition/condition.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/trigger/trigger-internal.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-
-#include "buffer-registry.h"
-#include "condition-internal.h"
-#include "fd-limit.h"
-#include "health-sessiond.h"
-#include "ust-app.h"
-#include "ust-consumer.h"
-#include "lttng-ust-ctl.h"
-#include "lttng-ust-error.h"
-#include "utils.h"
-#include "session.h"
-#include "lttng-sessiond.h"
-#include "notification-thread-commands.h"
-#include "rotate.h"
-#include "event.h"
-#include "event-notifier-error-accounting.h"
+#include <vector>
+namespace lsu = lttng::sessiond::ust;
+namespace lst = lttng::sessiond::trace;
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
+namespace {
+
+/*
+ * Return the session registry according to the buffer type of the given
+ * session.
+ *
+ * A registry per UID object MUST exist before calling this function or else
+ * NULL is returned. RCU read side lock must be acquired.
+ */
+static lsu::registry_session *get_session_registry(
+		const struct ust_app_session *ua_sess)
+{
+	/* Stays NULL when no registry matches the session's buffering scheme. */
+	lsu::registry_session *registry = NULL;
+
+	LTTNG_ASSERT(ua_sess);
+
+	switch (ua_sess->buffer_type) {
+	case LTTNG_BUFFER_PER_PID:
+	{
+		/* Per-PID buffering: the registry is keyed by the app session id. */
+		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
+		if (!reg_pid) {
+			goto error;
+		}
+		registry = reg_pid->registry->reg.ust;
+		break;
+	}
+	case LTTNG_BUFFER_PER_UID:
+	{
+		/*
+		 * Per-UID buffering: the registry is shared per tracing
+		 * session, uid and bitness.
+		 */
+		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
+				ua_sess->tracing_id, ua_sess->bits_per_long,
+				lttng_credentials_get_uid(&ua_sess->real_credentials));
+		if (!reg_uid) {
+			goto error;
+		}
+		registry = reg_uid->registry->reg.ust;
+		break;
+	}
+	default:
+		/* Unknown buffer type: programming error. */
+		abort();
+	};
+
+error:
+	return registry;
+}
+
+/*
+ * Look up and lock the session registry matching ua_sess' buffering scheme.
+ *
+ * Returns an empty locked_ptr when no registry is found. NOTE(review): the
+ * registry lock is presumably released when the returned locked_ptr is
+ * destroyed — confirm against lsu::registry_session::locked_ptr's definition.
+ * RCU read side lock must be acquired (see get_session_registry).
+ */
+lsu::registry_session::locked_ptr
+get_locked_session_registry(const struct ust_app_session *ua_sess)
+{
+	auto session = get_session_registry(ua_sess);
+	if (session) {
+		/* Ownership of the lock is handed to the locked_ptr below. */
+		pthread_mutex_lock(&session->_lock);
+	}
+
+	return lsu::registry_session::locked_ptr{session};
+}
+} /* namespace */
+
/*
* Return the incremented value of next_channel_key.
*/
{
int ret;
struct ust_app_notify_sock_obj *obj =
- caa_container_of(head, struct ust_app_notify_sock_obj, head);
+ lttng::utils::container_of(head, &ust_app_notify_sock_obj::head);
/* Must have a valid fd here. */
LTTNG_ASSERT(obj->fd >= 0);
free(obj);
}
-/*
- * Return the session registry according to the buffer type of the given
- * session.
- *
- * A registry per UID object MUST exists before calling this function or else
- * it LTTNG_ASSERT() if not found. RCU read side lock must be acquired.
- */
-static struct ust_registry_session *get_session_registry(
- struct ust_app_session *ua_sess)
-{
- struct ust_registry_session *registry = NULL;
-
- LTTNG_ASSERT(ua_sess);
-
- switch (ua_sess->buffer_type) {
- case LTTNG_BUFFER_PER_PID:
- {
- struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
- if (!reg_pid) {
- goto error;
- }
- registry = reg_pid->registry->reg.ust;
- break;
- }
- case LTTNG_BUFFER_PER_UID:
- {
- struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
- ua_sess->tracing_id, ua_sess->bits_per_long,
- lttng_credentials_get_uid(&ua_sess->real_credentials));
- if (!reg_uid) {
- goto error;
- }
- registry = reg_uid->registry->reg.ust;
- break;
- }
- default:
- abort();
- };
-
-error:
- return registry;
-}
-
/*
* Delete ust context safely. RCU read lock must be held before calling
* this function.
int ret;
LTTNG_ASSERT(ua_ctx);
+ ASSERT_RCU_READ_LOCKED();
if (ua_ctx->obj) {
pthread_mutex_lock(&app->sock_lock);
int ret;
LTTNG_ASSERT(ua_event);
+ ASSERT_RCU_READ_LOCKED();
free(ua_event->filter);
if (ua_event->exclusion != NULL)
static
void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
-	struct ust_app_event_notifier_rule *obj = caa_container_of(
-			head, struct ust_app_event_notifier_rule, rcu_head);
+	/* Recover the rule from its embedded rcu_head; RCU reclamation callback. */
+	struct ust_app_event_notifier_rule *obj = lttng::utils::container_of(
+			head, &ust_app_event_notifier_rule::rcu_head);
	free(obj);
}
struct ust_app *app)
{
LTTNG_ASSERT(stream);
+ ASSERT_RCU_READ_LOCKED();
(void) release_ust_app_stream(sock, stream, app);
free(stream);
}
-/*
- * We need to execute ht_destroy outside of RCU read-side critical
- * section and outside of call_rcu thread, so we postpone its execution
- * using ht_cleanup_push. It is simpler than to change the semantic of
- * the many callers of delete_ust_app_session().
- */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
-		caa_container_of(head, struct ust_app_channel, rcu_head);
+		lttng::utils::container_of(head, &ust_app_channel::rcu_head);
-	ht_cleanup_push(ua_chan->ctx);
-	ht_cleanup_push(ua_chan->events);
+	/*
+	 * NOTE(review): lttng_ht_destroy() replaces the deferred
+	 * ht_cleanup_push() — confirm it is now safe to destroy hash tables
+	 * from the call_rcu thread / RCU callback context.
+	 */
+	lttng_ht_destroy(ua_chan->ctx);
+	lttng_ht_destroy(ua_chan->events);
	free(ua_chan);
}
*
* The session list lock must be held by the caller.
*/
-static
-void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
- struct ust_app *app)
+static void delete_ust_app_channel(int sock,
+ struct ust_app_channel *ua_chan,
+ struct ust_app *app,
+ const lsu::registry_session::locked_ptr& locked_registry)
{
int ret;
struct lttng_ht_iter iter;
struct ust_app_event *ua_event;
struct ust_app_ctx *ua_ctx;
struct ust_app_stream *stream, *stmp;
- struct ust_registry_session *registry;
LTTNG_ASSERT(ua_chan);
+ ASSERT_RCU_READ_LOCKED();
DBG3("UST app deleting channel %s", ua_chan->name);
if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
/* Wipe and free registry from session registry. */
- registry = get_session_registry(ua_chan->session);
- if (registry) {
- ust_registry_channel_del_free(registry, ua_chan->key,
- sock >= 0);
+ if (locked_registry) {
+ try {
+ locked_registry->remove_channel(ua_chan->key, sock >= 0);
+ } catch (const std::exception &ex) {
+ DBG("Could not find channel for removal: %s", ex.what());
+ }
}
+
/*
* A negative socket can be used by the caller when
* cleaning-up a ua_chan in an error path. Skip the
* but it can be caused by recoverable errors (e.g. the application has
* terminated concurrently).
*/
-ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
- struct consumer_socket *socket, int send_zero_data)
+ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
+ struct consumer_socket *socket,
+ int send_zero_data)
{
int ret;
char *metadata_str = NULL;
ssize_t ret_val;
uint64_t metadata_key, metadata_version;
- LTTNG_ASSERT(registry);
+ LTTNG_ASSERT(locked_registry);
LTTNG_ASSERT(socket);
+ ASSERT_RCU_READ_LOCKED();
- metadata_key = registry->metadata_key;
+ metadata_key = locked_registry->_metadata_key;
/*
* Means that no metadata was assigned to the session. This can
return 0;
}
- offset = registry->metadata_len_sent;
- len = registry->metadata_len - registry->metadata_len_sent;
- new_metadata_len_sent = registry->metadata_len;
- metadata_version = registry->metadata_version;
+ offset = locked_registry->_metadata_len_sent;
+ len = locked_registry->_metadata_len - locked_registry->_metadata_len_sent;
+ new_metadata_len_sent = locked_registry->_metadata_len;
+ metadata_version = locked_registry->_metadata_version;
if (len == 0) {
DBG3("No metadata to push for metadata key %" PRIu64,
- registry->metadata_key);
+ locked_registry->_metadata_key);
ret_val = len;
if (send_zero_data) {
DBG("No metadata to push");
}
/* Allocate only what we have to send. */
- metadata_str = (char *) zmalloc(len);
+ metadata_str = calloc<char>(len);
if (!metadata_str) {
PERROR("zmalloc ust app metadata string");
ret_val = -ENOMEM;
goto error;
}
/* Copy what we haven't sent out. */
- memcpy(metadata_str, registry->metadata + offset, len);
+ memcpy(metadata_str, locked_registry->_metadata + offset, len);
push_data:
- pthread_mutex_unlock(®istry->lock);
+ pthread_mutex_unlock(&locked_registry->_lock);
/*
* We need to unlock the registry while we push metadata to
* break a circular dependency between the consumerd metadata
*/
ret = consumer_push_metadata(socket, metadata_key,
metadata_str, len, offset, metadata_version);
- pthread_mutex_lock(®istry->lock);
+ pthread_mutex_lock(&locked_registry->_lock);
if (ret < 0) {
/*
* There is an acceptable race here between the registry
* largest metadata_len_sent value of the concurrent
* send.
*/
- registry->metadata_len_sent =
- std::max(registry->metadata_len_sent,
+ locked_registry->_metadata_len_sent =
+ std::max(locked_registry->_metadata_len_sent,
new_metadata_len_sent);
}
free(metadata_str);
* the metadata cache has been destroyed on the
* consumer.
*/
- registry->metadata_closed = 1;
+ locked_registry->_metadata_closed = true;
}
error_push:
free(metadata_str);
* but it can be caused by recoverable errors (e.g. the application has
* terminated concurrently).
*/
-static int push_metadata(struct ust_registry_session *registry,
+static int push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
struct consumer_output *consumer)
{
int ret_val;
ssize_t ret;
struct consumer_socket *socket;
- LTTNG_ASSERT(registry);
+ LTTNG_ASSERT(locked_registry);
LTTNG_ASSERT(consumer);
+ ASSERT_RCU_READ_LOCKED();
- pthread_mutex_lock(®istry->lock);
- if (registry->metadata_closed) {
+ if (locked_registry->_metadata_closed) {
ret_val = -EPIPE;
goto error;
}
/* Get consumer socket to use to push the metadata.*/
- socket = consumer_find_socket_by_bitness(registry->bits_per_long,
+ socket = consumer_find_socket_by_bitness(locked_registry->abi.bits_per_long,
consumer);
if (!socket) {
ret_val = -1;
goto error;
}
- ret = ust_app_push_metadata(registry, socket, 0);
+ ret = ust_app_push_metadata(locked_registry, socket, 0);
if (ret < 0) {
ret_val = ret;
goto error;
}
- pthread_mutex_unlock(®istry->lock);
return 0;
error:
- pthread_mutex_unlock(®istry->lock);
return ret_val;
}
*
* Return 0 on success else a negative value.
*/
-static int close_metadata(struct ust_registry_session *registry,
+static int close_metadata(uint64_t metadata_key, unsigned int consumer_bitness,
struct consumer_output *consumer)
{
int ret;
struct consumer_socket *socket;
- uint64_t metadata_key;
- bool registry_was_already_closed;
+ lttng::urcu::read_lock_guard read_lock_guard;
- LTTNG_ASSERT(registry);
LTTNG_ASSERT(consumer);
- rcu_read_lock();
-
- pthread_mutex_lock(®istry->lock);
- metadata_key = registry->metadata_key;
- registry_was_already_closed = registry->metadata_closed;
- if (metadata_key != 0) {
- /*
- * Metadata closed. Even on error this means that the consumer
- * is not responding or not found so either way a second close
- * should NOT be emit for this registry.
- */
- registry->metadata_closed = 1;
- }
- pthread_mutex_unlock(®istry->lock);
-
- if (metadata_key == 0 || registry_was_already_closed) {
- ret = 0;
- goto end;
- }
-
- /* Get consumer socket to use to push the metadata.*/
- socket = consumer_find_socket_by_bitness(registry->bits_per_long,
+ /* Get consumer socket to use to push the metadata. */
+ socket = consumer_find_socket_by_bitness(consumer_bitness,
consumer);
if (!socket) {
ret = -1;
}
end:
- rcu_read_unlock();
return ret;
}
-/*
- * We need to execute ht_destroy outside of RCU read-side critical
- * section and outside of call_rcu thread, so we postpone its execution
- * using ht_cleanup_push. It is simpler than to change the semantic of
- * the many callers of delete_ust_app_session().
- */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
-		caa_container_of(head, struct ust_app_session, rcu_head);
+		lttng::utils::container_of(head, &ust_app_session::rcu_head);
-	ht_cleanup_push(ua_sess->channels);
+	/*
+	 * NOTE(review): lttng_ht_destroy() replaces the deferred
+	 * ht_cleanup_push() — confirm it is safe from the call_rcu thread.
+	 */
+	lttng_ht_destroy(ua_sess->channels);
	free(ua_sess);
}
int ret;
struct lttng_ht_iter iter;
struct ust_app_channel *ua_chan;
- struct ust_registry_session *registry;
LTTNG_ASSERT(ua_sess);
+ ASSERT_RCU_READ_LOCKED();
pthread_mutex_lock(&ua_sess->lock);
LTTNG_ASSERT(!ua_sess->deleted);
ua_sess->deleted = true;
- registry = get_session_registry(ua_sess);
+ auto locked_registry = get_locked_session_registry(ua_sess);
/* Registry can be null on error path during initialization. */
- if (registry) {
+ if (locked_registry) {
/* Push metadata for application before freeing the application. */
- (void) push_metadata(registry, ua_sess->consumer);
+ (void) push_metadata(locked_registry, ua_sess->consumer);
+ }
+
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+ node.node) {
+ ret = lttng_ht_del(ua_sess->channels, &iter);
+ LTTNG_ASSERT(!ret);
+ delete_ust_app_channel(sock, ua_chan, app, locked_registry);
+ }
+ if (locked_registry) {
/*
* Don't ask to close metadata for global per UID buffers. Close
* metadata only on destroy trace session in this case. Also, the
* close so don't send a close command if closed.
*/
if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
- /* And ask to close it for this session registry. */
- (void) close_metadata(registry, ua_sess->consumer);
- }
- }
+ const auto metadata_key = locked_registry->_metadata_key;
+ const auto consumer_bitness = locked_registry->abi.bits_per_long;
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
- node.node) {
- ret = lttng_ht_del(ua_sess->channels, &iter);
- LTTNG_ASSERT(!ret);
- delete_ust_app_channel(sock, ua_chan, app);
+ if (!locked_registry->_metadata_closed && metadata_key != 0) {
+ locked_registry->_metadata_closed = true;
+ }
+
+ /* Release lock before communication, see comments in close_metadata(). */
+ locked_registry.reset();
+ (void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer);
+ }
}
/* In case of per PID, the registry is kept in the session. */
/*
* Delete a traceable application structure from the global list. Never call
* this function outside of a call_rcu call.
- *
- * RCU read side lock should _NOT_ be held when calling this function.
*/
static
void delete_ust_app(struct ust_app *app)
rcu_read_unlock();
- ht_cleanup_push(app->sessions);
- ht_cleanup_push(app->ust_sessions_objd);
- ht_cleanup_push(app->ust_objd);
- ht_cleanup_push(app->token_to_event_notifier_rule_ht);
+ lttng_ht_destroy(app->sessions);
+ lttng_ht_destroy(app->ust_sessions_objd);
+ lttng_ht_destroy(app->ust_objd);
+ lttng_ht_destroy(app->token_to_event_notifier_rule_ht);
/*
* This could be NULL if the event notifier setup failed (e.g the app
void delete_ust_app_rcu(struct rcu_head *head)
{
struct lttng_ht_node_ulong *node =
- caa_container_of(head, struct lttng_ht_node_ulong, head);
+ lttng::utils::container_of(head, <tng_ht_node_ulong::head);
struct ust_app *app =
- caa_container_of(node, struct ust_app, pid_n);
+ lttng::utils::container_of(node, &ust_app::pid_n);
DBG3("Call RCU deleting app PID %d", app->pid);
delete_ust_app(app);
struct ust_app_session *ua_sess;
/* Init most of the default value by allocating and zeroing */
- ua_sess = (ust_app_session *) zmalloc(sizeof(struct ust_app_session));
+ ua_sess = zmalloc<ust_app_session>();
if (ua_sess == NULL) {
PERROR("malloc");
goto error_free;
struct ust_app_channel *ua_chan;
/* Init most of the default value by allocating and zeroing */
- ua_chan = (ust_app_channel *) zmalloc(sizeof(struct ust_app_channel));
+ ua_chan = zmalloc<ust_app_channel>();
if (ua_chan == NULL) {
PERROR("malloc");
goto error;
{
struct ust_app_stream *stream = NULL;
- stream = (ust_app_stream *) zmalloc(sizeof(*stream));
+ stream = zmalloc<ust_app_stream>();
if (stream == NULL) {
PERROR("zmalloc ust app stream");
goto error;
struct ust_app_event *ua_event;
/* Init most of the default value by allocating and zeroing */
- ua_event = (ust_app_event *) zmalloc(sizeof(struct ust_app_event));
+ ua_event = zmalloc<ust_app_event>();
if (ua_event == NULL) {
PERROR("Failed to allocate ust_app_event structure");
goto error;
struct lttng_condition *condition = NULL;
const struct lttng_event_rule *event_rule = NULL;
- ua_event_notifier_rule = (ust_app_event_notifier_rule *) zmalloc(sizeof(struct ust_app_event_notifier_rule));
+ ua_event_notifier_rule = zmalloc<ust_app_event_notifier_rule>();
if (ua_event_notifier_rule == NULL) {
PERROR("Failed to allocate ust_app_event_notifier_rule structure");
goto error;
{
struct ust_app_ctx *ua_ctx;
- ua_ctx = (ust_app_ctx *) zmalloc(sizeof(struct ust_app_ctx));
+ ua_ctx = zmalloc<ust_app_ctx>();
if (ua_ctx == NULL) {
goto error;
}
struct lttng_ust_abi_filter_bytecode *filter = NULL;
/* Copy filter bytecode. */
- filter = (lttng_ust_abi_filter_bytecode *) zmalloc(sizeof(*filter) + orig_f->len);
+ filter = zmalloc<lttng_ust_abi_filter_bytecode>(sizeof(*filter) + orig_f->len);
if (!filter) {
PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
goto error;
struct lttng_ust_abi_capture_bytecode *capture = NULL;
/* Copy capture bytecode. */
- capture = (lttng_ust_abi_capture_bytecode *) zmalloc(sizeof(*capture) + orig_f->len);
+ capture = zmalloc<lttng_ust_abi_capture_bytecode>(sizeof(*capture) + orig_f->len);
if (!capture) {
PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
goto error;
struct lttng_ht_node_ulong *node;
struct lttng_ht_iter iter;
+ ASSERT_RCU_READ_LOCKED();
+
lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
if (node == NULL) {
goto error;
}
- return caa_container_of(node, struct ust_app, sock_n);
+ return lttng::utils::container_of(node, &ust_app::sock_n);
error:
return NULL;
struct lttng_ht_node_ulong *node;
struct lttng_ht_iter iter;
+ ASSERT_RCU_READ_LOCKED();
+
lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
&iter);
node = lttng_ht_iter_get_node_ulong(&iter);
goto error;
}
- return caa_container_of(node, struct ust_app, notify_sock_n);
+ return lttng::utils::container_of(node, &ust_app::notify_sock_n);
error:
return NULL;
goto end;
}
- event = caa_container_of(node, struct ust_app_event, node);
+ event = lttng::utils::container_of(node, &ust_app_event::node);
end:
return event;
struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
LTTNG_ASSERT(ht);
+ ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(ht, &token, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
goto end;
}
- event_notifier_rule = caa_container_of(
- node, struct ust_app_event_notifier_rule, node);
+ event_notifier_rule = lttng::utils::container_of(
+ node, &ust_app_event_notifier_rule::node);
end:
return event_notifier_rule;
}
size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
- ust_exclusion = (lttng_ust_abi_event_exclusion *) zmalloc(exclusion_alloc_size);
+ ust_exclusion = zmalloc<lttng_ust_abi_event_exclusion>(exclusion_alloc_size);
if (!ust_exclusion) {
PERROR("malloc");
goto end;
cds_list_del(&stream->list);
delete_ust_app_stream(-1, stream, app);
}
- /* Flag the channel that it is sent to the application. */
- ua_chan->is_sent = 1;
error:
health_code_update();
* Should be called with session mutex held.
*/
static
-int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
+int create_ust_event(struct ust_app *app,
struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
int ret = 0;
event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
ret = lttng_strncpy(event_notifier->event.name, pattern,
- LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ sizeof(event_notifier->event.name));
if (ret) {
ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
pattern);
if (uevent->exclusion) {
exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
- ua_event->exclusion = (lttng_event_exclusion *) zmalloc(exclusion_alloc_size);
+ ua_event->exclusion = zmalloc<lttng_event_exclusion>(exclusion_alloc_size);
if (ua_event->exclusion == NULL) {
PERROR("malloc");
} else {
LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
ua_sess->buffer_type = usess->buffer_type;
- ua_sess->bits_per_long = app->bits_per_long;
+ ua_sess->bits_per_long = app->abi.bits_per_long;
/* There is only one consumer object per session possible. */
consumer_output_get(usess->consumer);
ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
DEFAULT_UST_TRACE_UID_PATH,
lttng_credentials_get_uid(&ua_sess->real_credentials),
- app->bits_per_long);
+ app->abi.bits_per_long);
break;
default:
abort();
case LTTNG_BUFFER_PER_UID:
ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
"/" DEFAULT_UST_TRACE_UID_PATH,
- app->uid, app->bits_per_long);
+ app->uid, app->abi.bits_per_long);
break;
default:
abort();
goto error;
}
- return caa_container_of(node, struct ust_app_session, node);
+ return lttng::utils::container_of(node, &ust_app_session::node);
error:
return NULL;
}
/* Initialize registry. */
- ret = ust_registry_session_init(®_pid->registry->reg.ust, app,
- app->bits_per_long, app->uint8_t_alignment,
- app->uint16_t_alignment, app->uint32_t_alignment,
- app->uint64_t_alignment, app->long_alignment,
- app->byte_order, app->version.major, app->version.minor,
- reg_pid->root_shm_path, reg_pid->shm_path,
+ reg_pid->registry->reg.ust = ust_registry_session_per_pid_create(app, app->abi,
+ app->version.major, app->version.minor, reg_pid->root_shm_path,
+ reg_pid->shm_path,
lttng_credentials_get_uid(&ua_sess->effective_credentials),
lttng_credentials_get_gid(&ua_sess->effective_credentials),
- ua_sess->tracing_id,
- app->uid);
- if (ret < 0) {
+ ua_sess->tracing_id);
+ if (!reg_pid->registry->reg.ust) {
/*
* reg_pid->registry->reg.ust is NULL upon error, so we need to
* destroy the buffer registry, because it is always expected
rcu_read_lock();
- reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+ reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
if (!reg_uid) {
/*
* This is the create channel path meaning that if there is NO
* registry available, we have to create one for this session.
*/
- ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
- LTTNG_DOMAIN_UST, ®_uid,
- ua_sess->root_shm_path, ua_sess->shm_path);
+ ret = buffer_reg_uid_create(usess->id, app->abi.bits_per_long, app->uid,
+ LTTNG_DOMAIN_UST, ®_uid, ua_sess->root_shm_path,
+ ua_sess->shm_path);
if (ret < 0) {
goto error;
}
}
/* Initialize registry. */
- ret = ust_registry_session_init(®_uid->registry->reg.ust, NULL,
- app->bits_per_long, app->uint8_t_alignment,
- app->uint16_t_alignment, app->uint32_t_alignment,
- app->uint64_t_alignment, app->long_alignment,
- app->byte_order, app->version.major,
- app->version.minor, reg_uid->root_shm_path,
- reg_uid->shm_path, usess->uid, usess->gid,
- ua_sess->tracing_id, app->uid);
- if (ret < 0) {
+ reg_uid->registry->reg.ust = ust_registry_session_per_uid_create(app->abi,
+ app->version.major, app->version.minor, reg_uid->root_shm_path,
+ reg_uid->shm_path, usess->uid, usess->gid, ua_sess->tracing_id, app->uid);
+ if (!reg_uid->registry->reg.ust) {
/*
* reg_uid->registry->reg.ust is NULL upon error, so we need to
* destroy the buffer registry, because it is always expected
buffer_reg_uid_destroy(reg_uid, NULL);
goto error;
}
+
/* Add node to teardown list of the session. */
cds_list_add(®_uid->lnode, &usess->buffer_reg_uid_list);
LTTNG_ASSERT(uctx);
LTTNG_ASSERT(ht);
+ ASSERT_RCU_READ_LOCKED();
/* Lookup using the lttng_ust_context_type and a custom match fct. */
cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
goto end;
}
- app_ctx = caa_container_of(node, struct ust_app_ctx, node);
+ app_ctx = lttng::utils::container_of(node, &ust_app_ctx::node);
end:
return app_ctx;
int ret = 0;
struct ust_app_ctx *ua_ctx;
+ ASSERT_RCU_READ_LOCKED();
+
DBG2("UST app adding context to channel %s", ua_chan->name);
ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
* Called with UST app session lock held.
*/
static
-int enable_ust_app_event(struct ust_app_session *ua_sess,
- struct ust_app_event *ua_event, struct ust_app *app)
+int enable_ust_app_event(struct ust_app_event *ua_event,
+ struct ust_app *app)
{
int ret;
/*
* Disable on the tracer side a ust app event for the session and channel.
*/
-static int disable_ust_app_event(struct ust_app_session *ua_sess,
- struct ust_app_event *ua_event, struct ust_app *app)
+static int disable_ust_app_event(struct ust_app_event *ua_event,
+ struct ust_app *app)
{
int ret;
struct lttng_ht_node_str *ua_chan_node;
struct ust_app_channel *ua_chan;
+ ASSERT_RCU_READ_LOCKED();
+
lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
ua_chan_node = lttng_ht_iter_get_node_str(&iter);
if (ua_chan_node == NULL) {
goto error;
}
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
ret = enable_ust_channel(app, ua_sess, ua_chan);
if (ret < 0) {
*/
static int do_consumer_create_channel(struct ltt_ust_session *usess,
struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
- int bitness, struct ust_registry_session *registry,
- uint64_t trace_archive_id)
+ int bitness, lsu::registry_session *registry)
{
int ret;
unsigned int nb_fd = 0;
buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
/* Create and add a channel registry to session. */
- ret = ust_registry_channel_add(reg_sess->reg.ust,
- ua_chan->tracing_channel_id);
- if (ret < 0) {
+ try {
+ reg_sess->reg.ust->add_channel(ua_chan->tracing_channel_id);
+ } catch (const std::exception& ex) {
+ ERR("Failed to add a channel registry to userspace registry session: %s", ex.what());
+ ret = -1;
goto error;
}
+
buffer_reg_channel_add(reg_sess, buf_reg_chan);
if (regp) {
/* Send all streams to application. */
pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
- struct ust_app_stream stream;
+ struct ust_app_stream stream = {};
ret = duplicate_stream_object(reg_stream, &stream);
if (ret < 0) {
* Treat this the same way as an application
* that is exiting.
*/
- WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
- app->pid, stream.name,
+ WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64 "\".",
+ app->pid,
ua_chan->name,
ua_sess->tracing_id);
ret = -ENOTCONN;
*/
(void) release_ust_app_stream(-1, &stream, app);
}
- ua_chan->is_sent = 1;
error_stream_unlock:
pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
struct buffer_reg_channel *buf_reg_chan;
struct ltt_session *session = NULL;
enum lttng_error_code notification_ret;
- struct ust_registry_channel *ust_reg_chan;
LTTNG_ASSERT(app);
LTTNG_ASSERT(usess);
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(ua_chan);
+ ASSERT_RCU_READ_LOCKED();
DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
- reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+ reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
/*
* The session creation handles the creation of this global registry
* object. If none can be find, there is a code flow problem or a
session = session_find_by_id(ua_sess->tracing_id);
LTTNG_ASSERT(session);
- LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
- LTTNG_ASSERT(session_trylock_list());
+ ASSERT_LOCKED(session->lock);
+ ASSERT_SESSION_LIST_LOCKED();
/*
* Create the buffers on the consumer side. This call populates the
* ust app channel object with all streams and data object.
*/
ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->bits_per_long, reg_uid->registry->reg.ust,
- session->most_recent_chunk_id.value);
+ app->abi.bits_per_long, reg_uid->registry->reg.ust);
if (ret < 0) {
ERR("Error creating UST channel \"%s\" on the consumer daemon",
ua_chan->name);
* Let's remove the previously created buffer registry channel so
* it's not visible anymore in the session registry.
*/
- ust_registry_channel_del_free(reg_uid->registry->reg.ust,
- ua_chan->tracing_channel_id, false);
+ auto locked_registry = reg_uid->registry->reg.ust->lock();
+ try {
+ locked_registry->remove_channel(ua_chan->tracing_channel_id, false);
+ } catch (const std::exception &ex) {
+ DBG("Could not find channel for removal: %s", ex.what());
+ }
buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
goto error;
goto error;
}
- /* Notify the notification subsystem of the channel's creation. */
- pthread_mutex_lock(®_uid->registry->reg.ust->lock);
- ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
- ua_chan->tracing_channel_id);
- LTTNG_ASSERT(ust_reg_chan);
- ust_reg_chan->consumer_key = ua_chan->key;
- ust_reg_chan = NULL;
- pthread_mutex_unlock(®_uid->registry->reg.ust->lock);
+ {
+ auto locked_registry = reg_uid->registry->reg.ust->lock();
+ auto& ust_reg_chan = locked_registry->get_channel(ua_chan->tracing_channel_id);
+
+ ust_reg_chan._consumer_key = ua_chan->key;
+ }
+ /* Notify the notification subsystem of the channel's creation. */
notification_ret = notification_thread_command_add_channel(
- the_notification_thread_handle, session->name,
- lttng_credentials_get_uid(
- &ua_sess->effective_credentials),
- lttng_credentials_get_gid(
- &ua_sess->effective_credentials),
+ the_notification_thread_handle, session->id,
ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
if (notification_ret != LTTNG_OK) {
struct ust_app_channel *ua_chan)
{
int ret;
- struct ust_registry_session *registry;
+ lsu::registry_session *registry;
enum lttng_error_code cmd_ret;
struct ltt_session *session = NULL;
uint64_t chan_reg_key;
- struct ust_registry_channel *ust_reg_chan;
LTTNG_ASSERT(app);
LTTNG_ASSERT(usess);
LTTNG_ASSERT(registry);
/* Create and add a new channel registry to session. */
- ret = ust_registry_channel_add(registry, ua_chan->key);
- if (ret < 0) {
- ERR("Error creating the UST channel \"%s\" registry instance",
- ua_chan->name);
+ try {
+ registry->add_channel(ua_chan->key);
+ } catch (const std::exception& ex) {
+ ERR("Error creating the UST channel \"%s\" registry instance: %s", ua_chan->name,
+ ex.what());
+ ret = -1;
goto error;
}
session = session_find_by_id(ua_sess->tracing_id);
LTTNG_ASSERT(session);
-
- LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
- LTTNG_ASSERT(session_trylock_list());
+ ASSERT_LOCKED(session->lock);
+ ASSERT_SESSION_LIST_LOCKED();
/* Create and get channel on the consumer side. */
ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->bits_per_long, registry,
- session->most_recent_chunk_id.value);
+ app->abi.bits_per_long, registry);
if (ret < 0) {
ERR("Error creating UST channel \"%s\" on the consumer daemon",
ua_chan->name);
}
chan_reg_key = ua_chan->key;
- pthread_mutex_lock(®istry->lock);
- ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
- LTTNG_ASSERT(ust_reg_chan);
- ust_reg_chan->consumer_key = ua_chan->key;
- pthread_mutex_unlock(®istry->lock);
+ {
+ auto locked_registry = registry->lock();
+
+ auto& ust_reg_chan = locked_registry->get_channel(chan_reg_key);
+ ust_reg_chan._consumer_key = ua_chan->key;
+ }
cmd_ret = notification_thread_command_add_channel(
- the_notification_thread_handle, session->name,
- lttng_credentials_get_uid(
- &ua_sess->effective_credentials),
- lttng_credentials_get_gid(
- &ua_sess->effective_credentials),
+ the_notification_thread_handle, session->id,
ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
if (cmd_ret != LTTNG_OK) {
error_remove_from_registry:
if (ret) {
- ust_registry_channel_del_free(registry, ua_chan->key, false);
+ try {
+ auto locked_registry = registry->lock();
+ locked_registry->remove_channel(ua_chan->key, false);
+ } catch (const std::exception& ex) {
+ DBG("Could not find channel for removal: %s", ex.what());
+ }
}
error:
rcu_read_unlock();
LTTNG_ASSERT(usess->active);
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(ua_chan);
+ ASSERT_RCU_READ_LOCKED();
/* Handle buffer type before sending the channel to the application. */
switch (usess->buffer_type) {
*/
static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
struct ltt_ust_channel *uchan,
- enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
+ enum lttng_ust_abi_chan_type type,
+ struct ltt_ust_session *usess __attribute__((unused)),
struct ust_app_channel **ua_chanp)
{
int ret = 0;
struct lttng_ht_node_str *ua_chan_node;
struct ust_app_channel *ua_chan;
+ ASSERT_RCU_READ_LOCKED();
+
/* Lookup channel in the ust app session */
lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
ua_chan_node = lttng_ht_iter_get_node_str(&iter);
if (ua_chan_node != NULL) {
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
goto end;
}
* Called with ust app session mutex held.
*/
static
-int create_ust_app_event(struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
+int create_ust_app_event(struct ust_app_channel *ua_chan,
+ struct ltt_ust_event *uevent,
struct ust_app *app)
{
int ret = 0;
struct ust_app_event *ua_event;
+ ASSERT_RCU_READ_LOCKED();
+
ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
if (ua_event == NULL) {
/* Only failure mode of alloc_ust_app_event(). */
shadow_copy_event(ua_event, uevent);
/* Create it on the tracer side */
- ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
+ ret = create_ust_event(app, ua_chan, ua_event);
if (ret < 0) {
/*
* Not found previously means that it does not exist on the
int ret = 0;
struct ust_app_event_notifier_rule *ua_event_notifier_rule;
+ ASSERT_RCU_READ_LOCKED();
+
ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
if (ua_event_notifier_rule == NULL) {
ret = -ENOMEM;
int ret = 0;
struct ust_app_channel *metadata;
struct consumer_socket *socket;
- struct ust_registry_session *registry;
struct ltt_session *session = NULL;
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
LTTNG_ASSERT(consumer);
+ ASSERT_RCU_READ_LOCKED();
- registry = get_session_registry(ua_sess);
+ auto locked_registry = get_locked_session_registry(ua_sess);
/* The UST app session is held registry shall not be null. */
- LTTNG_ASSERT(registry);
-
- pthread_mutex_lock(®istry->lock);
+ LTTNG_ASSERT(locked_registry);
/* Metadata already exists for this registry or it was closed previously */
- if (registry->metadata_key || registry->metadata_closed) {
+ if (locked_registry->_metadata_key || locked_registry->_metadata_closed) {
ret = 0;
goto error;
}
}
/* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, consumer);
if (!socket) {
ret = -EINVAL;
goto error_consumer;
* consumer requesting the metadata and the ask_channel call on our side
* did not returned yet.
*/
- registry->metadata_key = metadata->key;
+ locked_registry->_metadata_key = metadata->key;
session = session_find_by_id(ua_sess->tracing_id);
LTTNG_ASSERT(session);
-
- LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
- LTTNG_ASSERT(session_trylock_list());
+ ASSERT_LOCKED(session->lock);
+ ASSERT_SESSION_LIST_LOCKED();
/*
* Ask the metadata channel creation to the consumer. The metadata object
* consumer.
*/
ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
- registry, session->current_trace_chunk);
+ locked_registry.get(), session->current_trace_chunk);
if (ret < 0) {
/* Nullify the metadata key so we don't try to close it later on. */
- registry->metadata_key = 0;
+ locked_registry->_metadata_key = 0;
goto error_consumer;
}
ret = consumer_setup_metadata(socket, metadata->key);
if (ret < 0) {
/* Nullify the metadata key so we don't try to close it later on. */
- registry->metadata_key = 0;
+ locked_registry->_metadata_key = 0;
goto error_consumer;
}
error_consumer:
lttng_fd_put(LTTNG_FD_APPS, 1);
- delete_ust_app_channel(-1, metadata, app);
+ delete_ust_app_channel(-1, metadata, app, locked_registry);
error:
- pthread_mutex_unlock(®istry->lock);
if (session) {
session_put(session);
}
DBG2("Found UST app by pid %d", pid);
- app = caa_container_of(node, struct ust_app, pid_n);
+ app = lttng::utils::container_of(node, &ust_app::pid_n);
error:
return app;
goto error;
}
- lta = (ust_app *) zmalloc(sizeof(struct ust_app));
+ lta = zmalloc<ust_app>();
if (lta == NULL) {
PERROR("malloc");
goto error_free_pipe;
lta->uid = msg->uid;
lta->gid = msg->gid;
- lta->bits_per_long = msg->bits_per_long;
- lta->uint8_t_alignment = msg->uint8_t_alignment;
- lta->uint16_t_alignment = msg->uint16_t_alignment;
- lta->uint32_t_alignment = msg->uint32_t_alignment;
- lta->uint64_t_alignment = msg->uint64_t_alignment;
- lta->long_alignment = msg->long_alignment;
- lta->byte_order = msg->byte_order;
+ lta->abi = {
+ .bits_per_long = msg->bits_per_long,
+ .long_alignment = msg->long_alignment,
+ .uint8_t_alignment = msg->uint8_t_alignment,
+ .uint16_t_alignment = msg->uint16_t_alignment,
+ .uint32_t_alignment = msg->uint32_t_alignment,
+ .uint64_t_alignment = msg->uint64_t_alignment,
+ .byte_order = msg->byte_order == LITTLE_ENDIAN ?
+ lttng::sessiond::trace::byte_order::LITTLE_ENDIAN_ :
+ lttng::sessiond::trace::byte_order::BIG_ENDIAN_,
+ };
lta->v_major = msg->major;
lta->v_minor = msg->minor;
node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
LTTNG_ASSERT(node);
- lta = caa_container_of(node, struct ust_app, sock_n);
+ lta = lttng::utils::container_of(node, &ust_app::sock_n);
DBG("PID %d unregistering with sock %d", lta->pid, sock);
/*
*/
cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
node.node) {
- struct ust_registry_session *registry;
-
ret = lttng_ht_del(lta->sessions, &iter);
if (ret) {
/* The session was already removed so scheduled for teardown. */
* The close metadata below nullifies the metadata pointer in the
* session so the delete session will NOT push/close a second time.
*/
- registry = get_session_registry(ua_sess);
- if (registry) {
+ auto locked_registry = get_locked_session_registry(ua_sess);
+ if (locked_registry) {
/* Push metadata for application before freeing the application. */
- (void) push_metadata(registry, ua_sess->consumer);
+ (void) push_metadata(locked_registry, ua_sess->consumer);
/*
* Don't ask to close metadata for global per UID buffers. Close
* close so don't send a close command if closed.
*/
if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
- /* And ask to close it for this session registry. */
- (void) close_metadata(registry, ua_sess->consumer);
+ const auto metadata_key = locked_registry->_metadata_key;
+ const auto consumer_bitness = locked_registry->abi.bits_per_long;
+
+ if (!locked_registry->_metadata_closed && metadata_key != 0) {
+ locked_registry->_metadata_closed = true;
+ }
+
+ /* Release lock before communication, see comments in close_metadata(). */
+ locked_registry.reset();
+ (void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer);
+ } else {
+ locked_registry.reset();
}
}
cds_list_add(&ua_sess->teardown_node, <a->teardown_head);
struct lttng_event *tmp_event;
nbmem = UST_APP_EVENT_LIST_SIZE;
- tmp_event = (lttng_event *) zmalloc(nbmem * sizeof(struct lttng_event));
+ tmp_event = calloc<lttng_event>(nbmem);
if (tmp_event == NULL) {
PERROR("zmalloc ust app events");
ret = -ENOMEM;
struct lttng_event_field *tmp_event;
nbmem = UST_APP_EVENT_LIST_SIZE;
- tmp_event = (lttng_event_field *) zmalloc(nbmem * sizeof(struct lttng_event_field));
+ tmp_event = calloc<lttng_event_field>(nbmem);
if (tmp_event == NULL) {
PERROR("zmalloc ust app event fields");
ret = -ENOMEM;
/*
* Free and clean all traceable apps of the global list.
- *
- * Should _NOT_ be called with RCU read-side lock held.
*/
void ust_app_clean_list(void)
{
/* Destroy is done only when the ht is empty */
if (ust_app_ht) {
- ht_cleanup_push(ust_app_ht);
+ lttng_ht_destroy(ust_app_ht);
}
if (ust_app_ht_by_sock) {
- ht_cleanup_push(ust_app_ht_by_sock);
+ lttng_ht_destroy(ust_app_ht_by_sock);
}
if (ust_app_ht_by_notify_sock) {
- ht_cleanup_push(ust_app_ht_by_notify_sock);
+ lttng_ht_destroy(ust_app_ht_by_notify_sock);
}
}
/* If the session if found for the app, the channel must be there */
LTTNG_ASSERT(ua_chan_node);
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
/* The channel must not be already disabled */
LTTNG_ASSERT(ua_chan->enabled == 1);
"Skipping", uchan->name, usess->id, app->pid);
continue;
}
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
uevent->filter, uevent->attr.loglevel,
continue;
}
- ret = disable_ust_app_event(ua_sess, ua_event, app);
+ ret = disable_ust_app_event(ua_event, app);
if (ret < 0) {
/* XXX: Report error someday... */
continue;
continue;
}
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
/* Get event node */
ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
goto next_app;
}
- ret = enable_ust_app_event(ua_sess, ua_event, app);
+ ret = enable_ust_app_event(ua_event, app);
if (ret < 0) {
pthread_mutex_unlock(&ua_sess->lock);
goto error;
/* If the channel is not found, there is a code flow error */
LTTNG_ASSERT(ua_chan_node);
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
+ ret = create_ust_app_event(ua_chan, uevent, app);
pthread_mutex_unlock(&ua_sess->lock);
if (ret < 0) {
if (ret != -LTTNG_UST_ERR_EXIST) {
{
int ret = 0;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
DBG("Stopping tracing for ust app pid %d", app->pid);
health_code_update();
- registry = get_session_registry(ua_sess);
+ {
+ auto locked_registry = get_locked_session_registry(ua_sess);
- /* The UST app session is held registry shall not be null. */
- LTTNG_ASSERT(registry);
+ /* The UST app session is held; the registry shall not be null. */
+ LTTNG_ASSERT(locked_registry);
- /* Push metadata for application before freeing the application. */
- (void) push_metadata(registry, ua_sess->consumer);
+ /* Push metadata for application before freeing the application. */
+ (void) push_metadata(locked_registry, ua_sess->consumer);
+ }
end_unlock:
pthread_mutex_unlock(&ua_sess->lock);
health_code_update();
/* Flushing buffers */
- socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
ua_sess->consumer);
/* Flush buffers and push metadata. */
/* Flush all per UID buffers associated to that session. */
cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
- struct ust_registry_session *ust_session_reg;
+ lsu::registry_session *ust_session_reg;
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
ust_session_reg = reg->registry->reg.ust;
/* Push metadata. */
- (void) push_metadata(ust_session_reg, usess->consumer);
+ auto locked_registry = ust_session_reg->lock();
+ (void) push_metadata(locked_registry, usess->consumer);
}
break;
}
health_code_update();
- socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
ua_sess->consumer);
if (!socket) {
ERR("Failed to find consumer (%" PRIu32 ") socket",
- app->bits_per_long);
+ app->abi.bits_per_long);
ret = -1;
goto end_unlock;
}
/* Session is being or is deleted. */
goto end;
}
- ua_sess = caa_container_of(node, struct ust_app_session, node);
+ ua_sess = lttng::utils::container_of(node, &ust_app_session::node);
health_code_update();
destroy_app_session(app, ua_sess);
static
int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
- struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
+ struct ltt_ust_event *uevent,
struct ust_app *app)
{
int ret = 0;
ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
uevent->filter, uevent->attr.loglevel, uevent->exclusion);
if (!ua_event) {
- ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
+ ret = create_ust_app_event(ua_chan, uevent, app);
if (ret < 0) {
goto end;
}
} else {
if (ua_event->enabled != uevent->enabled) {
ret = uevent->enabled ?
- enable_ust_app_event(ua_sess, ua_event, app) :
- disable_ust_app_event(ua_sess, ua_event, app);
+ enable_ust_app_event(ua_event, app) :
+ disable_ust_app_event(ua_event, app);
}
}
struct ust_app_event_notifier_rule *event_notifier_rule;
unsigned int count, i;
+ ASSERT_RCU_READ_LOCKED();
+
if (!ust_app_supports_notifiers(app)) {
goto end;
}
LTTNG_ASSERT(usess);
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
+ ASSERT_RCU_READ_LOCKED();
cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
uchan, node.node) {
cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
node.node) {
ret = ust_app_channel_synchronize_event(ua_chan,
- uevent, ua_sess, app);
+ uevent, app);
if (ret) {
goto end;
}
{
LTTNG_ASSERT(usess);
LTTNG_ASSERT(usess->active);
+ ASSERT_RCU_READ_LOCKED();
DBG2("UST app global update for app sock %d for session id %" PRIu64,
app->sock, usess->id);
*/
void ust_app_global_update_event_notifier_rules(struct ust_app *app)
{
+ ASSERT_RCU_READ_LOCKED();
+
DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
app->name, app->pid);
struct ust_app_session *ua_sess = NULL;
LTTNG_ASSERT(app);
+ ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
goto error;
}
- ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
+ ua_sess = lttng::utils::container_of(node, &ust_app_session::ust_objd_node);
error:
return ua_sess;
struct ust_app_channel *ua_chan = NULL;
LTTNG_ASSERT(app);
+ ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
goto error;
}
- ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
+ ua_chan = lttng::utils::container_of(node, &ust_app_channel::ust_objd_node);
error:
return ua_chan;
*
* On success 0 is returned else a negative value.
*/
-static int reply_ust_register_channel(int sock, int cobjd,
- size_t nr_fields, struct lttng_ust_ctl_field *fields)
+static int handle_app_register_channel_notification(int sock,
+ int cobjd,
+ struct lttng_ust_ctl_field *raw_context_fields,
+ size_t context_field_count)
{
int ret, ret_code = 0;
uint32_t chan_id;
uint64_t chan_reg_key;
- enum lttng_ust_ctl_channel_header type;
struct ust_app *app;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
- struct ust_registry_channel *ust_reg_chan;
+ auto ust_ctl_context_fields = lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(
+ raw_context_fields);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock_guard;
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
DBG("Application socket %d is being torn down. Abort event notify",
sock);
- ret = -1;
- goto error_rcu_unlock;
+ return -1;
}
/* Lookup channel by UST object descriptor. */
ua_chan = find_channel_by_objd(app, cobjd);
if (!ua_chan) {
DBG("Application channel is being torn down. Abort event notify");
- ret = 0;
- goto error_rcu_unlock;
+ return 0;
}
LTTNG_ASSERT(ua_chan->session);
ua_sess = ua_chan->session;
/* Get right session registry depending on the session buffer type. */
- registry = get_session_registry(ua_sess);
- if (!registry) {
+ auto locked_registry_session = get_locked_session_registry(ua_sess);
+ if (!locked_registry_session) {
DBG("Application session is being torn down. Abort event notify");
- ret = 0;
- goto error_rcu_unlock;
+ return 0;
};
/* Depending on the buffer type, a different channel key is used. */
chan_reg_key = ua_chan->key;
}
- pthread_mutex_lock(®istry->lock);
+ auto& ust_reg_chan = locked_registry_session->get_channel(chan_reg_key);
+
+ /* Channel id is set during the object creation. */
+ chan_id = ust_reg_chan.id;
- ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
- LTTNG_ASSERT(ust_reg_chan);
+ /*
+ * The application returns the typing information of the channel's
+ * context fields. In per-PID buffering mode, this is the first and only
+ * time we get this information. It is our chance to finalize the
+ * initialization of the channel and serialize its layout's description
+ * to the trace's metadata.
+ *
+ * However, in per-UID buffering mode, every application will provide
+ * this information (redundantly). The first time will allow us to
+ * complete the initialization. The following times, we simply validate
+ * that all apps provide the same typing for the context fields as a
+ * sanity check.
+ */
+ lst::type::cuptr context_fields = lttng::make_unique<lst::structure_type>(0,
+ lsu::create_trace_fields_from_ust_ctl_fields(*locked_registry_session,
+ ust_ctl_context_fields.get(), context_field_count));
- if (!ust_reg_chan->register_done) {
+ if (!ust_reg_chan.is_registered()) {
+ ust_reg_chan.set_context(std::move(context_fields));
+ } else {
/*
- * TODO: eventually use the registry event count for
- * this channel to better guess header type for per-pid
- * buffers.
+ * Validate that the context fields match between
+ * registry and the incoming application.
*/
- type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
- ust_reg_chan->nr_ctx_fields = nr_fields;
- ust_reg_chan->ctx_fields = fields;
- fields = NULL;
- ust_reg_chan->header_type = type;
- } else {
- /* Get current already assigned values. */
- type = ust_reg_chan->header_type;
+ if (ust_reg_chan.get_context() != *context_fields) {
+ ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
+ app->pid, app->sock);
+ ret_code = -EINVAL;
+ goto reply;
+ }
}
- /* Channel id is set during the object creation. */
- chan_id = ust_reg_chan->chan_id;
/* Append to metadata */
- if (!ust_reg_chan->metadata_dumped) {
- ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
+ if (!ust_reg_chan._metadata_dumped) {
+ /*ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);*/
if (ret_code) {
ERR("Error appending channel metadata (errno = %d)", ret_code);
goto reply;
reply:
DBG3("UST app replying to register channel key %" PRIu64
- " with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type,
+ " with id %u, ret = %d", chan_reg_key, chan_id,
ret_code);
- ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
+ ret = lttng_ust_ctl_reply_register_channel(sock, chan_id,
+ ust_reg_chan.header_type_ == lst::stream_class::header_type::COMPACT ?
+ LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT :
+ LTTNG_UST_CTL_CHANNEL_HEADER_LARGE,
+ ret_code);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
ret, app->pid, app->sock);
}
- goto error;
+
+ return ret;
}
- /* This channel registry registration is completed. */
- ust_reg_chan->register_done = 1;
+ /* This channel registry's registration is completed. */
+ ust_reg_chan.set_as_registered();
-error:
- pthread_mutex_unlock(®istry->lock);
-error_rcu_unlock:
- rcu_read_unlock();
- free(fields);
return ret;
}
*
* On success 0 is returned else a negative value.
*/
-static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
- char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
- int loglevel_value, char *model_emf_uri)
+static int add_event_ust_registry(int sock, int sobjd, int cobjd, const char *name,
+ char *raw_signature, size_t nr_fields, struct lttng_ust_ctl_field *raw_fields,
+ int loglevel_value, char *raw_model_emf_uri)
{
int ret, ret_code;
uint32_t event_id = 0;
struct ust_app *app;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
-
- rcu_read_lock();
+ lttng::urcu::read_lock_guard rcu_lock;
+ auto signature = lttng::make_unique_wrapper<char, lttng::free>(raw_signature);
+ auto fields = lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(raw_fields);
+ auto model_emf_uri = lttng::make_unique_wrapper<char, lttng::free>(raw_model_emf_uri);
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
DBG("Application socket %d is being torn down. Abort event notify",
sock);
- ret = -1;
- goto error_rcu_unlock;
+ return -1;
}
/* Lookup channel by UST object descriptor. */
ua_chan = find_channel_by_objd(app, cobjd);
if (!ua_chan) {
DBG("Application channel is being torn down. Abort event notify");
- ret = 0;
- goto error_rcu_unlock;
+ return 0;
}
LTTNG_ASSERT(ua_chan->session);
ua_sess = ua_chan->session;
- registry = get_session_registry(ua_sess);
- if (!registry) {
- DBG("Application session is being torn down. Abort event notify");
- ret = 0;
- goto error_rcu_unlock;
- }
-
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
chan_reg_key = ua_chan->tracing_channel_id;
} else {
chan_reg_key = ua_chan->key;
}
- pthread_mutex_lock(®istry->lock);
-
- /*
- * From this point on, this call acquires the ownership of the sig, fields
- * and model_emf_uri meaning any free are done inside it if needed. These
- * three variables MUST NOT be read/write after this.
- */
- ret_code = ust_registry_create_event(registry, chan_reg_key,
- sobjd, cobjd, name, sig, nr_fields, fields,
- loglevel_value, model_emf_uri, ua_sess->buffer_type,
- &event_id, app);
- sig = NULL;
- fields = NULL;
- model_emf_uri = NULL;
+ {
+ auto locked_registry = get_locked_session_registry(ua_sess);
+ if (locked_registry) {
+ /*
+ * From this point on, this call acquires the ownership of the signature,
+ * fields and model_emf_uri meaning any free are done inside it if needed.
+ * These three variables MUST NOT be read/write after this.
+ */
+ try {
+ auto& channel = locked_registry->get_channel(chan_reg_key);
+
+ /* event_id is set on success. */
+ channel.add_event(sobjd, cobjd, name, signature.get(),
+ lsu::create_trace_fields_from_ust_ctl_fields(
+ *locked_registry, fields.get(),
+ nr_fields),
+ loglevel_value,
+ model_emf_uri.get() ?
+ nonstd::optional<std::string>(
+ model_emf_uri.get()) :
+ nonstd::nullopt,
+ ua_sess->buffer_type, *app, event_id);
+ ret_code = 0;
+ } catch (const std::exception& ex) {
+ ERR("Failed to add event `%s` to registry session: %s", name,
+ ex.what());
+ /* Inform the application of the error; don't return directly. */
+ ret_code = -EINVAL;
+ }
+ } else {
+ DBG("Application session is being torn down. Abort event notify");
+ return 0;
+ }
+ }
/*
* The return value is returned to ustctl so in case of an error, the
* No need to wipe the create event since the application socket will
* get close on error hence cleaning up everything by itself.
*/
- goto error;
+ return ret;
}
DBG3("UST registry event %s with id %" PRId32 " added successfully",
name, event_id);
-
-error:
- pthread_mutex_unlock(®istry->lock);
-error_rcu_unlock:
- rcu_read_unlock();
- free(sig);
- free(fields);
- free(model_emf_uri);
return ret;
}
*
* On success 0 is returned else a negative value.
*/
-static int add_enum_ust_registry(int sock, int sobjd, char *name,
- struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
+static int add_enum_ust_registry(int sock, int sobjd, const char *name,
+ struct lttng_ust_ctl_enum_entry *raw_entries, size_t nr_entries)
{
- int ret = 0, ret_code;
+ int ret = 0;
struct ust_app *app;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
uint64_t enum_id = -1ULL;
-
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock_guard;
+ auto entries = lttng::make_unique_wrapper<struct lttng_ust_ctl_enum_entry, lttng::free>(
+ raw_entries);
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
/* Return an error since this is not an error */
DBG("Application socket %d is being torn down. Aborting enum registration",
sock);
- free(entries);
- ret = -1;
- goto error_rcu_unlock;
+ return -1;
}
/* Lookup session by UST object descriptor. */
if (!ua_sess) {
/* Return an error since this is not an error */
DBG("Application session is being torn down (session not found). Aborting enum registration.");
- free(entries);
- goto error_rcu_unlock;
+ return 0;
}
- registry = get_session_registry(ua_sess);
- if (!registry) {
+ auto locked_registry = get_locked_session_registry(ua_sess);
+ if (!locked_registry) {
DBG("Application session is being torn down (registry not found). Aborting enum registration.");
- free(entries);
- goto error_rcu_unlock;
+ return 0;
}
- pthread_mutex_lock(®istry->lock);
-
/*
* From this point on, the callee acquires the ownership of
* entries. The variable entries MUST NOT be read/written after
* call.
*/
- ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
- entries, nr_entries, &enum_id);
- entries = NULL;
+ int application_reply_code;
+ try {
+ locked_registry->create_or_find_enum(
+ sobjd, name, entries.release(), nr_entries, &enum_id);
+ application_reply_code = 0;
+ } catch (const std::exception& ex) {
+ ERR("%s: %s", fmt::format("Failed to create or find enumeration provided by application: app = {}, enumeration name = {}",
+ *app, name).c_str(), ex.what());
+ application_reply_code = -1;
+ }
/*
* The return value is returned to ustctl so in case of an error, the
* application can be notified. In case of an error, it's important not to
* return a negative error or else the application will get closed.
*/
- ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
+ ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, application_reply_code);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
* No need to wipe the create enum since the application socket will
* get close on error hence cleaning up everything by itself.
*/
- goto error;
+ return ret;
}
DBG3("UST registry enum %s added successfully or already found", name);
-
-error:
- pthread_mutex_unlock(®istry->lock);
-error_rcu_unlock:
- rcu_read_unlock();
- return ret;
+ return 0;
}
/*
DBG2("UST app ustctl register event received");
- ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
- &loglevel_value, &sig, &nr_fields, &fields,
- &model_emf_uri);
+ ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel_value,
+ &sig, &nr_fields, &fields, &model_emf_uri);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app recv event failed. Application died: sock = %d",
goto error;
}
+ {
+ lttng::urcu::read_lock_guard rcu_lock;
+ const struct ust_app *app = find_app_by_notify_sock(sock);
+ if (!app) {
+ DBG("Application socket %d is being torn down. Abort event notify", sock);
+ ret = -1;
+ goto error;
+ }
+ }
+
+ if ((!fields && nr_fields > 0) || (fields && nr_fields == 0)) {
+ ERR("Invalid return value from lttng_ust_ctl_recv_register_event: fields = %p, nr_fields = %zu",
+ fields, nr_fields);
+ ret = -1;
+ free(fields);
+ goto error;
+ }
+
/*
* Add event to the UST registry coming from the notify socket. This
* call will free if needed the sig, fields and model_emf_uri. This
case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
{
int sobjd, cobjd;
- size_t nr_fields;
- struct lttng_ust_ctl_field *fields;
+ size_t field_count;
+ struct lttng_ust_ctl_field *context_fields;
DBG2("UST app ustctl register channel received");
- ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
- &fields);
+ ret = lttng_ust_ctl_recv_register_channel(
+ sock, &sobjd, &cobjd, &field_count, &context_fields);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app recv channel failed. Application died: sock = %d",
WARN("UST app recv channel failed. Communication time out: sock = %d",
sock);
} else {
- ERR("UST app recv channel failed with ret %d: sock = %d",
- ret, sock);
+ ERR("UST app recv channel failed with ret %d: sock = %d", ret,
+ sock);
}
goto error;
}
/*
* The fields ownership are transfered to this function call meaning
* that if needed it will be freed. After this, it's invalid to access
- * fields or clean it up.
+ * fields or clean them up.
*/
- ret = reply_ust_register_channel(sock, cobjd, nr_fields,
- fields);
+ ret = handle_app_register_channel_notification(sock, cobjd, context_fields, field_count);
if (ret < 0) {
goto error;
}
goto error;
}
- /* Callee assumes ownership of entries */
+ /* Callee assumes ownership of entries. */
ret = add_enum_ust_registry(sock, sobjd, name,
entries, nr_entries);
if (ret < 0) {
rcu_read_lock();
- obj = (ust_app_notify_sock_obj *) zmalloc(sizeof(*obj));
+ obj = zmalloc<ust_app_notify_sock_obj>();
if (!obj) {
/*
* An ENOMEM is kind of uncool. If this strikes we continue the
*/
enum lttng_error_code ust_app_snapshot_record(
const struct ltt_ust_session *usess,
- const struct consumer_output *output, int wait,
+ const struct consumer_output *output,
uint64_t nb_packets_per_stream)
{
int ret = 0;
char pathname[PATH_MAX];
size_t consumer_path_offset = 0;
- if (!reg->registry->reg.ust->metadata_key) {
+ if (!reg->registry->reg.ust->_metadata_key) {
/* Skip since no metadata is present */
continue;
}
buf_reg_chan, node.node) {
status = consumer_snapshot_channel(socket,
buf_reg_chan->consumer_key,
- output, 0, usess->uid,
- usess->gid, &trace_path[consumer_path_offset], wait,
+ output, 0, &trace_path[consumer_path_offset],
nb_packets_per_stream);
if (status != LTTNG_OK) {
goto error;
}
}
status = consumer_snapshot_channel(socket,
- reg->registry->reg.ust->metadata_key, output, 1,
- usess->uid, usess->gid, &trace_path[consumer_path_offset],
- wait, 0);
+ reg->registry->reg.ust->_metadata_key, output, 1,
+ &trace_path[consumer_path_offset], 0);
if (status != LTTNG_OK) {
goto error;
}
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
+ lsu::registry_session *registry;
char pathname[PATH_MAX];
size_t consumer_path_offset = 0;
}
/* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
output);
if (!socket) {
status = LTTNG_ERR_INVALID;
ua_chan, node.node) {
status = consumer_snapshot_channel(socket,
ua_chan->key, output, 0,
- lttng_credentials_get_uid(&ua_sess->effective_credentials),
- lttng_credentials_get_gid(&ua_sess->effective_credentials),
- &trace_path[consumer_path_offset], wait,
+ &trace_path[consumer_path_offset],
nb_packets_per_stream);
switch (status) {
case LTTNG_OK:
continue;
}
status = consumer_snapshot_channel(socket,
- registry->metadata_key, output, 1,
- lttng_credentials_get_uid(&ua_sess->effective_credentials),
- lttng_credentials_get_gid(&ua_sess->effective_credentials),
- &trace_path[consumer_path_offset], wait, 0);
+ registry->_metadata_key, output, 1,
+ &trace_path[consumer_path_offset], 0);
switch (status) {
case LTTNG_OK:
break;
/* If the session is found for the app, the channel must be there */
LTTNG_ASSERT(ua_chan_node);
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
if (overwrite) {
uint64_t _lost;
buf_reg_chan, node.node) {
ret = consumer_rotate_channel(socket,
buf_reg_chan->consumer_key,
- usess->uid, usess->gid,
usess->consumer,
/* is_metadata_channel */ false);
if (ret < 0) {
* operations (i.e add context) and lead to data
* channels created with no metadata channel.
*/
- if (!reg->registry->reg.ust->metadata_key) {
+ if (!reg->registry->reg.ust->_metadata_key) {
/* Skip since no metadata is present. */
continue;
}
- (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+ {
+ auto locked_registry = reg->registry->reg.ust->lock();
+ (void) push_metadata(locked_registry, usess->consumer);
+ }
ret = consumer_rotate_channel(socket,
- reg->registry->reg.ust->metadata_key,
- usess->uid, usess->gid,
+ reg->registry->reg.ust->_metadata_key,
usess->consumer,
/* is_metadata_channel */ true);
if (ret < 0) {
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
+ lsu::registry_session *registry;
ua_sess = lookup_session_by_app(usess, app);
if (!ua_sess) {
}
/* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
usess->consumer);
if (!socket) {
cmd_ret = LTTNG_ERR_INVALID;
ua_chan, node.node) {
ret = consumer_rotate_channel(socket,
ua_chan->key,
- lttng_credentials_get_uid(&ua_sess->effective_credentials),
- lttng_credentials_get_gid(&ua_sess->effective_credentials),
ua_sess->consumer,
/* is_metadata_channel */ false);
if (ret < 0) {
}
/* Rotate the metadata channel. */
- (void) push_metadata(registry, usess->consumer);
+ {
+ auto locked_registry = registry->lock();
+
+ (void) push_metadata(locked_registry, usess->consumer);
+ }
ret = consumer_rotate_channel(socket,
- registry->metadata_key,
- lttng_credentials_get_uid(&ua_sess->effective_credentials),
- lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ registry->_metadata_key,
ua_sess->consumer,
/* is_metadata_channel */ true);
if (ret < 0) {
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
pid_n.node) {
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
+ lsu::registry_session *registry;
ua_sess = lookup_session_by_app(usess, app);
if (!ua_sess) {
}
}
- (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+ {
+ auto locked_registry = reg->registry->reg.ust->lock();
+ (void) push_metadata(locked_registry, usess->consumer);
+ }
/*
* Clear the metadata channel.
* perform a rotation operation on it behind the scene.
*/
ret = consumer_clear_channel(socket,
- reg->registry->reg.ust->metadata_key);
+ reg->registry->reg.ust->_metadata_key);
if (ret < 0) {
goto error;
}
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
+ lsu::registry_session *registry;
ua_sess = lookup_session_by_app(usess, app);
if (!ua_sess) {
}
/* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
usess->consumer);
if (!socket) {
cmd_ret = LTTNG_ERR_INVALID;
}
}
- (void) push_metadata(registry, usess->consumer);
+ {
+ auto locked_registry = registry->lock();
+ (void) push_metadata(locked_registry, usess->consumer);
+ }
/*
* Clear the metadata channel.
* Metadata channel is not cleared per se but we still need to
* perform rotation operation on it behind the scene.
*/
- ret = consumer_clear_channel(socket, registry->metadata_key);
+ ret = consumer_clear_channel(socket, registry->_metadata_key);
if (ret < 0) {
/* Per-PID buffer and application going away. */
if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
+ lsu::registry_session *registry;
ua_sess = lookup_session_by_app(usess, app);
if (!ua_sess) {
/* Get the right consumer socket for the application. */
socket = consumer_find_socket_by_bitness(
- app->bits_per_long, usess->consumer);
+ app->abi.bits_per_long, usess->consumer);
if (!socket) {
ret = LTTNG_ERR_FATAL;
goto error;