#include "session.hpp"
#include "ust-app.hpp"
#include "ust-consumer.hpp"
-#include "ust-field-convert.hpp"
+#include "ust-field-quirks.hpp"
#include "utils.hpp"
#include <common/bytecode/bytecode.hpp>
#include <common/format.hpp>
#include <common/hashtable/utils.hpp>
#include <common/make-unique.hpp>
+#include <common/pthread-lock.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/urcu.hpp>
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;
-static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
+static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess);
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
-namespace {
-
/*
* Return the session registry according to the buffer type of the given
* session.
 * A registry per UID object MUST exist before calling this function or else
 * it will LTTNG_ASSERT() if not found. RCU read side lock must be acquired.
*/
-static lsu::registry_session *get_session_registry(const struct ust_app_session *ua_sess)
+lsu::registry_session *ust_app_get_session_registry(const ust_app_session::identifier& ua_sess_id)
{
lsu::registry_session *registry = nullptr;
- LTTNG_ASSERT(ua_sess);
-
- switch (ua_sess->buffer_type) {
- case LTTNG_BUFFER_PER_PID:
+ switch (ua_sess_id.allocation_policy) {
+ case ust_app_session::identifier::buffer_allocation_policy::PER_PID:
{
- struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
+ struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess_id.id);
if (!reg_pid) {
goto error;
}
registry = reg_pid->registry->reg.ust;
break;
}
- case LTTNG_BUFFER_PER_UID:
+ case ust_app_session::identifier::buffer_allocation_policy::PER_UID:
{
- struct buffer_reg_uid *reg_uid =
- buffer_reg_uid_find(ua_sess->tracing_id,
- ua_sess->bits_per_long,
- lttng_credentials_get_uid(&ua_sess->real_credentials));
+ struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
+ ua_sess_id.session_id,
+ ua_sess_id.abi == ust_app_session::identifier::application_abi::ABI_32 ?
+ 32 :
+ 64,
+ lttng_credentials_get_uid(&ua_sess_id.app_credentials));
if (!reg_uid) {
goto error;
}
return registry;
}
-lsu::registry_session::locked_ptr get_locked_session_registry(const struct ust_app_session *ua_sess)
+namespace {
+lsu::registry_session::locked_ref
+get_locked_session_registry(const ust_app_session::identifier& identifier)
{
- auto session = get_session_registry(ua_sess);
+ auto session = ust_app_get_session_registry(identifier);
if (session) {
pthread_mutex_lock(&session->_lock);
}
- return lsu::registry_session::locked_ptr{ session };
+ return lsu::registry_session::locked_ref{ session };
}
} /* namespace */
*/
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
- struct ust_app_event *event;
- const struct ust_app_ht_key *key;
- int ev_loglevel_value;
-
LTTNG_ASSERT(node);
LTTNG_ASSERT(_key);
- event = caa_container_of(node, struct ust_app_event, node.node);
- key = (ust_app_ht_key *) _key;
- ev_loglevel_value = event->attr.loglevel;
+ auto *event = lttng_ht_node_container_of(node, &ust_app_event::node);
+ const auto *key = (ust_app_ht_key *) _key;
/* Match the 4 elements of the key: name, filter, loglevel, exclusions */
}
/* Event loglevel. */
- if (ev_loglevel_value != key->loglevel_type) {
- if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL &&
- key->loglevel_type == 0 && ev_loglevel_value == -1) {
- /*
- * Match is accepted. This is because on event creation, the
- * loglevel is set to -1 if the event loglevel type is ALL so 0 and
- * -1 are accepted for this loglevel type since 0 is the one set by
- * the API when receiving an enable event.
- */
- } else {
- goto no_match;
- }
+ if (!loglevels_match(event->attr.loglevel_type,
+ event->attr.loglevel,
+ key->loglevel_type,
+ key->loglevel_value,
+ LTTNG_UST_ABI_LOGLEVEL_ALL)) {
+ goto no_match;
}
/* One of the filters is NULL, fail. */
ht = ua_chan->events;
key.name = event->attr.name;
key.filter = event->filter;
- key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel;
+ key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel_type;
+ key.loglevel_value = event->attr.loglevel;
key.exclusion = event->exclusion;
node_ptr = cds_lfht_add_unique(ht->ht,
}
free(ua_ctx->obj);
}
+
+ if (ua_ctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
+ free(ua_ctx->ctx.u.app_ctx.provider_name);
+ free(ua_ctx->ctx.u.app_ctx.ctx_name);
+ }
+
free(ua_ctx);
}
static void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
uint64_t discarded = 0, lost = 0;
- struct ltt_session *session;
struct ltt_ust_channel *uchan;
if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
return;
}
- lttng::urcu::read_lock_guard read_lock;
- session = session_find_by_id(ua_chan->session->tracing_id);
- if (!session || !session->ust_session) {
- /*
- * Not finding the session is not an error because there are
- * multiple ways the channels can be torn down.
- *
- * 1) The session daemon can initiate the destruction of the
- * ust app session after receiving a destroy command or
- * during its shutdown/teardown.
- * 2) The application, since we are in per-pid tracing, is
- * unregistering and tearing down its ust app session.
- *
- * Both paths are protected by the session list lock which
- * ensures that the accounting of lost packets and discarded
- * events is done exactly once. The session is then unpublished
- * from the session list, resulting in this condition.
- */
- goto end;
- }
+ const lttng::urcu::read_lock_guard read_lock;
- if (ua_chan->attr.overwrite) {
- consumer_get_lost_packets(ua_chan->session->tracing_id,
- ua_chan->key,
- session->ust_session->consumer,
- &lost);
- } else {
- consumer_get_discarded_events(ua_chan->session->tracing_id,
- ua_chan->key,
- session->ust_session->consumer,
- &discarded);
- }
- uchan = trace_ust_find_channel_by_name(session->ust_session->domain_global.channels,
- ua_chan->name);
- if (!uchan) {
- ERR("Missing UST channel to store discarded counters");
- goto end;
+ try {
+ const auto session = ltt_session::find_session(ua_chan->session->tracing_id);
+
+ if (!session->ust_session) {
+ /*
+ * Not finding the session is not an error because there are
+ * multiple ways the channels can be torn down.
+ *
+ * 1) The session daemon can initiate the destruction of the
+ * ust app session after receiving a destroy command or
+ * during its shutdown/teardown.
+ * 2) The application, since we are in per-pid tracing, is
+ * unregistering and tearing down its ust app session.
+ *
+ * Both paths are protected by the session list lock which
+ * ensures that the accounting of lost packets and discarded
+ * events is done exactly once. The session is then unpublished
+ * from the session list, resulting in this condition.
+ */
+ return;
+ }
+
+ if (ua_chan->attr.overwrite) {
+ consumer_get_lost_packets(ua_chan->session->tracing_id,
+ ua_chan->key,
+ session->ust_session->consumer,
+ &lost);
+ } else {
+ consumer_get_discarded_events(ua_chan->session->tracing_id,
+ ua_chan->key,
+ session->ust_session->consumer,
+ &discarded);
+ }
+ uchan = trace_ust_find_channel_by_name(session->ust_session->domain_global.channels,
+ ua_chan->name);
+ if (!uchan) {
+ ERR("Missing UST channel to store discarded counters");
+ return;
+ }
+ } catch (const lttng::sessiond::exceptions::session_not_found_error& ex) {
+ DBG_FMT("Failed to save per-pid lost/discarded counters: {}, location='{}'",
+ ex.what(),
+ ex.source_location);
+ return;
}
uchan->per_pid_closed_app_discarded += discarded;
uchan->per_pid_closed_app_lost += lost;
-
-end:
- if (session) {
- session_put(session);
- }
}
/*
static void delete_ust_app_channel(int sock,
struct ust_app_channel *ua_chan,
struct ust_app *app,
- const lsu::registry_session::locked_ptr& locked_registry)
+ const lsu::registry_session::locked_ref& locked_registry)
{
int ret;
- struct lttng_ht_iter iter;
- struct ust_app_event *ua_event;
- struct ust_app_ctx *ua_ctx;
- struct ust_app_stream *stream, *stmp;
LTTNG_ASSERT(ua_chan);
ASSERT_RCU_READ_LOCKED();
DBG3("UST app deleting channel %s", ua_chan->name);
/* Wipe stream */
- cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
+ for (auto *stream :
+ lttng::urcu::list_iteration_adapter<ust_app_stream, &ust_app_stream::list>(
+ ua_chan->streams.head)) {
cds_list_del(&stream->list);
delete_ust_app_stream(sock, stream, app);
}
/* Wipe context */
- cds_lfht_for_each_entry (ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
+ for (auto ua_ctx :
+ lttng::urcu::lfht_iteration_adapter<ust_app_ctx,
+ decltype(ust_app_ctx::node),
+ &ust_app_ctx::node>(*ua_chan->ctx->ht)) {
cds_list_del(&ua_ctx->list);
- ret = lttng_ht_del(ua_chan->ctx, &iter);
+ ret = cds_lfht_del(ua_chan->ctx->ht, &ua_ctx->node.node);
LTTNG_ASSERT(!ret);
delete_ust_app_ctx(sock, ua_ctx, app);
}
/* Wipe events */
- cds_lfht_for_each_entry (ua_chan->events->ht, &iter.iter, ua_event, node.node) {
- ret = lttng_ht_del(ua_chan->events, &iter);
+ for (auto ua_event :
+ lttng::urcu::lfht_iteration_adapter<ust_app_event,
+ decltype(ust_app_event::node),
+ &ust_app_event::node>(*ua_chan->events->ht)) {
+ ret = cds_lfht_del(ua_chan->events->ht, &ua_event->node.node);
LTTNG_ASSERT(!ret);
delete_ust_app_event(sock, ua_event, app);
}
}
if (ua_chan->obj != nullptr) {
+ lttng_ht_iter iter;
+
/* Remove channel from application UST object descriptor. */
iter.iter.node = &ua_chan->ust_objd_node.node;
ret = lttng_ht_del(app->ust_objd, &iter);
* but it can be caused by recoverable errors (e.g. the application has
* terminated concurrently).
*/
-ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
+ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ref& locked_registry,
struct consumer_socket *socket,
int send_zero_data)
{
* but it can be caused by recoverable errors (e.g. the application has
* terminated concurrently).
*/
-static int push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
+static int push_metadata(const lsu::registry_session::locked_ref& locked_registry,
struct consumer_output *consumer)
{
int ret_val;
{
int ret;
struct consumer_socket *socket;
- lttng::urcu::read_lock_guard read_lock_guard;
+ const lttng::urcu::read_lock_guard read_lock_guard;
LTTNG_ASSERT(consumer);
lttng::utils::container_of(head, &ust_app_session::rcu_head);
lttng_ht_destroy(ua_sess->channels);
- free(ua_sess);
+ delete ua_sess;
}
/*
*/
static void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, struct ust_app *app)
{
- int ret;
- struct lttng_ht_iter iter;
- struct ust_app_channel *ua_chan;
-
LTTNG_ASSERT(ua_sess);
ASSERT_RCU_READ_LOCKED();
- pthread_mutex_lock(&ua_sess->lock);
+ /* Locked for the duration of the function. */
+ auto locked_ua_sess = ua_sess->lock();
LTTNG_ASSERT(!ua_sess->deleted);
ua_sess->deleted = true;
- auto locked_registry = get_locked_session_registry(ua_sess);
+ auto locked_registry = get_locked_session_registry(locked_ua_sess->get_identifier());
/* Registry can be null on error path during initialization. */
if (locked_registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(locked_registry, ua_sess->consumer);
}
- cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
- ret = lttng_ht_del(ua_sess->channels, &iter);
- LTTNG_ASSERT(!ret);
+ for (auto *ua_chan :
+ lttng::urcu::lfht_iteration_adapter<ust_app_channel,
+ decltype(ust_app_channel::node),
+ &ust_app_channel::node>(*ua_sess->channels->ht)) {
+ const auto ret = cds_lfht_del(ua_sess->channels->ht, &ua_chan->node.node);
+ LTTNG_ASSERT(ret == 0);
delete_ust_app_channel(sock, ua_chan, app, locked_registry);
}
if (ua_sess->handle != -1) {
pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
+ auto ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
}
/* Remove session from application UST object descriptor. */
- iter.iter.node = &ua_sess->ust_objd_node.node;
- ret = lttng_ht_del(app->ust_sessions_objd, &iter);
+ ret = cds_lfht_del(app->ust_sessions_objd->ht, &ua_sess->ust_objd_node.node);
LTTNG_ASSERT(!ret);
}
- pthread_mutex_unlock(&ua_sess->lock);
-
consumer_output_put(ua_sess->consumer);
-
call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
static void delete_ust_app(struct ust_app *app)
{
int ret, sock;
- struct ust_app_session *ua_sess, *tmp_ua_sess;
- struct lttng_ht_iter iter;
- struct ust_app_event_notifier_rule *event_notifier_rule;
bool event_notifier_write_fd_is_open;
/*
* The session list lock must be held during this function to guarantee
* the existence of ua_sess.
*/
- session_lock_list();
+ const auto list_lock = lttng::sessiond::lock_session_list();
/* Delete ust app sessions info */
sock = app->sock;
app->sock = -1;
/* Wipe sessions */
- cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
- /* Free every object in the session and the session. */
- lttng::urcu::read_lock_guard read_lock;
- delete_ust_app_session(sock, ua_sess, app);
+ {
+ const lttng::urcu::read_lock_guard read_lock;
+
+ for (const auto ua_sess : app->sessions_to_teardown) {
+ /* Free every object in the session and the session. */
+ delete_ust_app_session(sock, ua_sess, app);
+ }
}
/* Remove the event notifier rules associated with this app. */
{
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &iter.iter,
- event_notifier_rule,
- node.node) {
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
+ const lttng::urcu::read_lock_guard read_lock;
+
+ for (auto *event_notifier_rule :
+ lttng::urcu::lfht_iteration_adapter<ust_app_event_notifier_rule,
+ decltype(ust_app_event_notifier_rule::node),
+ &ust_app_event_notifier_rule::node>(
+ *app->token_to_event_notifier_rule_ht->ht)) {
+ ret = cds_lfht_del(app->token_to_event_notifier_rule_ht->ht,
+ &event_notifier_rule->node.node);
LTTNG_ASSERT(!ret);
delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
lttng_fd_put(LTTNG_FD_APPS, 1);
DBG2("UST app pid %d deleted", app->pid);
- free(app);
- session_unlock_list();
+ delete app;
}
/*
struct ust_app_session *ua_sess;
/* Init most of the default value by allocating and zeroing */
- ua_sess = zmalloc<ust_app_session>();
+ ua_sess = new ust_app_session;
if (ua_sess == nullptr) {
PERROR("malloc");
goto error_free;
ua_sess->handle = -1;
ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
- pthread_mutex_init(&ua_sess->lock, nullptr);
return ua_sess;
/*
* Alloc new UST app channel.
*/
-static struct ust_app_channel *alloc_ust_app_channel(const char *name,
- struct ust_app_session *ua_sess,
- struct lttng_ust_abi_channel_attr *attr)
+static struct ust_app_channel *
+alloc_ust_app_channel(const char *name,
+ const ust_app_session::locked_weak_ref& ua_sess,
+ struct lttng_ust_abi_channel_attr *attr)
{
struct ust_app_channel *ua_chan;
ua_chan->enabled = true;
ua_chan->handle = -1;
- ua_chan->session = ua_sess;
+ ua_chan->session = &ua_sess.get();
ua_chan->key = get_next_channel_key();
ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_ulong>(&iter);
if (node == nullptr) {
DBG2("UST app find by sock %d not found", sock);
goto error;
ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *) ((unsigned long) sock), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_ulong>(&iter);
if (node == nullptr) {
DBG2("UST app find by notify sock %d not found", sock);
goto error;
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
const char *name,
const struct lttng_bytecode *filter,
+ lttng_ust_abi_loglevel_type loglevel_type,
int loglevel_value,
const struct lttng_event_exclusion *exclusion)
{
/* Setup key for event lookup. */
key.name = name;
key.filter = filter;
- key.loglevel_type = (lttng_ust_abi_loglevel_type) loglevel_value;
+ key.loglevel_type = loglevel_type;
+ key.loglevel_value = loglevel_value;
/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
key.exclusion = exclusion;
ht_match_ust_app_event,
&key,
&iter.iter);
- node = lttng_ht_iter_get_node_str(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_str>(&iter);
if (node == nullptr) {
goto end;
}
ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(ht, &token, &iter);
- node = lttng_ht_iter_get_node_u64(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_u64>(&iter);
if (node == nullptr) {
DBG2("UST app event notifier rule token not found: token = %" PRIu64, token);
goto end;
create_ust_exclusion_from_exclusion(const struct lttng_event_exclusion *exclusion)
{
struct lttng_ust_abi_event_exclusion *ust_exclusion = nullptr;
- size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
+ const size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
ust_exclusion = zmalloc<lttng_ust_abi_event_exclusion>(exclusion_alloc_size);
* Disable the specified channel on to UST tracer for the UST session.
*/
static int disable_ust_channel(struct ust_app *app,
- struct ust_app_session *ua_sess,
+ const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app_channel *ua_chan)
{
int ret;
* Enable the specified channel on to UST tracer for the UST session.
*/
static int enable_ust_channel(struct ust_app *app,
- struct ust_app_session *ua_sess,
+ const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app_channel *ua_chan)
{
int ret;
struct ust_app_channel *ua_chan)
{
int ret;
- struct ust_app_stream *stream, *stmp;
LTTNG_ASSERT(app);
LTTNG_ASSERT(ua_sess);
health_code_update();
/* Send all streams to application. */
- cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
+ for (auto *stream :
+ lttng::urcu::list_iteration_adapter<ust_app_stream, &ust_app_stream::list>(
+ ua_chan->streams.head)) {
ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = -ENOTCONN; /* Caused by app exiting. */
LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
event_rule_type == LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
+ event_rule_type == LTTNG_EVENT_RULE_TYPE_LOG4J2_LOGGING ||
event_rule_type == LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
/*
 * Lookup session wrapper.
*/
-static void __lookup_session_by_app(const struct ltt_ust_session *usess,
- struct ust_app *app,
- struct lttng_ht_iter *iter)
+static void
+__lookup_session_by_app(const ltt_ust_session *usess, const ust_app *app, lttng_ht_iter *iter)
{
/* Get right UST app session from app */
lttng_ht_lookup(app->sessions, &usess->id, iter);
* Return ust app session from the app session hashtable using the UST session
* id.
*/
-static struct ust_app_session *lookup_session_by_app(const struct ltt_ust_session *usess,
- struct ust_app *app)
+ust_app_session *ust_app_lookup_app_session(const struct ltt_ust_session *usess,
+ const struct ust_app *app)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_u64 *node;
__lookup_session_by_app(usess, app, &iter);
- node = lttng_ht_iter_get_node_u64(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_u64>(&iter);
if (node == nullptr) {
goto error;
}
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
reg_pid = buffer_reg_pid_find(ua_sess->id);
if (!reg_pid) {
LTTNG_ASSERT(usess);
LTTNG_ASSERT(app);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
if (!reg_uid) {
health_code_update();
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
app->pid,
*/
static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
{
- struct ust_app_ctx *ctx;
- const struct lttng_ust_context_attr *key;
-
LTTNG_ASSERT(node);
LTTNG_ASSERT(_key);
- ctx = caa_container_of(node, struct ust_app_ctx, node.node);
- key = (lttng_ust_context_attr *) _key;
+ auto *ctx = lttng_ht_node_container_of(node, &ust_app_ctx::node);
+ const auto *key = (lttng_ust_context_attr *) _key;
/* Context type */
if (ctx->ctx.ctx != key->ctx) {
ht_match_ust_app_ctx,
uctx,
&iter.iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_ulong>(&iter);
if (!node) {
goto end;
}
/*
* Lookup ust app channel for session and disable it on the tracer side.
*/
-static int disable_ust_app_channel(struct ust_app_session *ua_sess,
+static int disable_ust_app_channel(const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app_channel *ua_chan,
struct ust_app *app)
{
* Lookup ust app channel for session and enable it on the tracer side. This
* MUST be called with a RCU read side lock acquired.
*/
-static int enable_ust_app_channel(struct ust_app_session *ua_sess,
+static int enable_ust_app_channel(const ust_app_session::locked_weak_ref& ua_sess,
struct ltt_ust_channel *uchan,
struct ust_app *app)
{
ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
- ua_chan_node = lttng_ht_iter_get_node_str(&iter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&iter);
if (ua_chan_node == nullptr) {
DBG2("Unable to find channel %s in ust session id %" PRIu64,
uchan->name,
LTTNG_ASSERT(ua_chan);
LTTNG_ASSERT(registry);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
health_code_update();
/* Get the right consumer socket for the application. */
struct ust_app *app)
{
int ret = 0;
- struct ust_app_stream *stream, *stmp;
LTTNG_ASSERT(buf_reg_chan);
LTTNG_ASSERT(ua_chan);
DBG2("UST app setup buffer registry stream");
/* Send all streams to application. */
- cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
+ for (auto *stream :
+ lttng::urcu::list_iteration_adapter<ust_app_stream, &ust_app_stream::list>(
+ ua_chan->streams.head)) {
struct buffer_reg_stream *reg_stream;
ret = buffer_reg_stream_create(®_stream);
struct ust_app_channel *ua_chan)
{
int ret;
- struct buffer_reg_stream *reg_stream;
LTTNG_ASSERT(buf_reg_chan);
LTTNG_ASSERT(app);
/* Send all streams to application. */
pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
- cds_list_for_each_entry (reg_stream, &buf_reg_chan->streams, lnode) {
+ for (auto *reg_stream :
+ lttng::urcu::list_iteration_adapter<buffer_reg_stream, &buffer_reg_stream::lnode>(
+ buf_reg_chan->streams)) {
struct ust_app_stream stream = {};
ret = duplicate_stream_object(reg_stream, &stream);
int ret;
struct buffer_reg_uid *reg_uid;
struct buffer_reg_channel *buf_reg_chan;
- struct ltt_session *session = nullptr;
enum lttng_error_code notification_ret;
LTTNG_ASSERT(app);
DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
+ /* Guaranteed to exist; will not throw. */
+ const auto session = ltt_session::find_session(ua_sess->tracing_id);
+ ASSERT_SESSION_LIST_LOCKED();
+
reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
/*
* The session creation handles the creation of this global registry
goto error;
}
- session = session_find_by_id(ua_sess->tracing_id);
- LTTNG_ASSERT(session);
- ASSERT_LOCKED(session->lock);
- ASSERT_SESSION_LIST_LOCKED();
-
/*
* Create the buffers on the consumer side. This call populates the
* ust app channel object with all streams and data object.
}
error:
- if (session) {
- session_put(session);
- }
return ret;
}
*/
static int create_channel_per_pid(struct ust_app *app,
struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
+ const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app_channel *ua_chan)
{
int ret;
lsu::registry_session *registry;
enum lttng_error_code cmd_ret;
- struct ltt_session *session = nullptr;
uint64_t chan_reg_key;
LTTNG_ASSERT(app);
LTTNG_ASSERT(usess);
- LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(ua_chan);
DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
- registry = get_session_registry(ua_sess);
+ registry = ust_app_get_session_registry(ua_sess->get_identifier());
/* The UST app session lock is held, registry shall not be null. */
LTTNG_ASSERT(registry);
+ /* Guaranteed to exist; will not throw. */
+ const auto session = ltt_session::find_session(ua_sess->tracing_id);
+ ASSERT_LOCKED(session->_lock);
+ ASSERT_SESSION_LIST_LOCKED();
+
/* Create and add a new channel registry to session. */
try {
registry->add_channel(ua_chan->key);
goto error;
}
- session = session_find_by_id(ua_sess->tracing_id);
- LTTNG_ASSERT(session);
- ASSERT_LOCKED(session->lock);
- ASSERT_SESSION_LIST_LOCKED();
-
/* Create and get channel on the consumer side. */
- ret = do_consumer_create_channel(usess, ua_sess, ua_chan, app->abi.bits_per_long, registry);
+ ret = do_consumer_create_channel(
+ usess, &ua_sess.get(), ua_chan, app->abi.bits_per_long, registry);
if (ret < 0) {
ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name);
goto error_remove_from_registry;
}
- ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
+ ret = send_channel_pid_to_ust(app, &ua_sess.get(), ua_chan);
if (ret < 0) {
if (ret != -ENOTCONN) {
ERR("Error sending channel to application");
}
}
error:
- if (session) {
- session_put(session);
- }
return ret;
}
*/
static int ust_app_channel_send(struct ust_app *app,
struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
+ const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app_channel *ua_chan)
{
int ret;
LTTNG_ASSERT(app);
LTTNG_ASSERT(usess);
LTTNG_ASSERT(usess->active);
- LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(ua_chan);
ASSERT_RCU_READ_LOCKED();
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
- ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
+ ret = create_channel_per_uid(app, usess, &ua_sess.get(), ua_chan);
if (ret < 0) {
goto error;
}
*
* Return 0 on success or else a negative value.
*/
-static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
+static int ust_app_channel_allocate(const ust_app_session::locked_weak_ref& ua_sess,
struct ltt_ust_channel *uchan,
enum lttng_ust_abi_chan_type type,
struct ltt_ust_session *usess __attribute__((unused)),
/* Lookup channel in the ust app session */
lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
- ua_chan_node = lttng_ht_iter_get_node_str(&iter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&iter);
if (ua_chan_node != nullptr) {
ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
goto end;
*
* Called with UST app session lock held and RCU read side lock.
*/
-static int create_ust_app_metadata(struct ust_app_session *ua_sess,
+static int create_ust_app_metadata(const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app *app,
struct consumer_output *consumer)
{
int ret = 0;
struct ust_app_channel *metadata;
struct consumer_socket *socket;
- struct ltt_session *session = nullptr;
- LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
LTTNG_ASSERT(consumer);
ASSERT_RCU_READ_LOCKED();
- auto locked_registry = get_locked_session_registry(ua_sess);
+ auto locked_registry = get_locked_session_registry(ua_sess->get_identifier());
	/* The UST app session lock is held, registry shall not be null. */
LTTNG_ASSERT(locked_registry);
+ /* Guaranteed to exist; will not throw. */
+ const auto session = ltt_session::find_session(ua_sess->tracing_id);
+ ASSERT_LOCKED(session->_lock);
+ ASSERT_SESSION_LIST_LOCKED();
+
/* Metadata already exists for this registry or it was closed previously */
if (locked_registry->_metadata_key || locked_registry->_metadata_closed) {
ret = 0;
*/
locked_registry->_metadata_key = metadata->key;
- session = session_find_by_id(ua_sess->tracing_id);
- LTTNG_ASSERT(session);
- ASSERT_LOCKED(session->lock);
- ASSERT_SESSION_LIST_LOCKED();
-
/*
* Ask the metadata channel creation to the consumer. The metadata object
* will be created by the consumer and kept their. However, the stream is
* never added or monitored until we do a first push metadata to the
* consumer.
*/
- ret = ust_consumer_ask_channel(ua_sess,
+ ret = ust_consumer_ask_channel(&ua_sess.get(),
metadata,
consumer,
socket,
lttng_fd_put(LTTNG_FD_APPS, 1);
delete_ust_app_channel(-1, metadata, app, locked_registry);
error:
- if (session) {
- session_put(session);
- }
return ret;
}
struct lttng_ht_iter iter;
lttng_ht_lookup(ust_app_ht, (void *) ((unsigned long) pid), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_ulong>(&iter);
if (node == nullptr) {
DBG2("UST app no found with pid %d", pid);
goto error;
goto error;
}
- lta = zmalloc<ust_app>();
- if (lta == nullptr) {
- PERROR("malloc");
+ try {
+ lta = new ust_app;
+ } catch (const std::bad_alloc&) {
+ ERR_FMT("Failed to allocate ust application instance: name=`{}`, pid={}, uid={}",
+ msg->name,
+ msg->pid,
+ msg->uid);
goto error_free_pipe;
}
+	urcu_ref_init(&lta->ref);
+
lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
lta->ppid = msg->ppid;
	pthread_mutex_init(&lta->sock_lock, nullptr);
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
-	CDS_INIT_LIST_HEAD(&lta->teardown_head);
return lta;
error_free_pipe:
app->registration_time = time(nullptr);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
/*
* On a re-registration, we want to kick out the previous registration of
return ret;
}
-/*
- * Unregister app by removing it from the global traceable app list and freeing
- * the data struct.
- *
- * The socket is already closed at this point so no close to sock.
- */
-void ust_app_unregister(int sock)
+static void ust_app_unregister(ust_app& app)
{
- struct ust_app *lta;
- struct lttng_ht_node_ulong *node;
- struct lttng_ht_iter ust_app_sock_iter;
- struct lttng_ht_iter iter;
- struct ust_app_session *ua_sess;
- int ret;
-
- lttng::urcu::read_lock_guard read_lock;
-
- /* Get the node reference for a call_rcu */
- lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &ust_app_sock_iter);
- node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
- LTTNG_ASSERT(node);
-
- lta = lttng::utils::container_of(node, &ust_app::sock_n);
- DBG("PID %d unregistering with sock %d", lta->pid, sock);
+ const lttng::urcu::read_lock_guard read_lock;
/*
* For per-PID buffers, perform "push metadata" and flush all
* ensuring proper behavior of data_pending check.
* Remove sessions so they are not visible during deletion.
*/
- cds_lfht_for_each_entry (lta->sessions->ht, &iter.iter, ua_sess, node.node) {
- ret = lttng_ht_del(lta->sessions, &iter);
- if (ret) {
+ for (auto *ua_sess :
+ lttng::urcu::lfht_iteration_adapter<ust_app_session,
+ decltype(ust_app_session::node),
+ &ust_app_session::node>(*app.sessions->ht)) {
+ const auto del_ret = cds_lfht_del(app.sessions->ht, &ua_sess->node.node);
+ if (del_ret) {
/* The session was already removed so scheduled for teardown. */
continue;
}
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
- (void) ust_app_flush_app_session(lta, ua_sess);
+ (void) ust_app_flush_app_session(app, *ua_sess);
}
/*
* Add session to list for teardown. This is safe since at this point we
* are the only one using this list.
*/
- pthread_mutex_lock(&ua_sess->lock);
+ auto locked_ua_sess = ua_sess->lock();
if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
continue;
}
* The close metadata below nullifies the metadata pointer in the
* session so the delete session will NOT push/close a second time.
*/
- auto locked_registry = get_locked_session_registry(ua_sess);
+ auto locked_registry =
+ get_locked_session_registry(locked_ua_sess->get_identifier());
if (locked_registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(locked_registry, ua_sess->consumer);
locked_registry.reset();
}
}
-		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
- pthread_mutex_unlock(&ua_sess->lock);
+ app.sessions_to_teardown.emplace_back(ua_sess);
}
- /* Remove application from PID hash table */
- ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
- LTTNG_ASSERT(!ret);
-
/*
* Remove application from notify hash table. The thread handling the
* notify socket could have deleted the node so ignore on error because
* either way it's valid. The close of that socket is handled by the
* apps_notify_thread.
*/
-	iter.iter.node = &lta->notify_sock_n.node;
- (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+ (void) cds_lfht_del(ust_app_ht_by_notify_sock->ht, &app.notify_sock_n.node);
/*
* Ignore return value since the node might have been removed before by an
* add replace during app registration because the PID can be reassigned by
* the OS.
*/
-	iter.iter.node = &lta->pid_n.node;
- ret = lttng_ht_del(ust_app_ht, &iter);
- if (ret) {
- DBG3("Unregister app by PID %d failed. This can happen on pid reuse", lta->pid);
+ if (cds_lfht_del(ust_app_ht->ht, &app.pid_n.node)) {
+ DBG3("Unregister app by PID %d failed. This can happen on pid reuse", app.pid);
}
+}
+
+/*
+ * Unregister app by removing it from the global traceable app list and freeing
+ * the data struct.
+ *
+ * The socket is already closed at this point, so there is no need to close it.
+ */
+void ust_app_unregister_by_socket(int sock_fd)
+{
+ struct lttng_ht_node_ulong *node;
+ struct lttng_ht_iter ust_app_sock_iter;
+ int ret;
- /* Free memory */
-	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
+ const lttng::urcu::read_lock_guard read_lock;
- return;
+ /* Get the node reference for a call_rcu */
+ lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock_fd), &ust_app_sock_iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_ulong>(&ust_app_sock_iter);
+	LTTNG_ASSERT(node);
+
+ auto *app = lttng::utils::container_of(node, &ust_app::sock_n);
+
+ DBG_FMT("Application unregistering after socket activity: app={}, socket_fd={}",
+ *app,
+ sock_fd);
+
+ /* Remove application from socket hash table */
+ ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
+	LTTNG_ASSERT(!ret);
+
+ /*
+ * The socket is closed: release its reference to the application
+ * to trigger its eventual teardown.
+ */
+ ust_app_put(app);
}
/*
{
int ret, handle;
size_t nbmem, count = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
struct lttng_event *tmp_event;
nbmem = UST_APP_EVENT_LIST_SIZE;
goto error;
}
- {
- lttng::urcu::read_lock_guard read_lock;
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ struct lttng_ust_abi_tracepoint_iter uiter;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_tracepoint_iter uiter;
+ health_code_update();
- health_code_update();
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list events getting handle failed for app pid %d",
+ app->pid);
}
+ pthread_mutex_unlock(&app->sock_lock);
+ continue;
+ }
+
+ while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle, &uiter)) !=
+ -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
+ int release_ret;
+
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list get failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list get failed. Application is dead");
+ break;
+ }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list events getting handle failed for app pid %d",
- app->pid);
+ free(tmp_event);
+ release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
}
+
pthread_mutex_unlock(&app->sock_lock);
- continue;
+ goto rcu_error;
}
- while ((ret = lttng_ust_ctl_tracepoint_list_get(
- app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event *) realloc(
+ tmp_event, new_nbmem * sizeof(struct lttng_event));
+ if (new_tmp_event == nullptr) {
int release_ret;
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list get failed for app %d with ret %d",
- app->sock,
- ret);
- } else {
- DBG3("UST app tp list get failed. Application is dead");
- break;
- }
-
+ PERROR("realloc ust app events");
free(tmp_event);
+ ret = -ENOMEM;
release_ret =
lttng_ust_ctl_release_handle(app->sock, handle);
if (release_ret < 0 &&
pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
-
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event list from %zu to %zu entries",
- nbmem,
- new_nbmem);
- new_tmp_event = (lttng_event *) realloc(
- tmp_event, new_nbmem * sizeof(struct lttng_event));
- if (new_tmp_event == nullptr) {
- int release_ret;
-
- PERROR("realloc ust app events");
- free(tmp_event);
- ret = -ENOMEM;
- release_ret = lttng_ust_ctl_release_handle(
- app->sock, handle);
- if (release_ret < 0 &&
- release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d",
- app->sock,
- release_ret);
- }
-
- pthread_mutex_unlock(&app->sock_lock);
- goto rcu_error;
- }
- /* Zero the new memory */
- memset(new_tmp_event + nbmem,
- 0,
- (new_nbmem - nbmem) * sizeof(struct lttng_event));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
- }
-
- memcpy(tmp_event[count].name,
- uiter.name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].loglevel = uiter.loglevel;
- tmp_event[count].type =
- (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
- tmp_event[count].pid = app->pid;
- tmp_event[count].enabled = -1;
- count++;
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) * sizeof(struct lttng_event));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
}
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0) {
- if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
- app->pid,
- app->sock);
- } else if (ret == -EAGAIN) {
- WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
- app->pid,
- app->sock);
- } else {
- ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
- ret,
- app->pid,
- app->sock);
- }
+ memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].loglevel = uiter.loglevel;
+ tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
+ tmp_event[count].pid = app->pid;
+ tmp_event[count].enabled = -1;
+ count++;
+ }
+
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else if (ret == -EAGAIN) {
+ WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else {
+ ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
+ ret,
+ app->pid,
+ app->sock);
}
}
}
{
int ret, handle;
size_t nbmem, count = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
struct lttng_event_field *tmp_event;
nbmem = UST_APP_EVENT_LIST_SIZE;
goto error;
}
- {
- lttng::urcu::read_lock_guard read_lock;
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ struct lttng_ust_abi_field_iter uiter;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_field_iter uiter;
+ health_code_update();
- health_code_update();
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list field getting handle failed for app pid %d",
+ app->pid);
}
+ pthread_mutex_unlock(&app->sock_lock);
+ continue;
+ }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list field getting handle failed for app pid %d",
- app->pid);
+ while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle, &uiter)) !=
+ -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
+ int release_ret;
+
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list field failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list field failed. Application is dead");
+ break;
}
+
+ free(tmp_event);
+ release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- continue;
+ if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
+ }
+
+ goto rcu_error;
}
- while ((ret = lttng_ust_ctl_tracepoint_field_list_get(
- app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event_field *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event field list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event_field *) realloc(
+ tmp_event, new_nbmem * sizeof(struct lttng_event_field));
+ if (new_tmp_event == nullptr) {
int release_ret;
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list field failed for app %d with ret %d",
- app->sock,
- ret);
- } else {
- DBG3("UST app tp list field failed. Application is dead");
- break;
- }
-
+ PERROR("realloc ust app event fields");
free(tmp_event);
+ ret = -ENOMEM;
release_ret =
lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- if (release_ret < 0 &&
- release_ret != -LTTNG_UST_ERR_EXITING &&
+				if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
release_ret != -EPIPE) {
ERR("Error releasing app handle for app %d with ret %d",
app->sock,
goto rcu_error;
}
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event_field *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event field list from %zu to %zu entries",
- nbmem,
- new_nbmem);
- new_tmp_event = (lttng_event_field *) realloc(
- tmp_event,
- new_nbmem * sizeof(struct lttng_event_field));
- if (new_tmp_event == nullptr) {
- int release_ret;
-
- PERROR("realloc ust app event fields");
- free(tmp_event);
- ret = -ENOMEM;
- release_ret = lttng_ust_ctl_release_handle(
- app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (release_ret &&
- release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d",
- app->sock,
- release_ret);
- }
-
- goto rcu_error;
- }
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
- /* Zero the new memory */
- memset(new_tmp_event + nbmem,
- 0,
- (new_nbmem - nbmem) *
- sizeof(struct lttng_event_field));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
- }
+ memcpy(tmp_event[count].field_name,
+ uiter.field_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ /* Mapping between these enums matches 1 to 1. */
+ tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
+ tmp_event[count].nowrite = uiter.nowrite;
- memcpy(tmp_event[count].field_name,
- uiter.field_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- /* Mapping between these enums matches 1 to 1. */
- tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
- tmp_event[count].nowrite = uiter.nowrite;
-
- memcpy(tmp_event[count].event.name,
- uiter.event_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].event.loglevel = uiter.loglevel;
- tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
- tmp_event[count].event.pid = app->pid;
- tmp_event[count].event.enabled = -1;
- count++;
- }
+ memcpy(tmp_event[count].event.name,
+ uiter.event_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].event.loglevel = uiter.loglevel;
+ tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
+ tmp_event[count].event.pid = app->pid;
+ tmp_event[count].event.enabled = -1;
+ count++;
+ }
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d",
- app->sock,
- ret);
- }
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
}
}
void ust_app_clean_list()
{
int ret;
- struct ust_app *app;
- struct lttng_ht_iter iter;
-
DBG2("UST app cleaning registered apps hash table");
/* Cleanup notify socket hash table */
if (ust_app_ht_by_notify_sock) {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- ust_app_ht_by_notify_sock->ht, &iter.iter, app, notify_sock_n.node) {
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::notify_sock_n),
+ &ust_app::notify_sock_n>(
+ *ust_app_ht_by_notify_sock->ht)) {
/*
* Assert that all notifiers are gone as all triggers
* are unregistered prior to this clean-up.
*/
LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
-
ust_app_notify_sock_unregister(app->notify_sock);
}
}
- if (ust_app_ht) {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = lttng_ht_del(ust_app_ht, &iter);
- LTTNG_ASSERT(!ret);
- call_rcu(&app->pid_n.head, delete_ust_app_rcu);
- }
- }
-
/* Cleanup socket hash table */
if (ust_app_ht_by_sock) {
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht_by_sock->ht, &iter.iter, app, sock_n.node) {
- ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
+ for (auto *app : lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::sock_n),
+ &ust_app::sock_n>(
+ *ust_app_ht_by_sock->ht)) {
+ ret = cds_lfht_del(ust_app_ht_by_sock->ht, &app->sock_n.node);
LTTNG_ASSERT(!ret);
+ ust_app_put(app);
}
}
int ust_app_disable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
{
int ret = 0;
- struct lttng_ht_iter iter;
struct lttng_ht_node_str *ua_chan_node;
- struct ust_app *app;
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
uchan->name,
usess->id);
- {
- lttng::urcu::read_lock_guard read_lock;
-
- /* For every registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ht_iter uiter;
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ struct lttng_ht_iter uiter;
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = ust_app_lookup_app_session(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Get channel */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the session if found for the app, the channel must be there */
- LTTNG_ASSERT(ua_chan_node);
+ /* Get channel */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&uiter);
+ /* If the session if found for the app, the channel must be there */
+ LTTNG_ASSERT(ua_chan_node);
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- /* The channel must not be already disabled */
- LTTNG_ASSERT(ua_chan->enabled);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* The channel must not be already disabled */
+ LTTNG_ASSERT(ua_chan->enabled);
- /* Disable channel onto application */
- ret = disable_ust_app_channel(ua_sess, ua_chan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
- }
+ /* Disable channel onto application */
+ ret = disable_ust_app_channel(ua_sess->lock(), ua_chan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
}
}
int ust_app_enable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
{
int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
struct ust_app_session *ua_sess;
LTTNG_ASSERT(usess->active);
uchan->name,
usess->id);
- {
- lttng::urcu::read_lock_guard read_lock;
-
- /* For every registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ /* For every registered applications */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = ust_app_lookup_app_session(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Enable channel onto application */
- ret = enable_ust_app_channel(ua_sess, uchan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
- }
+ /* Enable channel onto application */
+ ret = enable_ust_app_channel(ua_sess->lock(), uchan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
}
}
struct ltt_ust_event *uevent)
{
int ret = 0;
- struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_iter uiter;
struct lttng_ht_node_str *ua_chan_node;
- struct ust_app *app;
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
struct ust_app_event *ua_event;
uchan->name,
usess->id);
- {
- lttng::urcu::read_lock_guard read_lock;
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ if (!app->compatible) {
+ continue;
+ }
+ ua_sess = ust_app_lookup_app_session(usess, app);
+ if (ua_sess == nullptr) {
+ /* Next app */
+ continue;
+ }
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- /* Next app */
- continue;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&uiter);
+ if (ua_chan_node == nullptr) {
+ DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
+ "Skipping",
+ uchan->name,
+ usess->id,
+ app->pid);
+ continue;
+ }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == nullptr) {
- DBG2("Channel %s not found in session id %" PRIu64
- " for app pid %d."
- "Skipping",
- uchan->name,
- usess->id,
- app->pid);
- continue;
- }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
-
- ua_event = find_ust_app_event(ua_chan->events,
- uevent->attr.name,
- uevent->filter,
- uevent->attr.loglevel,
- uevent->exclusion);
- if (ua_event == nullptr) {
- DBG2("Event %s not found in channel %s for app pid %d."
- "Skipping",
- uevent->attr.name,
- uchan->name,
- app->pid);
- continue;
- }
+ ua_event = find_ust_app_event(
+ ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG2("Event %s not found in channel %s for app pid %d."
+ "Skipping",
+ uevent->attr.name,
+ uchan->name,
+ app->pid);
+ continue;
+ }
- ret = disable_ust_app_event(ua_event, app);
- if (ret < 0) {
- /* XXX: Report error someday... */
- continue;
- }
+ ret = disable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ continue;
}
}
/* The ua_sess lock must be held by the caller. */
static int ust_app_channel_create(struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
+ const ust_app_session::locked_weak_ref& ua_sess,
struct ltt_ust_channel *uchan,
struct ust_app *app,
struct ust_app_channel **_ua_chan)
int ret = 0;
struct ust_app_channel *ua_chan = nullptr;
- LTTNG_ASSERT(ua_sess);
- ASSERT_LOCKED(ua_sess->lock);
-
if (!strncmp(uchan->name, DEFAULT_METADATA_NAME, sizeof(uchan->name))) {
copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
ret = 0;
} else {
- struct ltt_ust_context *uctx = nullptr;
-
/*
* Create channel onto application and synchronize its
* configuration.
}
/* Add contexts. */
- cds_list_for_each_entry (uctx, &uchan->ctx_list, list) {
+ for (auto *uctx :
+	     lttng::urcu::list_iteration_adapter<ltt_ust_context, &ltt_ust_context::list>(
+ uchan->ctx_list)) {
ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
if (ret) {
goto error;
struct ltt_ust_event *uevent)
{
int ret = 0;
- struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_iter uiter;
struct lttng_ht_node_str *ua_chan_node;
- struct ust_app *app;
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
struct ust_app_event *ua_event;
* tracer also.
*/
- {
- lttng::urcu::read_lock_guard read_lock;
-
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = ust_app_lookup_app_session(usess, app);
+ if (!ua_sess) {
+ /* The application has problem or is probably dead. */
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ auto locked_ua_sess = ua_sess->lock();
+ if (ua_sess->deleted) {
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&uiter);
+ /*
+ * It is possible that the channel cannot be found is
+ * the channel/event creation occurs concurrently with
+ * an application exit.
+ */
+ if (!ua_chan_node) {
+ continue;
+ }
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /*
- * It is possible that the channel cannot be found is
- * the channel/event creation occurs concurrently with
- * an application exit.
- */
- if (!ua_chan_node) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
-
- /* Get event node */
- ua_event = find_ust_app_event(ua_chan->events,
- uevent->attr.name,
- uevent->filter,
- uevent->attr.loglevel,
- uevent->exclusion);
- if (ua_event == nullptr) {
- DBG3("UST app enable event %s not found for app PID %d."
- "Skipping app",
- uevent->attr.name,
- app->pid);
- goto next_app;
- }
+ /* Get event node */
+ ua_event = find_ust_app_event(
+ ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG3("UST app enable event %s not found for app PID %d."
+ "Skipping app",
+ uevent->attr.name,
+ app->pid);
+ continue;
+ }
- ret = enable_ust_app_event(ua_event, app);
- if (ret < 0) {
- pthread_mutex_unlock(&ua_sess->lock);
- goto error;
- }
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
+ ret = enable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ goto error;
}
}
error:
struct ltt_ust_event *uevent)
{
int ret = 0;
- struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_iter uiter;
struct lttng_ht_node_str *ua_chan_node;
- struct ust_app *app;
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
uevent->attr.name,
usess->id);
- {
- lttng::urcu::read_lock_guard read_lock;
-
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
-
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ ua_sess = ust_app_lookup_app_session(usess, app);
+ if (!ua_sess) {
+ /* The application has problem or is probably dead. */
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ auto locked_ua_sess = ua_sess->lock();
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the channel is not found, there is a code flow error */
- LTTNG_ASSERT(ua_chan_node);
+ if (locked_ua_sess->deleted) {
+ continue;
+ }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&uiter);
+ /* If the channel is not found, there is a code flow error */
+ LTTNG_ASSERT(ua_chan_node);
- ret = create_ust_app_event(ua_chan, uevent, app);
- pthread_mutex_unlock(&ua_sess->lock);
- if (ret < 0) {
- if (ret != -LTTNG_UST_ERR_EXIST) {
- /* Possible value at this point: -ENOMEM. If so, we stop! */
- break;
- }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- DBG2("UST app event %s already exist on app PID %d",
- uevent->attr.name,
- app->pid);
- continue;
+ ret = create_ust_app_event(ua_chan, uevent, app);
+ if (ret < 0) {
+ if (ret != -LTTNG_UST_ERR_EXIST) {
+ /* Possible value at this point: -ENOMEM. If so, we stop! */
+ break;
}
+
+ DBG2("UST app event %s already exist on app PID %d",
+ uevent->attr.name,
+ app->pid);
+ continue;
}
}
DBG("Starting tracing for ust app pid %d", app->pid);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
+ const auto update_health_code_on_exit =
+ lttng::make_scope_exit([]() noexcept { health_code_update(); });
if (!app->compatible) {
- goto end;
+ return 0;
}
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
/* The session is in teardown process. Ignore and continue. */
- goto end;
+ return 0;
}
- pthread_mutex_lock(&ua_sess->lock);
-
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- goto end;
- }
+ auto locked_ua_sess = ua_sess->lock();
- if (ua_sess->enabled) {
- pthread_mutex_unlock(&ua_sess->lock);
- goto end;
+ if (locked_ua_sess->deleted) {
+ return 0;
}
- /* Upon restart, we skip the setup, already done */
- if (ua_sess->started) {
- goto skip_setup;
+ if (locked_ua_sess->enabled) {
+ return 0;
}
- health_code_update();
-
-skip_setup:
/* This starts the UST tracing */
pthread_mutex_lock(&app->sock_lock);
ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
app->pid,
app->sock);
- pthread_mutex_unlock(&ua_sess->lock);
- goto end;
+ return 0;
} else if (ret == -EAGAIN) {
WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
app->pid,
app->sock);
- pthread_mutex_unlock(&ua_sess->lock);
- goto end;
+ return 0;
} else {
ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
app->pid,
app->sock);
}
- goto error_unlock;
+
+ return -1;
}
/* Indicate that the session has been started once */
ua_sess->started = true;
ua_sess->enabled = true;
- pthread_mutex_unlock(&ua_sess->lock);
-
health_code_update();
/* Quiescent wait after starting trace */
}
}
-end:
- health_code_update();
return 0;
-
-error_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
- health_code_update();
- return -1;
}
/*
DBG("Stopping tracing for ust app pid %d", app->pid);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
+ const auto update_health_code_on_exit =
+ lttng::make_scope_exit([]() noexcept { health_code_update(); });
if (!app->compatible) {
- goto end_no_session;
+ return 0;
}
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
- goto end_no_session;
+ return 0;
}
- pthread_mutex_lock(&ua_sess->lock);
+ auto locked_ua_sess = ua_sess->lock();
if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- goto end_no_session;
+ return 0;
}
/*
* indicate that this is a stop error.
*/
if (!ua_sess->started) {
- goto error_rcu_unlock;
+ return -1;
}
health_code_update();
DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
app->pid,
app->sock);
- goto end_unlock;
+ return 0;
} else if (ret == -EAGAIN) {
WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
app->pid,
app->sock);
- goto end_unlock;
+ return 0;
} else {
ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
app->pid,
app->sock);
}
- goto error_rcu_unlock;
+
+ return -1;
}
health_code_update();
health_code_update();
{
- auto locked_registry = get_locked_session_registry(ua_sess);
+ auto locked_registry =
+ get_locked_session_registry(locked_ua_sess->get_identifier());
/* The UST app session is held registry shall not be null. */
LTTNG_ASSERT(locked_registry);
(void) push_metadata(locked_registry, ua_sess->consumer);
}
-end_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
-end_no_session:
- health_code_update();
return 0;
-
-error_rcu_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
- health_code_update();
- return -1;
}
-static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
+static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess)
{
int ret, retval = 0;
- struct lttng_ht_iter iter;
- struct ust_app_channel *ua_chan;
struct consumer_socket *socket;
- DBG("Flushing app session buffers for ust app pid %d", app->pid);
+ const auto update_health_code_on_exit =
+ lttng::make_scope_exit([]() noexcept { health_code_update(); });
- if (!app->compatible) {
- goto end_not_compatible;
- }
+ DBG("Flushing app session buffers for ust app pid %d", app.pid);
- pthread_mutex_lock(&ua_sess->lock);
+ if (!app.compatible) {
+ return 0;
+ }
- if (ua_sess->deleted) {
- goto end_deleted;
+ const auto locked_ua_sess = ua_sess.lock();
+ if (locked_ua_sess->deleted) {
+ return 0;
}
health_code_update();
/* Flushing buffers */
- socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
+ socket = consumer_find_socket_by_bitness(app.abi.bits_per_long, ua_sess.consumer);
/* Flush buffers and push metadata. */
- switch (ua_sess->buffer_type) {
+ switch (ua_sess.buffer_type) {
case LTTNG_BUFFER_PER_PID:
{
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
+ for (auto *ua_chan :
+ lttng::urcu::lfht_iteration_adapter<ust_app_channel,
+ decltype(ust_app_channel::node),
+ &ust_app_channel::node>(
+ *ua_sess.channels->ht)) {
health_code_update();
ret = consumer_flush_channel(socket, ua_chan->key);
if (ret) {
break;
}
- health_code_update();
-
-end_deleted:
- pthread_mutex_unlock(&ua_sess->lock);
-
-end_not_compatible:
- health_code_update();
return retval;
}
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
- struct buffer_reg_uid *reg;
- struct lttng_ht_iter iter;
-
/* Flush all per UID buffers associated to that session. */
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
- lttng::urcu::read_lock_guard read_lock;
+ for (auto *reg :
+ lttng::urcu::list_iteration_adapter<buffer_reg_uid, &buffer_reg_uid::lnode>(
+ usess->buffer_reg_uid_list)) {
+ const lttng::urcu::read_lock_guard read_lock;
lsu::registry_session *ust_session_reg;
- struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
/* Get consumer socket to use to push the metadata.*/
continue;
}
- cds_lfht_for_each_entry (
- reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ for (auto *buf_reg_chan :
+ lttng::urcu::lfht_iteration_adapter<buffer_reg_channel,
+ decltype(buffer_reg_channel::node),
+ &buffer_reg_channel::node>(
+ *reg->registry->channels->ht)) {
/*
* The following call will print error values so the return
* code is of little importance because whatever happens, we
}
case LTTNG_BUFFER_PER_PID:
{
- struct ust_app_session *ua_sess;
- struct lttng_ht_iter iter;
- struct ust_app *app;
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ua_sess = lookup_session_by_app(usess, app);
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::pid_n),
+ &ust_app::pid_n>(*ust_app_ht->ht)) {
+ auto *ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
continue;
}
- (void) ust_app_flush_app_session(app, ua_sess);
+ (void) ust_app_flush_app_session(*app, *ua_sess);
}
break;
static int ust_app_clear_quiescent_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
{
int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app_channel *ua_chan;
struct consumer_socket *socket;
DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
+ const auto update_health_code_on_exit =
+ lttng::make_scope_exit([]() noexcept { health_code_update(); });
if (!app->compatible) {
- goto end_not_compatible;
+ return 0;
}
- pthread_mutex_lock(&ua_sess->lock);
-
- if (ua_sess->deleted) {
- goto end_unlock;
+ const auto locked_ua_sess = ua_sess->lock();
+ if (locked_ua_sess->deleted) {
+ return 0;
}
health_code_update();
socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
if (!socket) {
ERR("Failed to find consumer (%" PRIu32 ") socket", app->abi.bits_per_long);
- ret = -1;
- goto end_unlock;
+ return -1;
}
/* Clear quiescent state. */
switch (ua_sess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
+ for (auto *ua_chan :
+ lttng::urcu::lfht_iteration_adapter<ust_app_channel,
+ decltype(ust_app_channel::node),
+ &ust_app_channel::node>(
+ *ua_sess->channels->ht)) {
health_code_update();
ret = consumer_clear_quiescent_channel(socket, ua_chan->key);
if (ret) {
break;
}
- health_code_update();
-
-end_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
-
-end_not_compatible:
- health_code_update();
return ret;
}
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
- struct lttng_ht_iter iter;
- struct buffer_reg_uid *reg;
-
/*
* Clear quiescent for all per UID buffers associated to
* that session.
*/
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
+ for (auto *reg :
+ lttng::urcu::list_iteration_adapter<buffer_reg_uid, &buffer_reg_uid::lnode>(
+ usess->buffer_reg_uid_list)) {
struct consumer_socket *socket;
- struct buffer_reg_channel *buf_reg_chan;
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
/* Get associated consumer socket.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
continue;
}
- cds_lfht_for_each_entry (
- reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ for (auto *buf_reg_chan :
+ lttng::urcu::lfht_iteration_adapter<buffer_reg_channel,
+ decltype(buffer_reg_channel::node),
+ &buffer_reg_channel::node>(
+ *reg->registry->channels->ht)) {
/*
* The following call will print error values so
* the return code is of little importance
}
case LTTNG_BUFFER_PER_PID:
{
- struct ust_app_session *ua_sess;
- struct lttng_ht_iter iter;
- struct ust_app *app;
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ua_sess = lookup_session_by_app(usess, app);
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::pid_n),
+ &ust_app::pid_n>(*ust_app_ht->ht)) {
+ auto *ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
continue;
}
+
(void) ust_app_clear_quiescent_app_session(app, ua_sess);
}
DBG("Destroy tracing for ust app pid %d", app->pid);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end;
}
__lookup_session_by_app(usess, app, &iter);
- node = lttng_ht_iter_get_node_u64(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_u64>(&iter);
if (node == nullptr) {
/* Session is being or is deleted. */
goto end;
*/
int ust_app_start_trace_all(struct ltt_ust_session *usess)
{
- struct lttng_ht_iter iter;
- struct ust_app *app;
-
DBG("Starting all UST traces");
/*
*/
(void) ust_app_clear_quiescent_session(usess);
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
- }
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ ust_app_global_update(usess, app);
}
return 0;
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
DBG("Stopping all UST traces");
*/
usess->active = false;
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_stop_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
- }
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ ret = ust_app_stop_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to next apps even on error */
+ continue;
}
}
*/
int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
{
- int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
-
DBG("Destroy all UST traces");
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = destroy_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
- }
- }
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ (void) destroy_trace(usess, app);
}
return 0;
/* The ua_sess lock must be held by the caller. */
static int find_or_create_ust_app_channel(struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
+ const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app *app,
struct ltt_ust_channel *uchan,
struct ust_app_channel **ua_chan)
struct lttng_ht_node_str *ua_chan_node;
lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
- ua_chan_node = lttng_ht_iter_get_node_str(&iter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&iter);
if (ua_chan_node) {
- *ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ *ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
goto end;
}
ua_event = find_ust_app_event(ua_chan->events,
uevent->attr.name,
uevent->filter,
+ (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
uevent->attr.loglevel,
uevent->exclusion);
if (!ua_event) {
int ret = 0;
enum lttng_error_code ret_code;
enum lttng_trigger_status t_status;
- struct lttng_ht_iter app_trigger_iter;
struct lttng_triggers *triggers = nullptr;
- struct ust_app_event_notifier_rule *event_notifier_rule;
unsigned int count, i;
ASSERT_RCU_READ_LOCKED();
}
for (i = 0; i < count; i++) {
- struct lttng_condition *condition;
- struct lttng_event_rule *event_rule;
+ const struct lttng_condition *condition;
+ const struct lttng_event_rule *event_rule;
struct lttng_trigger *trigger;
const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
enum lttng_condition_status condition_status;
LTTNG_ASSERT(trigger);
token = lttng_trigger_get_tracer_token(trigger);
- condition = lttng_trigger_get_condition(trigger);
+ condition = lttng_trigger_get_const_condition(trigger);
if (lttng_condition_get_type(condition) !=
LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
continue;
}
- condition_status = lttng_condition_event_rule_matches_borrow_rule_mutable(
- condition, &event_rule);
+ condition_status =
+ lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
}
}
- {
- lttng::urcu::read_lock_guard read_lock;
-
- /* Remove all unknown event sources from the app. */
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &app_trigger_iter.iter,
- event_notifier_rule,
- node.node) {
- const uint64_t app_token = event_notifier_rule->token;
- bool found = false;
-
- /*
- * Check if the app event trigger still exists on the
- * notification side.
- */
- for (i = 0; i < count; i++) {
- uint64_t notification_thread_token;
- const struct lttng_trigger *trigger =
- lttng_triggers_get_at_index(triggers, i);
+ /* Remove all unknown event sources from the app. */
+ for (auto *event_notifier_rule :
+ lttng::urcu::lfht_iteration_adapter<ust_app_event_notifier_rule,
+ decltype(ust_app_event_notifier_rule::node),
+ &ust_app_event_notifier_rule::node>(
+ *app->token_to_event_notifier_rule_ht->ht)) {
+ const uint64_t app_token = event_notifier_rule->token;
+ bool found = false;
- LTTNG_ASSERT(trigger);
+ /*
+ * Check if the app event trigger still exists on the
+ * notification side.
+ */
+ for (i = 0; i < count; i++) {
+ uint64_t notification_thread_token;
+ const struct lttng_trigger *trigger =
+ lttng_triggers_get_at_index(triggers, i);
- notification_thread_token = lttng_trigger_get_tracer_token(trigger);
+ LTTNG_ASSERT(trigger);
- if (notification_thread_token == app_token) {
- found = true;
- break;
- }
- }
+ notification_thread_token = lttng_trigger_get_tracer_token(trigger);
- if (found) {
- /* Still valid. */
- continue;
+ if (notification_thread_token == app_token) {
+ found = true;
+ break;
}
+ }
- /*
- * This trigger was unregistered, disable it on the tracer's
- * side.
- */
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
- LTTNG_ASSERT(ret == 0);
+ if (found) {
+ /* Still valid. */
+ continue;
+ }
- /* Callee logs errors. */
- (void) disable_ust_object(app, event_notifier_rule->obj);
+ /*
+ * This trigger was unregistered, disable it on the tracer's
+ * side.
+ */
+ ret = cds_lfht_del(app->token_to_event_notifier_rule_ht->ht,
+ &event_notifier_rule->node.node);
+ LTTNG_ASSERT(ret == 0);
- delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
- }
+ /* Callee logs errors. */
+ (void) disable_ust_object(app, event_notifier_rule->obj);
+ delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
}
end:
* RCU read lock must be held by the caller.
*/
static void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
+ const ust_app_session::locked_weak_ref& ua_sess,
struct ust_app *app)
{
- int ret = 0;
- struct cds_lfht_iter uchan_iter;
- struct ltt_ust_channel *uchan;
-
LTTNG_ASSERT(usess);
- LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
ASSERT_RCU_READ_LOCKED();
- cds_lfht_for_each_entry (usess->domain_global.channels->ht, &uchan_iter, uchan, node.node) {
+ for (auto *uchan : lttng::urcu::lfht_iteration_adapter<ltt_ust_channel,
+ decltype(ltt_ust_channel::node),
+ <t_ust_channel::node>(
+ *usess->domain_global.channels->ht)) {
struct ust_app_channel *ua_chan;
- struct cds_lfht_iter uevent_iter;
- struct ltt_ust_event *uevent;
/*
* Search for a matching ust_app_channel. If none is found,
* allocated (if necessary) and sent to the application, and
* all enabled contexts will be added to the channel.
*/
- ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan, &ua_chan);
+ int ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan, &ua_chan);
if (ret) {
/* Tracer is probably gone or ENOMEM. */
goto end;
continue;
}
- cds_lfht_for_each_entry (uchan->events->ht, &uevent_iter, uevent, node.node) {
+ for (auto *uevent :
+ lttng::urcu::lfht_iteration_adapter<ltt_ust_event,
+ decltype(ltt_ust_event::node),
+ <t_ust_event::node>(
+ *uchan->events->ht)) {
ret = ust_app_channel_synchronize_event(ua_chan, uevent, app);
if (ret) {
goto end;
ret = find_or_create_ust_app_session(usess, app, &ua_sess, nullptr);
if (ret < 0) {
/* Tracer is probably gone or ENOMEM. */
- if (ua_sess) {
- destroy_app_session(app, ua_sess);
- }
- goto end;
+ return;
}
+
LTTNG_ASSERT(ua_sess);
- pthread_mutex_lock(&ua_sess->lock);
- if (ua_sess->deleted) {
- goto deleted_session;
+ const auto locked_ua_sess = ua_sess->lock();
+ if (locked_ua_sess->deleted) {
+ return;
}
{
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
- ust_app_synchronize_all_channels(usess, ua_sess, app);
+ ust_app_synchronize_all_channels(usess, locked_ua_sess, app);
/*
* Create the metadata for the application. This returns gracefully if a
* daemon, the consumer will use this assumption to send the
* "STREAMS_SENT" message to the relay daemon.
*/
- ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
+ ret = create_ust_app_metadata(locked_ua_sess, app, usess->consumer);
if (ret < 0) {
ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
app->sock,
usess->id);
}
}
-
-deleted_session:
- pthread_mutex_unlock(&ua_sess->lock);
-end:
- return;
}
static void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
struct ust_app_session *ua_sess;
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
return;
}
*/
void ust_app_global_update_all(struct ltt_ust_session *usess)
{
- struct lttng_ht_iter iter;
- struct ust_app *app;
-
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
- }
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ ust_app_global_update(usess, app);
}
}
void ust_app_global_update_all_event_notifier_rules()
{
- struct lttng_ht_iter iter;
- struct ust_app *app;
-
- lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
ust_app_global_update_event_notifier_rules(app);
}
}
{
int ret = 0;
struct lttng_ht_node_str *ua_chan_node;
- struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_iter uiter;
struct ust_app_channel *ua_chan = nullptr;
struct ust_app_session *ua_sess;
- struct ust_app *app;
LTTNG_ASSERT(usess->active);
- {
- lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
-
- pthread_mutex_lock(&ua_sess->lock);
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = ust_app_lookup_app_session(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ const auto locked_ua_sess = ua_sess->lock();
+ if (locked_ua_sess->deleted) {
+ continue;
+ }
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == nullptr) {
- goto next_app;
- }
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
- ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
- if (ret < 0) {
- goto next_app;
- }
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&uiter);
+ if (ua_chan_node == nullptr) {
+ continue;
+ }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
+ if (ret < 0) {
+ continue;
}
}
ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(app->ust_sessions_objd, (void *) ((unsigned long) objd), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_ulong>(&iter);
if (node == nullptr) {
DBG2("UST app session find by objd %d not found", objd);
goto error;
ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(app->ust_objd, (void *) ((unsigned long) objd), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_ulong>(&iter);
if (node == nullptr) {
DBG2("UST app channel find by objd %d not found", objd);
goto error;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
auto ust_ctl_context_fields =
- lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(raw_context_fields);
+ lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::memory::free>(
+ raw_context_fields);
- lttng::urcu::read_lock_guard read_lock_guard;
+ const lttng::urcu::read_lock_guard read_lock_guard;
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
ua_sess = ua_chan->session;
/* Get right session registry depending on the session buffer type. */
- auto locked_registry_session = get_locked_session_registry(ua_sess);
+
+ /*
+ * HACK: ua_sess is already locked by the client thread. This is called
+ * in the context of the handling of a notification from the application.
+ */
+ auto locked_ua_sess = ust_app_session::make_locked_weak_ref(*ua_sess);
+ auto locked_registry_session =
+ get_locked_session_registry(locked_ua_sess->get_identifier());
+ locked_ua_sess.release();
if (!locked_registry_session) {
DBG("Application session is being torn down. Abort event notify");
return 0;
char *raw_model_emf_uri)
{
int ret, ret_code;
- uint32_t event_id = 0;
+ lsu::event_id event_id = 0;
uint64_t chan_reg_key;
struct ust_app *app;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- lttng::urcu::read_lock_guard rcu_lock;
- auto signature = lttng::make_unique_wrapper<char, lttng::free>(raw_signature);
- auto fields = lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(raw_fields);
- auto model_emf_uri = lttng::make_unique_wrapper<char, lttng::free>(raw_model_emf_uri);
+ const lttng::urcu::read_lock_guard rcu_lock;
+ auto signature = lttng::make_unique_wrapper<char, lttng::memory::free>(raw_signature);
+ auto fields =
+ lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::memory::free>(raw_fields);
+ auto model_emf_uri =
+ lttng::make_unique_wrapper<char, lttng::memory::free>(raw_model_emf_uri);
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
}
{
- auto locked_registry = get_locked_session_registry(ua_sess);
+ auto locked_registry = get_locked_session_registry(ua_sess->get_identifier());
if (locked_registry) {
/*
* From this point on, this call acquires the ownership of the signature,
try {
auto& channel = locked_registry->channel(chan_reg_key);
- /* event_id is set on success. */
+ /* id is set on success. */
channel.add_event(
sobjd,
cobjd,
return ret;
}
- DBG3("UST registry event %s with id %" PRId32 " added successfully", name, event_id);
+ DBG_FMT("UST registry event successfully added: name={}, id={}", name, event_id);
return ret;
}
struct ust_app *app;
struct ust_app_session *ua_sess;
uint64_t enum_id = -1ULL;
- lttng::urcu::read_lock_guard read_lock_guard;
- auto entries = lttng::make_unique_wrapper<struct lttng_ust_ctl_enum_entry, lttng::free>(
- raw_entries);
+ const lttng::urcu::read_lock_guard read_lock_guard;
+ auto entries =
+ lttng::make_unique_wrapper<struct lttng_ust_ctl_enum_entry, lttng::memory::free>(
+ raw_entries);
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
return 0;
}
- auto locked_registry = get_locked_session_registry(ua_sess);
+ auto locked_registry = get_locked_session_registry(ua_sess->get_identifier());
if (!locked_registry) {
DBG("Application session is being torn down (registry not found). Aborting enum registration.");
return 0;
int sobjd, cobjd, loglevel_value;
char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
size_t nr_fields;
+ uint64_t tracer_token = 0;
struct lttng_ust_ctl_field *fields;
DBG2("UST app ustctl register event received");
&sig,
&nr_fields,
&fields,
- &model_emf_uri);
+ &model_emf_uri,
+ &tracer_token);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app recv event failed. Application died: sock = %d",
}
{
- lttng::urcu::read_lock_guard rcu_lock;
+ const lttng::urcu::read_lock_guard rcu_lock;
const struct ust_app *app = find_app_by_notify_sock(sock);
if (!app) {
DBG("Application socket %d is being torn down. Abort event notify",
break;
}
+ case LTTNG_UST_CTL_NOTIFY_CMD_KEY:
+ {
+ DBG2("UST app ustctl register key received");
+ ret = -LTTNG_UST_ERR_NOSYS;
+ // TODO
+ goto error;
+ }
default:
/* Should NEVER happen. */
abort();
LTTNG_ASSERT(sock >= 0);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
obj = zmalloc<ust_app_notify_sock_obj>();
if (!obj) {
/*
* Destroy a ust app data structure and free its memory.
*/
-void ust_app_destroy(struct ust_app *app)
+static void ust_app_destroy(ust_app& app)
{
- if (!app) {
- return;
- }
-
- call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+ call_rcu(&app.pid_n.head, delete_ust_app_rcu);
}
/*
{
int ret = 0;
enum lttng_error_code status = LTTNG_OK;
- struct lttng_ht_iter iter;
- struct ust_app *app;
char *trace_path = nullptr;
LTTNG_ASSERT(usess);
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
- struct buffer_reg_uid *reg;
-
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
- struct buffer_reg_channel *buf_reg_chan;
+ for (auto *reg :
+ lttng::urcu::list_iteration_adapter<buffer_reg_uid, &buffer_reg_uid::lnode>(
+ usess->buffer_reg_uid_list)) {
struct consumer_socket *socket;
char pathname[PATH_MAX];
size_t consumer_path_offset = 0;
goto error;
}
/* Add the UST default trace dir to path. */
- cds_lfht_for_each_entry (
- reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ for (auto *buf_reg_chan :
+ lttng::urcu::lfht_iteration_adapter<buffer_reg_channel,
+ decltype(buffer_reg_channel::node),
+ &buffer_reg_channel::node>(
+ *reg->registry->channels->ht)) {
status =
consumer_snapshot_channel(socket,
buf_reg_chan->consumer_key,
}
case LTTNG_BUFFER_PER_PID:
{
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::pid_n),
+ &ust_app::pid_n>(*ust_app_ht->ht)) {
struct consumer_socket *socket;
- struct lttng_ht_iter chan_iter;
- struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
lsu::registry_session *registry;
char pathname[PATH_MAX];
size_t consumer_path_offset = 0;
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app);
if (!ua_sess) {
/* Session not associated with this app. */
continue;
status = LTTNG_ERR_INVALID;
goto error;
}
- cds_lfht_for_each_entry (
- ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
+
+ for (auto *ua_chan :
+ lttng::urcu::lfht_iteration_adapter<ust_app_channel,
+ decltype(ust_app_channel::node),
+ &ust_app_channel::node>(
+ *ua_sess->channels->ht)) {
status =
consumer_snapshot_channel(socket,
ua_chan->key,
}
}
- registry = get_session_registry(ua_sess);
+ registry = ust_app_get_session_registry(ua_sess->get_identifier());
if (!registry) {
DBG("Application session is being torn down. Skip application.");
continue;
uint64_t cur_nr_packets)
{
uint64_t tot_size = 0;
- struct ust_app *app;
- struct lttng_ht_iter iter;
LTTNG_ASSERT(usess);
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
- struct buffer_reg_uid *reg;
-
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
- struct buffer_reg_channel *buf_reg_chan;
-
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ for (auto *reg :
+ lttng::urcu::list_iteration_adapter<buffer_reg_uid, &buffer_reg_uid::lnode>(
+ usess->buffer_reg_uid_list)) {
+ for (auto *buf_reg_chan :
+ lttng::urcu::lfht_iteration_adapter<buffer_reg_channel,
+ decltype(buffer_reg_channel::node),
+ &buffer_reg_channel::node>(
+ *reg->registry->channels->ht)) {
if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
/*
* Don't take channel into account if we
}
case LTTNG_BUFFER_PER_PID:
{
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct ust_app_channel *ua_chan;
- struct ust_app_session *ua_sess;
- struct lttng_ht_iter chan_iter;
-
- ua_sess = lookup_session_by_app(usess, app);
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::pid_n),
+ &ust_app::pid_n>(*ust_app_ht->ht)) {
+ const auto *ua_sess = ust_app_lookup_app_session(usess, app);
if (!ua_sess) {
/* Session not associated with this app. */
continue;
}
- cds_lfht_for_each_entry (
- ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
+ for (auto *ua_chan :
+ lttng::urcu::lfht_iteration_adapter<ust_app_channel,
+ decltype(ust_app_channel::node),
+ &ust_app_channel::node>(
+ *ua_sess->channels->ht)) {
if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
/*
* Don't take channel into account if we
uint64_t *lost)
{
int ret = 0;
- struct lttng_ht_iter iter;
struct lttng_ht_node_str *ua_chan_node;
- struct ust_app *app;
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
* Iterate over every registered applications. Sum counters for
* all applications containing requested session and channel.
*/
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
struct lttng_ht_iter uiter;
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
continue;
}
/* Get channel */
lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ ua_chan_node = lttng_ht_iter_get_node<lttng_ht_node_str>(&uiter);
/* If the session is found for the app, the channel must be there */
LTTNG_ASSERT(ua_chan_node);
DBG("Regenerating the metadata for ust app pid %d", app->pid);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
+ const auto update_health_code_on_exit =
+ lttng::make_scope_exit([]() noexcept { health_code_update(); });
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app);
if (ua_sess == nullptr) {
/* The session is in teardown process. Ignore and continue. */
- goto end;
+ return 0;
}
- pthread_mutex_lock(&ua_sess->lock);
-
- if (ua_sess->deleted) {
- goto end_unlock;
+ const auto locked_ua_sess = ua_sess->lock();
+ if (locked_ua_sess->deleted) {
+ return 0;
}
pthread_mutex_lock(&app->sock_lock);
ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
pthread_mutex_unlock(&app->sock_lock);
-
-end_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
-
-end:
- health_code_update();
return ret;
}
*/
int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
{
- int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
-
DBG("Regenerating the metadata for all UST apps");
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app, decltype(ust_app::pid_n), &ust_app::pid_n>(
+ *ust_app_ht->ht)) {
if (!app->compatible) {
continue;
}
- ret = ust_app_regenerate_statedump(usess, app);
- if (ret < 0) {
- /* Continue to the next app even on error */
- continue;
- }
+ (void) ust_app_regenerate_statedump(usess, app);
}
return 0;
*
* Return LTTNG_OK on success or else an LTTng error code.
*/
-enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
+enum lttng_error_code ust_app_rotate_session(const ltt_session::locked_ref& session)
{
int ret;
enum lttng_error_code cmd_ret = LTTNG_OK;
- struct lttng_ht_iter iter;
- struct ust_app *app;
struct ltt_ust_session *usess = session->ust_session;
LTTNG_ASSERT(usess);
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
- struct buffer_reg_uid *reg;
-
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
- struct buffer_reg_channel *buf_reg_chan;
+ for (auto *reg :
+ lttng::urcu::list_iteration_adapter<buffer_reg_uid, &buffer_reg_uid::lnode>(
+ usess->buffer_reg_uid_list)) {
struct consumer_socket *socket;
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
}
/* Rotate the data channels. */
- cds_lfht_for_each_entry (
- reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ for (auto *buf_reg_chan :
+ lttng::urcu::lfht_iteration_adapter<buffer_reg_channel,
+ decltype(buffer_reg_channel::node),
+ &buffer_reg_channel::node>(
+ *reg->registry->channels->ht)) {
ret = consumer_rotate_channel(socket,
buf_reg_chan->consumer_key,
usess->consumer,
}
case LTTNG_BUFFER_PER_PID:
{
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ /* Iterate on all apps. */
+ for (auto raw_app :
+ lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::pid_n),
+ &ust_app::pid_n>(*ust_app_ht->ht)) {
struct consumer_socket *socket;
- struct lttng_ht_iter chan_iter;
- struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
lsu::registry_session *registry;
+ bool app_reference_taken;
+
+ app_reference_taken = ust_app_get(*raw_app);
+ if (!app_reference_taken) {
+ /* Application unregistered concurrently, skip it. */
+ DBG("Could not get application reference as it is being torn down; skipping application");
+ continue;
+ }
+
+ ust_app_reference app(raw_app);
+ raw_app = nullptr;
- ua_sess = lookup_session_by_app(usess, app);
+ ua_sess = ust_app_lookup_app_session(usess, app.get());
if (!ua_sess) {
/* Session not associated with this app. */
continue;
goto error;
}
- registry = get_session_registry(ua_sess);
- if (!registry) {
- DBG("Application session is being torn down. Skip application.");
- continue;
- }
+ registry = ust_app_get_session_registry(ua_sess->get_identifier());
+ LTTNG_ASSERT(registry);
/* Rotate the data channels. */
- cds_lfht_for_each_entry (
- ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
+ for (auto *ua_chan :
+ lttng::urcu::lfht_iteration_adapter<ust_app_channel,
+ decltype(ust_app_channel::node),
+ &ust_app_channel::node>(
+ *ua_sess->channels->ht)) {
ret = consumer_rotate_channel(socket,
ua_chan->key,
ua_sess->consumer,
/* is_metadata_channel */ false);
if (ret < 0) {
- /* Per-PID buffer and application going away. */
- if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
- continue;
cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
(void) push_metadata(locked_registry, usess->consumer);
}
+
ret = consumer_rotate_channel(socket,
registry->_metadata_key,
ua_sess->consumer,
/* is_metadata_channel */ true);
if (ret < 0) {
- /* Per-PID buffer and application going away. */
- if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
- continue;
cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
}
+
break;
}
default:
enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust_session *usess)
{
enum lttng_error_code ret = LTTNG_OK;
- struct lttng_ht_iter iter;
enum lttng_trace_chunk_status chunk_status;
char *pathname_index;
int fmt_ret;
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
- struct buffer_reg_uid *reg;
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
+ for (auto *reg :
+ lttng::urcu::list_iteration_adapter<buffer_reg_uid, &buffer_reg_uid::lnode>(
+ usess->buffer_reg_uid_list)) {
fmt_ret = asprintf(&pathname_index,
DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH
"/" DEFAULT_INDEX_DIR,
}
case LTTNG_BUFFER_PER_PID:
{
- struct ust_app *app;
- lttng::urcu::read_lock_guard read_lock;
-
/*
* Create the toplevel ust/ directory in case no apps are running.
*/
goto error;
}
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct ust_app_session *ua_sess;
- lsu::registry_session *registry;
-
- ua_sess = lookup_session_by_app(usess, app);
+ /* Iterate on all apps. */
+ for (auto *app :
+ lttng::urcu::lfht_iteration_adapter<ust_app,
+ decltype(ust_app::pid_n),
+ &ust_app::pid_n>(*ust_app_ht->ht)) {
+ const auto ua_sess = ust_app_lookup_app_session(usess, app);
if (!ua_sess) {
/* Session not associated with this app. */
continue;
}
- registry = get_session_registry(ua_sess);
+ const auto registry =
+ ust_app_get_session_registry(ua_sess->get_identifier());
if (!registry) {
DBG("Application session is being torn down. Skip application.");
continue;
*
* Return LTTNG_OK on success or else an LTTng error code.
*/
-enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
+/*
+ * Clear the data channels of an inactive session and push/rotate its
+ * metadata. The per-buffer-type switch (per-UID registry list vs. per-PID
+ * app iteration) is replaced by a single walk over the session's
+ * user-space consumer channel keys, which abstracts the allocation policy.
+ * Takes a locked session reference, so the caller's locking is enforced by
+ * the type system instead of by convention.
+ */
+enum lttng_error_code ust_app_clear_session(const ltt_session::locked_ref& session)
{
- int ret;
- enum lttng_error_code cmd_ret = LTTNG_OK;
- struct lttng_ht_iter iter;
- struct ust_app *app;
- struct ltt_ust_session *usess = session->ust_session;
-
- LTTNG_ASSERT(usess);
+ const ltt_ust_session& usess = *session->ust_session;
- if (usess->active) {
+ /* Clearing an active session is a logic error on the caller's part. */
+ if (usess.active) {
ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
- cmd_ret = LTTNG_ERR_FATAL;
- goto end;
+ return LTTNG_ERR_FATAL;
}
- switch (usess->buffer_type) {
- case LTTNG_BUFFER_PER_UID:
- {
- struct buffer_reg_uid *reg;
- lttng::urcu::read_lock_guard read_lock;
-
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
- struct buffer_reg_channel *buf_reg_chan;
- struct consumer_socket *socket;
-
- /* Get consumer socket to use to push the metadata.*/
- socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
- if (!socket) {
- cmd_ret = LTTNG_ERR_INVALID;
- goto error_socket;
- }
-
- /* Clear the data channels. */
- cds_lfht_for_each_entry (
- reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
- ret = consumer_clear_channel(socket, buf_reg_chan->consumer_key);
- if (ret < 0) {
- goto error;
- }
- }
+ /* Iterate once over every channel key, regardless of buffer type. */
+ const auto channel_keys = session->user_space_consumer_channel_keys();
+ for (auto it = channel_keys.begin(); it != channel_keys.end(); ++it) {
+ const auto key = *it;
- {
- auto locked_registry = reg->registry->reg.ust->lock();
- (void) push_metadata(locked_registry, usess->consumer);
- }
+ /*
+ * Pick the consumer socket matching the channel's ABI bitness.
+ * NOTE(review): the removed code treated a null socket as
+ * LTTNG_ERR_INVALID; the new code uses consumer_socket without a
+ * null check — confirm consumer_find_socket_by_bitness cannot
+ * return nullptr here.
+ */
+ const auto consumer_socket = consumer_find_socket_by_bitness(
+ key.bitness ==
+ lttng::sessiond::user_space_consumer_channel_keys::
+ consumer_bitness::ABI_32 ?
+ 32 :
+ 64,
+ usess.consumer);
- /*
- * Clear the metadata channel.
- * Metadata channel is not cleared per se but we still need to
- * perform a rotation operation on it behind the scene.
- */
- ret = consumer_clear_channel(socket, reg->registry->reg.ust->_metadata_key);
- if (ret < 0) {
- goto error;
- }
+ /*
+ * Metadata channels are not cleared per se: push the locked
+ * registry's metadata before the clear/rotation below.
+ */
+ if (key.type ==
+ lttng::sessiond::user_space_consumer_channel_keys::channel_type::METADATA) {
+ (void) push_metadata(it.get_registry_session()->lock(), usess.consumer);
}
- break;
- }
- case LTTNG_BUFFER_PER_PID:
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct consumer_socket *socket;
- struct lttng_ht_iter chan_iter;
- struct ust_app_channel *ua_chan;
- struct ust_app_session *ua_sess;
- lsu::registry_session *registry;
-
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* Session not associated with this app. */
- continue;
- }
-
- /* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
- usess->consumer);
- if (!socket) {
- cmd_ret = LTTNG_ERR_INVALID;
- goto error_socket;
- }
- registry = get_session_registry(ua_sess);
- if (!registry) {
- DBG("Application session is being torn down. Skip application.");
+ const auto clean_ret = consumer_clear_channel(consumer_socket, key.key_value);
+ if (clean_ret < 0) {
+ /*
+ * A per-PID channel can disappear with its application;
+ * tolerate "not found" in that case only.
+ */
+ if (clean_ret == -LTTCOMM_CONSUMERD_CHAN_NOT_FOUND &&
+ usess.buffer_type == LTTNG_BUFFER_PER_PID) {
continue;
}
- /* Clear the data channels. */
- cds_lfht_for_each_entry (
- ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
- ret = consumer_clear_channel(socket, ua_chan->key);
- if (ret < 0) {
- /* Per-PID buffer and application going away. */
- if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
- continue;
- }
- goto error;
- }
- }
-
- {
- auto locked_registry = registry->lock();
- (void) push_metadata(locked_registry, usess->consumer);
+ if (clean_ret == -LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED) {
+ return LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
}
- /*
- * Clear the metadata channel.
- * Metadata channel is not cleared per se but we still need to
- * perform rotation operation on it behind the scene.
- */
- ret = consumer_clear_channel(socket, registry->_metadata_key);
- if (ret < 0) {
- /* Per-PID buffer and application going away. */
- if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
- continue;
- }
- goto error;
- }
+ return LTTNG_ERR_CLEAR_FAIL_CONSUMER;
}
- break;
- }
- default:
- abort();
- break;
- }
-
- cmd_ret = LTTNG_OK;
- goto end;
-
-error:
- switch (-ret) {
- case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
- cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
- break;
- default:
- cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
}
-error_socket:
-end:
- return cmd_ret;
+ return LTTNG_OK;
}
/*
* daemon as the same "offset" in a metadata stream will no longer point
* to the same content.
*/
-enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
+/*
+ * Ask the consumer to open a packet in every data channel of the session.
+ * As with ust_app_clear_session, the per-UID/per-PID switch is collapsed
+ * into one walk over the session's user-space consumer channel keys, and
+ * the session is received as a locked reference.
+ */
+enum lttng_error_code ust_app_open_packets(const ltt_session::locked_ref& session)
{
- enum lttng_error_code ret = LTTNG_OK;
- struct lttng_ht_iter iter;
- struct ltt_ust_session *usess = session->ust_session;
-
- LTTNG_ASSERT(usess);
-
- switch (usess->buffer_type) {
- case LTTNG_BUFFER_PER_UID:
- {
- struct buffer_reg_uid *reg;
-
- cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
- struct buffer_reg_channel *buf_reg_chan;
- struct consumer_socket *socket;
- lttng::urcu::read_lock_guard read_lock;
+ const ltt_ust_session& usess = *session->ust_session;
- socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
- if (!socket) {
- ret = LTTNG_ERR_FATAL;
- goto error;
- }
-
- cds_lfht_for_each_entry (
- reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
- const int open_ret = consumer_open_channel_packets(
- socket, buf_reg_chan->consumer_key);
-
- if (open_ret < 0) {
- ret = LTTNG_ERR_UNK;
- goto error;
- }
- }
+ for (const auto key : session->user_space_consumer_channel_keys()) {
+ /* Packet opening only applies to data channels; skip metadata. */
+ if (key.type !=
+ lttng::sessiond::user_space_consumer_channel_keys::channel_type::DATA) {
+ continue;
}
- break;
- }
- case LTTNG_BUFFER_PER_PID:
- {
- struct ust_app *app;
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct consumer_socket *socket;
- struct lttng_ht_iter chan_iter;
- struct ust_app_channel *ua_chan;
- struct ust_app_session *ua_sess;
- lsu::registry_session *registry;
-
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* Session not associated with this app. */
- continue;
- }
- /* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
- usess->consumer);
- if (!socket) {
- ret = LTTNG_ERR_FATAL;
- goto error;
- }
+ /*
+ * NOTE(review): the removed code returned LTTNG_ERR_FATAL when no
+ * socket matched the bitness; the new code passes socket straight
+ * to consumer_open_channel_packets — confirm
+ * consumer_find_socket_by_bitness cannot return nullptr here.
+ */
+ const auto socket = consumer_find_socket_by_bitness(
+ key.bitness ==
+ lttng::sessiond::user_space_consumer_channel_keys::
+ consumer_bitness::ABI_32 ?
+ 32 :
+ 64,
+ usess.consumer);
- registry = get_session_registry(ua_sess);
- if (!registry) {
- DBG("Application session is being torn down. Skip application.");
+ const auto open_ret = consumer_open_channel_packets(socket, key.key_value);
+ if (open_ret < 0) {
+ /* Per-PID buffer and application going away. */
+ if (open_ret == -LTTCOMM_CONSUMERD_CHAN_NOT_FOUND &&
+ usess.buffer_type == LTTNG_BUFFER_PER_PID) {
continue;
}
- cds_lfht_for_each_entry (
- ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
- const int open_ret =
- consumer_open_channel_packets(socket, ua_chan->key);
-
- if (open_ret < 0) {
- /*
- * Per-PID buffer and application going
- * away.
- */
- if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
- continue;
- }
-
- ret = LTTNG_ERR_UNK;
- goto error;
- }
- }
+ return LTTNG_ERR_UNK;
}
- break;
- }
- default:
- abort();
- break;
}
-error:
- return ret;
+ return LTTNG_OK;
}
lsu::ctl_field_quirks ust_app::ctl_field_quirks() const
return v_major <= 9 ? lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS :
lsu::ctl_field_quirks::NONE;
}
+
+/*
+ * urcu_ref release callback, invoked when the last reference on a ust_app
+ * is dropped (see ust_app_put). Recovers the enclosing ust_app from its
+ * embedded ref member, unregisters it, then schedules its RCU-deferred
+ * destruction via ust_app_destroy.
+ */
+static void ust_app_release(urcu_ref *ref)
+{
+ auto& app = *lttng::utils::container_of(ref, &ust_app::ref);
+
+ ust_app_unregister(app);
+ ust_app_destroy(app);
+}
+
+/*
+ * Attempt to acquire a reference on an application.
+ *
+ * Returns false when the reference count has already reached zero, i.e.
+ * the application is being torn down concurrently and must be skipped by
+ * the caller (see the rotate-session iteration for an example).
+ */
+bool ust_app_get(ust_app& app)
+{
+ return urcu_ref_get_unless_zero(&app.ref);
+}
+
+/*
+ * Release a reference on an application; a nullptr argument is a no-op.
+ * Dropping the last reference triggers ust_app_release (unregister +
+ * RCU-deferred destruction).
+ */
+void ust_app_put(struct ust_app *app)
+{
+ if (!app) {
+ return;
+ }
+
+ urcu_ref_put(&app->ref, ust_app_release);
+}
+
+/*
+ * Accessor for the global registered-applications hash table (keyed by
+ * PID). Presumably callers must iterate it under the RCU read-side lock,
+ * as the in-file iterations do — confirm at call sites.
+ */
+lttng_ht *ust_app_get_all()
+{
+ return ust_app_ht;
+}