X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=src%2Fbin%2Flttng-sessiond%2Fust-app.cpp;h=12250edfdaefb1341736ba5b78c62a53ceba1c8e;hb=HEAD;hp=66d363c4b064ea003e8577a5e504aaf5b126d6b8;hpb=aeeb48c6a7dd4bcc092b3105439489fc393f6425;p=lttng-tools.git diff --git a/src/bin/lttng-sessiond/ust-app.cpp b/src/bin/lttng-sessiond/ust-app.cpp index 66d363c4b..802b593dc 100644 --- a/src/bin/lttng-sessiond/ust-app.cpp +++ b/src/bin/lttng-sessiond/ust-app.cpp @@ -7,10 +7,48 @@ */ #define _LGPL_SOURCE + +#include "buffer-registry.hpp" +#include "condition-internal.hpp" +#include "event-notifier-error-accounting.hpp" +#include "event.hpp" +#include "fd-limit.hpp" +#include "field.hpp" +#include "health-sessiond.hpp" +#include "lttng-sessiond.hpp" +#include "lttng-ust-ctl.hpp" +#include "lttng-ust-error.hpp" +#include "notification-thread-commands.hpp" +#include "session.hpp" +#include "ust-app.hpp" +#include "ust-consumer.hpp" +#include "ust-field-quirks.hpp" +#include "utils.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + #include #include #include #include +#include #include #include #include @@ -19,44 +57,16 @@ #include #include #include -#include +#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "buffer-registry.hpp" -#include "condition-internal.hpp" -#include "fd-limit.hpp" -#include "health-sessiond.hpp" -#include "ust-app.hpp" -#include "ust-consumer.hpp" -#include "lttng-ust-ctl.hpp" -#include "lttng-ust-error.hpp" -#include "utils.hpp" -#include "session.hpp" -#include "lttng-sessiond.hpp" -#include "notification-thread-commands.hpp" -#include "rotate.hpp" -#include "event.hpp" -#include "event-notifier-error-accounting.hpp" -#include "ust-field-utils.hpp" +namespace lsu = lttng::sessiond::ust; +namespace lst = lttng::sessiond::trace; struct lttng_ht *ust_app_ht; struct lttng_ht *ust_app_ht_by_sock; struct lttng_ht *ust_app_ht_by_notify_sock; -static -int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess); +static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess); /* Next available channel key. Access under next_channel_key_lock. */ static uint64_t _next_channel_key; @@ -66,10 +76,66 @@ static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER; static uint64_t _next_session_id; static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER; +/* + * Return the session registry according to the buffer type of the given + * session. + * + * A registry per UID object MUST exists before calling this function or else + * it LTTNG_ASSERT() if not found. RCU read side lock must be acquired. + */ +lsu::registry_session *ust_app_get_session_registry(const ust_app_session::identifier& ua_sess_id) +{ + lsu::registry_session *registry = nullptr; + + switch (ua_sess_id.allocation_policy) { + case ust_app_session::identifier::buffer_allocation_policy::PER_PID: + { + struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess_id.id); + if (!reg_pid) { + goto error; + } + registry = reg_pid->registry->reg.ust; + break; + } + case ust_app_session::identifier::buffer_allocation_policy::PER_UID: + { + struct buffer_reg_uid *reg_uid = buffer_reg_uid_find( + ua_sess_id.session_id, + ua_sess_id.abi == ust_app_session::identifier::application_abi::ABI_32 ? 
+ 32 : + 64, + lttng_credentials_get_uid(&ua_sess_id.app_credentials)); + if (!reg_uid) { + goto error; + } + registry = reg_uid->registry->reg.ust; + break; + } + default: + abort(); + }; + +error: + return registry; +} + +namespace { +lsu::registry_session::locked_ref +get_locked_session_registry(const ust_app_session::identifier& identifier) +{ + auto session = ust_app_get_session_registry(identifier); + if (session) { + pthread_mutex_lock(&session->_lock); + } + + return lsu::registry_session::locked_ref{ session }; +} +} /* namespace */ + /* * Return the incremented value of next_channel_key. */ -static uint64_t get_next_channel_key(void) +static uint64_t get_next_channel_key() { uint64_t ret; @@ -82,7 +148,7 @@ static uint64_t get_next_channel_key(void) /* * Return the atomically incremented value of next_session_id. */ -static uint64_t get_next_session_id(void) +static uint64_t get_next_session_id() { uint64_t ret; @@ -92,9 +158,8 @@ static uint64_t get_next_session_id(void) return ret; } -static void copy_channel_attr_to_ustctl( - struct lttng_ust_ctl_consumer_channel_attr *attr, - struct lttng_ust_abi_channel_attr *uattr) +static void copy_channel_attr_to_ustctl(struct lttng_ust_ctl_consumer_channel_attr *attr, + struct lttng_ust_abi_channel_attr *uattr) { /* Copy event attributes since the layout is different. */ attr->subbuf_size = uattr->subbuf_size; @@ -114,16 +179,11 @@ static void copy_channel_attr_to_ustctl( */ static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key) { - struct ust_app_event *event; - const struct ust_app_ht_key *key; - int ev_loglevel_value; - LTTNG_ASSERT(node); LTTNG_ASSERT(_key); - event = caa_container_of(node, struct ust_app_event, node.node); - key = (ust_app_ht_key *) _key; - ev_loglevel_value = event->attr.loglevel; + auto *event = lttng_ht_node_container_of(node, &ust_app_event::node); + const auto *key = (ust_app_ht_key *) _key; /* Match the 4 elements of the key: name, filter, loglevel, exclusions */ @@ -133,19 +193,12 @@ static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key) } /* Event loglevel. */ - if (ev_loglevel_value != key->loglevel_type) { - if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL - && key->loglevel_type == 0 && - ev_loglevel_value == -1) { - /* - * Match is accepted. This is because on event creation, the - * loglevel is set to -1 if the event loglevel type is ALL so 0 and - * -1 are accepted for this loglevel type since 0 is the one set by - * the API when receiving an enable event. - */ - } else { - goto no_match; - } + if (!loglevels_match(event->attr.loglevel_type, + event->attr.loglevel, + key->loglevel_type, + key->loglevel_value, + LTTNG_UST_ABI_LOGLEVEL_ALL)) { + goto no_match; } /* One of the filters is NULL, fail. */ @@ -156,8 +209,7 @@ static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key) if (key->filter && event->filter) { /* Both filters exists, check length followed by the bytecode. */ if (event->filter->len != key->filter->len || - memcmp(event->filter->data, key->filter->data, - event->filter->len) != 0) { + memcmp(event->filter->data, key->filter->data, event->filter->len) != 0) { goto no_match; } } @@ -170,13 +222,13 @@ static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key) if (key->exclusion && event->exclusion) { /* Both exclusions exists, check count followed by the names. 
*/ if (event->exclusion->count != key->exclusion->count || - memcmp(event->exclusion->names, key->exclusion->names, - event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) { + memcmp(event->exclusion->names, + key->exclusion->names, + event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) { goto no_match; } } - /* Match. */ return 1; @@ -188,8 +240,7 @@ no_match: * Unique add of an ust app event in the given ht. This uses the custom * ht_match_ust_app_event match function and the event name as hash. */ -static void add_unique_ust_app_event(struct ust_app_channel *ua_chan, - struct ust_app_event *event) +static void add_unique_ust_app_event(struct ust_app_channel *ua_chan, struct ust_app_event *event) { struct cds_lfht_node *node_ptr; struct ust_app_ht_key key; @@ -202,12 +253,15 @@ static void add_unique_ust_app_event(struct ust_app_channel *ua_chan, ht = ua_chan->events; key.name = event->attr.name; key.filter = event->filter; - key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel; + key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel_type; + key.loglevel_value = event->attr.loglevel; key.exclusion = event->exclusion; node_ptr = cds_lfht_add_unique(ht->ht, - ht->hash_fct(event->node.key, lttng_ht_seed), - ht_match_ust_app_event, &key, &event->node.node); + ht->hash_fct(event->node.key, lttng_ht_seed), + ht_match_ust_app_event, + &key, + &event->node.node); LTTNG_ASSERT(node_ptr == &event->node.node); } @@ -219,7 +273,7 @@ static void close_notify_sock_rcu(struct rcu_head *head) { int ret; struct ust_app_notify_sock_obj *obj = - caa_container_of(head, struct ust_app_notify_sock_obj, head); + lttng::utils::container_of(head, &ust_app_notify_sock_obj::head); /* Must have a valid fd here. */ LTTNG_ASSERT(obj->fd >= 0); @@ -233,56 +287,11 @@ static void close_notify_sock_rcu(struct rcu_head *head) free(obj); } -/* - * Return the session registry according to the buffer type of the given - * session. - * - * A registry per UID object MUST exists before calling this function or else - * it LTTNG_ASSERT() if not found. RCU read side lock must be acquired. - */ -static ust_registry_session *get_session_registry( - struct ust_app_session *ua_sess) -{ - ust_registry_session *registry = NULL; - - LTTNG_ASSERT(ua_sess); - - switch (ua_sess->buffer_type) { - case LTTNG_BUFFER_PER_PID: - { - struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id); - if (!reg_pid) { - goto error; - } - registry = reg_pid->registry->reg.ust; - break; - } - case LTTNG_BUFFER_PER_UID: - { - struct buffer_reg_uid *reg_uid = buffer_reg_uid_find( - ua_sess->tracing_id, ua_sess->bits_per_long, - lttng_credentials_get_uid(&ua_sess->real_credentials)); - if (!reg_uid) { - goto error; - } - registry = reg_uid->registry->reg.ust; - break; - } - default: - abort(); - }; - -error: - return registry; -} - /* * Delete ust context safely. RCU read lock must be held before calling * this function. */ -static -void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx, - struct ust_app *app) +static void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx, struct ust_app *app) { int ret; @@ -296,18 +305,28 @@ void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx, if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app release ctx failed. 
Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d", - ua_ctx->obj->handle, ret, - app->pid, app->sock); + ua_ctx->obj->handle, + ret, + app->pid, + app->sock); } } free(ua_ctx->obj); } + + if (ua_ctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) { + free(ua_ctx->ctx.u.app_ctx.provider_name); + free(ua_ctx->ctx.u.app_ctx.ctx_name); + } + free(ua_ctx); } @@ -315,9 +334,7 @@ void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx, * Delete ust app event safely. RCU read lock must be held before calling * this function. */ -static -void delete_ust_app_event(int sock, struct ust_app_event *ua_event, - struct ust_app *app) +static void delete_ust_app_event(int sock, struct ust_app_event *ua_event, struct ust_app *app) { int ret; @@ -325,22 +342,26 @@ void delete_ust_app_event(int sock, struct ust_app_event *ua_event, ASSERT_RCU_READ_LOCKED(); free(ua_event->filter); - if (ua_event->exclusion != NULL) + if (ua_event->exclusion != nullptr) free(ua_event->exclusion); - if (ua_event->obj != NULL) { + if (ua_event->obj != nullptr) { pthread_mutex_lock(&app->sock_lock); ret = lttng_ust_ctl_release_object(sock, ua_event->obj); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app release event failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } free(ua_event->obj); @@ -352,11 +373,10 @@ void delete_ust_app_event(int sock, struct ust_app_event *ua_event, * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called * through a call_rcu(). */ -static -void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head) +static void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head) { - struct ust_app_event_notifier_rule *obj = caa_container_of( - head, struct ust_app_event_notifier_rule, rcu_head); + struct ust_app_event_notifier_rule *obj = + lttng::utils::container_of(head, &ust_app_event_notifier_rule::rcu_head); free(obj); } @@ -364,32 +384,35 @@ void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head) /* * Delete ust app event notifier rule safely. */ -static void delete_ust_app_event_notifier_rule(int sock, - struct ust_app_event_notifier_rule *ua_event_notifier_rule, - struct ust_app *app) +static void delete_ust_app_event_notifier_rule( + int sock, struct ust_app_event_notifier_rule *ua_event_notifier_rule, struct ust_app *app) { int ret; LTTNG_ASSERT(ua_event_notifier_rule); - if (ua_event_notifier_rule->exclusion != NULL) { + if (ua_event_notifier_rule->exclusion != nullptr) { free(ua_event_notifier_rule->exclusion); } - if (ua_event_notifier_rule->obj != NULL) { + if (ua_event_notifier_rule->obj != nullptr) { pthread_mutex_lock(&app->sock_lock); ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app release event notifier failed. 
Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } @@ -397,8 +420,7 @@ static void delete_ust_app_event_notifier_rule(int sock, } lttng_trigger_put(ua_event_notifier_rule->trigger); - call_rcu(&ua_event_notifier_rule->rcu_head, - free_ust_app_event_notifier_rule_rcu); + call_rcu(&ua_event_notifier_rule->rcu_head, free_ust_app_event_notifier_rule_rcu); } /* @@ -406,8 +428,7 @@ static void delete_ust_app_event_notifier_rule(int sock, * * Return 0 on success or else a negative value. */ -static int release_ust_app_stream(int sock, struct ust_app_stream *stream, - struct ust_app *app) +static int release_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app) { int ret = 0; @@ -420,13 +441,17 @@ static int release_ust_app_stream(int sock, struct ust_app_stream *stream, if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } lttng_fd_put(LTTNG_FD_APPS, 2); @@ -440,9 +465,7 @@ static int release_ust_app_stream(int sock, struct ust_app_stream *stream, * Delete ust app stream safely. RCU read lock must be held before calling * this function. */ -static -void delete_ust_app_stream(int sock, struct ust_app_stream *stream, - struct ust_app *app) +static void delete_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app) { LTTNG_ASSERT(stream); ASSERT_RCU_READ_LOCKED(); @@ -451,11 +474,10 @@ void delete_ust_app_stream(int sock, struct ust_app_stream *stream, free(stream); } -static -void delete_ust_app_channel_rcu(struct rcu_head *head) +static void delete_ust_app_channel_rcu(struct rcu_head *head) { struct ust_app_channel *ua_chan = - caa_container_of(head, struct ust_app_channel, rcu_head); + lttng::utils::container_of(head, &ust_app_channel::rcu_head); lttng_ht_destroy(ua_chan->ctx); lttng_ht_destroy(ua_chan->events); @@ -469,63 +491,65 @@ void delete_ust_app_channel_rcu(struct rcu_head *head) * * The session list lock must be held by the caller. */ -static -void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan) +static void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan) { uint64_t discarded = 0, lost = 0; - struct ltt_session *session; struct ltt_ust_channel *uchan; if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) { return; } - rcu_read_lock(); - session = session_find_by_id(ua_chan->session->tracing_id); - if (!session || !session->ust_session) { - /* - * Not finding the session is not an error because there are - * multiple ways the channels can be torn down. - * - * 1) The session daemon can initiate the destruction of the - * ust app session after receiving a destroy command or - * during its shutdown/teardown. 
- * 2) The application, since we are in per-pid tracing, is - * unregistering and tearing down its ust app session. - * - * Both paths are protected by the session list lock which - * ensures that the accounting of lost packets and discarded - * events is done exactly once. The session is then unpublished - * from the session list, resulting in this condition. - */ - goto end; - } + const lttng::urcu::read_lock_guard read_lock; - if (ua_chan->attr.overwrite) { - consumer_get_lost_packets(ua_chan->session->tracing_id, - ua_chan->key, session->ust_session->consumer, - &lost); - } else { - consumer_get_discarded_events(ua_chan->session->tracing_id, - ua_chan->key, session->ust_session->consumer, - &discarded); - } - uchan = trace_ust_find_channel_by_name( - session->ust_session->domain_global.channels, - ua_chan->name); - if (!uchan) { - ERR("Missing UST channel to store discarded counters"); - goto end; + try { + const auto session = ltt_session::find_session(ua_chan->session->tracing_id); + + if (!session->ust_session) { + /* + * Not finding the session is not an error because there are + * multiple ways the channels can be torn down. + * + * 1) The session daemon can initiate the destruction of the + * ust app session after receiving a destroy command or + * during its shutdown/teardown. + * 2) The application, since we are in per-pid tracing, is + * unregistering and tearing down its ust app session. + * + * Both paths are protected by the session list lock which + * ensures that the accounting of lost packets and discarded + * events is done exactly once. The session is then unpublished + * from the session list, resulting in this condition. + */ + return; + } + + if (ua_chan->attr.overwrite) { + consumer_get_lost_packets(ua_chan->session->tracing_id, + ua_chan->key, + session->ust_session->consumer, + &lost); + } else { + consumer_get_discarded_events(ua_chan->session->tracing_id, + ua_chan->key, + session->ust_session->consumer, + &discarded); + } + uchan = trace_ust_find_channel_by_name(session->ust_session->domain_global.channels, + ua_chan->name); + if (!uchan) { + ERR("Missing UST channel to store discarded counters"); + return; + } + } catch (const lttng::sessiond::exceptions::session_not_found_error& ex) { + DBG_FMT("Failed to save per-pid lost/discarded counters: {}, location='{}'", + ex.what(), + ex.source_location); + return; } uchan->per_pid_closed_app_discarded += discarded; uchan->per_pid_closed_app_lost += lost; - -end: - rcu_read_unlock(); - if (session) { - session_put(session); - } } /* @@ -534,16 +558,12 @@ end: * * The session list lock must be held by the caller. 
*/ -static -void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan, - struct ust_app *app) +static void delete_ust_app_channel(int sock, + struct ust_app_channel *ua_chan, + struct ust_app *app, + const lsu::registry_session::locked_ref& locked_registry) { int ret; - struct lttng_ht_iter iter; - struct ust_app_event *ua_event; - struct ust_app_ctx *ua_ctx; - struct ust_app_stream *stream, *stmp; - ust_registry_session *registry; LTTNG_ASSERT(ua_chan); ASSERT_RCU_READ_LOCKED(); @@ -551,34 +571,44 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan, DBG3("UST app deleting channel %s", ua_chan->name); /* Wipe stream */ - cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) { + for (auto *stream : + lttng::urcu::list_iteration_adapter( + ua_chan->streams.head)) { cds_list_del(&stream->list); delete_ust_app_stream(sock, stream, app); } /* Wipe context */ - cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) { + for (auto ua_ctx : + lttng::urcu::lfht_iteration_adapter(*ua_chan->ctx->ht)) { cds_list_del(&ua_ctx->list); - ret = lttng_ht_del(ua_chan->ctx, &iter); + ret = cds_lfht_del(ua_chan->ctx->ht, &ua_ctx->node.node); LTTNG_ASSERT(!ret); delete_ust_app_ctx(sock, ua_ctx, app); } /* Wipe events */ - cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event, - node.node) { - ret = lttng_ht_del(ua_chan->events, &iter); + for (auto ua_event : + lttng::urcu::lfht_iteration_adapter(*ua_chan->events->ht)) { + ret = cds_lfht_del(ua_chan->events->ht, &ua_event->node.node); LTTNG_ASSERT(!ret); delete_ust_app_event(sock, ua_event, app); } if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) { /* Wipe and free registry from session registry. */ - registry = get_session_registry(ua_chan->session); - if (registry) { - ust_registry_channel_del_free(registry, ua_chan->key, - sock >= 0); + if (locked_registry) { + try { + locked_registry->remove_channel(ua_chan->key, sock >= 0); + } catch (const std::exception& ex) { + DBG("Could not find channel for removal: %s", ex.what()); + } } + /* * A negative socket can be used by the caller when * cleaning-up a ua_chan in an error path. Skip the @@ -589,7 +619,9 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan, } } - if (ua_chan->obj != NULL) { + if (ua_chan->obj != nullptr) { + lttng_ht_iter iter; + /* Remove channel from application UST object descriptor. */ iter.iter.node = &ua_chan->ust_objd_node.node; ret = lttng_ht_del(app->ust_objd, &iter); @@ -600,16 +632,20 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan, if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d", - ua_chan->name, app->pid, - app->sock); + ua_chan->name, + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d", - ua_chan->name, app->pid, - app->sock); + ua_chan->name, + app->pid, + app->sock); } else { ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d", - ua_chan->name, ret, app->pid, - app->sock); + ua_chan->name, + ret, + app->pid, + app->sock); } } lttng_fd_put(LTTNG_FD_APPS, 1); @@ -657,20 +693,21 @@ int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data * but it can be caused by recoverable errors (e.g. the application has * terminated concurrently). 
*/ -ssize_t ust_app_push_metadata(ust_registry_session *registry, - struct consumer_socket *socket, int send_zero_data) +ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ref& locked_registry, + struct consumer_socket *socket, + int send_zero_data) { int ret; - char *metadata_str = NULL; + char *metadata_str = nullptr; size_t len, offset, new_metadata_len_sent; ssize_t ret_val; uint64_t metadata_key, metadata_version; - LTTNG_ASSERT(registry); + LTTNG_ASSERT(locked_registry); LTTNG_ASSERT(socket); ASSERT_RCU_READ_LOCKED(); - metadata_key = registry->_metadata_key; + metadata_key = locked_registry->_metadata_key; /* * Means that no metadata was assigned to the session. This can @@ -680,13 +717,13 @@ ssize_t ust_app_push_metadata(ust_registry_session *registry, return 0; } - offset = registry->_metadata_len_sent; - len = registry->_metadata_len - registry->_metadata_len_sent; - new_metadata_len_sent = registry->_metadata_len; - metadata_version = registry->_metadata_version; + offset = locked_registry->_metadata_len_sent; + len = locked_registry->_metadata_len - locked_registry->_metadata_len_sent; + new_metadata_len_sent = locked_registry->_metadata_len; + metadata_version = locked_registry->_metadata_version; if (len == 0) { DBG3("No metadata to push for metadata key %" PRIu64, - registry->_metadata_key); + locked_registry->_metadata_key); ret_val = len; if (send_zero_data) { DBG("No metadata to push"); @@ -703,10 +740,10 @@ ssize_t ust_app_push_metadata(ust_registry_session *registry, goto error; } /* Copy what we haven't sent out. */ - memcpy(metadata_str, registry->_metadata + offset, len); + memcpy(metadata_str, locked_registry->_metadata + offset, len); push_data: - pthread_mutex_unlock(®istry->_lock); + pthread_mutex_unlock(&locked_registry->_lock); /* * We need to unlock the registry while we push metadata to * break a circular dependency between the consumerd metadata @@ -719,9 +756,9 @@ push_data: * daemon. Those push and pull schemes are performed on two * different bidirectionnal communication sockets. */ - ret = consumer_push_metadata(socket, metadata_key, - metadata_str, len, offset, metadata_version); - pthread_mutex_lock(®istry->_lock); + ret = consumer_push_metadata( + socket, metadata_key, metadata_str, len, offset, metadata_version); + pthread_mutex_lock(&locked_registry->_lock); if (ret < 0) { /* * There is an acceptable race here between the registry @@ -758,9 +795,8 @@ push_data: * largest metadata_len_sent value of the concurrent * send. */ - registry->_metadata_len_sent = - std::max(registry->_metadata_len_sent, - new_metadata_len_sent); + locked_registry->_metadata_len_sent = + std::max(locked_registry->_metadata_len_sent, new_metadata_len_sent); } free(metadata_str); return len; @@ -775,7 +811,7 @@ error: * the metadata cache has been destroyed on the * consumer. */ - registry->_metadata_closed = true; + locked_registry->_metadata_closed = true; } error_push: free(metadata_str); @@ -796,41 +832,37 @@ error_push: * but it can be caused by recoverable errors (e.g. the application has * terminated concurrently). 
*/ -static int push_metadata(ust_registry_session *registry, - struct consumer_output *consumer) +static int push_metadata(const lsu::registry_session::locked_ref& locked_registry, + struct consumer_output *consumer) { int ret_val; ssize_t ret; struct consumer_socket *socket; - LTTNG_ASSERT(registry); + LTTNG_ASSERT(locked_registry); LTTNG_ASSERT(consumer); ASSERT_RCU_READ_LOCKED(); - pthread_mutex_lock(®istry->_lock); - if (registry->_metadata_closed) { + if (locked_registry->_metadata_closed) { ret_val = -EPIPE; goto error; } /* Get consumer socket to use to push the metadata.*/ - socket = consumer_find_socket_by_bitness(registry->_bits_per_long, - consumer); + socket = consumer_find_socket_by_bitness(locked_registry->abi.bits_per_long, consumer); if (!socket) { ret_val = -1; goto error; } - ret = ust_app_push_metadata(registry, socket, 0); + ret = ust_app_push_metadata(locked_registry, socket, 0); if (ret < 0) { ret_val = ret; goto error; } - pthread_mutex_unlock(®istry->_lock); return 0; error: - pthread_mutex_unlock(®istry->_lock); return ret_val; } @@ -846,40 +878,18 @@ error: * * Return 0 on success else a negative value. */ -static int close_metadata(ust_registry_session *registry, - struct consumer_output *consumer) +static int close_metadata(uint64_t metadata_key, + unsigned int consumer_bitness, + struct consumer_output *consumer) { int ret; struct consumer_socket *socket; - uint64_t metadata_key; - bool registry_was_already_closed; + const lttng::urcu::read_lock_guard read_lock_guard; - LTTNG_ASSERT(registry); LTTNG_ASSERT(consumer); - rcu_read_lock(); - - pthread_mutex_lock(®istry->_lock); - metadata_key = registry->_metadata_key; - registry_was_already_closed = registry->_metadata_closed; - if (metadata_key != 0) { - /* - * Metadata closed. Even on error this means that the consumer - * is not responding or not found so either way a second close - * should NOT be emit for this registry. - */ - registry->_metadata_closed = true; - } - pthread_mutex_unlock(®istry->_lock); - - if (metadata_key == 0 || registry_was_already_closed) { - ret = 0; - goto end; - } - - /* Get consumer socket to use to push the metadata.*/ - socket = consumer_find_socket_by_bitness(registry->_bits_per_long, - consumer); + /* Get consumer socket to use to push the metadata. */ + socket = consumer_find_socket_by_bitness(consumer_bitness, consumer); if (!socket) { ret = -1; goto end; @@ -891,18 +901,16 @@ static int close_metadata(ust_registry_session *registry, } end: - rcu_read_unlock(); return ret; } -static -void delete_ust_app_session_rcu(struct rcu_head *head) +static void delete_ust_app_session_rcu(struct rcu_head *head) { struct ust_app_session *ua_sess = - caa_container_of(head, struct ust_app_session, rcu_head); + lttng::utils::container_of(head, &ust_app_session::rcu_head); lttng_ht_destroy(ua_sess->channels); - free(ua_sess); + delete ua_sess; } /* @@ -911,29 +919,34 @@ void delete_ust_app_session_rcu(struct rcu_head *head) * * The session list lock must be held by the caller. */ -static -void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, - struct ust_app *app) +static void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, struct ust_app *app) { - int ret; - struct lttng_ht_iter iter; - struct ust_app_channel *ua_chan; - ust_registry_session *registry; - LTTNG_ASSERT(ua_sess); ASSERT_RCU_READ_LOCKED(); - pthread_mutex_lock(&ua_sess->lock); + /* Locked for the duration of the function. 
*/ + auto locked_ua_sess = ua_sess->lock(); LTTNG_ASSERT(!ua_sess->deleted); ua_sess->deleted = true; - registry = get_session_registry(ua_sess); + auto locked_registry = get_locked_session_registry(locked_ua_sess->get_identifier()); /* Registry can be null on error path during initialization. */ - if (registry) { + if (locked_registry) { /* Push metadata for application before freeing the application. */ - (void) push_metadata(registry, ua_sess->consumer); + (void) push_metadata(locked_registry, ua_sess->consumer); + } + + for (auto *ua_chan : + lttng::urcu::lfht_iteration_adapter(*ua_sess->channels->ht)) { + const auto ret = cds_lfht_del(ua_sess->channels->ht, &ua_chan->node.node); + LTTNG_ASSERT(ret == 0); + delete_ust_app_channel(sock, ua_chan, app, locked_registry); + } + if (locked_registry) { /* * Don't ask to close metadata for global per UID buffers. Close * metadata only on destroy trace session in this case. Also, the @@ -941,16 +954,17 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, * close so don't send a close command if closed. */ if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) { - /* And ask to close it for this session registry. */ - (void) close_metadata(registry, ua_sess->consumer); - } - } + const auto metadata_key = locked_registry->_metadata_key; + const auto consumer_bitness = locked_registry->abi.bits_per_long; - cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan, - node.node) { - ret = lttng_ht_del(ua_sess->channels, &iter); - LTTNG_ASSERT(!ret); - delete_ust_app_channel(sock, ua_chan, app); + if (!locked_registry->_metadata_closed && metadata_key != 0) { + locked_registry->_metadata_closed = true; + } + + /* Release lock before communication, see comments in close_metadata(). */ + locked_registry.reset(); + (void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer); + } } /* In case of per PID, the registry is kept in the session. */ @@ -968,31 +982,31 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, if (ua_sess->handle != -1) { pthread_mutex_lock(&app->sock_lock); - ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle); + auto ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } /* Remove session from application UST object descriptor. */ - iter.iter.node = &ua_sess->ust_objd_node.node; - ret = lttng_ht_del(app->ust_sessions_objd, &iter); + ret = cds_lfht_del(app->ust_sessions_objd->ht, &ua_sess->ust_objd_node.node); LTTNG_ASSERT(!ret); } - pthread_mutex_unlock(&ua_sess->lock); - consumer_output_put(ua_sess->consumer); - call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu); } @@ -1000,46 +1014,47 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, * Delete a traceable application structure from the global list. Never call * this function outside of a call_rcu call. 
*/ -static -void delete_ust_app(struct ust_app *app) +static void delete_ust_app(struct ust_app *app) { int ret, sock; - struct ust_app_session *ua_sess, *tmp_ua_sess; - struct lttng_ht_iter iter; - struct ust_app_event_notifier_rule *event_notifier_rule; bool event_notifier_write_fd_is_open; /* * The session list lock must be held during this function to guarantee * the existence of ua_sess. */ - session_lock_list(); + const auto list_lock = lttng::sessiond::lock_session_list(); /* Delete ust app sessions info */ sock = app->sock; app->sock = -1; /* Wipe sessions */ - cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head, - teardown_node) { - /* Free every object in the session and the session. */ - rcu_read_lock(); - delete_ust_app_session(sock, ua_sess, app); - rcu_read_unlock(); + { + const lttng::urcu::read_lock_guard read_lock; + + for (const auto ua_sess : app->sessions_to_teardown) { + /* Free every object in the session and the session. */ + delete_ust_app_session(sock, ua_sess, app); + } } /* Remove the event notifier rules associated with this app. */ - rcu_read_lock(); - cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht, - &iter.iter, event_notifier_rule, node.node) { - ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter); - LTTNG_ASSERT(!ret); + { + const lttng::urcu::read_lock_guard read_lock; + + for (auto *event_notifier_rule : + lttng::urcu::lfht_iteration_adapter( + *app->token_to_event_notifier_rule_ht->ht)) { + ret = cds_lfht_del(app->token_to_event_notifier_rule_ht->ht, + &event_notifier_rule->node.node); + LTTNG_ASSERT(!ret); - delete_ust_app_event_notifier_rule( - app->sock, event_notifier_rule, app); + delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app); + } } - rcu_read_unlock(); - lttng_ht_destroy(app->sessions); lttng_ht_destroy(app->ust_sessions_objd); lttng_ht_destroy(app->ust_objd); @@ -1053,12 +1068,11 @@ void delete_ust_app(struct ust_app *app) enum lttng_error_code ret_code; enum event_notifier_error_accounting_status status; - const int event_notifier_read_fd = lttng_pipe_get_readfd( - app->event_notifier_group.event_pipe); + const int event_notifier_read_fd = + lttng_pipe_get_readfd(app->event_notifier_group.event_pipe); ret_code = notification_thread_command_remove_tracer_event_source( - the_notification_thread_handle, - event_notifier_read_fd); + the_notification_thread_handle, event_notifier_read_fd); if (ret_code != LTTNG_OK) { ERR("Failed to remove application tracer event source from notification thread"); } @@ -1072,8 +1086,8 @@ void delete_ust_app(struct ust_app *app) free(app->event_notifier_group.object); } - event_notifier_write_fd_is_open = lttng_pipe_is_write_open( - app->event_notifier_group.event_pipe); + event_notifier_write_fd_is_open = + lttng_pipe_is_write_open(app->event_notifier_group.event_pipe); lttng_pipe_destroy(app->event_notifier_group.event_pipe); /* * Release the file descriptors reserved for the event notifier pipe. @@ -1103,20 +1117,17 @@ void delete_ust_app(struct ust_app *app) lttng_fd_put(LTTNG_FD_APPS, 1); DBG2("UST app pid %d deleted", app->pid); - free(app); - session_unlock_list(); + delete app; } /* * URCU intermediate call to delete an UST app. 
*/ -static -void delete_ust_app_rcu(struct rcu_head *head) +static void delete_ust_app_rcu(struct rcu_head *head) { struct lttng_ht_node_ulong *node = - caa_container_of(head, struct lttng_ht_node_ulong, head); - struct ust_app *app = - caa_container_of(node, struct ust_app, pid_n); + lttng::utils::container_of(head, <tng_ht_node_ulong::head); + struct ust_app *app = lttng::utils::container_of(node, &ust_app::pid_n); DBG3("Call RCU deleting app PID %d", app->pid); delete_ust_app(app); @@ -1128,8 +1139,7 @@ void delete_ust_app_rcu(struct rcu_head *head) * * The session list lock must be held by the caller. */ -static void destroy_app_session(struct ust_app *app, - struct ust_app_session *ua_sess) +static void destroy_app_session(struct ust_app *app, struct ust_app_session *ua_sess) { int ret; struct lttng_ht_iter iter; @@ -1154,14 +1164,13 @@ end: /* * Alloc new UST app session. */ -static -struct ust_app_session *alloc_ust_app_session(void) +static struct ust_app_session *alloc_ust_app_session() { struct ust_app_session *ua_sess; /* Init most of the default value by allocating and zeroing */ - ua_sess = zmalloc(); - if (ua_sess == NULL) { + ua_sess = new ust_app_session; + if (ua_sess == nullptr) { PERROR("malloc"); goto error_free; } @@ -1169,27 +1178,26 @@ struct ust_app_session *alloc_ust_app_session(void) ua_sess->handle = -1; ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING); ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA; - pthread_mutex_init(&ua_sess->lock, NULL); return ua_sess; error_free: - return NULL; + return nullptr; } /* * Alloc new UST app channel. */ -static -struct ust_app_channel *alloc_ust_app_channel(const char *name, - struct ust_app_session *ua_sess, - struct lttng_ust_abi_channel_attr *attr) +static struct ust_app_channel * +alloc_ust_app_channel(const char *name, + const ust_app_session::locked_weak_ref& ua_sess, + struct lttng_ust_abi_channel_attr *attr) { struct ust_app_channel *ua_chan; /* Init most of the default value by allocating and zeroing */ ua_chan = zmalloc(); - if (ua_chan == NULL) { + if (ua_chan == nullptr) { PERROR("malloc"); goto error; } @@ -1198,9 +1206,9 @@ struct ust_app_channel *alloc_ust_app_channel(const char *name, strncpy(ua_chan->name, name, sizeof(ua_chan->name)); ua_chan->name[sizeof(ua_chan->name) - 1] = '\0'; - ua_chan->enabled = 1; + ua_chan->enabled = true; ua_chan->handle = -1; - ua_chan->session = ua_sess; + ua_chan->session = &ua_sess.get(); ua_chan->key = get_next_channel_key(); ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING); @@ -1228,7 +1236,7 @@ struct ust_app_channel *alloc_ust_app_channel(const char *name, return ua_chan; error: - return NULL; + return nullptr; } /* @@ -1236,12 +1244,12 @@ error: * * Return newly allocated stream pointer or NULL on error. */ -struct ust_app_stream *ust_app_alloc_stream(void) +struct ust_app_stream *ust_app_alloc_stream() { - struct ust_app_stream *stream = NULL; + struct ust_app_stream *stream = nullptr; stream = zmalloc(); - if (stream == NULL) { + if (stream == nullptr) { PERROR("zmalloc ust app stream"); goto error; } @@ -1256,20 +1264,18 @@ error: /* * Alloc new UST app event. 
*/ -static -struct ust_app_event *alloc_ust_app_event(char *name, - struct lttng_ust_abi_event *attr) +static struct ust_app_event *alloc_ust_app_event(char *name, struct lttng_ust_abi_event *attr) { struct ust_app_event *ua_event; /* Init most of the default value by allocating and zeroing */ ua_event = zmalloc(); - if (ua_event == NULL) { + if (ua_event == nullptr) { PERROR("Failed to allocate ust_app_event structure"); goto error; } - ua_event->enabled = 1; + ua_event->enabled = true; strncpy(ua_event->name, name, sizeof(ua_event->name)); ua_event->name[sizeof(ua_event->name) - 1] = '\0'; lttng_ht_node_init_str(&ua_event->node, ua_event->name); @@ -1284,52 +1290,49 @@ struct ust_app_event *alloc_ust_app_event(char *name, return ua_event; error: - return NULL; + return nullptr; } /* * Allocate a new UST app event notifier rule. */ -static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule( - struct lttng_trigger *trigger) +static struct ust_app_event_notifier_rule * +alloc_ust_app_event_notifier_rule(struct lttng_trigger *trigger) { - enum lttng_event_rule_generate_exclusions_status - generate_exclusion_status; + enum lttng_event_rule_generate_exclusions_status generate_exclusion_status; enum lttng_condition_status cond_status; struct ust_app_event_notifier_rule *ua_event_notifier_rule; - struct lttng_condition *condition = NULL; - const struct lttng_event_rule *event_rule = NULL; + struct lttng_condition *condition = nullptr; + const struct lttng_event_rule *event_rule = nullptr; ua_event_notifier_rule = zmalloc(); - if (ua_event_notifier_rule == NULL) { + if (ua_event_notifier_rule == nullptr) { PERROR("Failed to allocate ust_app_event_notifier_rule structure"); goto error; } - ua_event_notifier_rule->enabled = 1; + ua_event_notifier_rule->enabled = true; ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger); - lttng_ht_node_init_u64(&ua_event_notifier_rule->node, - ua_event_notifier_rule->token); + lttng_ht_node_init_u64(&ua_event_notifier_rule->node, ua_event_notifier_rule->token); condition = lttng_trigger_get_condition(trigger); LTTNG_ASSERT(condition); LTTNG_ASSERT(lttng_condition_get_type(condition) == - LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES); + LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES); - cond_status = lttng_condition_event_rule_matches_get_rule( - condition, &event_rule); + cond_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule); LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK); LTTNG_ASSERT(event_rule); ua_event_notifier_rule->error_counter_index = - lttng_condition_event_rule_matches_get_error_counter_index(condition); + lttng_condition_event_rule_matches_get_error_counter_index(condition); /* Acquire the event notifier's reference to the trigger. 
*/ lttng_trigger_get(trigger); ua_event_notifier_rule->trigger = trigger; ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule); generate_exclusion_status = lttng_event_rule_generate_exclusions( - event_rule, &ua_event_notifier_rule->exclusion); + event_rule, &ua_event_notifier_rule->exclusion); switch (generate_exclusion_status) { case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK: case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE: @@ -1341,7 +1344,7 @@ static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule( } DBG3("UST app event notifier rule allocated: token = %" PRIu64, - ua_event_notifier_rule->token); + ua_event_notifier_rule->token); return ua_event_notifier_rule; @@ -1349,19 +1352,18 @@ error_put_trigger: lttng_trigger_put(trigger); error: free(ua_event_notifier_rule); - return NULL; + return nullptr; } /* * Alloc new UST app context. */ -static -struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx) +static struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx) { struct ust_app_ctx *ua_ctx; ua_ctx = zmalloc(); - if (ua_ctx == NULL) { + if (ua_ctx == nullptr) { goto error; } @@ -1370,7 +1372,7 @@ struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx) if (uctx) { memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx)); if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) { - char *provider_name = NULL, *ctx_name = NULL; + char *provider_name = nullptr, *ctx_name = nullptr; provider_name = strdup(uctx->u.app_ctx.provider_name); ctx_name = strdup(uctx->u.app_ctx.ctx_name); @@ -1389,7 +1391,7 @@ struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx) return ua_ctx; error: free(ua_ctx); - return NULL; + return nullptr; } /* @@ -1397,20 +1399,21 @@ error: * * Return allocated filter or NULL on error. */ -static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode( - const struct lttng_bytecode *orig_f) +static struct lttng_ust_abi_filter_bytecode * +create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f) { - struct lttng_ust_abi_filter_bytecode *filter = NULL; + struct lttng_ust_abi_filter_bytecode *filter = nullptr; /* Copy filter bytecode. */ filter = zmalloc(sizeof(*filter) + orig_f->len); if (!filter) { - PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len); + PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 + " bytes", + orig_f->len); goto error; } - LTTNG_ASSERT(sizeof(struct lttng_bytecode) == - sizeof(struct lttng_ust_abi_filter_bytecode)); + LTTNG_ASSERT(sizeof(struct lttng_bytecode) == sizeof(struct lttng_ust_abi_filter_bytecode)); memcpy(filter, orig_f, sizeof(*filter) + orig_f->len); error: return filter; @@ -1424,17 +1427,19 @@ error: static struct lttng_ust_abi_capture_bytecode * create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f) { - struct lttng_ust_abi_capture_bytecode *capture = NULL; + struct lttng_ust_abi_capture_bytecode *capture = nullptr; /* Copy capture bytecode. 
*/ capture = zmalloc(sizeof(*capture) + orig_f->len); if (!capture) { - PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len); + PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 + " bytes", + orig_f->len); goto error; } LTTNG_ASSERT(sizeof(struct lttng_bytecode) == - sizeof(struct lttng_ust_abi_capture_bytecode)); + sizeof(struct lttng_ust_abi_capture_bytecode)); memcpy(capture, orig_f, sizeof(*capture) + orig_f->len); error: return capture; @@ -1451,17 +1456,17 @@ struct ust_app *ust_app_find_by_sock(int sock) ASSERT_RCU_READ_LOCKED(); - lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter); - node = lttng_ht_iter_get_node_ulong(&iter); - if (node == NULL) { + lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &iter); + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { DBG2("UST app find by sock %d not found", sock); goto error; } - return caa_container_of(node, struct ust_app, sock_n); + return lttng::utils::container_of(node, &ust_app::sock_n); error: - return NULL; + return nullptr; } /* @@ -1475,18 +1480,17 @@ static struct ust_app *find_app_by_notify_sock(int sock) ASSERT_RCU_READ_LOCKED(); - lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock), - &iter); - node = lttng_ht_iter_get_node_ulong(&iter); - if (node == NULL) { + lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *) ((unsigned long) sock), &iter); + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { DBG2("UST app find by notify sock %d not found", sock); goto error; } - return caa_container_of(node, struct ust_app, notify_sock_n); + return lttng::utils::container_of(node, &ust_app::notify_sock_n); error: - return NULL; + return nullptr; } /* @@ -1496,13 +1500,15 @@ error: * Return an ust_app_event object or NULL on error. */ static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht, - const char *name, const struct lttng_bytecode *filter, - int loglevel_value, - const struct lttng_event_exclusion *exclusion) + const char *name, + const struct lttng_bytecode *filter, + lttng_ust_abi_loglevel_type loglevel_type, + int loglevel_value, + const struct lttng_event_exclusion *exclusion) { struct lttng_ht_iter iter; struct lttng_ht_node_str *node; - struct ust_app_event *event = NULL; + struct ust_app_event *event = nullptr; struct ust_app_ht_key key; LTTNG_ASSERT(name); @@ -1511,19 +1517,23 @@ static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht, /* Setup key for event lookup. */ key.name = name; key.filter = filter; - key.loglevel_type = (lttng_ust_abi_loglevel_type) loglevel_value; + key.loglevel_type = loglevel_type; + key.loglevel_value = loglevel_value; /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */ key.exclusion = exclusion; /* Lookup using the event name as hash and a custom match fct. */ - cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed), - ht_match_ust_app_event, &key, &iter.iter); - node = lttng_ht_iter_get_node_str(&iter); - if (node == NULL) { + cds_lfht_lookup(ht->ht, + ht->hash_fct((void *) name, lttng_ht_seed), + ht_match_ust_app_event, + &key, + &iter.iter); + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { goto end; } - event = caa_container_of(node, struct ust_app_event, node); + event = lttng::utils::container_of(node, &ust_app_event::node); end: return event; @@ -1535,26 +1545,24 @@ end: * Must be called with the RCU read lock held. 
* Return an ust_app_event_notifier_rule object or NULL on error. */ -static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule( - struct lttng_ht *ht, uint64_t token) +static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(struct lttng_ht *ht, + uint64_t token) { struct lttng_ht_iter iter; struct lttng_ht_node_u64 *node; - struct ust_app_event_notifier_rule *event_notifier_rule = NULL; + struct ust_app_event_notifier_rule *event_notifier_rule = nullptr; LTTNG_ASSERT(ht); ASSERT_RCU_READ_LOCKED(); lttng_ht_lookup(ht, &token, &iter); - node = lttng_ht_iter_get_node_u64(&iter); - if (node == NULL) { - DBG2("UST app event notifier rule token not found: token = %" PRIu64, - token); + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { + DBG2("UST app event notifier rule token not found: token = %" PRIu64, token); goto end; } - event_notifier_rule = caa_container_of( - node, struct ust_app_event_notifier_rule, node); + event_notifier_rule = lttng::utils::container_of(node, &ust_app_event_notifier_rule::node); end: return event_notifier_rule; } @@ -1564,30 +1572,33 @@ end: * * Called with UST app session lock held. */ -static -int create_ust_channel_context(struct ust_app_channel *ua_chan, - struct ust_app_ctx *ua_ctx, struct ust_app *app) +static int create_ust_channel_context(struct ust_app_channel *ua_chan, + struct ust_app_ctx *ua_ctx, + struct ust_app *app) { int ret; health_code_update(); pthread_mutex_lock(&app->sock_lock); - ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx, - ua_chan->obj, &ua_ctx->obj); + ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx, ua_chan->obj, &ua_ctx->obj); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } goto error; } @@ -1595,7 +1606,8 @@ int create_ust_channel_context(struct ust_app_channel *ua_chan, ua_ctx->handle = ua_ctx->obj->handle; DBG2("UST app context handle %d created successfully for channel %s", - ua_ctx->handle, ua_chan->name); + ua_ctx->handle, + ua_chan->name); error: health_code_update(); @@ -1606,11 +1618,11 @@ error: * Set the filter on the tracer. */ static int set_ust_object_filter(struct ust_app *app, - const struct lttng_bytecode *bytecode, - struct lttng_ust_abi_object_data *ust_object) + const struct lttng_bytecode *bytecode, + struct lttng_ust_abi_object_data *ust_object) { int ret; - struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL; + struct lttng_ust_abi_filter_bytecode *ust_bytecode = nullptr; health_code_update(); @@ -1620,21 +1632,25 @@ static int set_ust_object_filter(struct ust_app *app, goto error; } pthread_mutex_lock(&app->sock_lock); - ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode, - ust_object); + ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode, ust_object); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app set filter failed. 
Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p", - ret, app->pid, app->sock, ust_object); + ret, + app->pid, + app->sock, + ust_object); } goto error; } @@ -1653,12 +1669,12 @@ error: * the captured payloads. */ static int set_ust_capture(struct ust_app *app, - const struct lttng_bytecode *bytecode, - unsigned int capture_seqnum, - struct lttng_ust_abi_object_data *ust_object) + const struct lttng_bytecode *bytecode, + unsigned int capture_seqnum, + struct lttng_ust_abi_object_data *ust_object) { int ret; - struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL; + struct lttng_ust_abi_capture_bytecode *ust_bytecode = nullptr; health_code_update(); @@ -1674,22 +1690,24 @@ static int set_ust_capture(struct ust_app *app, ust_bytecode->seqnum = capture_seqnum; pthread_mutex_lock(&app->sock_lock); - ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode, - ust_object); + ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode, ust_object); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d", - ret, app->pid, - app->sock); + ret, + app->pid, + app->sock); } goto error; @@ -1703,12 +1721,11 @@ error: return ret; } -static -struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion( - const struct lttng_event_exclusion *exclusion) +static struct lttng_ust_abi_event_exclusion * +create_ust_exclusion_from_exclusion(const struct lttng_event_exclusion *exclusion) { - struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL; - size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) + + struct lttng_ust_abi_event_exclusion *ust_exclusion = nullptr; + const size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) + LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count; ust_exclusion = zmalloc(exclusion_alloc_size); @@ -1718,7 +1735,7 @@ struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion( } LTTNG_ASSERT(sizeof(struct lttng_event_exclusion) == - sizeof(struct lttng_ust_abi_event_exclusion)); + sizeof(struct lttng_ust_abi_event_exclusion)); memcpy(ust_exclusion, exclusion, exclusion_alloc_size); end: return ust_exclusion; @@ -1728,18 +1745,17 @@ end: * Set event exclusions on the tracer. 
*/ static int set_ust_object_exclusions(struct ust_app *app, - const struct lttng_event_exclusion *exclusions, - struct lttng_ust_abi_object_data *ust_object) + const struct lttng_event_exclusion *exclusions, + struct lttng_ust_abi_object_data *ust_object) { int ret; - struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL; + struct lttng_ust_abi_event_exclusion *ust_exclusions = nullptr; LTTNG_ASSERT(exclusions && exclusions->count > 0); health_code_update(); - ust_exclusions = create_ust_exclusion_from_exclusion( - exclusions); + ust_exclusions = create_ust_exclusion_from_exclusion(exclusions); if (!ust_exclusions) { ret = -LTTNG_ERR_NOMEM; goto error; @@ -1751,14 +1767,19 @@ static int set_ust_object_exclusions(struct ust_app *app, if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app event exclusion failed. Communication time out(pid: %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p", - ret, app->pid, app->sock, ust_object); + ret, + app->pid, + app->sock, + ust_object); } goto error; } @@ -1774,8 +1795,7 @@ error: /* * Disable the specified event on to UST tracer for the UST session. */ -static int disable_ust_object(struct ust_app *app, - struct lttng_ust_abi_object_data *object) +static int disable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *object) { int ret; @@ -1788,20 +1808,24 @@ static int disable_ust_object(struct ust_app *app, if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p", - ret, app->pid, app->sock, object); + ret, + app->pid, + app->sock, + object); } goto error; } - DBG2("UST app object %p disabled successfully for app: pid = %d", - object, app->pid); + DBG2("UST app object %p disabled successfully for app: pid = %d", object, app->pid); error: health_code_update(); @@ -1812,7 +1836,8 @@ error: * Disable the specified channel on to UST tracer for the UST session. */ static int disable_ust_channel(struct ust_app *app, - struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan) + const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app_channel *ua_chan) { int ret; @@ -1825,21 +1850,25 @@ static int disable_ust_channel(struct ust_app *app, if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app disable channel failed. 
Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d", - ua_chan->name, ua_sess->handle, ret, - app->pid, app->sock); + ua_chan->name, + ua_sess->handle, + ret, + app->pid, + app->sock); } goto error; } - DBG2("UST app channel %s disabled successfully for app: pid = %d", - ua_chan->name, app->pid); + DBG2("UST app channel %s disabled successfully for app: pid = %d", ua_chan->name, app->pid); error: health_code_update(); @@ -1850,7 +1879,8 @@ error: * Enable the specified channel on to UST tracer for the UST session. */ static int enable_ust_channel(struct ust_app *app, - struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan) + const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app_channel *ua_chan) { int ret; @@ -1863,23 +1893,29 @@ static int enable_ust_channel(struct ust_app *app, if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d", - ua_chan->name, app->pid, app->sock); + ua_chan->name, + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d", - ua_chan->name, app->pid, app->sock); + ua_chan->name, + app->pid, + app->sock); } else { ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d", - ua_chan->name, ua_sess->handle, ret, - app->pid, app->sock); + ua_chan->name, + ua_sess->handle, + ret, + app->pid, + app->sock); } goto error; } - ua_chan->enabled = 1; + ua_chan->enabled = true; - DBG2("UST app channel %s enabled successfully for app: pid = %d", - ua_chan->name, app->pid); + DBG2("UST app channel %s enabled successfully for app: pid = %d", ua_chan->name, app->pid); error: health_code_update(); @@ -1889,8 +1925,7 @@ error: /* * Enable the specified event on to UST tracer for the UST session. */ -static int enable_ust_object( - struct ust_app *app, struct lttng_ust_abi_object_data *ust_object) +static int enable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *ust_object) { int ret; @@ -1903,20 +1938,24 @@ static int enable_ust_object( if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p", - ret, app->pid, app->sock, ust_object); + ret, + app->pid, + app->sock, + ust_object); } goto error; } - DBG2("UST app object %p enabled successfully for app: pid = %d", - ust_object, app->pid); + DBG2("UST app object %p enabled successfully for app: pid = %d", ust_object, app->pid); error: health_code_update(); @@ -1929,10 +1968,10 @@ error: * Return 0 on success. On error, a negative value is returned. 
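Every lttng_ust_ctl call in these hunks interprets a negative return the same way: -EPIPE or -LTTNG_UST_ERR_EXITING means the application exited (debug message, treated as success), -EAGAIN means the command timed out (warning, treated as success), and anything else is a hard error propagated to the caller. The following is only a standalone sketch of that classification; it is a hypothetical helper, not part of lttng-tools, and the lttng-ust error constant is passed in as a parameter so the example stays self-contained.

#include <cerrno>

/* Hypothetical helper (not an lttng-tools symbol): mirrors how the hunks
 * above interpret a negative lttng_ust_ctl_*() return value. */
enum class ustctl_failure { none, app_gone, timed_out, hard_error };

/* 'ust_err_exiting' stands in for the LTTNG_UST_ERR_EXITING constant used in
 * the diff. */
ustctl_failure classify_ustctl_ret(int ret, int ust_err_exiting)
{
	if (ret >= 0) {
		return ustctl_failure::none;
	}

	if (ret == -EPIPE || ret == -ust_err_exiting) {
		/* Application exited: logged at DBG level, treated as success. */
		return ustctl_failure::app_gone;
	}

	if (ret == -EAGAIN) {
		/* Communication timed out: logged as a warning, treated as success. */
		return ustctl_failure::timed_out;
	}

	/* Anything else is reported with ERR and propagated to the caller. */
	return ustctl_failure::hard_error;
}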
*/ static int send_channel_pid_to_ust(struct ust_app *app, - struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan) + struct ust_app_session *ua_sess, + struct ust_app_channel *ua_chan) { int ret; - struct ust_app_stream *stream, *stmp; LTTNG_ASSERT(app); LTTNG_ASSERT(ua_sess); @@ -1940,18 +1979,20 @@ static int send_channel_pid_to_ust(struct ust_app *app, health_code_update(); - DBG("UST app sending channel %s to UST app sock %d", ua_chan->name, - app->sock); + DBG("UST app sending channel %s to UST app sock %d", ua_chan->name, app->sock); /* Send channel to the application. */ ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan); if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { - ret = -ENOTCONN; /* Caused by app exiting. */ + ret = -ENOTCONN; /* Caused by app exiting. */ goto error; } else if (ret == -EAGAIN) { /* Caused by timeout. */ - WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".", - app->pid, ua_chan->name, ua_sess->tracing_id); + WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 + "\".", + app->pid, + ua_chan->name, + ua_sess->tracing_id); /* Treat this the same way as an application that is exiting. */ ret = -ENOTCONN; goto error; @@ -1962,16 +2003,21 @@ static int send_channel_pid_to_ust(struct ust_app *app, health_code_update(); /* Send all streams to application. */ - cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) { + for (auto *stream : + lttng::urcu::list_iteration_adapter( + ua_chan->streams.head)) { ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream); if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = -ENOTCONN; /* Caused by app exiting. */ goto error; } else if (ret == -EAGAIN) { /* Caused by timeout. */ - WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".", - app->pid, stream->name, ua_chan->name, - ua_sess->tracing_id); + WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 + "\".", + app->pid, + stream->name, + ua_chan->name, + ua_sess->tracing_id); /* * Treat this the same way as an application that is * exiting. @@ -1984,8 +2030,6 @@ static int send_channel_pid_to_ust(struct ust_app *app, cds_list_del(&stream->list); delete_ust_app_stream(-1, stream, app); } - /* Flag the channel that it is sent to the application. */ - ua_chan->is_sent = 1; error: health_code_update(); @@ -1997,9 +2041,9 @@ error: * * Should be called with session mutex held. */ -static -int create_ust_event(struct ust_app *app, - struct ust_app_channel *ua_chan, struct ust_app_event *ua_event) +static int create_ust_event(struct ust_app *app, + struct ust_app_channel *ua_chan, + struct ust_app_event *ua_event) { int ret = 0; @@ -2007,22 +2051,25 @@ int create_ust_event(struct ust_app *app, /* Create UST event on tracer */ pthread_mutex_lock(&app->sock_lock); - ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj, - &ua_event->obj); + ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj, &ua_event->obj); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app create event failed. 
Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d", - ua_event->attr.name, ret, app->pid, - app->sock); + ua_event->attr.name, + ret, + app->pid, + app->sock); } goto error; } @@ -2030,7 +2077,9 @@ int create_ust_event(struct ust_app *app, ua_event->handle = ua_event->obj->handle; DBG2("UST app event %s created successfully for pid:%d object = %p", - ua_event->attr.name, app->pid, ua_event->obj); + ua_event->attr.name, + app->pid, + ua_event->obj); health_code_update(); @@ -2083,16 +2132,15 @@ error: return ret; } -static int init_ust_event_notifier_from_event_rule( - const struct lttng_event_rule *rule, - struct lttng_ust_abi_event_notifier *event_notifier) +static int +init_ust_event_notifier_from_event_rule(const struct lttng_event_rule *rule, + struct lttng_ust_abi_event_notifier *event_notifier) { enum lttng_event_rule_status status; enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL; int loglevel = -1, ret = 0; const char *pattern; - memset(event_notifier, 0, sizeof(*event_notifier)); if (lttng_event_rule_targets_agent_domain(rule)) { @@ -2102,15 +2150,14 @@ static int init_ust_event_notifier_from_event_rule( * attached later on. * Set the default values for the agent event. */ - pattern = event_get_default_agent_ust_name( - lttng_event_rule_get_domain_type(rule)); + pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule)); loglevel = 0; ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL; } else { const struct lttng_log_level_rule *log_level_rule; LTTNG_ASSERT(lttng_event_rule_get_type(rule) == - LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT); + LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT); status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern); if (status != LTTNG_EVENT_RULE_STATUS_OK) { @@ -2118,8 +2165,7 @@ static int init_ust_event_notifier_from_event_rule( abort(); } - status = lttng_event_rule_user_tracepoint_get_log_level_rule( - rule, &log_level_rule); + status = lttng_event_rule_user_tracepoint_get_log_level_rule(rule, &log_level_rule); if (status == LTTNG_EVENT_RULE_STATUS_UNSET) { ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL; } else if (status == LTTNG_EVENT_RULE_STATUS_OK) { @@ -2128,13 +2174,13 @@ static int init_ust_event_notifier_from_event_rule( switch (lttng_log_level_rule_get_type(log_level_rule)) { case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY: ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE; - llr_status = lttng_log_level_rule_exactly_get_level( - log_level_rule, &loglevel); + llr_status = lttng_log_level_rule_exactly_get_level(log_level_rule, + &loglevel); break; case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS: ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE; llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level( - log_level_rule, &loglevel); + log_level_rule, &loglevel); break; default: abort(); @@ -2148,11 +2194,10 @@ static int init_ust_event_notifier_from_event_rule( } event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT; - ret = lttng_strncpy(event_notifier->event.name, pattern, - sizeof(event_notifier->event.name)); + ret = lttng_strncpy( + event_notifier->event.name, pattern, sizeof(event_notifier->event.name)); if (ret) { - ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ", - pattern); + ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ", pattern); goto end; } @@ -2167,13 +2212,13 @@ end: * given application. 
*/ static int create_ust_event_notifier(struct ust_app *app, - struct ust_app_event_notifier_rule *ua_event_notifier_rule) + struct ust_app_event_notifier_rule *ua_event_notifier_rule) { int ret = 0; enum lttng_condition_status condition_status; - const struct lttng_condition *condition = NULL; + const struct lttng_condition *condition = nullptr; struct lttng_ust_abi_event_notifier event_notifier; - const struct lttng_event_rule *event_rule = NULL; + const struct lttng_event_rule *event_rule = nullptr; unsigned int capture_bytecode_count = 0, i; enum lttng_condition_status cond_status; enum lttng_event_rule_type event_rule_type; @@ -2181,25 +2226,22 @@ static int create_ust_event_notifier(struct ust_app *app, health_code_update(); LTTNG_ASSERT(app->event_notifier_group.object); - condition = lttng_trigger_get_const_condition( - ua_event_notifier_rule->trigger); + condition = lttng_trigger_get_const_condition(ua_event_notifier_rule->trigger); LTTNG_ASSERT(condition); LTTNG_ASSERT(lttng_condition_get_type(condition) == - LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES); + LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES); - condition_status = lttng_condition_event_rule_matches_get_rule( - condition, &event_rule); + condition_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule); LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK); LTTNG_ASSERT(event_rule); event_rule_type = lttng_event_rule_get_type(event_rule); LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT || - event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING || - event_rule_type == - LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING || - event_rule_type == - LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING); + event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING || + event_rule_type == LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING || + event_rule_type == LTTNG_EVENT_RULE_TYPE_LOG4J2_LOGGING || + event_rule_type == LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING); init_ust_event_notifier_from_event_rule(event_rule, &event_notifier); event_notifier.event.token = ua_event_notifier_rule->token; @@ -2207,23 +2249,28 @@ static int create_ust_event_notifier(struct ust_app *app, /* Create UST event notifier against the tracer. */ pthread_mutex_lock(&app->sock_lock); - ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier, - app->event_notifier_group.object, - &ua_event_notifier_rule->obj); + ret = lttng_ust_ctl_create_event_notifier(app->sock, + &event_notifier, + app->event_notifier_group.object, + &ua_event_notifier_rule->obj); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { ret = 0; DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { ret = 0; WARN("UST app create event notifier failed. 
Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d", - event_notifier.event.name, ret, app->pid, - app->sock); + event_notifier.event.name, + ret, + app->pid, + app->sock); } goto error; } @@ -2231,15 +2278,17 @@ static int create_ust_event_notifier(struct ust_app *app, ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle; DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d, object = %p", - event_notifier.event.name, app->name, app->pid, - ua_event_notifier_rule->obj); + event_notifier.event.name, + app->name, + app->pid, + ua_event_notifier_rule->obj); health_code_update(); /* Set filter if one is present. */ if (ua_event_notifier_rule->filter) { - ret = set_ust_object_filter(app, ua_event_notifier_rule->filter, - ua_event_notifier_rule->obj); + ret = set_ust_object_filter( + app, ua_event_notifier_rule->filter, ua_event_notifier_rule->obj); if (ret < 0) { goto error; } @@ -2247,9 +2296,8 @@ static int create_ust_event_notifier(struct ust_app *app, /* Set exclusions for the event. */ if (ua_event_notifier_rule->exclusion) { - ret = set_ust_object_exclusions(app, - ua_event_notifier_rule->exclusion, - ua_event_notifier_rule->obj); + ret = set_ust_object_exclusions( + app, ua_event_notifier_rule->exclusion, ua_event_notifier_rule->obj); if (ret < 0) { goto error; } @@ -2257,16 +2305,15 @@ static int create_ust_event_notifier(struct ust_app *app, /* Set the capture bytecodes. */ cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count( - condition, &capture_bytecode_count); + condition, &capture_bytecode_count); LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK); for (i = 0; i < capture_bytecode_count; i++) { const struct lttng_bytecode *capture_bytecode = - lttng_condition_event_rule_matches_get_capture_bytecode_at_index( - condition, i); + lttng_condition_event_rule_matches_get_capture_bytecode_at_index(condition, + i); - ret = set_ust_capture(app, capture_bytecode, i, - ua_event_notifier_rule->obj); + ret = set_ust_capture(app, capture_bytecode, i, ua_event_notifier_rule->obj); if (ret < 0) { goto error; } @@ -2308,8 +2355,7 @@ error: /* * Copy data between an UST app event and a LTT event. */ -static void shadow_copy_event(struct ust_app_event *ua_event, - struct ltt_ust_event *uevent) +static void shadow_copy_event(struct ust_app_event *ua_event, struct ltt_ust_event *uevent) { size_t exclusion_alloc_size; @@ -2330,13 +2376,12 @@ static void shadow_copy_event(struct ust_app_event *ua_event, /* Copy exclusion data */ if (uevent->exclusion) { exclusion_alloc_size = sizeof(struct lttng_event_exclusion) + - LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count; + LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count; ua_event->exclusion = zmalloc(exclusion_alloc_size); - if (ua_event->exclusion == NULL) { + if (ua_event->exclusion == nullptr) { PERROR("malloc"); } else { - memcpy(ua_event->exclusion, uevent->exclusion, - exclusion_alloc_size); + memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size); } } } @@ -2344,8 +2389,7 @@ static void shadow_copy_event(struct ust_app_event *ua_event, /* * Copy data between an UST app channel and a LTT channel. 
*/ -static void shadow_copy_channel(struct ust_app_channel *ua_chan, - struct ltt_ust_channel *uchan) +static void shadow_copy_channel(struct ust_app_channel *ua_chan, struct ltt_ust_channel *uchan) { DBG2("UST app shadow copy of channel %s started", ua_chan->name); @@ -2380,7 +2424,8 @@ static void shadow_copy_channel(struct ust_app_channel *ua_chan, * Copy data between a UST app session and a regular LTT session. */ static void shadow_copy_session(struct ust_app_session *ua_sess, - struct ltt_ust_session *usess, struct ust_app *app) + struct ltt_ust_session *usess, + struct ust_app *app) { struct tm *timeinfo; char datetime[16]; @@ -2399,7 +2444,7 @@ static void shadow_copy_session(struct ust_app_session *ua_sess, LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid); LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid); ua_sess->buffer_type = usess->buffer_type; - ua_sess->bits_per_long = app->bits_per_long; + ua_sess->bits_per_long = app->abi.bits_per_long; /* There is only one consumer object per session possible. */ consumer_output_get(usess->consumer); @@ -2407,20 +2452,23 @@ static void shadow_copy_session(struct ust_app_session *ua_sess, ua_sess->output_traces = usess->output_traces; ua_sess->live_timer_interval = usess->live_timer_interval; - copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, - &usess->metadata_attr); + copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &usess->metadata_attr); switch (ua_sess->buffer_type) { case LTTNG_BUFFER_PER_PID: - ret = snprintf(ua_sess->path, sizeof(ua_sess->path), - DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid, - datetime); + ret = snprintf(ua_sess->path, + sizeof(ua_sess->path), + DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", + app->name, + app->pid, + datetime); break; case LTTNG_BUFFER_PER_UID: - ret = snprintf(ua_sess->path, sizeof(ua_sess->path), - DEFAULT_UST_TRACE_UID_PATH, - lttng_credentials_get_uid(&ua_sess->real_credentials), - app->bits_per_long); + ret = snprintf(ua_sess->path, + sizeof(ua_sess->path), + DEFAULT_UST_TRACE_UID_PATH, + lttng_credentials_get_uid(&ua_sess->real_credentials), + app->abi.bits_per_long); break; default: abort(); @@ -2432,23 +2480,26 @@ static void shadow_copy_session(struct ust_app_session *ua_sess, goto error; } - strncpy(ua_sess->root_shm_path, usess->root_shm_path, - sizeof(ua_sess->root_shm_path)); + strncpy(ua_sess->root_shm_path, usess->root_shm_path, sizeof(ua_sess->root_shm_path)); ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0'; - strncpy(ua_sess->shm_path, usess->shm_path, - sizeof(ua_sess->shm_path)); + strncpy(ua_sess->shm_path, usess->shm_path, sizeof(ua_sess->shm_path)); ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0'; if (ua_sess->shm_path[0]) { switch (ua_sess->buffer_type) { case LTTNG_BUFFER_PER_PID: - ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path), - "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", - app->name, app->pid, datetime); + ret = snprintf(tmp_shm_path, + sizeof(tmp_shm_path), + "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", + app->name, + app->pid, + datetime); break; case LTTNG_BUFFER_PER_UID: - ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path), - "/" DEFAULT_UST_TRACE_UID_PATH, - app->uid, app->bits_per_long); + ret = snprintf(tmp_shm_path, + sizeof(tmp_shm_path), + "/" DEFAULT_UST_TRACE_UID_PATH, + app->uid, + app->abi.bits_per_long); break; default: abort(); @@ -2459,7 +2510,8 @@ static void shadow_copy_session(struct ust_app_session *ua_sess, abort(); goto error; } - strncat(ua_sess->shm_path, 
tmp_shm_path, + strncat(ua_sess->shm_path, + tmp_shm_path, sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1); ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0'; } @@ -2472,9 +2524,8 @@ error: /* * Lookup sesison wrapper. */ -static -void __lookup_session_by_app(const struct ltt_ust_session *usess, - struct ust_app *app, struct lttng_ht_iter *iter) +static void +__lookup_session_by_app(const ltt_ust_session *usess, const ust_app *app, lttng_ht_iter *iter) { /* Get right UST app session from app */ lttng_ht_lookup(app->sessions, &usess->id, iter); @@ -2484,22 +2535,22 @@ void __lookup_session_by_app(const struct ltt_ust_session *usess, * Return ust app session from the app session hashtable using the UST session * id. */ -static struct ust_app_session *lookup_session_by_app( - const struct ltt_ust_session *usess, struct ust_app *app) +ust_app_session *ust_app_lookup_app_session(const struct ltt_ust_session *usess, + const struct ust_app *app) { struct lttng_ht_iter iter; struct lttng_ht_node_u64 *node; __lookup_session_by_app(usess, app, &iter); - node = lttng_ht_iter_get_node_u64(&iter); - if (node == NULL) { + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { goto error; } - return caa_container_of(node, struct ust_app_session, node); + return lttng::utils::container_of(node, &ust_app_session::node); error: - return NULL; + return nullptr; } /* @@ -2510,7 +2561,8 @@ error: * Return 0 on success or else a negative value. */ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess, - struct ust_app *app, struct buffer_reg_pid **regp) + struct ust_app *app, + struct buffer_reg_pid **regp) { int ret = 0; struct buffer_reg_pid *reg_pid; @@ -2518,7 +2570,7 @@ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess, LTTNG_ASSERT(ua_sess); LTTNG_ASSERT(app); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; reg_pid = buffer_reg_pid_find(ua_sess->id); if (!reg_pid) { @@ -2526,8 +2578,8 @@ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess, * This is the create channel path meaning that if there is NO * registry available, we have to create one for this session. */ - ret = buffer_reg_pid_create(ua_sess->id, ®_pid, - ua_sess->root_shm_path, ua_sess->shm_path); + ret = buffer_reg_pid_create( + ua_sess->id, ®_pid, ua_sess->root_shm_path, ua_sess->shm_path); if (ret < 0) { goto error; } @@ -2536,15 +2588,16 @@ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess, } /* Initialize registry. 
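The path-building code in shadow_copy_session() above composes the per-PID and per-UID trace paths with snprintf(). Since snprintf() returns the length it would have written, a caller building such bounded paths typically rejects both a negative return (formatting error) and a return greater than or equal to the buffer size (truncation). The sketch below is a generic, standalone illustration of that check; the function and its parameters are invented for the example and are not lttng-tools code.

#include <cstddef>
#include <cstdio>

/* Illustration only: validate an snprintf() return value when composing a
 * bounded path. */
int build_trace_path(char *path, std::size_t path_size, const char *name, int pid, const char *datetime)
{
	const int ret = std::snprintf(path, path_size, "%s-%d-%s", name, pid, datetime);

	if (ret < 0 || static_cast<std::size_t>(ret) >= path_size) {
		/* Formatting error or truncated path. */
		return -1;
	}

	return 0;
}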
*/ - reg_pid->registry->reg.ust = ust_registry_session_per_pid_create(app, - app->bits_per_long, app->uint8_t_alignment, - app->uint16_t_alignment, app->uint32_t_alignment, - app->uint64_t_alignment, app->long_alignment, - app->byte_order, app->version.major, app->version.minor, - reg_pid->root_shm_path, reg_pid->shm_path, - lttng_credentials_get_uid(&ua_sess->effective_credentials), - lttng_credentials_get_gid(&ua_sess->effective_credentials), - ua_sess->tracing_id); + reg_pid->registry->reg.ust = ust_registry_session_per_pid_create( + app, + app->abi, + app->version.major, + app->version.minor, + reg_pid->root_shm_path, + reg_pid->shm_path, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), + ua_sess->tracing_id); if (!reg_pid->registry->reg.ust) { /* * reg_pid->registry->reg.ust is NULL upon error, so we need to @@ -2565,7 +2618,6 @@ end: *regp = reg_pid; } error: - rcu_read_unlock(); return ret; } @@ -2577,8 +2629,9 @@ error: * Return 0 on success or else a negative value. */ static int setup_buffer_reg_uid(struct ltt_ust_session *usess, - struct ust_app_session *ua_sess, - struct ust_app *app, struct buffer_reg_uid **regp) + struct ust_app_session *ua_sess, + struct ust_app *app, + struct buffer_reg_uid **regp) { int ret = 0; struct buffer_reg_uid *reg_uid; @@ -2586,17 +2639,21 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess, LTTNG_ASSERT(usess); LTTNG_ASSERT(app); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; - reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid); + reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid); if (!reg_uid) { /* * This is the create channel path meaning that if there is NO * registry available, we have to create one for this session. */ - ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid, - LTTNG_DOMAIN_UST, ®_uid, - ua_sess->root_shm_path, ua_sess->shm_path); + ret = buffer_reg_uid_create(usess->id, + app->abi.bits_per_long, + app->uid, + LTTNG_DOMAIN_UST, + ®_uid, + ua_sess->root_shm_path, + ua_sess->shm_path); if (ret < 0) { goto error; } @@ -2605,14 +2662,15 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess, } /* Initialize registry. */ - reg_uid->registry->reg.ust = ust_registry_session_per_uid_create( - app->bits_per_long, app->uint8_t_alignment, - app->uint16_t_alignment, app->uint32_t_alignment, - app->uint64_t_alignment, app->long_alignment, - app->byte_order, app->version.major, - app->version.minor, reg_uid->root_shm_path, - reg_uid->shm_path, usess->uid, usess->gid, - ua_sess->tracing_id, app->uid); + reg_uid->registry->reg.ust = ust_registry_session_per_uid_create(app->abi, + app->version.major, + app->version.minor, + reg_uid->root_shm_path, + reg_uid->shm_path, + usess->uid, + usess->gid, + ua_sess->tracing_id, + app->uid); if (!reg_uid->registry->reg.ust) { /* * reg_uid->registry->reg.ust is NULL upon error, so we need to @@ -2620,7 +2678,7 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess, * that if the buffer registry can be found, its ust registry is * non-NULL. */ - buffer_reg_uid_destroy(reg_uid, NULL); + buffer_reg_uid_destroy(reg_uid, nullptr); goto error; } @@ -2635,7 +2693,6 @@ end: *regp = reg_uid; } error: - rcu_read_unlock(); return ret; } @@ -2651,8 +2708,9 @@ error: * -ENOTCONN which is the default code if the lttng_ust_ctl_create_session fails. 
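setup_buffer_reg_pid() and setup_buffer_reg_uid() above drop their explicit rcu_read_lock()/rcu_read_unlock() pairs in favour of a const lttng::urcu::read_lock_guard held for the scope of the function, which is why the unlock calls disappear from the error paths. The guard type itself is not shown in this diff; the code below is only a generic sketch of the idiom, written for a pthread mutex rather than the RCU read-side lock, and is not the lttng-tools implementation.

#include <pthread.h>

/* Generic scope-bound lock guard, for illustration only.  The destructor
 * releases the lock on every exit path, including early returns and gotos
 * that leave the scope. */
class scoped_mutex_lock {
public:
	explicit scoped_mutex_lock(pthread_mutex_t& mutex) : _mutex(mutex)
	{
		pthread_mutex_lock(&_mutex);
	}

	~scoped_mutex_lock()
	{
		pthread_mutex_unlock(&_mutex);
	}

	scoped_mutex_lock(const scoped_mutex_lock&) = delete;
	scoped_mutex_lock& operator=(const scoped_mutex_lock&) = delete;

private:
	pthread_mutex_t& _mutex;
};

With such a guard declared at the top of a function, no cleanup label needs to unlock by hand, which matches the simplification visible in the error paths above.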
*/ static int find_or_create_ust_app_session(struct ltt_ust_session *usess, - struct ust_app *app, struct ust_app_session **ua_sess_ptr, - int *is_created) + struct ust_app *app, + struct ust_app_session **ua_sess_ptr, + int *is_created) { int ret, created = 0; struct ust_app_session *ua_sess; @@ -2663,12 +2721,13 @@ static int find_or_create_ust_app_session(struct ltt_ust_session *usess, health_code_update(); - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it", - app->pid, usess->id); + app->pid, + usess->id); ua_sess = alloc_ust_app_session(); - if (ua_sess == NULL) { + if (ua_sess == nullptr) { /* Only malloc can failed so something is really wrong */ ret = -ENOMEM; goto error; @@ -2680,7 +2739,7 @@ static int find_or_create_ust_app_session(struct ltt_ust_session *usess, switch (usess->buffer_type) { case LTTNG_BUFFER_PER_PID: /* Init local registry. */ - ret = setup_buffer_reg_pid(ua_sess, app, NULL); + ret = setup_buffer_reg_pid(ua_sess, app, nullptr); if (ret < 0) { delete_ust_app_session(-1, ua_sess, app); goto error; @@ -2688,7 +2747,7 @@ static int find_or_create_ust_app_session(struct ltt_ust_session *usess, break; case LTTNG_BUFFER_PER_UID: /* Look for a global registry. If none exists, create one. */ - ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL); + ret = setup_buffer_reg_uid(usess, ua_sess, app, nullptr); if (ret < 0) { delete_ust_app_session(-1, ua_sess, app); goto error; @@ -2709,15 +2768,19 @@ static int find_or_create_ust_app_session(struct ltt_ust_session *usess, if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); ret = 0; } else if (ret == -EAGAIN) { DBG("UST app creating session failed. 
Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); ret = 0; } else { ERR("UST app creating session failed with ret %d: pid = %d, sock =%d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } delete_ust_app_session(-1, ua_sess, app); if (ret != -ENOMEM) { @@ -2733,12 +2796,10 @@ static int find_or_create_ust_app_session(struct ltt_ust_session *usess, ua_sess->handle = ret; /* Add ust app session to app's HT */ - lttng_ht_node_init_u64(&ua_sess->node, - ua_sess->tracing_id); + lttng_ht_node_init_u64(&ua_sess->node, ua_sess->tracing_id); lttng_ht_add_unique_u64(app->sessions, &ua_sess->node); lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle); - lttng_ht_add_unique_ulong(app->ust_sessions_objd, - &ua_sess->ust_objd_node); + lttng_ht_add_unique_ulong(app->ust_sessions_objd, &ua_sess->ust_objd_node); DBG2("UST app session created successfully with handle %d", ret); } @@ -2764,33 +2825,28 @@ error: */ static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key) { - struct ust_app_ctx *ctx; - const struct lttng_ust_context_attr *key; - LTTNG_ASSERT(node); LTTNG_ASSERT(_key); - ctx = caa_container_of(node, struct ust_app_ctx, node.node); - key = (lttng_ust_context_attr *) _key; + auto *ctx = lttng_ht_node_container_of(node, &ust_app_ctx::node); + const auto *key = (lttng_ust_context_attr *) _key; /* Context type */ if (ctx->ctx.ctx != key->ctx) { goto no_match; } - switch(key->ctx) { + switch (key->ctx) { case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER: if (strncmp(key->u.perf_counter.name, - ctx->ctx.u.perf_counter.name, - sizeof(key->u.perf_counter.name))) { + ctx->ctx.u.perf_counter.name, + sizeof(key->u.perf_counter.name)) != 0) { goto no_match; } break; case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT: - if (strcmp(key->u.app_ctx.provider_name, - ctx->ctx.u.app_ctx.provider_name) || - strcmp(key->u.app_ctx.ctx_name, - ctx->ctx.u.app_ctx.ctx_name)) { + if (strcmp(key->u.app_ctx.provider_name, ctx->ctx.u.app_ctx.provider_name) != 0 || + strcmp(key->u.app_ctx.ctx_name, ctx->ctx.u.app_ctx.ctx_name) != 0) { goto no_match; } break; @@ -2811,27 +2867,29 @@ no_match: * Must be called while holding RCU read side lock. * Return an ust_app_ctx object or NULL on error. */ -static -struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht, - struct lttng_ust_context_attr *uctx) +static struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht, + struct lttng_ust_context_attr *uctx) { struct lttng_ht_iter iter; struct lttng_ht_node_ulong *node; - struct ust_app_ctx *app_ctx = NULL; + struct ust_app_ctx *app_ctx = nullptr; LTTNG_ASSERT(uctx); LTTNG_ASSERT(ht); ASSERT_RCU_READ_LOCKED(); /* Lookup using the lttng_ust_context_type and a custom match fct. */ - cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed), - ht_match_ust_app_ctx, uctx, &iter.iter); - node = lttng_ht_iter_get_node_ulong(&iter); + cds_lfht_lookup(ht->ht, + ht->hash_fct((void *) uctx->ctx, lttng_ht_seed), + ht_match_ust_app_ctx, + uctx, + &iter.iter); + node = lttng_ht_iter_get_node(&iter); if (!node) { goto end; } - app_ctx = caa_container_of(node, struct ust_app_ctx, node); + app_ctx = lttng::utils::container_of(node, &ust_app_ctx::node); end: return app_ctx; @@ -2842,10 +2900,9 @@ end: * * Called with UST app session lock held and a RCU read side lock. 
*/ -static -int create_ust_app_channel_context(struct ust_app_channel *ua_chan, - struct lttng_ust_context_attr *uctx, - struct ust_app *app) +static int create_ust_app_channel_context(struct ust_app_channel *ua_chan, + struct lttng_ust_context_attr *uctx, + struct ust_app *app) { int ret = 0; struct ust_app_ctx *ua_ctx; @@ -2861,7 +2918,7 @@ int create_ust_app_channel_context(struct ust_app_channel *ua_chan, } ua_ctx = alloc_ust_app_ctx(uctx); - if (ua_ctx == NULL) { + if (ua_ctx == nullptr) { /* malloc failed */ ret = -ENOMEM; goto error; @@ -2885,9 +2942,7 @@ error: * * Called with UST app session lock held. */ -static -int enable_ust_app_event(struct ust_app_event *ua_event, - struct ust_app *app) +static int enable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app) { int ret; @@ -2896,7 +2951,7 @@ int enable_ust_app_event(struct ust_app_event *ua_event, goto error; } - ua_event->enabled = 1; + ua_event->enabled = true; error: return ret; @@ -2905,8 +2960,7 @@ error: /* * Disable on the tracer side a ust app event for the session and channel. */ -static int disable_ust_app_event(struct ust_app_event *ua_event, - struct ust_app *app) +static int disable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app) { int ret; @@ -2915,7 +2969,7 @@ static int disable_ust_app_event(struct ust_app_event *ua_event, goto error; } - ua_event->enabled = 0; + ua_event->enabled = false; error: return ret; @@ -2924,9 +2978,9 @@ error: /* * Lookup ust app channel for session and disable it on the tracer side. */ -static -int disable_ust_app_channel(struct ust_app_session *ua_sess, - struct ust_app_channel *ua_chan, struct ust_app *app) +static int disable_ust_app_channel(const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app_channel *ua_chan, + struct ust_app *app) { int ret; @@ -2935,7 +2989,7 @@ int disable_ust_app_channel(struct ust_app_session *ua_sess, goto error; } - ua_chan->enabled = 0; + ua_chan->enabled = false; error: return ret; @@ -2945,8 +2999,9 @@ error: * Lookup ust app channel for session and enable it on the tracer side. This * MUST be called with a RCU read side lock acquired. */ -static int enable_ust_app_channel(struct ust_app_session *ua_sess, - struct ltt_ust_channel *uchan, struct ust_app *app) +static int enable_ust_app_channel(const ust_app_session::locked_weak_ref& ua_sess, + struct ltt_ust_channel *uchan, + struct ust_app *app) { int ret = 0; struct lttng_ht_iter iter; @@ -2955,15 +3010,16 @@ static int enable_ust_app_channel(struct ust_app_session *ua_sess, ASSERT_RCU_READ_LOCKED(); - lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter); - ua_chan_node = lttng_ht_iter_get_node_str(&iter); - if (ua_chan_node == NULL) { + lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter); + ua_chan_node = lttng_ht_iter_get_node(&iter); + if (ua_chan_node == nullptr) { DBG2("Unable to find channel %s in ust session id %" PRIu64, - uchan->name, ua_sess->tracing_id); + uchan->name, + ua_sess->tracing_id); goto error; } - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node); + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); ret = enable_ust_channel(app, ua_sess, ua_chan); if (ret < 0) { @@ -2982,8 +3038,10 @@ error: * Return 0 on success or else a negative value. 
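Several helpers above now receive the session as a const ust_app_session::locked_weak_ref& rather than a raw ust_app_session pointer, and the registry code later in the diff follows the same pattern with locked registry references: the callee can only reach the object through a handle whose existence implies the corresponding lock is held. The following is a rough standalone sketch of that shape; the type and member names are invented for the example and this is not the lttng-tools implementation.

#include <mutex>

/* Illustration only: a reference wrapper that owns a lock for as long as it
 * exists, so holding the wrapper is proof that the protected object is
 * locked. */
template <typename ProtectedT>
class locked_ref {
public:
	locked_ref(ProtectedT& object, std::mutex& lock) : _object(object), _guard(lock)
	{
	}

	ProtectedT *operator->() const noexcept
	{
		return &_object;
	}

	ProtectedT& get() const noexcept
	{
		return _object;
	}

private:
	ProtectedT& _object;
	std::lock_guard<std::mutex> _guard;
};

A function taking a const locked_ref<ProtectedT>& can then assume the lock without acquiring it again, which is the contract the new signatures express.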
*/ static int do_consumer_create_channel(struct ltt_ust_session *usess, - struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan, - int bitness, ust_registry_session *registry) + struct ust_app_session *ua_sess, + struct ust_app_channel *ua_chan, + int bitness, + lsu::registry_session *registry) { int ret; unsigned int nb_fd = 0; @@ -2994,7 +3052,7 @@ static int do_consumer_create_channel(struct ltt_ust_session *usess, LTTNG_ASSERT(ua_chan); LTTNG_ASSERT(registry); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; health_code_update(); /* Get the right consumer socket for the application. */ @@ -3017,8 +3075,8 @@ static int do_consumer_create_channel(struct ltt_ust_session *usess, * Ask consumer to create channel. The consumer will return the number of * stream we have to expect. */ - ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket, - registry, usess->current_trace_chunk); + ret = ust_consumer_ask_channel( + ua_sess, ua_chan, usess->consumer, socket, registry, usess->current_trace_chunk); if (ret < 0) { goto error_ask; } @@ -3049,7 +3107,6 @@ static int do_consumer_create_channel(struct ltt_ust_session *usess, } } - rcu_read_unlock(); return 0; error_destroy: @@ -3066,7 +3123,6 @@ error_ask: lttng_fd_put(LTTNG_FD_APPS, 1); error: health_code_update(); - rcu_read_unlock(); return ret; } @@ -3077,7 +3133,7 @@ error: * Return 0 on success or else a negative value. */ static int duplicate_stream_object(struct buffer_reg_stream *reg_stream, - struct ust_app_stream *stream) + struct ust_app_stream *stream) { int ret; @@ -3092,11 +3148,12 @@ static int duplicate_stream_object(struct buffer_reg_stream *reg_stream, } /* Duplicate object for stream once the original is in the registry. */ - ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj, - reg_stream->obj.ust); + ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj, reg_stream->obj.ust); if (ret < 0) { ERR("Duplicate stream obj from %p to %p failed with ret %d", - reg_stream->obj.ust, stream->obj, ret); + reg_stream->obj.ust, + stream->obj, + ret); lttng_fd_put(LTTNG_FD_APPS, 2); goto error; } @@ -3113,7 +3170,7 @@ error: * Return 0 on success or else a negative value. */ static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan, - struct ust_app_channel *ua_chan) + struct ust_app_channel *ua_chan) { int ret; @@ -3131,7 +3188,9 @@ static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan, ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust); if (ret < 0) { ERR("Duplicate channel obj from %p to %p failed with ret: %d", - buf_reg_chan->obj.ust, ua_chan->obj, ret); + buf_reg_chan->obj.ust, + ua_chan->obj, + ret); goto error; } ua_chan->handle = ua_chan->obj->handle; @@ -3151,11 +3210,10 @@ error_fd_get: * Return 0 on success or else a negative value. */ static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan, - struct ust_app_channel *ua_chan, - struct ust_app *app) + struct ust_app_channel *ua_chan, + struct ust_app *app) { int ret = 0; - struct ust_app_stream *stream, *stmp; LTTNG_ASSERT(buf_reg_chan); LTTNG_ASSERT(ua_chan); @@ -3163,7 +3221,9 @@ static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan, DBG2("UST app setup buffer registry stream"); /* Send all streams to application. 
*/ - cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) { + for (auto *stream : + lttng::urcu::list_iteration_adapter( + ua_chan->streams.head)) { struct buffer_reg_stream *reg_stream; ret = buffer_reg_stream_create(®_stream); @@ -3176,7 +3236,7 @@ static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan, * stream call does not release the object. */ reg_stream->obj.ust = stream->obj; - stream->obj = NULL; + stream->obj = nullptr; buffer_reg_stream_add(reg_stream, buf_reg_chan); /* We don't need the streams anymore. */ @@ -3197,10 +3257,11 @@ error: * Return 0 on success else a negative value. */ static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess, - struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp) + struct ust_app_channel *ua_chan, + struct buffer_reg_channel **regp) { int ret; - struct buffer_reg_channel *buf_reg_chan = NULL; + struct buffer_reg_channel *buf_reg_chan = nullptr; LTTNG_ASSERT(reg_sess); LTTNG_ASSERT(ua_chan); @@ -3218,11 +3279,15 @@ static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess, buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf; /* Create and add a channel registry to session. */ - ret = ust_registry_channel_add(reg_sess->reg.ust, - ua_chan->tracing_channel_id); - if (ret < 0) { + try { + reg_sess->reg.ust->add_channel(ua_chan->tracing_channel_id); + } catch (const std::exception& ex) { + ERR("Failed to add a channel registry to userspace registry session: %s", + ex.what()); + ret = -1; goto error; } + buffer_reg_channel_add(reg_sess, buf_reg_chan); if (regp) { @@ -3245,8 +3310,9 @@ error_create: * Return 0 on success else a negative value. */ static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess, - struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan, - struct ust_app *app) + struct ust_app_channel *ua_chan, + struct buffer_reg_channel *buf_reg_chan, + struct ust_app *app) { int ret; @@ -3264,7 +3330,7 @@ static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess, } buf_reg_chan->obj.ust = ua_chan->obj; - ua_chan->obj = NULL; + ua_chan->obj = nullptr; return 0; @@ -3280,11 +3346,11 @@ error: * Return 0 on success else a negative value. */ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan, - struct ust_app *app, struct ust_app_session *ua_sess, - struct ust_app_channel *ua_chan) + struct ust_app *app, + struct ust_app_session *ua_sess, + struct ust_app_channel *ua_chan) { int ret; - struct buffer_reg_stream *reg_stream; LTTNG_ASSERT(buf_reg_chan); LTTNG_ASSERT(app); @@ -3301,12 +3367,15 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan, /* Send channel to the application. */ ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan); if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { - ret = -ENOTCONN; /* Caused by app exiting. */ + ret = -ENOTCONN; /* Caused by app exiting. */ goto error; } else if (ret == -EAGAIN) { /* Caused by timeout. */ - WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".", - app->pid, ua_chan->name, ua_sess->tracing_id); + WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 + "\".", + app->pid, + ua_chan->name, + ua_sess->tracing_id); /* Treat this the same way as an application that is exiting. 
*/ ret = -ENOTCONN; goto error; @@ -3318,7 +3387,9 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan, /* Send all streams to application. */ pthread_mutex_lock(&buf_reg_chan->stream_list_lock); - cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) { + for (auto *reg_stream : + lttng::urcu::list_iteration_adapter( + buf_reg_chan->streams)) { struct ust_app_stream stream = {}; ret = duplicate_stream_object(reg_stream, &stream); @@ -3336,10 +3407,11 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan, * Treat this the same way as an application * that is exiting. */ - WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64 "\".", - app->pid, - ua_chan->name, - ua_sess->tracing_id); + WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64 + "\".", + app->pid, + ua_chan->name, + ua_sess->tracing_id); ret = -ENOTCONN; } (void) release_ust_app_stream(-1, &stream, app); @@ -3352,7 +3424,6 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan, */ (void) release_ust_app_stream(-1, &stream, app); } - ua_chan->is_sent = 1; error_stream_unlock: pthread_mutex_unlock(&buf_reg_chan->stream_list_lock); @@ -3369,15 +3440,14 @@ error: * Return 0 on success else a negative value. */ static int create_channel_per_uid(struct ust_app *app, - struct ltt_ust_session *usess, struct ust_app_session *ua_sess, - struct ust_app_channel *ua_chan) + struct ltt_ust_session *usess, + struct ust_app_session *ua_sess, + struct ust_app_channel *ua_chan) { int ret; struct buffer_reg_uid *reg_uid; struct buffer_reg_channel *buf_reg_chan; - struct ltt_session *session = NULL; enum lttng_error_code notification_ret; - struct ust_registry_channel *ust_reg_chan; LTTNG_ASSERT(app); LTTNG_ASSERT(usess); @@ -3387,7 +3457,11 @@ static int create_channel_per_uid(struct ust_app *app, DBG("UST app creating channel %s with per UID buffers", ua_chan->name); - reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid); + /* Guaranteed to exist; will not throw. */ + const auto session = ltt_session::find_session(ua_sess->tracing_id); + ASSERT_SESSION_LIST_LOCKED(); + + reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid); /* * The session creation handles the creation of this global registry * object. If none can be find, there is a code flow problem or a @@ -3395,8 +3469,7 @@ static int create_channel_per_uid(struct ust_app *app, */ LTTNG_ASSERT(reg_uid); - buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id, - reg_uid); + buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id, reg_uid); if (buf_reg_chan) { goto send_channel; } @@ -3404,32 +3477,29 @@ static int create_channel_per_uid(struct ust_app *app, /* Create the buffer registry channel object. */ ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan); if (ret < 0) { - ERR("Error creating the UST channel \"%s\" registry instance", - ua_chan->name); + ERR("Error creating the UST channel \"%s\" registry instance", ua_chan->name); goto error; } - session = session_find_by_id(ua_sess->tracing_id); - LTTNG_ASSERT(session); - ASSERT_LOCKED(session->lock); - ASSERT_SESSION_LIST_LOCKED(); - /* * Create the buffers on the consumer side. This call populates the * ust app channel object with all streams and data object. 
*/ - ret = do_consumer_create_channel(usess, ua_sess, ua_chan, - app->bits_per_long, reg_uid->registry->reg.ust); + ret = do_consumer_create_channel( + usess, ua_sess, ua_chan, app->abi.bits_per_long, reg_uid->registry->reg.ust); if (ret < 0) { - ERR("Error creating UST channel \"%s\" on the consumer daemon", - ua_chan->name); + ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name); /* * Let's remove the previously created buffer registry channel so * it's not visible anymore in the session registry. */ - ust_registry_channel_del_free(reg_uid->registry->reg.ust, - ua_chan->tracing_channel_id, false); + auto locked_registry = reg_uid->registry->reg.ust->lock(); + try { + locked_registry->remove_channel(ua_chan->tracing_channel_id, false); + } catch (const std::exception& ex) { + DBG("Could not find channel for removal: %s", ex.what()); + } buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan); buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST); goto error; @@ -3438,32 +3508,29 @@ static int create_channel_per_uid(struct ust_app *app, /* * Setup the streams and add it to the session registry. */ - ret = setup_buffer_reg_channel(reg_uid->registry, - ua_chan, buf_reg_chan, app); + ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, buf_reg_chan, app); if (ret < 0) { ERR("Error setting up UST channel \"%s\"", ua_chan->name); goto error; } - /* Notify the notification subsystem of the channel's creation. */ - pthread_mutex_lock(®_uid->registry->reg.ust->_lock); - ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust, - ua_chan->tracing_channel_id); - LTTNG_ASSERT(ust_reg_chan); - ust_reg_chan->consumer_key = ua_chan->key; - ust_reg_chan = NULL; - pthread_mutex_unlock(®_uid->registry->reg.ust->_lock); + { + auto locked_registry = reg_uid->registry->reg.ust->lock(); + auto& ust_reg_chan = locked_registry->channel(ua_chan->tracing_channel_id); + + ust_reg_chan._consumer_key = ua_chan->key; + } + /* Notify the notification subsystem of the channel's creation. */ notification_ret = notification_thread_command_add_channel( - the_notification_thread_handle, session->name, - lttng_credentials_get_uid( - &ua_sess->effective_credentials), - lttng_credentials_get_gid( - &ua_sess->effective_credentials), - ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST, - ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf); + the_notification_thread_handle, + session->id, + ua_chan->name, + ua_chan->key, + LTTNG_DOMAIN_UST, + ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf); if (notification_ret != LTTNG_OK) { - ret = - (int) notification_ret; + ret = -(int) notification_ret; ERR("Failed to add channel to notification thread"); goto error; } @@ -3479,9 +3546,6 @@ send_channel: } error: - if (session) { - session_put(session); - } return ret; } @@ -3494,52 +3558,52 @@ error: * Return 0 on success else a negative value. 
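The registry channel operations in these hunks (add_channel(), remove_channel() on a locked registry) report failure by throwing, and each call site converts the exception back to the surrounding function's negative-return convention. The sketch below shows only that boundary shape in isolation; the throwing registry function is a stand-in, not a real lttng-tools symbol.

#include <cstdint>
#include <cstdio>
#include <stdexcept>

/* Stand-in for a throwing registry operation; not a real lttng-tools API. */
void add_channel_to_registry(std::uint64_t key)
{
	if (key == 0) {
		throw std::invalid_argument("invalid channel key");
	}
	/* ... perform the registration ... */
}

/* Convert the exception into the caller's negative-return convention, as the
 * hunks above do around add_channel()/remove_channel(). */
int add_channel_checked(std::uint64_t key)
{
	try {
		add_channel_to_registry(key);
	} catch (const std::exception& ex) {
		std::fprintf(stderr, "Failed to add channel to registry: %s\n", ex.what());
		return -1;
	}

	return 0;
}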
*/ static int create_channel_per_pid(struct ust_app *app, - struct ltt_ust_session *usess, struct ust_app_session *ua_sess, - struct ust_app_channel *ua_chan) + struct ltt_ust_session *usess, + const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app_channel *ua_chan) { int ret; - ust_registry_session *registry; + lsu::registry_session *registry; enum lttng_error_code cmd_ret; - struct ltt_session *session = NULL; uint64_t chan_reg_key; - struct ust_registry_channel *ust_reg_chan; LTTNG_ASSERT(app); LTTNG_ASSERT(usess); - LTTNG_ASSERT(ua_sess); LTTNG_ASSERT(ua_chan); DBG("UST app creating channel %s with per PID buffers", ua_chan->name); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; - registry = get_session_registry(ua_sess); + registry = ust_app_get_session_registry(ua_sess->get_identifier()); /* The UST app session lock is held, registry shall not be null. */ LTTNG_ASSERT(registry); + /* Guaranteed to exist; will not throw. */ + const auto session = ltt_session::find_session(ua_sess->tracing_id); + ASSERT_LOCKED(session->_lock); + ASSERT_SESSION_LIST_LOCKED(); + /* Create and add a new channel registry to session. */ - ret = ust_registry_channel_add(registry, ua_chan->key); - if (ret < 0) { - ERR("Error creating the UST channel \"%s\" registry instance", - ua_chan->name); + try { + registry->add_channel(ua_chan->key); + } catch (const std::exception& ex) { + ERR("Error creating the UST channel \"%s\" registry instance: %s", + ua_chan->name, + ex.what()); + ret = -1; goto error; } - session = session_find_by_id(ua_sess->tracing_id); - LTTNG_ASSERT(session); - ASSERT_LOCKED(session->lock); - ASSERT_SESSION_LIST_LOCKED(); - /* Create and get channel on the consumer side. */ - ret = do_consumer_create_channel(usess, ua_sess, ua_chan, - app->bits_per_long, registry); + ret = do_consumer_create_channel( + usess, &ua_sess.get(), ua_chan, app->abi.bits_per_long, registry); if (ret < 0) { - ERR("Error creating UST channel \"%s\" on the consumer daemon", - ua_chan->name); + ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name); goto error_remove_from_registry; } - ret = send_channel_pid_to_ust(app, ua_sess, ua_chan); + ret = send_channel_pid_to_ust(app, &ua_sess.get(), ua_chan); if (ret < 0) { if (ret != -ENOTCONN) { ERR("Error sending channel to application"); @@ -3548,35 +3612,36 @@ static int create_channel_per_pid(struct ust_app *app, } chan_reg_key = ua_chan->key; - pthread_mutex_lock(®istry->_lock); - ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key); - LTTNG_ASSERT(ust_reg_chan); - ust_reg_chan->consumer_key = ua_chan->key; - pthread_mutex_unlock(®istry->_lock); - - cmd_ret = notification_thread_command_add_channel( - the_notification_thread_handle, session->name, - lttng_credentials_get_uid( - &ua_sess->effective_credentials), - lttng_credentials_get_gid( - &ua_sess->effective_credentials), - ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST, - ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf); + { + auto locked_registry = registry->lock(); + + auto& ust_reg_chan = locked_registry->channel(chan_reg_key); + ust_reg_chan._consumer_key = ua_chan->key; + } + + cmd_ret = notification_thread_command_add_channel(the_notification_thread_handle, + session->id, + ua_chan->name, + ua_chan->key, + LTTNG_DOMAIN_UST, + ua_chan->attr.subbuf_size * + ua_chan->attr.num_subbuf); if (cmd_ret != LTTNG_OK) { - ret = - (int) cmd_ret; + ret = -(int) cmd_ret; ERR("Failed to add channel to notification thread"); goto error_remove_from_registry; 
} error_remove_from_registry: if (ret) { - ust_registry_channel_del_free(registry, ua_chan->key, false); + try { + auto locked_registry = registry->lock(); + locked_registry->remove_channel(ua_chan->key, false); + } catch (const std::exception& ex) { + DBG("Could not find channel for removal: %s", ex.what()); + } } error: - rcu_read_unlock(); - if (session) { - session_put(session); - } return ret; } @@ -3591,15 +3656,15 @@ error: * the application exited concurrently. */ static int ust_app_channel_send(struct ust_app *app, - struct ltt_ust_session *usess, struct ust_app_session *ua_sess, - struct ust_app_channel *ua_chan) + struct ltt_ust_session *usess, + const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app_channel *ua_chan) { int ret; LTTNG_ASSERT(app); LTTNG_ASSERT(usess); LTTNG_ASSERT(usess->active); - LTTNG_ASSERT(ua_sess); LTTNG_ASSERT(ua_chan); ASSERT_RCU_READ_LOCKED(); @@ -3607,7 +3672,7 @@ static int ust_app_channel_send(struct ust_app *app, switch (usess->buffer_type) { case LTTNG_BUFFER_PER_UID: { - ret = create_channel_per_uid(app, usess, ua_sess, ua_chan); + ret = create_channel_per_uid(app, usess, &ua_sess.get(), ua_chan); if (ret < 0) { goto error; } @@ -3650,11 +3715,11 @@ error: * * Return 0 on success or else a negative value. */ -static int ust_app_channel_allocate(struct ust_app_session *ua_sess, - struct ltt_ust_channel *uchan, - enum lttng_ust_abi_chan_type type, - struct ltt_ust_session *usess __attribute__((unused)), - struct ust_app_channel **ua_chanp) +static int ust_app_channel_allocate(const ust_app_session::locked_weak_ref& ua_sess, + struct ltt_ust_channel *uchan, + enum lttng_ust_abi_chan_type type, + struct ltt_ust_session *usess __attribute__((unused)), + struct ust_app_channel **ua_chanp) { int ret = 0; struct lttng_ht_iter iter; @@ -3664,15 +3729,15 @@ static int ust_app_channel_allocate(struct ust_app_session *ua_sess, ASSERT_RCU_READ_LOCKED(); /* Lookup channel in the ust app session */ - lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter); - ua_chan_node = lttng_ht_iter_get_node_str(&iter); - if (ua_chan_node != NULL) { - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node); + lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter); + ua_chan_node = lttng_ht_iter_get_node(&iter); + if (ua_chan_node != nullptr) { + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); goto end; } ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr); - if (ua_chan == NULL) { + if (ua_chan == nullptr) { /* Only malloc can fail here */ ret = -ENOMEM; goto error; @@ -3702,10 +3767,9 @@ error: * Must be called with the RCU read side lock held. * Called with ust app session mutex held. */ -static -int create_ust_app_event(struct ust_app_channel *ua_chan, - struct ltt_ust_event *uevent, - struct ust_app *app) +static int create_ust_app_event(struct ust_app_channel *ua_chan, + struct ltt_ust_event *uevent, + struct ust_app *app) { int ret = 0; struct ust_app_event *ua_event; @@ -3713,7 +3777,7 @@ int create_ust_app_event(struct ust_app_channel *ua_chan, ASSERT_RCU_READ_LOCKED(); ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr); - if (ua_event == NULL) { + if (ua_event == nullptr) { /* Only failure mode of alloc_ust_app_event(). 
*/ ret = -ENOMEM; goto end; @@ -3731,18 +3795,19 @@ int create_ust_app_event(struct ust_app_channel *ua_chan, */ if (ret == -LTTNG_UST_ERR_EXIST) { ERR("Tracer for application reported that an event being created already existed: " - "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d", - uevent->attr.name, - app->pid, app->ppid, app->uid, - app->gid); + "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d", + uevent->attr.name, + app->pid, + app->ppid, + app->uid, + app->gid); } goto error; } add_unique_ust_app_event(ua_chan, ua_event); - DBG2("UST app create event completed: app = '%s' pid = %d", - app->name, app->pid); + DBG2("UST app create event completed: app = '%s' pid = %d", app->name, app->pid); end: return ret; @@ -3759,9 +3824,7 @@ error: * Must be called with the RCU read side lock held. * Called with ust app session mutex held. */ -static -int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger, - struct ust_app *app) +static int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger, struct ust_app *app) { int ret = 0; struct ust_app_event_notifier_rule *ua_event_notifier_rule; @@ -3769,7 +3832,7 @@ int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger, ASSERT_RCU_READ_LOCKED(); ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger); - if (ua_event_notifier_rule == NULL) { + if (ua_event_notifier_rule == nullptr) { ret = -ENOMEM; goto end; } @@ -3785,19 +3848,23 @@ int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger, */ if (ret == -LTTNG_UST_ERR_EXIST) { ERR("Tracer for application reported that an event notifier being created already exists: " - "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d", - lttng_trigger_get_tracer_token(trigger), - app->pid, app->ppid, app->uid, - app->gid); + "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d", + lttng_trigger_get_tracer_token(trigger), + app->pid, + app->ppid, + app->uid, + app->gid); } goto error; } lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht, - &ua_event_notifier_rule->node); + &ua_event_notifier_rule->node); DBG2("UST app create token event rule completed: app = '%s', pid = %d, token = %" PRIu64, - app->name, app->pid, lttng_trigger_get_tracer_token(trigger)); + app->name, + app->pid, + lttng_trigger_get_tracer_token(trigger)); goto end; @@ -3813,34 +3880,35 @@ end: * * Called with UST app session lock held and RCU read side lock. */ -static int create_ust_app_metadata(struct ust_app_session *ua_sess, - struct ust_app *app, struct consumer_output *consumer) +static int create_ust_app_metadata(const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app *app, + struct consumer_output *consumer) { int ret = 0; struct ust_app_channel *metadata; struct consumer_socket *socket; - ust_registry_session *registry; - struct ltt_session *session = NULL; - LTTNG_ASSERT(ua_sess); LTTNG_ASSERT(app); LTTNG_ASSERT(consumer); ASSERT_RCU_READ_LOCKED(); - registry = get_session_registry(ua_sess); + auto locked_registry = get_locked_session_registry(ua_sess->get_identifier()); /* The UST app session is held registry shall not be null. */ - LTTNG_ASSERT(registry); + LTTNG_ASSERT(locked_registry); - pthread_mutex_lock(®istry->_lock); + /* Guaranteed to exist; will not throw. 
*/ + const auto session = ltt_session::find_session(ua_sess->tracing_id); + ASSERT_LOCKED(session->_lock); + ASSERT_SESSION_LIST_LOCKED(); /* Metadata already exists for this registry or it was closed previously */ - if (registry->_metadata_key || registry->_metadata_closed) { + if (locked_registry->_metadata_key || locked_registry->_metadata_closed) { ret = 0; goto error; } /* Allocate UST metadata */ - metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL); + metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, nullptr); if (!metadata) { /* malloc() failed */ ret = -ENOMEM; @@ -3857,7 +3925,7 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess, } /* Get the right consumer socket for the application. */ - socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer); + socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, consumer); if (!socket) { ret = -EINVAL; goto error_consumer; @@ -3869,12 +3937,7 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess, * consumer requesting the metadata and the ask_channel call on our side * did not returned yet. */ - registry->_metadata_key = metadata->key; - - session = session_find_by_id(ua_sess->tracing_id); - LTTNG_ASSERT(session); - ASSERT_LOCKED(session->lock); - ASSERT_SESSION_LIST_LOCKED(); + locked_registry->_metadata_key = metadata->key; /* * Ask the metadata channel creation to the consumer. The metadata object @@ -3882,11 +3945,15 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess, * never added or monitored until we do a first push metadata to the * consumer. */ - ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket, - registry, session->current_trace_chunk); + ret = ust_consumer_ask_channel(&ua_sess.get(), + metadata, + consumer, + socket, + locked_registry.get(), + session->current_trace_chunk); if (ret < 0) { /* Nullify the metadata key so we don't try to close it later on. */ - registry->_metadata_key = 0; + locked_registry->_metadata_key = 0; goto error_consumer; } @@ -3899,21 +3966,16 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess, ret = consumer_setup_metadata(socket, metadata->key); if (ret < 0) { /* Nullify the metadata key so we don't try to close it later on. 
*/ - registry->_metadata_key = 0; + locked_registry->_metadata_key = 0; goto error_consumer; } - DBG2("UST metadata with key %" PRIu64 " created for app pid %d", - metadata->key, app->pid); + DBG2("UST metadata with key %" PRIu64 " created for app pid %d", metadata->key, app->pid); error_consumer: lttng_fd_put(LTTNG_FD_APPS, 1); - delete_ust_app_channel(-1, metadata, app); + delete_ust_app_channel(-1, metadata, app, locked_registry); error: - pthread_mutex_unlock(®istry->_lock); - if (session) { - session_put(session); - } return ret; } @@ -3923,20 +3985,20 @@ error: */ struct ust_app *ust_app_find_by_pid(pid_t pid) { - struct ust_app *app = NULL; + struct ust_app *app = nullptr; struct lttng_ht_node_ulong *node; struct lttng_ht_iter iter; - lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter); - node = lttng_ht_iter_get_node_ulong(&iter); - if (node == NULL) { + lttng_ht_lookup(ust_app_ht, (void *) ((unsigned long) pid), &iter); + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { DBG2("UST app no found with pid %d", pid); goto error; } DBG2("Found UST app by pid %d", pid); - app = caa_container_of(node, struct ust_app, pid_n); + app = lttng::utils::container_of(node, &ust_app::pid_n); error: return app; @@ -3952,23 +4014,21 @@ error: struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock) { int ret; - struct ust_app *lta = NULL; - struct lttng_pipe *event_notifier_event_source_pipe = NULL; + struct ust_app *lta = nullptr; + struct lttng_pipe *event_notifier_event_source_pipe = nullptr; LTTNG_ASSERT(msg); LTTNG_ASSERT(sock >= 0); DBG3("UST app creating application for socket %d", sock); - if ((msg->bits_per_long == 64 && - (uatomic_read(&the_ust_consumerd64_fd) == - -EINVAL)) || - (msg->bits_per_long == 32 && - (uatomic_read(&the_ust_consumerd32_fd) == - -EINVAL))) { + if ((msg->bits_per_long == 64 && (uatomic_read(&the_ust_consumerd64_fd) == -EINVAL)) || + (msg->bits_per_long == 32 && (uatomic_read(&the_ust_consumerd32_fd) == -EINVAL))) { ERR("Registration failed: application \"%s\" (pid: %d) has " - "%d-bit long, but no consumerd for this size is available.\n", - msg->name, msg->pid, msg->bits_per_long); + "%d-bit long, but no consumerd for this size is available.\n", + msg->name, + msg->pid, + msg->bits_per_long); goto error; } @@ -3980,36 +4040,48 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock) ret = lttng_fd_get(LTTNG_FD_APPS, 2); if (ret) { ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d", - msg->name, (int) msg->pid); + msg->name, + (int) msg->pid); goto error; } event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC); if (!event_notifier_event_source_pipe) { PERROR("Failed to open application event source pipe: '%s' (pid = %d)", - msg->name, msg->pid); + msg->name, + msg->pid); goto error; } - lta = zmalloc(); - if (lta == NULL) { - PERROR("malloc"); + try { + lta = new ust_app; + } catch (const std::bad_alloc&) { + ERR_FMT("Failed to allocate ust application instance: name=`{}`, pid={}, uid={}", + msg->name, + msg->pid, + msg->uid); goto error_free_pipe; } + urcu_ref_init(<a->ref); + lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe; lta->ppid = msg->ppid; lta->uid = msg->uid; lta->gid = msg->gid; - lta->bits_per_long = msg->bits_per_long; - lta->uint8_t_alignment = msg->uint8_t_alignment; - lta->uint16_t_alignment = msg->uint16_t_alignment; - lta->uint32_t_alignment = msg->uint32_t_alignment; - 
lta->uint64_t_alignment = msg->uint64_t_alignment;
- lta->long_alignment = msg->long_alignment;
- lta->byte_order = msg->byte_order;
+ lta->abi = {
+ .bits_per_long = msg->bits_per_long,
+ .long_alignment = msg->long_alignment,
+ .uint8_t_alignment = msg->uint8_t_alignment,
+ .uint16_t_alignment = msg->uint16_t_alignment,
+ .uint32_t_alignment = msg->uint32_t_alignment,
+ .uint64_t_alignment = msg->uint64_t_alignment,
+ .byte_order = msg->byte_order == LITTLE_ENDIAN ?
+ lttng::sessiond::trace::byte_order::LITTLE_ENDIAN_ :
+ lttng::sessiond::trace::byte_order::BIG_ENDIAN_,
+ };
 lta->v_major = msg->major;
 lta->v_minor = msg->minor;
@@ -4033,17 +4105,16 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
 lta->pid = msg->pid;
 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
 lta->sock = sock;
- pthread_mutex_init(&lta->sock_lock, NULL);
+ pthread_mutex_init(&lta->sock_lock, nullptr);
 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
- CDS_INIT_LIST_HEAD(&lta->teardown_head);

 return lta;

 error_free_pipe:
 lttng_pipe_destroy(event_notifier_event_source_pipe);
 lttng_fd_put(LTTNG_FD_APPS, 2);
 error:
- return NULL;
+ return nullptr;
 }

 /*
@@ -4054,9 +4125,9 @@ void ust_app_add(struct ust_app *app)
 LTTNG_ASSERT(app);
 LTTNG_ASSERT(app->notify_sock >= 0);

- app->registration_time = time(NULL);
+ app->registration_time = time(nullptr);

- rcu_read_lock();
+ const lttng::urcu::read_lock_guard read_lock;

 /*
 * On a re-registration, we want to kick out the previous registration of
@@ -4076,11 +4147,16 @@ void ust_app_add(struct ust_app *app)
 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
- "notify_sock =%d (version %d.%d)", app->pid, app->ppid, app->uid,
- app->gid, app->sock, app->name, app->notify_sock, app->v_major,
- app->v_minor);
-
- rcu_read_unlock();
+ "notify_sock =%d (version %d.%d)",
+ app->pid,
+ app->ppid,
+ app->uid,
+ app->gid,
+ app->sock,
+ app->name,
+ app->notify_sock,
+ app->v_major,
+ app->v_minor);
 }

 /*
@@ -4101,13 +4177,17 @@ int ust_app_version(struct ust_app *app)
 if (ret < 0) {
 if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
 DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
 } else if (ret == -EAGAIN) {
 WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
 } else {
 ERR("UST app version failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
 }
 }

@@ -4134,7 +4214,7 @@ int ust_app_setup_event_notifier_group(struct ust_app *app)
 {
 int ret;
 int event_pipe_write_fd;
- struct lttng_ust_abi_object_data *event_notifier_group = NULL;
+ struct lttng_ust_abi_object_data *event_notifier_group = nullptr;
 enum lttng_error_code lttng_ret;
 enum event_notifier_error_accounting_status
 event_notifier_error_accounting_status;
@@ -4146,25 +4226,29 @@
 }

 /* Get the write side of the pipe. */
- event_pipe_write_fd = lttng_pipe_get_writefd(
- app->event_notifier_group.event_pipe);
+ event_pipe_write_fd = lttng_pipe_get_writefd(app->event_notifier_group.event_pipe);

 pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
- event_pipe_write_fd, &event_notifier_group);
+ ret = lttng_ust_ctl_create_event_notifier_group(
+ app->sock, event_pipe_write_fd, &event_notifier_group);
 pthread_mutex_unlock(&app->sock_lock);
 if (ret < 0) {
 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
 ret = 0;
 DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
 } else if (ret == -EAGAIN) {
 ret = 0;
 WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
 } else {
 ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
- ret, app->pid, app->sock, event_pipe_write_fd);
+ ret,
+ app->pid,
+ app->sock,
+ event_pipe_write_fd);
 }
 goto error;
 }
@@ -4172,7 +4256,8 @@ int ust_app_setup_event_notifier_group(struct ust_app *app)
 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
 if (ret) {
 ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
- app->name, app->pid);
+ app->name,
+ app->pid);
 goto error;
 }

@@ -4183,32 +4268,34 @@
 lttng_fd_put(LTTNG_FD_APPS, 1);

 lttng_ret = notification_thread_command_add_tracer_event_source(
- the_notification_thread_handle,
- lttng_pipe_get_readfd(
- app->event_notifier_group.event_pipe),
- LTTNG_DOMAIN_UST);
+ the_notification_thread_handle,
+ lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
+ LTTNG_DOMAIN_UST);
 if (lttng_ret != LTTNG_OK) {
 ERR("Failed to add tracer event source to notification thread");
- ret = - 1;
+ ret = -1;
 goto error;
 }

 /* Assign handle only when the complete setup is valid.
*/ app->event_notifier_group.object = event_notifier_group; - event_notifier_error_accounting_status = - event_notifier_error_accounting_register_app(app); + event_notifier_error_accounting_status = event_notifier_error_accounting_register_app(app); switch (event_notifier_error_accounting_status) { case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK: break; case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED: DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d", - app->sock, app->name, (int) app->pid); + app->sock, + app->name, + (int) app->pid); ret = 0; goto error_accounting; case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD: DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d", - app->sock, app->name, (int) app->pid); + app->sock, + app->name, + (int) app->pid); ret = 0; goto error_accounting; default: @@ -4221,9 +4308,8 @@ int ust_app_setup_event_notifier_group(struct ust_app *app) error_accounting: lttng_ret = notification_thread_command_remove_tracer_event_source( - the_notification_thread_handle, - lttng_pipe_get_readfd( - app->event_notifier_group.event_pipe)); + the_notification_thread_handle, + lttng_pipe_get_readfd(app->event_notifier_group.event_pipe)); if (lttng_ret != LTTNG_OK) { ERR("Failed to remove application tracer event source from notification thread"); } @@ -4231,34 +4317,13 @@ error_accounting: error: lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object); free(app->event_notifier_group.object); - app->event_notifier_group.object = NULL; + app->event_notifier_group.object = nullptr; return ret; } -/* - * Unregister app by removing it from the global traceable app list and freeing - * the data struct. - * - * The socket is already closed at this point so no close to sock. - */ -void ust_app_unregister(int sock) +static void ust_app_unregister(ust_app& app) { - struct ust_app *lta; - struct lttng_ht_node_ulong *node; - struct lttng_ht_iter ust_app_sock_iter; - struct lttng_ht_iter iter; - struct ust_app_session *ua_sess; - int ret; - - rcu_read_lock(); - - /* Get the node reference for a call_rcu */ - lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter); - node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter); - LTTNG_ASSERT(node); - - lta = caa_container_of(node, struct ust_app, sock_n); - DBG("PID %d unregistering with sock %d", lta->pid, sock); + const lttng::urcu::read_lock_guard read_lock; /* * For per-PID buffers, perform "push metadata" and flush all @@ -4266,28 +4331,27 @@ void ust_app_unregister(int sock) * ensuring proper behavior of data_pending check. * Remove sessions so they are not visible during deletion. */ - cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess, - node.node) { - ust_registry_session *registry; - - ret = lttng_ht_del(lta->sessions, &iter); - if (ret) { + for (auto *ua_sess : + lttng::urcu::lfht_iteration_adapter(*app.sessions->ht)) { + const auto del_ret = cds_lfht_del(app.sessions->ht, &ua_sess->node.node); + if (del_ret) { /* The session was already removed so scheduled for teardown. */ continue; } if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) { - (void) ust_app_flush_app_session(lta, ua_sess); + (void) ust_app_flush_app_session(app, *ua_sess); } /* * Add session to list for teardown. This is safe since at this point we * are the only one using this list. 
*/ - pthread_mutex_lock(&ua_sess->lock); + auto locked_ua_sess = ua_sess->lock(); if (ua_sess->deleted) { - pthread_mutex_unlock(&ua_sess->lock); continue; } @@ -4302,10 +4366,11 @@ void ust_app_unregister(int sock) * The close metadata below nullifies the metadata pointer in the * session so the delete session will NOT push/close a second time. */ - registry = get_session_registry(ua_sess); - if (registry) { + auto locked_registry = + get_locked_session_registry(locked_ua_sess->get_identifier()); + if (locked_registry) { /* Push metadata for application before freeing the application. */ - (void) push_metadata(registry, ua_sess->consumer); + (void) push_metadata(locked_registry, ua_sess->consumer); /* * Don't ask to close metadata for global per UID buffers. Close @@ -4314,45 +4379,78 @@ void ust_app_unregister(int sock) * close so don't send a close command if closed. */ if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) { - /* And ask to close it for this session registry. */ - (void) close_metadata(registry, ua_sess->consumer); + const auto metadata_key = locked_registry->_metadata_key; + const auto consumer_bitness = locked_registry->abi.bits_per_long; + + if (!locked_registry->_metadata_closed && metadata_key != 0) { + locked_registry->_metadata_closed = true; + } + + /* Release lock before communication, see comments in + * close_metadata(). */ + locked_registry.reset(); + (void) close_metadata( + metadata_key, consumer_bitness, ua_sess->consumer); + } else { + locked_registry.reset(); } } - cds_list_add(&ua_sess->teardown_node, <a->teardown_head); - pthread_mutex_unlock(&ua_sess->lock); + app.sessions_to_teardown.emplace_back(ua_sess); } - /* Remove application from PID hash table */ - ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter); - LTTNG_ASSERT(!ret); - /* * Remove application from notify hash table. The thread handling the * notify socket could have deleted the node so ignore on error because * either way it's valid. The close of that socket is handled by the * apps_notify_thread. */ - iter.iter.node = <a->notify_sock_n.node; - (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter); + (void) cds_lfht_del(ust_app_ht_by_notify_sock->ht, &app.notify_sock_n.node); /* * Ignore return value since the node might have been removed before by an * add replace during app registration because the PID can be reassigned by * the OS. */ - iter.iter.node = <a->pid_n.node; - ret = lttng_ht_del(ust_app_ht, &iter); - if (ret) { - DBG3("Unregister app by PID %d failed. This can happen on pid reuse", - lta->pid); + if (cds_lfht_del(ust_app_ht->ht, &app.pid_n.node)) { + DBG3("Unregister app by PID %d failed. This can happen on pid reuse", app.pid); } +} + +/* + * Unregister app by removing it from the global traceable app list and freeing + * the data struct. + * + * The socket is already closed at this point, so there is no need to close it. 
+ */ +void ust_app_unregister_by_socket(int sock_fd) +{ + struct lttng_ht_node_ulong *node; + struct lttng_ht_iter ust_app_sock_iter; + int ret; - /* Free memory */ - call_rcu(<a->pid_n.head, delete_ust_app_rcu); + const lttng::urcu::read_lock_guard read_lock; - rcu_read_unlock(); - return; + /* Get the node reference for a call_rcu */ + lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock_fd), &ust_app_sock_iter); + node = lttng_ht_iter_get_node(&ust_app_sock_iter); + assert(node); + + auto *app = lttng::utils::container_of(node, &ust_app::sock_n); + + DBG_FMT("Application unregistering after socket activity: app={}, socket_fd={}", + *app, + sock_fd); + + /* Remove application from socket hash table */ + ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter); + assert(!ret); + + /* + * The socket is closed: release its reference to the application + * to trigger its eventual teardown. + */ + ust_app_put(app); } /* @@ -4362,21 +4460,20 @@ int ust_app_list_events(struct lttng_event **events) { int ret, handle; size_t nbmem, count = 0; - struct lttng_ht_iter iter; - struct ust_app *app; struct lttng_event *tmp_event; nbmem = UST_APP_EVENT_LIST_SIZE; tmp_event = calloc(nbmem); - if (tmp_event == NULL) { + if (tmp_event == nullptr) { PERROR("zmalloc ust app events"); ret = -ENOMEM; goto error; } - rcu_read_lock(); - - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { struct lttng_ust_abi_tracepoint_iter uiter; health_code_update(); @@ -4388,37 +4485,42 @@ int ust_app_list_events(struct lttng_event **events) */ continue; } + pthread_mutex_lock(&app->sock_lock); handle = lttng_ust_ctl_tracepoint_list(app->sock); if (handle < 0) { if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) { ERR("UST app list events getting handle failed for app pid %d", - app->pid); + app->pid); } pthread_mutex_unlock(&app->sock_lock); continue; } - while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle, - &uiter)) != -LTTNG_UST_ERR_NOENT) { + while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle, &uiter)) != + -LTTNG_UST_ERR_NOENT) { /* Handle ustctl error. */ if (ret < 0) { int release_ret; if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) { ERR("UST app tp list get failed for app %d with ret %d", - app->sock, ret); + app->sock, + ret); } else { DBG3("UST app tp list get failed. 
Application is dead"); break; } + free(tmp_event); release_ret = lttng_ust_ctl_release_handle(app->sock, handle); - if (release_ret < 0 && - release_ret != -LTTNG_UST_ERR_EXITING && - release_ret != -EPIPE) { - ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret); + if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING && + release_ret != -EPIPE) { + ERR("Error releasing app handle for app %d with ret %d", + app->sock, + release_ret); } + pthread_mutex_unlock(&app->sock_lock); goto rcu_error; } @@ -4431,30 +4533,37 @@ int ust_app_list_events(struct lttng_event **events) new_nbmem = nbmem << 1; DBG2("Reallocating event list from %zu to %zu entries", - nbmem, new_nbmem); - new_tmp_event = (lttng_event *) realloc(tmp_event, - new_nbmem * sizeof(struct lttng_event)); - if (new_tmp_event == NULL) { + nbmem, + new_nbmem); + new_tmp_event = (lttng_event *) realloc( + tmp_event, new_nbmem * sizeof(struct lttng_event)); + if (new_tmp_event == nullptr) { int release_ret; PERROR("realloc ust app events"); free(tmp_event); ret = -ENOMEM; - release_ret = lttng_ust_ctl_release_handle(app->sock, handle); + release_ret = + lttng_ust_ctl_release_handle(app->sock, handle); if (release_ret < 0 && - release_ret != -LTTNG_UST_ERR_EXITING && - release_ret != -EPIPE) { - ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret); + release_ret != -LTTNG_UST_ERR_EXITING && + release_ret != -EPIPE) { + ERR("Error releasing app handle for app %d with ret %d", + app->sock, + release_ret); } + pthread_mutex_unlock(&app->sock_lock); goto rcu_error; } /* Zero the new memory */ - memset(new_tmp_event + nbmem, 0, - (new_nbmem - nbmem) * sizeof(struct lttng_event)); + memset(new_tmp_event + nbmem, + 0, + (new_nbmem - nbmem) * sizeof(struct lttng_event)); nbmem = new_nbmem; tmp_event = new_tmp_event; } + memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN); tmp_event[count].loglevel = uiter.loglevel; tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT; @@ -4462,18 +4571,23 @@ int ust_app_list_events(struct lttng_event **events) tmp_event[count].enabled = -1; count++; } + ret = lttng_ust_ctl_release_handle(app->sock, handle); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("Error releasing app handle. Application died: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("Error releasing app handle with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } } @@ -4484,7 +4598,6 @@ int ust_app_list_events(struct lttng_event **events) DBG2("UST app list events done (%zu events)", count); rcu_error: - rcu_read_unlock(); error: health_code_update(); return ret; @@ -4497,21 +4610,20 @@ int ust_app_list_event_fields(struct lttng_event_field **fields) { int ret, handle; size_t nbmem, count = 0; - struct lttng_ht_iter iter; - struct ust_app *app; struct lttng_event_field *tmp_event; nbmem = UST_APP_EVENT_LIST_SIZE; tmp_event = calloc(nbmem); - if (tmp_event == NULL) { + if (tmp_event == nullptr) { PERROR("zmalloc ust app event fields"); ret = -ENOMEM; goto error; } - rcu_read_lock(); - - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. 
*/ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { struct lttng_ust_abi_field_iter uiter; health_code_update(); @@ -4523,38 +4635,43 @@ int ust_app_list_event_fields(struct lttng_event_field **fields) */ continue; } + pthread_mutex_lock(&app->sock_lock); handle = lttng_ust_ctl_tracepoint_field_list(app->sock); if (handle < 0) { if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) { ERR("UST app list field getting handle failed for app pid %d", - app->pid); + app->pid); } pthread_mutex_unlock(&app->sock_lock); continue; } - while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle, - &uiter)) != -LTTNG_UST_ERR_NOENT) { + while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle, &uiter)) != + -LTTNG_UST_ERR_NOENT) { /* Handle ustctl error. */ if (ret < 0) { int release_ret; if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) { ERR("UST app tp list field failed for app %d with ret %d", - app->sock, ret); + app->sock, + ret); } else { DBG3("UST app tp list field failed. Application is dead"); break; } + free(tmp_event); release_ret = lttng_ust_ctl_release_handle(app->sock, handle); pthread_mutex_unlock(&app->sock_lock); - if (release_ret < 0 && - release_ret != -LTTNG_UST_ERR_EXITING && - release_ret != -EPIPE) { - ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret); + if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING && + release_ret != -EPIPE) { + ERR("Error releasing app handle for app %d with ret %d", + app->sock, + release_ret); } + goto rcu_error; } @@ -4566,48 +4683,57 @@ int ust_app_list_event_fields(struct lttng_event_field **fields) new_nbmem = nbmem << 1; DBG2("Reallocating event field list from %zu to %zu entries", - nbmem, new_nbmem); - new_tmp_event = (lttng_event_field *) realloc(tmp_event, - new_nbmem * sizeof(struct lttng_event_field)); - if (new_tmp_event == NULL) { + nbmem, + new_nbmem); + new_tmp_event = (lttng_event_field *) realloc( + tmp_event, new_nbmem * sizeof(struct lttng_event_field)); + if (new_tmp_event == nullptr) { int release_ret; PERROR("realloc ust app event fields"); free(tmp_event); ret = -ENOMEM; - release_ret = lttng_ust_ctl_release_handle(app->sock, handle); + release_ret = + lttng_ust_ctl_release_handle(app->sock, handle); pthread_mutex_unlock(&app->sock_lock); - if (release_ret && - release_ret != -LTTNG_UST_ERR_EXITING && - release_ret != -EPIPE) { - ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret); + if (release_ret && release_ret != -LTTNG_UST_ERR_EXITING && + release_ret != -EPIPE) { + ERR("Error releasing app handle for app %d with ret %d", + app->sock, + release_ret); } + goto rcu_error; } + /* Zero the new memory */ - memset(new_tmp_event + nbmem, 0, - (new_nbmem - nbmem) * sizeof(struct lttng_event_field)); + memset(new_tmp_event + nbmem, + 0, + (new_nbmem - nbmem) * sizeof(struct lttng_event_field)); nbmem = new_nbmem; tmp_event = new_tmp_event; } - memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN); + memcpy(tmp_event[count].field_name, + uiter.field_name, + LTTNG_UST_ABI_SYM_NAME_LEN); /* Mapping between these enums matches 1 to 1. 
*/ tmp_event[count].type = (enum lttng_event_field_type) uiter.type; tmp_event[count].nowrite = uiter.nowrite; - memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN); + memcpy(tmp_event[count].event.name, + uiter.event_name, + LTTNG_UST_ABI_SYM_NAME_LEN); tmp_event[count].event.loglevel = uiter.loglevel; tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT; tmp_event[count].event.pid = app->pid; tmp_event[count].event.enabled = -1; count++; } + ret = lttng_ust_ctl_release_handle(app->sock, handle); pthread_mutex_unlock(&app->sock_lock); - if (ret < 0 && - ret != -LTTNG_UST_ERR_EXITING && - ret != -EPIPE) { + if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) { ERR("Error releasing app handle for app %d with ret %d", app->sock, ret); } } @@ -4618,7 +4744,6 @@ int ust_app_list_event_fields(struct lttng_event_field **fields) DBG2("UST app list event fields done (%zu events)", count); rcu_error: - rcu_read_unlock(); error: health_code_update(); return ret; @@ -4627,49 +4752,41 @@ error: /* * Free and clean all traceable apps of the global list. */ -void ust_app_clean_list(void) +void ust_app_clean_list() { int ret; - struct ust_app *app; - struct lttng_ht_iter iter; - DBG2("UST app cleaning registered apps hash table"); - rcu_read_lock(); - /* Cleanup notify socket hash table */ if (ust_app_ht_by_notify_sock) { - cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app, - notify_sock_n.node) { + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht_by_notify_sock->ht)) { /* * Assert that all notifiers are gone as all triggers * are unregistered prior to this clean-up. */ LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0); - ust_app_notify_sock_unregister(app->notify_sock); } } - if (ust_app_ht) { - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { - ret = lttng_ht_del(ust_app_ht, &iter); - LTTNG_ASSERT(!ret); - call_rcu(&app->pid_n.head, delete_ust_app_rcu); - } - } - /* Cleanup socket hash table */ if (ust_app_ht_by_sock) { - cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app, - sock_n.node) { - ret = lttng_ht_del(ust_app_ht_by_sock, &iter); + const lttng::urcu::read_lock_guard read_lock; + + for (auto *app : lttng::urcu::lfht_iteration_adapter( + *ust_app_ht_by_sock->ht)) { + ret = cds_lfht_del(ust_app_ht_by_sock->ht, &app->sock_n.node); LTTNG_ASSERT(!ret); + ust_app_put(app); } } - rcu_read_unlock(); - /* Destroy is done only when the ht is empty */ if (ust_app_ht) { lttng_ht_destroy(ust_app_ht); @@ -4685,7 +4802,7 @@ void ust_app_clean_list(void) /* * Init UST app hash table. */ -int ust_app_ht_alloc(void) +int ust_app_ht_alloc() { ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); if (!ust_app_ht) { @@ -4705,24 +4822,22 @@ int ust_app_ht_alloc(void) /* * For a specific UST session, disable the channel for all registered apps. 
*/ -int ust_app_disable_channel_glb(struct ltt_ust_session *usess, - struct ltt_ust_channel *uchan) +int ust_app_disable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan) { int ret = 0; - struct lttng_ht_iter iter; struct lttng_ht_node_str *ua_chan_node; - struct ust_app *app; struct ust_app_session *ua_sess; struct ust_app_channel *ua_chan; LTTNG_ASSERT(usess->active); DBG2("UST app disabling channel %s from global domain for session id %" PRIu64, - uchan->name, usess->id); + uchan->name, + usess->id); - rcu_read_lock(); - - /* For every registered applications */ - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { struct lttng_ht_iter uiter; if (!app->compatible) { /* @@ -4731,52 +4846,49 @@ int ust_app_disable_channel_glb(struct ltt_ust_session *usess, */ continue; } - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { continue; } /* Get channel */ - lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter); - ua_chan_node = lttng_ht_iter_get_node_str(&uiter); + lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter); + ua_chan_node = lttng_ht_iter_get_node(&uiter); /* If the session if found for the app, the channel must be there */ LTTNG_ASSERT(ua_chan_node); - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node); + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); /* The channel must not be already disabled */ - LTTNG_ASSERT(ua_chan->enabled == 1); + LTTNG_ASSERT(ua_chan->enabled); /* Disable channel onto application */ - ret = disable_ust_app_channel(ua_sess, ua_chan, app); + ret = disable_ust_app_channel(ua_sess->lock(), ua_chan, app); if (ret < 0) { /* XXX: We might want to report this error at some point... */ continue; } } - rcu_read_unlock(); return ret; } /* * For a specific UST session, enable the channel for all registered apps. */ -int ust_app_enable_channel_glb(struct ltt_ust_session *usess, - struct ltt_ust_channel *uchan) +int ust_app_enable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan) { int ret = 0; - struct lttng_ht_iter iter; - struct ust_app *app; struct ust_app_session *ua_sess; LTTNG_ASSERT(usess->active); DBG2("UST app enabling channel %s to global domain for session id %" PRIu64, - uchan->name, usess->id); - - rcu_read_lock(); + uchan->name, + usess->id); /* For every registered applications */ - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { if (!app->compatible) { /* * TODO: In time, we should notice the caller of this error by @@ -4784,20 +4896,19 @@ int ust_app_enable_channel_glb(struct ltt_ust_session *usess, */ continue; } - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { continue; } /* Enable channel onto application */ - ret = enable_ust_app_channel(ua_sess, uchan, app); + ret = enable_ust_app_channel(ua_sess->lock(), uchan, app); if (ret < 0) { /* XXX: We might want to report this error at some point... */ continue; } } - rcu_read_unlock(); return ret; } @@ -4805,110 +4916,108 @@ int ust_app_enable_channel_glb(struct ltt_ust_session *usess, * Disable an event in a channel and for a specific session. 
*/ int ust_app_disable_event_glb(struct ltt_ust_session *usess, - struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent) + struct ltt_ust_channel *uchan, + struct ltt_ust_event *uevent) { int ret = 0; - struct lttng_ht_iter iter, uiter; + struct lttng_ht_iter uiter; struct lttng_ht_node_str *ua_chan_node; - struct ust_app *app; struct ust_app_session *ua_sess; struct ust_app_channel *ua_chan; struct ust_app_event *ua_event; LTTNG_ASSERT(usess->active); DBG("UST app disabling event %s for all apps in channel " - "%s for session id %" PRIu64, - uevent->attr.name, uchan->name, usess->id); - - rcu_read_lock(); - - /* For all registered applications */ - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + "%s for session id %" PRIu64, + uevent->attr.name, + uchan->name, + usess->id); + + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { if (!app->compatible) { - /* - * TODO: In time, we should notice the caller of this error by - * telling him that this is a version error. - */ continue; } - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { /* Next app */ continue; } /* Lookup channel in the ust app session */ - lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter); - ua_chan_node = lttng_ht_iter_get_node_str(&uiter); - if (ua_chan_node == NULL) { + lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter); + ua_chan_node = lttng_ht_iter_get_node(&uiter); + if (ua_chan_node == nullptr) { DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d." - "Skipping", uchan->name, usess->id, app->pid); + "Skipping", + uchan->name, + usess->id, + app->pid); continue; } - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node); + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); - ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name, - uevent->filter, uevent->attr.loglevel, - uevent->exclusion); - if (ua_event == NULL) { + ua_event = find_ust_app_event( + ua_chan->events, + uevent->attr.name, + uevent->filter, + (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type, + uevent->attr.loglevel, + uevent->exclusion); + if (ua_event == nullptr) { DBG2("Event %s not found in channel %s for app pid %d." - "Skipping", uevent->attr.name, uchan->name, app->pid); + "Skipping", + uevent->attr.name, + uchan->name, + app->pid); continue; } ret = disable_ust_app_event(ua_event, app); if (ret < 0) { - /* XXX: Report error someday... */ continue; } } - rcu_read_unlock(); return ret; } /* The ua_sess lock must be held by the caller. 
*/ -static -int ust_app_channel_create(struct ltt_ust_session *usess, - struct ust_app_session *ua_sess, - struct ltt_ust_channel *uchan, struct ust_app *app, - struct ust_app_channel **_ua_chan) +static int ust_app_channel_create(struct ltt_ust_session *usess, + const ust_app_session::locked_weak_ref& ua_sess, + struct ltt_ust_channel *uchan, + struct ust_app *app, + struct ust_app_channel **_ua_chan) { int ret = 0; - struct ust_app_channel *ua_chan = NULL; - - LTTNG_ASSERT(ua_sess); - ASSERT_LOCKED(ua_sess->lock); + struct ust_app_channel *ua_chan = nullptr; - if (!strncmp(uchan->name, DEFAULT_METADATA_NAME, - sizeof(uchan->name))) { - copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, - &uchan->attr); + if (!strncmp(uchan->name, DEFAULT_METADATA_NAME, sizeof(uchan->name))) { + copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr); ret = 0; } else { - struct ltt_ust_context *uctx = NULL; - /* * Create channel onto application and synchronize its * configuration. */ - ret = ust_app_channel_allocate(ua_sess, uchan, - LTTNG_UST_ABI_CHAN_PER_CPU, usess, - &ua_chan); + ret = ust_app_channel_allocate( + ua_sess, uchan, LTTNG_UST_ABI_CHAN_PER_CPU, usess, &ua_chan); if (ret < 0) { goto error; } - ret = ust_app_channel_send(app, usess, - ua_sess, ua_chan); + ret = ust_app_channel_send(app, usess, ua_sess, ua_chan); if (ret) { goto error; } /* Add contexts. */ - cds_list_for_each_entry(uctx, &uchan->ctx_list, list) { - ret = create_ust_app_channel_context(ua_chan, - &uctx->ctx, app); + for (auto *uctx : + lttng::urcu::list_iteration_adapter( + uchan->ctx_list)) { + ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app); if (ret) { goto error; } @@ -4924,7 +5033,7 @@ error: * or a timeout on it. We can't inform the caller that for a * specific app, the session failed so lets continue here. */ - ret = 0; /* Not an error. */ + ret = 0; /* Not an error. */ break; case -ENOMEM: default: @@ -4947,19 +5056,20 @@ error: * Enable event for a specific session and channel on the tracer. */ int ust_app_enable_event_glb(struct ltt_ust_session *usess, - struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent) + struct ltt_ust_channel *uchan, + struct ltt_ust_event *uevent) { int ret = 0; - struct lttng_ht_iter iter, uiter; + struct lttng_ht_iter uiter; struct lttng_ht_node_str *ua_chan_node; - struct ust_app *app; struct ust_app_session *ua_sess; struct ust_app_channel *ua_chan; struct ust_app_event *ua_event; LTTNG_ASSERT(usess->active); DBG("UST app enabling event %s for all apps for session id %" PRIu64, - uevent->attr.name, usess->id); + uevent->attr.name, + usess->id); /* * NOTE: At this point, this function is called only if the session and @@ -4967,10 +5077,10 @@ int ust_app_enable_event_glb(struct ltt_ust_session *usess, * tracer also. */ - rcu_read_lock(); - - /* For all registered applications */ - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { if (!app->compatible) { /* * TODO: In time, we should notice the caller of this error by @@ -4978,54 +5088,53 @@ int ust_app_enable_event_glb(struct ltt_ust_session *usess, */ continue; } - ua_sess = lookup_session_by_app(usess, app); + ua_sess = ust_app_lookup_app_session(usess, app); if (!ua_sess) { /* The application has problem or is probably dead. 
*/ continue; } - pthread_mutex_lock(&ua_sess->lock); - + auto locked_ua_sess = ua_sess->lock(); if (ua_sess->deleted) { - pthread_mutex_unlock(&ua_sess->lock); continue; } /* Lookup channel in the ust app session */ - lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter); - ua_chan_node = lttng_ht_iter_get_node_str(&uiter); + lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter); + ua_chan_node = lttng_ht_iter_get_node(&uiter); /* * It is possible that the channel cannot be found is * the channel/event creation occurs concurrently with * an application exit. */ if (!ua_chan_node) { - pthread_mutex_unlock(&ua_sess->lock); continue; } - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node); + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); /* Get event node */ - ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name, - uevent->filter, uevent->attr.loglevel, uevent->exclusion); - if (ua_event == NULL) { + ua_event = find_ust_app_event( + ua_chan->events, + uevent->attr.name, + uevent->filter, + (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type, + uevent->attr.loglevel, + uevent->exclusion); + if (ua_event == nullptr) { DBG3("UST app enable event %s not found for app PID %d." - "Skipping app", uevent->attr.name, app->pid); - goto next_app; + "Skipping app", + uevent->attr.name, + app->pid); + continue; } ret = enable_ust_app_event(ua_event, app); if (ret < 0) { - pthread_mutex_unlock(&ua_sess->lock); goto error; } - next_app: - pthread_mutex_unlock(&ua_sess->lock); } - error: - rcu_read_unlock(); return ret; } @@ -5034,23 +5143,24 @@ error: * all registered apps. */ int ust_app_create_event_glb(struct ltt_ust_session *usess, - struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent) + struct ltt_ust_channel *uchan, + struct ltt_ust_event *uevent) { int ret = 0; - struct lttng_ht_iter iter, uiter; + struct lttng_ht_iter uiter; struct lttng_ht_node_str *ua_chan_node; - struct ust_app *app; struct ust_app_session *ua_sess; struct ust_app_channel *ua_chan; LTTNG_ASSERT(usess->active); DBG("UST app creating event %s for all apps for session id %" PRIu64, - uevent->attr.name, usess->id); + uevent->attr.name, + usess->id); - rcu_read_lock(); - - /* For all registered applications */ - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { if (!app->compatible) { /* * TODO: In time, we should notice the caller of this error by @@ -5058,41 +5168,41 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess, */ continue; } - ua_sess = lookup_session_by_app(usess, app); + + ua_sess = ust_app_lookup_app_session(usess, app); if (!ua_sess) { /* The application has problem or is probably dead. 
*/ continue; } - pthread_mutex_lock(&ua_sess->lock); + auto locked_ua_sess = ua_sess->lock(); - if (ua_sess->deleted) { - pthread_mutex_unlock(&ua_sess->lock); + if (locked_ua_sess->deleted) { continue; } /* Lookup channel in the ust app session */ - lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter); - ua_chan_node = lttng_ht_iter_get_node_str(&uiter); + lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter); + ua_chan_node = lttng_ht_iter_get_node(&uiter); /* If the channel is not found, there is a code flow error */ LTTNG_ASSERT(ua_chan_node); - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node); + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); ret = create_ust_app_event(ua_chan, uevent, app); - pthread_mutex_unlock(&ua_sess->lock); if (ret < 0) { if (ret != -LTTNG_UST_ERR_EXIST) { /* Possible value at this point: -ENOMEM. If so, we stop! */ break; } + DBG2("UST app event %s already exist on app PID %d", - uevent->attr.name, app->pid); + uevent->attr.name, + app->pid); continue; } } - rcu_read_unlock(); return ret; } @@ -5102,46 +5212,37 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess, * Called with UST app session lock held. * */ -static -int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app) +static int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app) { int ret = 0; struct ust_app_session *ua_sess; DBG("Starting tracing for ust app pid %d", app->pid); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; + const auto update_health_code_on_exit = + lttng::make_scope_exit([]() noexcept { health_code_update(); }); if (!app->compatible) { - goto end; + return 0; } - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { /* The session is in teardown process. Ignore and continue. */ - goto end; + return 0; } - pthread_mutex_lock(&ua_sess->lock); - - if (ua_sess->deleted) { - pthread_mutex_unlock(&ua_sess->lock); - goto end; - } + auto locked_ua_sess = ua_sess->lock(); - if (ua_sess->enabled) { - pthread_mutex_unlock(&ua_sess->lock); - goto end; + if (locked_ua_sess->deleted) { + return 0; } - /* Upon restart, we skip the setup, already done */ - if (ua_sess->started) { - goto skip_setup; + if (locked_ua_sess->enabled) { + return 0; } - health_code_update(); - -skip_setup: /* This starts the UST tracing */ pthread_mutex_lock(&app->sock_lock); ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle); @@ -5149,27 +5250,28 @@ skip_setup: if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); - pthread_mutex_unlock(&ua_sess->lock); - goto end; + app->pid, + app->sock); + return 0; } else if (ret == -EAGAIN) { WARN("UST app start session failed. 
Communication time out: pid = %d, sock = %d", - app->pid, app->sock); - pthread_mutex_unlock(&ua_sess->lock); - goto end; + app->pid, + app->sock); + return 0; } else { ERR("UST app start session failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } - goto error_unlock; + + return -1; } /* Indicate that the session has been started once */ - ua_sess->started = 1; - ua_sess->enabled = 1; - - pthread_mutex_unlock(&ua_sess->lock); + ua_sess->started = true; + ua_sess->enabled = true; health_code_update(); @@ -5180,56 +5282,50 @@ skip_setup: if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app wait quiescent failed with ret %d: pid %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } -end: - rcu_read_unlock(); - health_code_update(); return 0; - -error_unlock: - pthread_mutex_unlock(&ua_sess->lock); - rcu_read_unlock(); - health_code_update(); - return -1; } /* * Stop tracing for a specific UST session and app. */ -static -int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app) +static int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app) { int ret = 0; struct ust_app_session *ua_sess; - ust_registry_session *registry; DBG("Stopping tracing for ust app pid %d", app->pid); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; + const auto update_health_code_on_exit = + lttng::make_scope_exit([]() noexcept { health_code_update(); }); if (!app->compatible) { - goto end_no_session; + return 0; } - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { - goto end_no_session; + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { + return 0; } - pthread_mutex_lock(&ua_sess->lock); + auto locked_ua_sess = ua_sess->lock(); if (ua_sess->deleted) { - pthread_mutex_unlock(&ua_sess->lock); - goto end_no_session; + return 0; } /* @@ -5239,7 +5335,7 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app) * indicate that this is a stop error. */ if (!ua_sess->started) { - goto error_rcu_unlock; + return -1; } health_code_update(); @@ -5251,22 +5347,27 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app) if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d", - app->pid, app->sock); - goto end_unlock; + app->pid, + app->sock); + return 0; } else if (ret == -EAGAIN) { WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); - goto end_unlock; + app->pid, + app->sock); + return 0; } else { ERR("UST app stop session failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } - goto error_rcu_unlock; + + return -1; } health_code_update(); - ua_sess->enabled = 0; + ua_sess->enabled = false; /* Quiescent wait after stopping trace */ pthread_mutex_lock(&app->sock_lock); @@ -5275,74 +5376,69 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app) if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app wait quiescent failed. 
Application is dead: pid= %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } health_code_update(); - registry = get_session_registry(ua_sess); + { + auto locked_registry = + get_locked_session_registry(locked_ua_sess->get_identifier()); - /* The UST app session is held registry shall not be null. */ - LTTNG_ASSERT(registry); + /* The UST app session is held registry shall not be null. */ + LTTNG_ASSERT(locked_registry); - /* Push metadata for application before freeing the application. */ - (void) push_metadata(registry, ua_sess->consumer); + /* Push metadata for application before freeing the application. */ + (void) push_metadata(locked_registry, ua_sess->consumer); + } -end_unlock: - pthread_mutex_unlock(&ua_sess->lock); -end_no_session: - rcu_read_unlock(); - health_code_update(); return 0; - -error_rcu_unlock: - pthread_mutex_unlock(&ua_sess->lock); - rcu_read_unlock(); - health_code_update(); - return -1; } -static -int ust_app_flush_app_session(struct ust_app *app, - struct ust_app_session *ua_sess) +static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess) { int ret, retval = 0; - struct lttng_ht_iter iter; - struct ust_app_channel *ua_chan; struct consumer_socket *socket; - DBG("Flushing app session buffers for ust app pid %d", app->pid); + const auto update_health_code_on_exit = + lttng::make_scope_exit([]() noexcept { health_code_update(); }); - rcu_read_lock(); + DBG("Flushing app session buffers for ust app pid %d", app.pid); - if (!app->compatible) { - goto end_not_compatible; + if (!app.compatible) { + return 0; } - pthread_mutex_lock(&ua_sess->lock); - - if (ua_sess->deleted) { - goto end_deleted; + const auto locked_ua_sess = ua_sess.lock(); + if (locked_ua_sess->deleted) { + return 0; } health_code_update(); /* Flushing buffers */ - socket = consumer_find_socket_by_bitness(app->bits_per_long, - ua_sess->consumer); + socket = consumer_find_socket_by_bitness(app.abi.bits_per_long, ua_sess.consumer); /* Flush buffers and push metadata. */ - switch (ua_sess->buffer_type) { + switch (ua_sess.buffer_type) { case LTTNG_BUFFER_PER_PID: - cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan, - node.node) { + { + for (auto *ua_chan : + lttng::urcu::lfht_iteration_adapter( + *ua_sess.channels->ht)) { health_code_update(); ret = consumer_flush_channel(socket, ua_chan->key); if (ret) { @@ -5351,21 +5447,15 @@ int ust_app_flush_app_session(struct ust_app *app, continue; } } + break; + } case LTTNG_BUFFER_PER_UID: default: abort(); break; } - health_code_update(); - -end_deleted: - pthread_mutex_unlock(&ua_sess->lock); - -end_not_compatible: - rcu_read_unlock(); - health_code_update(); return retval; } @@ -5373,39 +5463,38 @@ end_not_compatible: * Flush buffers for all applications for a specific UST session. * Called with UST session lock held. */ -static -int ust_app_flush_session(struct ltt_ust_session *usess) +static int ust_app_flush_session(struct ltt_ust_session *usess) { int ret = 0; DBG("Flushing session buffers for all ust apps"); - rcu_read_lock(); - /* Flush buffers and push metadata. 
*/ switch (usess->buffer_type) { case LTTNG_BUFFER_PER_UID: { - struct buffer_reg_uid *reg; - struct lttng_ht_iter iter; - /* Flush all per UID buffers associated to that session. */ - cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) { - ust_registry_session *ust_session_reg; - struct buffer_reg_channel *buf_reg_chan; + for (auto *reg : + lttng::urcu::list_iteration_adapter( + usess->buffer_reg_uid_list)) { + const lttng::urcu::read_lock_guard read_lock; + lsu::registry_session *ust_session_reg; struct consumer_socket *socket; /* Get consumer socket to use to push the metadata.*/ socket = consumer_find_socket_by_bitness(reg->bits_per_long, - usess->consumer); + usess->consumer); if (!socket) { /* Ignore request if no consumer is found for the session. */ continue; } - cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter, - buf_reg_chan, node.node) { + for (auto *buf_reg_chan : + lttng::urcu::lfht_iteration_adapter( + *reg->registry->channels->ht)) { /* * The following call will print error values so the return * code is of little importance because whatever happens, we @@ -5416,23 +5505,27 @@ int ust_app_flush_session(struct ltt_ust_session *usess) ust_session_reg = reg->registry->reg.ust; /* Push metadata. */ - (void) push_metadata(ust_session_reg, usess->consumer); + auto locked_registry = ust_session_reg->lock(); + (void) push_metadata(locked_registry, usess->consumer); } + break; } case LTTNG_BUFFER_PER_PID: { - struct ust_app_session *ua_sess; - struct lttng_ht_iter iter; - struct ust_app *app; - - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter(*ust_app_ht->ht)) { + auto *ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { continue; } - (void) ust_app_flush_app_session(app, ua_sess); + + (void) ust_app_flush_app_session(*app, *ua_sess); } + break; } default: @@ -5441,53 +5534,48 @@ int ust_app_flush_session(struct ltt_ust_session *usess) break; } - rcu_read_unlock(); health_code_update(); return ret; } -static -int ust_app_clear_quiescent_app_session(struct ust_app *app, - struct ust_app_session *ua_sess) +static int ust_app_clear_quiescent_app_session(struct ust_app *app, struct ust_app_session *ua_sess) { int ret = 0; - struct lttng_ht_iter iter; - struct ust_app_channel *ua_chan; struct consumer_socket *socket; DBG("Clearing stream quiescent state for ust app pid %d", app->pid); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; + const auto update_health_code_on_exit = + lttng::make_scope_exit([]() noexcept { health_code_update(); }); if (!app->compatible) { - goto end_not_compatible; + return 0; } - pthread_mutex_lock(&ua_sess->lock); - - if (ua_sess->deleted) { - goto end_unlock; + const auto locked_ua_sess = ua_sess->lock(); + if (locked_ua_sess->deleted) { + return 0; } health_code_update(); - socket = consumer_find_socket_by_bitness(app->bits_per_long, - ua_sess->consumer); + socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer); if (!socket) { - ERR("Failed to find consumer (%" PRIu32 ") socket", - app->bits_per_long); - ret = -1; - goto end_unlock; + ERR("Failed to find consumer (%" PRIu32 ") socket", app->abi.bits_per_long); + return -1; } /* Clear quiescent state. 
*/ switch (ua_sess->buffer_type) { case LTTNG_BUFFER_PER_PID: - cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, - ua_chan, node.node) { + for (auto *ua_chan : + lttng::urcu::lfht_iteration_adapter( + *ua_sess->channels->ht)) { health_code_update(); - ret = consumer_clear_quiescent_channel(socket, - ua_chan->key); + ret = consumer_clear_quiescent_channel(socket, ua_chan->key); if (ret) { ERR("Error clearing quiescent state for consumer channel"); ret = -1; @@ -5502,14 +5590,6 @@ int ust_app_clear_quiescent_app_session(struct ust_app *app, break; } - health_code_update(); - -end_unlock: - pthread_mutex_unlock(&ua_sess->lock); - -end_not_compatible: - rcu_read_unlock(); - health_code_update(); return ret; } @@ -5518,33 +5598,29 @@ end_not_compatible: * specific UST session. * Called with UST session lock held. */ -static -int ust_app_clear_quiescent_session(struct ltt_ust_session *usess) +static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess) { int ret = 0; DBG("Clearing stream quiescent state for all ust apps"); - rcu_read_lock(); - switch (usess->buffer_type) { case LTTNG_BUFFER_PER_UID: { - struct lttng_ht_iter iter; - struct buffer_reg_uid *reg; - /* * Clear quiescent for all per UID buffers associated to * that session. */ - cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) { + for (auto *reg : + lttng::urcu::list_iteration_adapter( + usess->buffer_reg_uid_list)) { struct consumer_socket *socket; - struct buffer_reg_channel *buf_reg_chan; + const lttng::urcu::read_lock_guard read_lock; /* Get associated consumer socket.*/ - socket = consumer_find_socket_by_bitness( - reg->bits_per_long, usess->consumer); + socket = consumer_find_socket_by_bitness(reg->bits_per_long, + usess->consumer); if (!socket) { /* * Ignore request if no consumer is found for @@ -5553,8 +5629,11 @@ int ust_app_clear_quiescent_session(struct ltt_ust_session *usess) continue; } - cds_lfht_for_each_entry(reg->registry->channels->ht, - &iter.iter, buf_reg_chan, node.node) { + for (auto *buf_reg_chan : + lttng::urcu::lfht_iteration_adapter( + *reg->registry->channels->ht)) { /* * The following call will print error values so * the return code is of little importance @@ -5562,26 +5641,27 @@ int ust_app_clear_quiescent_session(struct ltt_ust_session *usess) * all. */ (void) consumer_clear_quiescent_channel(socket, - buf_reg_chan->consumer_key); + buf_reg_chan->consumer_key); } } + break; } case LTTNG_BUFFER_PER_PID: { - struct ust_app_session *ua_sess; - struct lttng_ht_iter iter; - struct ust_app *app; - - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, - pid_n.node) { - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + /* Iterate on all apps. 
*/ + for (auto *app : + lttng::urcu::lfht_iteration_adapter(*ust_app_ht->ht)) { + auto *ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { continue; } - (void) ust_app_clear_quiescent_app_session(app, - ua_sess); + + (void) ust_app_clear_quiescent_app_session(app, ua_sess); } + break; } default: @@ -5590,7 +5670,6 @@ int ust_app_clear_quiescent_session(struct ltt_ust_session *usess) break; } - rcu_read_unlock(); health_code_update(); return ret; } @@ -5607,19 +5686,19 @@ static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app) DBG("Destroy tracing for ust app pid %d", app->pid); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; if (!app->compatible) { goto end; } __lookup_session_by_app(usess, app, &iter); - node = lttng_ht_iter_get_node_u64(&iter); - if (node == NULL) { + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { /* Session is being or is deleted. */ goto end; } - ua_sess = caa_container_of(node, struct ust_app_session, node); + ua_sess = lttng::utils::container_of(node, &ust_app_session::node); health_code_update(); destroy_app_session(app, ua_sess); @@ -5633,17 +5712,20 @@ static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app) if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } } end: - rcu_read_unlock(); health_code_update(); return 0; } @@ -5653,18 +5735,13 @@ end: */ int ust_app_start_trace_all(struct ltt_ust_session *usess) { - struct lttng_ht_iter iter; - struct ust_app *app; - DBG("Starting all UST traces"); /* * Even though the start trace might fail, flag this session active so * other application coming in are started by default. */ - usess->active = 1; - - rcu_read_lock(); + usess->active = true; /* * In a start-stop-start use-case, we need to clear the quiescent state @@ -5674,12 +5751,13 @@ int ust_app_start_trace_all(struct ltt_ust_session *usess) */ (void) ust_app_clear_quiescent_session(usess); - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { ust_app_global_update(usess, app); } - rcu_read_unlock(); - return 0; } @@ -5690,8 +5768,6 @@ int ust_app_start_trace_all(struct ltt_ust_session *usess) int ust_app_stop_trace_all(struct ltt_ust_session *usess) { int ret = 0; - struct lttng_ht_iter iter; - struct ust_app *app; DBG("Stopping all UST traces"); @@ -5699,11 +5775,12 @@ int ust_app_stop_trace_all(struct ltt_ust_session *usess) * Even though the stop trace might fail, flag this session inactive so * other application coming in are not started by default. */ - usess->active = 0; - - rcu_read_lock(); + usess->active = false; - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. 
*/ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { ret = ust_app_stop_trace(usess, app); if (ret < 0) { /* Continue to next apps even on error */ @@ -5713,8 +5790,6 @@ int ust_app_stop_trace_all(struct ltt_ust_session *usess) (void) ust_app_flush_session(usess); - rcu_read_unlock(); - return 0; } @@ -5723,45 +5798,33 @@ int ust_app_stop_trace_all(struct ltt_ust_session *usess) */ int ust_app_destroy_trace_all(struct ltt_ust_session *usess) { - int ret = 0; - struct lttng_ht_iter iter; - struct ust_app *app; - DBG("Destroy all UST traces"); - rcu_read_lock(); - - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { - ret = destroy_trace(usess, app); - if (ret < 0) { - /* Continue to next apps even on error */ - continue; - } + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { + (void) destroy_trace(usess, app); } - rcu_read_unlock(); - return 0; } /* The ua_sess lock must be held by the caller. */ -static -int find_or_create_ust_app_channel( - struct ltt_ust_session *usess, - struct ust_app_session *ua_sess, - struct ust_app *app, - struct ltt_ust_channel *uchan, - struct ust_app_channel **ua_chan) +static int find_or_create_ust_app_channel(struct ltt_ust_session *usess, + const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app *app, + struct ltt_ust_channel *uchan, + struct ust_app_channel **ua_chan) { int ret = 0; struct lttng_ht_iter iter; struct lttng_ht_node_str *ua_chan_node; lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter); - ua_chan_node = lttng_ht_iter_get_node_str(&iter); + ua_chan_node = lttng_ht_iter_get_node(&iter); if (ua_chan_node) { - *ua_chan = caa_container_of(ua_chan_node, - struct ust_app_channel, node); + *ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); goto end; } @@ -5773,16 +5836,19 @@ end: return ret; } -static -int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan, - struct ltt_ust_event *uevent, - struct ust_app *app) +static int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan, + struct ltt_ust_event *uevent, + struct ust_app *app) { int ret = 0; - struct ust_app_event *ua_event = NULL; - - ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name, - uevent->filter, uevent->attr.loglevel, uevent->exclusion); + struct ust_app_event *ua_event = nullptr; + + ua_event = find_ust_app_event(ua_chan->events, + uevent->attr.name, + uevent->filter, + (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type, + uevent->attr.loglevel, + uevent->exclusion); if (!ua_event) { ret = create_ust_app_event(ua_chan, uevent, app); if (ret < 0) { @@ -5790,9 +5856,8 @@ int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan, } } else { if (ua_event->enabled != uevent->enabled) { - ret = uevent->enabled ? - enable_ust_app_event(ua_event, app) : - disable_ust_app_event(ua_event, app); + ret = uevent->enabled ? enable_ust_app_event(ua_event, app) : + disable_ust_app_event(ua_event, app); } } @@ -5801,15 +5866,12 @@ end: } /* Called with RCU read-side lock held. 
*/ -static -void ust_app_synchronize_event_notifier_rules(struct ust_app *app) +static void ust_app_synchronize_event_notifier_rules(struct ust_app *app) { int ret = 0; enum lttng_error_code ret_code; enum lttng_trigger_status t_status; - struct lttng_ht_iter app_trigger_iter; - struct lttng_triggers *triggers = NULL; - struct ust_app_event_notifier_rule *event_notifier_rule; + struct lttng_triggers *triggers = nullptr; unsigned int count, i; ASSERT_RCU_READ_LOCKED(); @@ -5837,7 +5899,7 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app) /* Get all triggers using uid 0 (root) */ ret_code = notification_thread_command_list_triggers( - the_notification_thread_handle, 0, &triggers); + the_notification_thread_handle, 0, &triggers); if (ret_code != LTTNG_OK) { goto end; } @@ -5850,8 +5912,8 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app) } for (i = 0; i < count; i++) { - struct lttng_condition *condition; - struct lttng_event_rule *event_rule; + const struct lttng_condition *condition; + const struct lttng_event_rule *event_rule; struct lttng_trigger *trigger; const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule; enum lttng_condition_status condition_status; @@ -5861,17 +5923,16 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app) LTTNG_ASSERT(trigger); token = lttng_trigger_get_tracer_token(trigger); - condition = lttng_trigger_get_condition(trigger); + condition = lttng_trigger_get_const_condition(trigger); if (lttng_condition_get_type(condition) != - LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) { + LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) { /* Does not apply */ continue; } condition_status = - lttng_condition_event_rule_matches_borrow_rule_mutable( - condition, &event_rule); + lttng_condition_event_rule_matches_get_rule(condition, &event_rule); LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK); if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) { @@ -5885,7 +5946,7 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app) * explicitly acquiring it here. */ looked_up_event_notifier_rule = find_ust_app_event_notifier_rule( - app->token_to_event_notifier_rule_ht, token); + app->token_to_event_notifier_rule_ht, token); if (!looked_up_event_notifier_rule) { ret = create_ust_app_event_notifier_rule(trigger, app); if (ret < 0) { @@ -5894,11 +5955,12 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app) } } - rcu_read_lock(); /* Remove all unknown event sources from the app. */ - cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht, - &app_trigger_iter.iter, event_notifier_rule, - node.node) { + for (auto *event_notifier_rule : + lttng::urcu::lfht_iteration_adapter( + *app->token_to_event_notifier_rule_ht->ht)) { const uint64_t app_token = event_notifier_rule->token; bool found = false; @@ -5909,13 +5971,11 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app) for (i = 0; i < count; i++) { uint64_t notification_thread_token; const struct lttng_trigger *trigger = - lttng_triggers_get_at_index( - triggers, i); + lttng_triggers_get_at_index(triggers, i); LTTNG_ASSERT(trigger); - notification_thread_token = - lttng_trigger_get_tracer_token(trigger); + notification_thread_token = lttng_trigger_get_tracer_token(trigger); if (notification_thread_token == app_token) { found = true; @@ -5932,19 +5992,15 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app) * This trigger was unregistered, disable it on the tracer's * side. 
*/ - ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, - &app_trigger_iter); + ret = cds_lfht_del(app->token_to_event_notifier_rule_ht->ht, + &event_notifier_rule->node.node); LTTNG_ASSERT(ret == 0); /* Callee logs errors. */ (void) disable_ust_object(app, event_notifier_rule->obj); - - delete_ust_app_event_notifier_rule( - app->sock, event_notifier_rule, app); + delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app); } - rcu_read_unlock(); - end: lttng_triggers_destroy(triggers); return; @@ -5953,25 +6009,19 @@ end: /* * RCU read lock must be held by the caller. */ -static -void ust_app_synchronize_all_channels(struct ltt_ust_session *usess, - struct ust_app_session *ua_sess, - struct ust_app *app) +static void ust_app_synchronize_all_channels(struct ltt_ust_session *usess, + const ust_app_session::locked_weak_ref& ua_sess, + struct ust_app *app) { - int ret = 0; - struct cds_lfht_iter uchan_iter; - struct ltt_ust_channel *uchan; - LTTNG_ASSERT(usess); - LTTNG_ASSERT(ua_sess); LTTNG_ASSERT(app); ASSERT_RCU_READ_LOCKED(); - cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter, - uchan, node.node) { + for (auto *uchan : lttng::urcu::lfht_iteration_adapter( + *usess->domain_global.channels->ht)) { struct ust_app_channel *ua_chan; - struct cds_lfht_iter uevent_iter; - struct ltt_ust_event *uevent; /* * Search for a matching ust_app_channel. If none is found, @@ -5980,8 +6030,7 @@ void ust_app_synchronize_all_channels(struct ltt_ust_session *usess, * allocated (if necessary) and sent to the application, and * all enabled contexts will be added to the channel. */ - ret = find_or_create_ust_app_channel(usess, ua_sess, - app, uchan, &ua_chan); + int ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan, &ua_chan); if (ret) { /* Tracer is probably gone or ENOMEM. */ goto end; @@ -5992,19 +6041,20 @@ void ust_app_synchronize_all_channels(struct ltt_ust_session *usess, continue; } - cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent, - node.node) { - ret = ust_app_channel_synchronize_event(ua_chan, - uevent, app); + for (auto *uevent : + lttng::urcu::lfht_iteration_adapter( + *uchan->events->ht)) { + ret = ust_app_channel_synchronize_event(ua_chan, uevent, app); if (ret) { goto end; } } if (ua_chan->enabled != uchan->enabled) { - ret = uchan->enabled ? - enable_ust_app_channel(ua_sess, uchan, app) : - disable_ust_app_channel(ua_sess, ua_chan, app); + ret = uchan->enabled ? enable_ust_app_channel(ua_sess, uchan, app) : + disable_ust_app_channel(ua_sess, ua_chan, app); if (ret) { goto end; } @@ -6018,12 +6068,10 @@ end: * The caller must ensure that the application is compatible and is tracked * by the process attribute trackers. */ -static -void ust_app_synchronize(struct ltt_ust_session *usess, - struct ust_app *app) +static void ust_app_synchronize(struct ltt_ust_session *usess, struct ust_app *app) { int ret = 0; - struct ust_app_session *ua_sess = NULL; + struct ust_app_session *ua_sess = nullptr; /* * The application's configuration should only be synchronized for @@ -6031,55 +6079,48 @@ void ust_app_synchronize(struct ltt_ust_session *usess, */ LTTNG_ASSERT(usess->active); - ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL); + ret = find_or_create_ust_app_session(usess, app, &ua_sess, nullptr); if (ret < 0) { /* Tracer is probably gone or ENOMEM. 
*/ - if (ua_sess) { - destroy_app_session(app, ua_sess); - } - goto end; + return; } + LTTNG_ASSERT(ua_sess); - pthread_mutex_lock(&ua_sess->lock); - if (ua_sess->deleted) { - goto deleted_session; + const auto locked_ua_sess = ua_sess->lock(); + if (locked_ua_sess->deleted) { + return; } - rcu_read_lock(); + { + const lttng::urcu::read_lock_guard read_lock; - ust_app_synchronize_all_channels(usess, ua_sess, app); + ust_app_synchronize_all_channels(usess, locked_ua_sess, app); - /* - * Create the metadata for the application. This returns gracefully if a - * metadata was already set for the session. - * - * The metadata channel must be created after the data channels as the - * consumer daemon assumes this ordering. When interacting with a relay - * daemon, the consumer will use this assumption to send the - * "STREAMS_SENT" message to the relay daemon. - */ - ret = create_ust_app_metadata(ua_sess, app, usess->consumer); - if (ret < 0) { - ERR("Metadata creation failed for app sock %d for session id %" PRIu64, - app->sock, usess->id); + /* + * Create the metadata for the application. This returns gracefully if a + * metadata was already set for the session. + * + * The metadata channel must be created after the data channels as the + * consumer daemon assumes this ordering. When interacting with a relay + * daemon, the consumer will use this assumption to send the + * "STREAMS_SENT" message to the relay daemon. + */ + ret = create_ust_app_metadata(locked_ua_sess, app, usess->consumer); + if (ret < 0) { + ERR("Metadata creation failed for app sock %d for session id %" PRIu64, + app->sock, + usess->id); + } } - - rcu_read_unlock(); - -deleted_session: - pthread_mutex_unlock(&ua_sess->lock); -end: - return; } -static -void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app) +static void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app) { struct ust_app_session *ua_sess; - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { return; } destroy_app_session(app, ua_sess); @@ -6097,20 +6138,14 @@ void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app) LTTNG_ASSERT(usess->active); ASSERT_RCU_READ_LOCKED(); - DBG2("UST app global update for app sock %d for session id %" PRIu64, - app->sock, usess->id); + DBG2("UST app global update for app sock %d for session id %" PRIu64, app->sock, usess->id); if (!app->compatible) { return; } - if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID, - usess, app->pid) && - trace_ust_id_tracker_lookup( - LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID, - usess, app->uid) && - trace_ust_id_tracker_lookup( - LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID, - usess, app->gid)) { + if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID, usess, app->pid) && + trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID, usess, app->uid) && + trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID, usess, app->gid)) { /* * Synchronize the application's internal tracing configuration * and start tracing. 
@@ -6133,15 +6168,17 @@ void ust_app_global_update_event_notifier_rules(struct ust_app *app) ASSERT_RCU_READ_LOCKED(); DBG2("UST application global event notifier rules update: app = '%s', pid = %d", - app->name, app->pid); + app->name, + app->pid); if (!app->compatible || !ust_app_supports_notifiers(app)) { return; } - if (app->event_notifier_group.object == NULL) { + if (app->event_notifier_group.object == nullptr) { WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d", - app->name, app->pid); + app->name, + app->pid); return; } @@ -6153,46 +6190,43 @@ void ust_app_global_update_event_notifier_rules(struct ust_app *app) */ void ust_app_global_update_all(struct ltt_ust_session *usess) { - struct lttng_ht_iter iter; - struct ust_app *app; - - rcu_read_lock(); - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { ust_app_global_update(usess, app); } - rcu_read_unlock(); } -void ust_app_global_update_all_event_notifier_rules(void) +void ust_app_global_update_all_event_notifier_rules() { - struct lttng_ht_iter iter; - struct ust_app *app; - - rcu_read_lock(); - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { ust_app_global_update_event_notifier_rules(app); } - - rcu_read_unlock(); } /* * Add context to a specific channel for global UST domain. */ int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess, - struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx) + struct ltt_ust_channel *uchan, + struct ltt_ust_context *uctx) { int ret = 0; struct lttng_ht_node_str *ua_chan_node; - struct lttng_ht_iter iter, uiter; - struct ust_app_channel *ua_chan = NULL; + struct lttng_ht_iter uiter; + struct ust_app_channel *ua_chan = nullptr; struct ust_app_session *ua_sess; - struct ust_app *app; LTTNG_ASSERT(usess->active); - rcu_read_lock(); - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. 
*/ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { if (!app->compatible) { /* * TODO: In time, we should notice the caller of this error by @@ -6200,35 +6234,29 @@ int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess, */ continue; } - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { continue; } - pthread_mutex_lock(&ua_sess->lock); - - if (ua_sess->deleted) { - pthread_mutex_unlock(&ua_sess->lock); + const auto locked_ua_sess = ua_sess->lock(); + if (locked_ua_sess->deleted) { continue; } /* Lookup channel in the ust app session */ - lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter); - ua_chan_node = lttng_ht_iter_get_node_str(&uiter); - if (ua_chan_node == NULL) { - goto next_app; + lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter); + ua_chan_node = lttng_ht_iter_get_node(&uiter); + if (ua_chan_node == nullptr) { + continue; } - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, - node); + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app); if (ret < 0) { - goto next_app; + continue; } - next_app: - pthread_mutex_unlock(&ua_sess->lock); } - rcu_read_unlock(); return ret; } @@ -6244,16 +6272,22 @@ int ust_app_recv_registration(int sock, struct ust_register_msg *msg) LTTNG_ASSERT(msg); - ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor, - &pid, &ppid, &uid, &gid, - &msg->bits_per_long, - &msg->uint8_t_alignment, - &msg->uint16_t_alignment, - &msg->uint32_t_alignment, - &msg->uint64_t_alignment, - &msg->long_alignment, - &msg->byte_order, - msg->name); + ret = lttng_ust_ctl_recv_reg_msg(sock, + &msg->type, + &msg->major, + &msg->minor, + &pid, + &ppid, + &uid, + &gid, + &msg->bits_per_long, + &msg->uint8_t_alignment, + &msg->uint16_t_alignment, + &msg->uint32_t_alignment, + &msg->uint64_t_alignment, + &msg->long_alignment, + &msg->byte_order, + msg->name); if (ret < 0) { switch (-ret) { case EPIPE: @@ -6263,8 +6297,10 @@ int ust_app_recv_registration(int sock, struct ust_register_msg *msg) break; case LTTNG_UST_ERR_UNSUP_MAJOR: ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d", - msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION, - LTTNG_UST_ABI_MINOR_VERSION); + msg->major, + msg->minor, + LTTNG_UST_ABI_MAJOR_VERSION, + LTTNG_UST_ABI_MINOR_VERSION); break; default: ERR("UST app recv reg message failed with ret %d", ret); @@ -6285,25 +6321,24 @@ error: * Return a ust app session object using the application object and the * session object descriptor has a key. If not found, NULL is returned. * A RCU read side lock MUST be acquired when calling this function. 
-*/ -static struct ust_app_session *find_session_by_objd(struct ust_app *app, - int objd) + */ +static struct ust_app_session *find_session_by_objd(struct ust_app *app, int objd) { struct lttng_ht_node_ulong *node; struct lttng_ht_iter iter; - struct ust_app_session *ua_sess = NULL; + struct ust_app_session *ua_sess = nullptr; LTTNG_ASSERT(app); ASSERT_RCU_READ_LOCKED(); - lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter); - node = lttng_ht_iter_get_node_ulong(&iter); - if (node == NULL) { + lttng_ht_lookup(app->ust_sessions_objd, (void *) ((unsigned long) objd), &iter); + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { DBG2("UST app session find by objd %d not found", objd); goto error; } - ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node); + ua_sess = lttng::utils::container_of(node, &ust_app_session::ust_objd_node); error: return ua_sess; @@ -6314,178 +6349,83 @@ error: * object descriptor has a key. If not found, NULL is returned. A RCU read side * lock MUST be acquired before calling this function. */ -static struct ust_app_channel *find_channel_by_objd(struct ust_app *app, - int objd) +static struct ust_app_channel *find_channel_by_objd(struct ust_app *app, int objd) { struct lttng_ht_node_ulong *node; struct lttng_ht_iter iter; - struct ust_app_channel *ua_chan = NULL; + struct ust_app_channel *ua_chan = nullptr; LTTNG_ASSERT(app); ASSERT_RCU_READ_LOCKED(); - lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter); - node = lttng_ht_iter_get_node_ulong(&iter); - if (node == NULL) { + lttng_ht_lookup(app->ust_objd, (void *) ((unsigned long) objd), &iter); + node = lttng_ht_iter_get_node(&iter); + if (node == nullptr) { DBG2("UST app channel find by objd %d not found", objd); goto error; } - ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node); + ua_chan = lttng::utils::container_of(node, &ust_app_channel::ust_objd_node); error: return ua_chan; } /* - * Fixup legacy context fields for comparison: - * - legacy array becomes array_nestable, - * - legacy struct becomes struct_nestable, - * - legacy variant becomes variant_nestable, - * legacy sequences are not emitted in LTTng-UST contexts. + * Reply to a register channel notification from an application on the notify + * socket. The channel metadata is also created. + * + * The session UST registry lock is acquired in this function. + * + * On success 0 is returned else a negative value. */ -static int ust_app_fixup_legacy_context_fields(size_t *_nr_fields, - struct lttng_ust_ctl_field **_fields) -{ - struct lttng_ust_ctl_field *fields = *_fields, *new_fields = NULL; - size_t nr_fields = *_nr_fields, new_nr_fields = 0, i, j; - bool found = false; - int ret = 0; - - for (i = 0; i < nr_fields; i++) { - const struct lttng_ust_ctl_field *field = &fields[i]; - - switch (field->type.atype) { - case lttng_ust_ctl_atype_sequence: - ERR("Unexpected legacy sequence context."); - ret = -EINVAL; - goto end; - case lttng_ust_ctl_atype_array: - switch (field->type.u.legacy.array.elem_type.atype) { - case lttng_ust_ctl_atype_integer: - break; - default: - ERR("Unexpected legacy array element type in context."); - ret = -EINVAL; - goto end; - } - found = true; - /* One field for array_nested, one field for elem type. 
*/ - new_nr_fields += 2; - break; - - case lttng_ust_ctl_atype_struct: /* Fallthrough */ - case lttng_ust_ctl_atype_variant: - found = true; - new_nr_fields++; - break; - default: - new_nr_fields++; - break; - } - } - if (!found) { - goto end; - } - - new_fields = calloc(new_nr_fields); - if (!new_fields) { - ret = -ENOMEM; - goto end; - } - - for (i = 0, j = 0; i < nr_fields; i++, j++) { - const struct lttng_ust_ctl_field *field = &fields[i]; - struct lttng_ust_ctl_field *new_field = &new_fields[j]; - - switch (field->type.atype) { - case lttng_ust_ctl_atype_array: - /* One field for array_nested, one field for elem type. */ - strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1); - new_field->type.atype = lttng_ust_ctl_atype_array_nestable; - new_field->type.u.array_nestable.length = field->type.u.legacy.array.length; - new_field->type.u.array_nestable.alignment = 0; - new_field = &new_fields[++j]; /* elem type */ - new_field->type.atype = field->type.u.legacy.array.elem_type.atype; - assert(new_field->type.atype == lttng_ust_ctl_atype_integer); - new_field->type.u.integer = field->type.u.legacy.array.elem_type.u.basic.integer; - break; - case lttng_ust_ctl_atype_struct: - strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1); - new_field->type.atype = lttng_ust_ctl_atype_struct_nestable; - new_field->type.u.struct_nestable.nr_fields = field->type.u.legacy._struct.nr_fields; - new_field->type.u.struct_nestable.alignment = 0; - break; - case lttng_ust_ctl_atype_variant: - strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1); - new_field->type.atype = lttng_ust_ctl_atype_variant_nestable; - new_field->type.u.variant_nestable.nr_choices = field->type.u.legacy.variant.nr_choices; - strncpy(new_field->type.u.variant_nestable.tag_name, - field->type.u.legacy.variant.tag_name, - LTTNG_UST_ABI_SYM_NAME_LEN - 1); - new_field->type.u.variant_nestable.alignment = 0; - break; - default: - *new_field = *field; - break; - } - } - free(fields); - *_fields = new_fields; - *_nr_fields = new_nr_fields; -end: - return ret; -} - -/* - * Reply to a register channel notification from an application on the notify - * socket. The channel metadata is also created. - * - * The session UST registry lock is acquired in this function. - * - * On success 0 is returned else a negative value. - */ -static int reply_ust_register_channel(int sock, int cobjd, - size_t nr_fields, struct lttng_ust_ctl_field *fields) +static int handle_app_register_channel_notification(int sock, + int cobjd, + struct lttng_ust_ctl_field *raw_context_fields, + size_t context_field_count) { int ret, ret_code = 0; uint32_t chan_id; uint64_t chan_reg_key; - enum lttng_ust_ctl_channel_header type = LTTNG_UST_CTL_CHANNEL_HEADER_UNKNOWN; struct ust_app *app; struct ust_app_channel *ua_chan; struct ust_app_session *ua_sess; - ust_registry_session *registry; - struct ust_registry_channel *ust_reg_chan; + auto ust_ctl_context_fields = + lttng::make_unique_wrapper( + raw_context_fields); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock_guard; /* Lookup application. If not found, there is a code flow error. */ app = find_app_by_notify_sock(sock); if (!app) { - DBG("Application socket %d is being torn down. Abort event notify", - sock); - ret = -1; - goto error_rcu_unlock; + DBG("Application socket %d is being torn down. Abort event notify", sock); + return -1; } /* Lookup channel by UST object descriptor. 
 */
 	ua_chan = find_channel_by_objd(app, cobjd);
 	if (!ua_chan) {
 		DBG("Application channel is being torn down. Abort event notify");
-		ret = 0;
-		goto error_rcu_unlock;
+		return 0;
 	}

 	LTTNG_ASSERT(ua_chan->session);
 	ua_sess = ua_chan->session;

 	/* Get right session registry depending on the session buffer type. */
-	registry = get_session_registry(ua_sess);
-	if (!registry) {
+
+	/*
+	 * HACK: ua_sess is already locked by the client thread. This is called
+	 * in the context of the handling of a notification from the application.
+	 */
+	auto locked_ua_sess = ust_app_session::make_locked_weak_ref(*ua_sess);
+	auto locked_registry_session =
+		get_locked_session_registry(locked_ua_sess->get_identifier());
+	locked_ua_sess.release();
+	if (!locked_registry_session) {
 		DBG("Application session is being torn down. Abort event notify");
-		ret = 0;
-		goto error_rcu_unlock;
+		return 0;
 	};

 	/* Depending on the buffer type, a different channel key is used. */
@@ -6495,86 +6435,106 @@ static int reply_ust_register_channel(int sock, int cobjd,
 		chan_reg_key = ua_chan->key;
 	}

-	pthread_mutex_lock(&registry->_lock);
-
-	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
-	LTTNG_ASSERT(ust_reg_chan);
+	auto& ust_reg_chan = locked_registry_session->channel(chan_reg_key);

 	/* Channel id is set during the object creation. */
-	chan_id = ust_reg_chan->chan_id;
+	chan_id = ust_reg_chan.id;

-	ret = ust_app_fixup_legacy_context_fields(&nr_fields, &fields);
-	if (ret < 0) {
-		ERR("Registering application channel due to legacy context fields fixup error: pid = %d, sock = %d",
-				app->pid, app->sock);
-		ret_code = -EINVAL;
-		goto reply;
-	}
-	if (!ust_reg_chan->register_done) {
-		/*
-		 * TODO: eventually use the registry event count for
-		 * this channel to better guess header type for per-pid
-		 * buffers.
-		 */
-		type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
-		ust_reg_chan->nr_ctx_fields = nr_fields;
-		ust_reg_chan->ctx_fields = fields;
-		fields = NULL;
-		ust_reg_chan->header_type = type;
-	} else {
-		/* Get current already assigned values. */
-		type = ust_reg_chan->header_type;
-		/*
-		 * Validate that the context fields match between
-		 * registry and newcoming application.
-		 */
-		if (!match_lttng_ust_ctl_field_array(ust_reg_chan->ctx_fields,
-				ust_reg_chan->nr_ctx_fields,
-				fields, nr_fields)) {
-			ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
-					app->pid, app->sock);
-			ret_code = -EINVAL;
-			goto reply;
-		}
-	}
+	/*
+	 * The application returns the typing information of the channel's
+	 * context fields. In per-PID buffering mode, this is the first and only
+	 * time we get this information. It is our chance to finalize the
+	 * initialization of the channel and serialize its layout's description
+	 * to the trace's metadata.
+	 *
+	 * However, in per-UID buffering mode, every application will provide
+	 * this information (redundantly). The first time will allow us to
+	 * complete the initialization. The following times, we simply validate
+	 * that all apps provide the same typing for the context fields as a
+	 * sanity check.
+	 */
+	try {
+		auto app_context_fields = lsu::create_trace_fields_from_ust_ctl_fields(
+			*locked_registry_session,
+			ust_ctl_context_fields.get(),
+			context_field_count,
+			lst::field_location::root::EVENT_RECORD_COMMON_CONTEXT,
+			lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS);
+
+		if (!ust_reg_chan.is_registered()) {
+			lst::type::cuptr event_context = app_context_fields.size() ?
+ lttng::make_unique( + 0, std::move(app_context_fields)) : + nullptr; + + ust_reg_chan.event_context(std::move(event_context)); + } else { + /* + * Validate that the context fields match between + * registry and newcoming application. + */ + bool context_fields_match; + const auto *previous_event_context = ust_reg_chan.event_context(); + + if (!previous_event_context) { + context_fields_match = app_context_fields.size() == 0; + } else { + const lst::structure_type app_event_context_struct( + 0, std::move(app_context_fields)); + + context_fields_match = *previous_event_context == + app_event_context_struct; + } - /* Append to metadata */ - if (!ust_reg_chan->metadata_dumped) { - ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan); - if (ret_code) { - ERR("Error appending channel metadata (errno = %d)", ret_code); - goto reply; + if (!context_fields_match) { + ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d", + app->pid, + app->sock); + ret_code = -EINVAL; + goto reply; + } } + } catch (const std::exception& ex) { + ERR("Failed to handle application context: %s", ex.what()); + ret_code = -EINVAL; + goto reply; } reply: - DBG3("UST app replying to register channel key %" PRIu64 - " with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type, - ret_code); - - ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code); + DBG3("UST app replying to register channel key %" PRIu64 " with id %u, ret = %d", + chan_reg_key, + chan_id, + ret_code); + + ret = lttng_ust_ctl_reply_register_channel( + sock, + chan_id, + ust_reg_chan.header_type_ == lst::stream_class::header_type::COMPACT ? + LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT : + LTTNG_UST_CTL_CHANNEL_HEADER_LARGE, + ret_code); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } - goto error; + + return ret; } - /* This channel registry registration is completed. */ - ust_reg_chan->register_done = 1; + /* This channel registry's registration is completed. */ + ust_reg_chan.set_as_registered(); -error: - pthread_mutex_unlock(®istry->_lock); -error_rcu_unlock: - rcu_read_unlock(); - free(fields); return ret; } @@ -6587,67 +6547,96 @@ error_rcu_unlock: * * On success 0 is returned else a negative value. 
*/ -static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name, - char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields, - int loglevel_value, char *model_emf_uri) +static int add_event_ust_registry(int sock, + int sobjd, + int cobjd, + const char *name, + char *raw_signature, + size_t nr_fields, + struct lttng_ust_ctl_field *raw_fields, + int loglevel_value, + char *raw_model_emf_uri) { int ret, ret_code; - uint32_t event_id = 0; + lsu::event_id event_id = 0; uint64_t chan_reg_key; struct ust_app *app; struct ust_app_channel *ua_chan; struct ust_app_session *ua_sess; - ust_registry_session *registry; - - rcu_read_lock(); + const lttng::urcu::read_lock_guard rcu_lock; + auto signature = lttng::make_unique_wrapper(raw_signature); + auto fields = + lttng::make_unique_wrapper(raw_fields); + auto model_emf_uri = + lttng::make_unique_wrapper(raw_model_emf_uri); /* Lookup application. If not found, there is a code flow error. */ app = find_app_by_notify_sock(sock); if (!app) { - DBG("Application socket %d is being torn down. Abort event notify", - sock); - ret = -1; - goto error_rcu_unlock; + DBG("Application socket %d is being torn down. Abort event notify", sock); + return -1; } /* Lookup channel by UST object descriptor. */ ua_chan = find_channel_by_objd(app, cobjd); if (!ua_chan) { DBG("Application channel is being torn down. Abort event notify"); - ret = 0; - goto error_rcu_unlock; + return 0; } LTTNG_ASSERT(ua_chan->session); ua_sess = ua_chan->session; - registry = get_session_registry(ua_sess); - if (!registry) { - DBG("Application session is being torn down. Abort event notify"); - ret = 0; - goto error_rcu_unlock; - } - if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) { chan_reg_key = ua_chan->tracing_channel_id; } else { chan_reg_key = ua_chan->key; } - pthread_mutex_lock(®istry->_lock); - - /* - * From this point on, this call acquires the ownership of the sig, fields - * and model_emf_uri meaning any free are done inside it if needed. These - * three variables MUST NOT be read/write after this. - */ - ret_code = ust_registry_create_event(registry, chan_reg_key, - sobjd, cobjd, name, sig, nr_fields, fields, - loglevel_value, model_emf_uri, ua_sess->buffer_type, - &event_id, app); - sig = NULL; - fields = NULL; - model_emf_uri = NULL; + { + auto locked_registry = get_locked_session_registry(ua_sess->get_identifier()); + if (locked_registry) { + /* + * From this point on, this call acquires the ownership of the signature, + * fields and model_emf_uri meaning any free are done inside it if needed. + * These three variables MUST NOT be read/write after this. + */ + try { + auto& channel = locked_registry->channel(chan_reg_key); + + /* id is set on success. */ + channel.add_event( + sobjd, + cobjd, + name, + signature.get(), + lsu::create_trace_fields_from_ust_ctl_fields( + *locked_registry, + fields.get(), + nr_fields, + lst::field_location::root::EVENT_RECORD_PAYLOAD, + lsu::ctl_field_quirks:: + UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS), + loglevel_value, + model_emf_uri.get() ? + nonstd::optional(model_emf_uri.get()) : + nonstd::nullopt, + ua_sess->buffer_type, + *app, + event_id); + ret_code = 0; + } catch (const std::exception& ex) { + ERR("Failed to add event `%s` to registry session: %s", + name, + ex.what()); + /* Inform the application of the error; don't return directly. */ + ret_code = -EINVAL; + } + } else { + DBG("Application session is being torn down. 
Abort event notify"); + return 0; + } + } /* * The return value is returned to ustctl so in case of an error, the @@ -6658,31 +6647,26 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name, if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app reply event failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } /* * No need to wipe the create event since the application socket will * get close on error hence cleaning up everything by itself. */ - goto error; + return ret; } - DBG3("UST registry event %s with id %" PRId32 " added successfully", - name, event_id); - -error: - pthread_mutex_unlock(®istry->_lock); -error_rcu_unlock: - rcu_read_unlock(); - free(sig); - free(fields); - free(model_emf_uri); + DBG_FMT("UST registry event successfully added: name={}, id={}", name, event_id); return ret; } @@ -6694,26 +6678,27 @@ error_rcu_unlock: * * On success 0 is returned else a negative value. */ -static int add_enum_ust_registry(int sock, int sobjd, char *name, - struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries) +static int add_enum_ust_registry(int sock, + int sobjd, + const char *name, + struct lttng_ust_ctl_enum_entry *raw_entries, + size_t nr_entries) { - int ret = 0, ret_code; + int ret = 0; struct ust_app *app; struct ust_app_session *ua_sess; - ust_registry_session *registry; uint64_t enum_id = -1ULL; - - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock_guard; + auto entries = + lttng::make_unique_wrapper( + raw_entries); /* Lookup application. If not found, there is a code flow error. */ app = find_app_by_notify_sock(sock); if (!app) { /* Return an error since this is not an error */ - DBG("Application socket %d is being torn down. Aborting enum registration", - sock); - free(entries); - ret = -1; - goto error_rcu_unlock; + DBG("Application socket %d is being torn down. Aborting enum registration", sock); + return -1; } /* Lookup session by UST object descriptor. */ @@ -6721,59 +6706,66 @@ static int add_enum_ust_registry(int sock, int sobjd, char *name, if (!ua_sess) { /* Return an error since this is not an error */ DBG("Application session is being torn down (session not found). Aborting enum registration."); - free(entries); - goto error_rcu_unlock; + return 0; } - registry = get_session_registry(ua_sess); - if (!registry) { + auto locked_registry = get_locked_session_registry(ua_sess->get_identifier()); + if (!locked_registry) { DBG("Application session is being torn down (registry not found). Aborting enum registration."); - free(entries); - goto error_rcu_unlock; + return 0; } - pthread_mutex_lock(®istry->_lock); - /* * From this point on, the callee acquires the ownership of * entries. The variable entries MUST NOT be read/written after * call. 
*/ - ret_code = ust_registry_create_or_find_enum(registry, sobjd, name, - entries, nr_entries, &enum_id); - entries = NULL; + int application_reply_code; + try { + locked_registry->create_or_find_enum( + sobjd, name, entries.release(), nr_entries, &enum_id); + application_reply_code = 0; + } catch (const std::exception& ex) { + ERR("%s: %s", + lttng::format( + "Failed to create or find enumeration provided by application: app = {}, enumeration name = {}", + *app, + name) + .c_str(), + ex.what()); + application_reply_code = -1; + } /* * The return value is returned to ustctl so in case of an error, the * application can be notified. In case of an error, it's important not to * return a negative error or else the application will get closed. */ - ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code); + ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, application_reply_code); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else if (ret == -EAGAIN) { WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d", - app->pid, app->sock); + app->pid, + app->sock); } else { ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d", - ret, app->pid, app->sock); + ret, + app->pid, + app->sock); } /* * No need to wipe the create enum since the application socket will * get close on error hence cleaning up everything by itself. */ - goto error; + return ret; } DBG3("UST registry enum %s added successfully or already found", name); - -error: - pthread_mutex_unlock(®istry->_lock); -error_rcu_unlock: - rcu_read_unlock(); - return ret; + return 0; } /* @@ -6791,14 +6783,11 @@ int ust_app_recv_notify(int sock) ret = lttng_ust_ctl_recv_notify(sock, &cmd); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { - DBG3("UST app recv notify failed. Application died: sock = %d", - sock); + DBG3("UST app recv notify failed. Application died: sock = %d", sock); } else if (ret == -EAGAIN) { - WARN("UST app recv notify failed. Communication time out: sock = %d", - sock); + WARN("UST app recv notify failed. Communication time out: sock = %d", sock); } else { - ERR("UST app recv notify failed with ret %d: sock = %d", - ret, sock); + ERR("UST app recv notify failed with ret %d: sock = %d", ret, sock); } goto error; } @@ -6809,24 +6798,51 @@ int ust_app_recv_notify(int sock) int sobjd, cobjd, loglevel_value; char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri; size_t nr_fields; + uint64_t tracer_token = 0; struct lttng_ust_ctl_field *fields; DBG2("UST app ustctl register event received"); - ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name, - &loglevel_value, &sig, &nr_fields, &fields, - &model_emf_uri); + ret = lttng_ust_ctl_recv_register_event(sock, + &sobjd, + &cobjd, + name, + &loglevel_value, + &sig, + &nr_fields, + &fields, + &model_emf_uri, + &tracer_token); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app recv event failed. Application died: sock = %d", - sock); + sock); } else if (ret == -EAGAIN) { WARN("UST app recv event failed. 
Communication time out: sock = %d", - sock); + sock); } else { - ERR("UST app recv event failed with ret %d: sock = %d", - ret, sock); + ERR("UST app recv event failed with ret %d: sock = %d", ret, sock); + } + goto error; + } + + { + const lttng::urcu::read_lock_guard rcu_lock; + const struct ust_app *app = find_app_by_notify_sock(sock); + if (!app) { + DBG("Application socket %d is being torn down. Abort event notify", + sock); + ret = -1; + goto error; } + } + + if ((!fields && nr_fields > 0) || (fields && nr_fields == 0)) { + ERR("Invalid return value from lttng_ust_ctl_recv_register_event: fields = %p, nr_fields = %zu", + fields, + nr_fields); + ret = -1; + free(fields); goto error; } @@ -6836,8 +6852,15 @@ int ust_app_recv_notify(int sock) * code path loses the ownsership of these variables and transfer them * to the this function. */ - ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields, - fields, loglevel_value, model_emf_uri); + ret = add_event_ust_registry(sock, + sobjd, + cobjd, + name, + sig, + nr_fields, + fields, + loglevel_value, + model_emf_uri); if (ret < 0) { goto error; } @@ -6847,23 +6870,24 @@ int ust_app_recv_notify(int sock) case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL: { int sobjd, cobjd; - size_t nr_fields; - struct lttng_ust_ctl_field *fields; + size_t field_count; + struct lttng_ust_ctl_field *context_fields; DBG2("UST app ustctl register channel received"); - ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields, - &fields); + ret = lttng_ust_ctl_recv_register_channel( + sock, &sobjd, &cobjd, &field_count, &context_fields); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { DBG3("UST app recv channel failed. Application died: sock = %d", - sock); + sock); } else if (ret == -EAGAIN) { WARN("UST app recv channel failed. Communication time out: sock = %d", - sock); + sock); } else { ERR("UST app recv channel failed with ret %d: sock = %d", - ret, sock); + ret, + sock); } goto error; } @@ -6871,10 +6895,10 @@ int ust_app_recv_notify(int sock) /* * The fields ownership are transfered to this function call meaning * that if needed it will be freed. After this, it's invalid to access - * fields or clean it up. + * fields or clean them up. */ - ret = reply_ust_register_channel(sock, cobjd, nr_fields, - fields); + ret = handle_app_register_channel_notification( + sock, cobjd, context_fields, field_count); if (ret < 0) { goto error; } @@ -6890,31 +6914,34 @@ int ust_app_recv_notify(int sock) DBG2("UST app ustctl register enum received"); - ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name, - &entries, &nr_entries); + ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name, &entries, &nr_entries); if (ret < 0) { if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) { - DBG3("UST app recv enum failed. Application died: sock = %d", - sock); + DBG3("UST app recv enum failed. Application died: sock = %d", sock); } else if (ret == -EAGAIN) { WARN("UST app recv enum failed. Communication time out: sock = %d", - sock); + sock); } else { - ERR("UST app recv enum failed with ret %d: sock = %d", - ret, sock); + ERR("UST app recv enum failed with ret %d: sock = %d", ret, sock); } goto error; } - /* Callee assumes ownership of entries */ - ret = add_enum_ust_registry(sock, sobjd, name, - entries, nr_entries); + /* Callee assumes ownership of entries. 
*/ + ret = add_enum_ust_registry(sock, sobjd, name, entries, nr_entries); if (ret < 0) { goto error; } break; } + case LTTNG_UST_CTL_NOTIFY_CMD_KEY: + { + DBG2("UST app ustctl register key received"); + ret = -LTTNG_UST_ERR_NOSYS; + // TODO + goto error; + } default: /* Should NEVER happen. */ abort(); @@ -6942,7 +6969,7 @@ void ust_app_notify_sock_unregister(int sock) LTTNG_ASSERT(sock >= 0); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; obj = zmalloc(); if (!obj) { @@ -6989,7 +7016,6 @@ void ust_app_notify_sock_unregister(int sock) (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter); close_socket: - rcu_read_unlock(); /* * Close socket after a grace period to avoid for the socket to be reused @@ -7004,13 +7030,9 @@ close_socket: /* * Destroy a ust app data structure and free its memory. */ -void ust_app_destroy(struct ust_app *app) +static void ust_app_destroy(ust_app& app) { - if (!app) { - return; - } - - call_rcu(&app->pid_n.head, delete_ust_app_rcu); + call_rcu(&app.pid_n.head, delete_ust_app_rcu); } /* @@ -7019,29 +7041,25 @@ void ust_app_destroy(struct ust_app *app) * * Returns LTTNG_OK on success or a LTTNG_ERR error code. */ -enum lttng_error_code ust_app_snapshot_record( - const struct ltt_ust_session *usess, - const struct consumer_output *output, - uint64_t nb_packets_per_stream) +enum lttng_error_code ust_app_snapshot_record(const struct ltt_ust_session *usess, + const struct consumer_output *output, + uint64_t nb_packets_per_stream) { int ret = 0; enum lttng_error_code status = LTTNG_OK; - struct lttng_ht_iter iter; - struct ust_app *app; - char *trace_path = NULL; + char *trace_path = nullptr; LTTNG_ASSERT(usess); LTTNG_ASSERT(output); - rcu_read_lock(); - switch (usess->buffer_type) { case LTTNG_BUFFER_PER_UID: { - struct buffer_reg_uid *reg; + const lttng::urcu::read_lock_guard read_lock; - cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) { - struct buffer_reg_channel *buf_reg_chan; + for (auto *reg : + lttng::urcu::list_iteration_adapter( + usess->buffer_reg_uid_list)) { struct consumer_socket *socket; char pathname[PATH_MAX]; size_t consumer_path_offset = 0; @@ -7053,16 +7071,18 @@ enum lttng_error_code ust_app_snapshot_record( /* Get consumer socket to use to push the metadata.*/ socket = consumer_find_socket_by_bitness(reg->bits_per_long, - usess->consumer); + usess->consumer); if (!socket) { status = LTTNG_ERR_INVALID; goto error; } memset(pathname, 0, sizeof(pathname)); - ret = snprintf(pathname, sizeof(pathname), - DEFAULT_UST_TRACE_UID_PATH, - reg->uid, reg->bits_per_long); + ret = snprintf(pathname, + sizeof(pathname), + DEFAULT_UST_TRACE_UID_PATH, + reg->uid, + reg->bits_per_long); if (ret < 0) { PERROR("snprintf snapshot path"); status = LTTNG_ERR_INVALID; @@ -7070,52 +7090,63 @@ enum lttng_error_code ust_app_snapshot_record( } /* Free path allowed on previous iteration. */ free(trace_path); - trace_path = setup_channel_trace_path(usess->consumer, pathname, - &consumer_path_offset); + trace_path = setup_channel_trace_path( + usess->consumer, pathname, &consumer_path_offset); if (!trace_path) { status = LTTNG_ERR_INVALID; goto error; } /* Add the UST default trace dir to path. 
*/ - cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter, - buf_reg_chan, node.node) { - status = consumer_snapshot_channel(socket, - buf_reg_chan->consumer_key, - output, 0, &trace_path[consumer_path_offset], - nb_packets_per_stream); + for (auto *buf_reg_chan : + lttng::urcu::lfht_iteration_adapter( + *reg->registry->channels->ht)) { + status = + consumer_snapshot_channel(socket, + buf_reg_chan->consumer_key, + output, + 0, + &trace_path[consumer_path_offset], + nb_packets_per_stream); if (status != LTTNG_OK) { goto error; } } status = consumer_snapshot_channel(socket, - reg->registry->reg.ust->_metadata_key, output, 1, - &trace_path[consumer_path_offset], 0); + reg->registry->reg.ust->_metadata_key, + output, + 1, + &trace_path[consumer_path_offset], + 0); if (status != LTTNG_OK) { goto error; } } + break; } case LTTNG_BUFFER_PER_PID: { - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter(*ust_app_ht->ht)) { struct consumer_socket *socket; - struct lttng_ht_iter chan_iter; - struct ust_app_channel *ua_chan; struct ust_app_session *ua_sess; - ust_registry_session *registry; + lsu::registry_session *registry; char pathname[PATH_MAX]; size_t consumer_path_offset = 0; - ua_sess = lookup_session_by_app(usess, app); + ua_sess = ust_app_lookup_app_session(usess, app); if (!ua_sess) { /* Session not associated with this app. */ continue; } /* Get the right consumer socket for the application. */ - socket = consumer_find_socket_by_bitness(app->bits_per_long, - output); + socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, output); if (!socket) { status = LTTNG_ERR_INVALID; goto error; @@ -7123,8 +7154,7 @@ enum lttng_error_code ust_app_snapshot_record( /* Add the UST default trace dir to path. */ memset(pathname, 0, sizeof(pathname)); - ret = snprintf(pathname, sizeof(pathname), "%s", - ua_sess->path); + ret = snprintf(pathname, sizeof(pathname), "%s", ua_sess->path); if (ret < 0) { status = LTTNG_ERR_INVALID; PERROR("snprintf snapshot path"); @@ -7132,18 +7162,25 @@ enum lttng_error_code ust_app_snapshot_record( } /* Free path allowed on previous iteration. */ free(trace_path); - trace_path = setup_channel_trace_path(usess->consumer, pathname, - &consumer_path_offset); + trace_path = setup_channel_trace_path( + usess->consumer, pathname, &consumer_path_offset); if (!trace_path) { status = LTTNG_ERR_INVALID; goto error; } - cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter, - ua_chan, node.node) { - status = consumer_snapshot_channel(socket, - ua_chan->key, output, 0, - &trace_path[consumer_path_offset], - nb_packets_per_stream); + + for (auto *ua_chan : + lttng::urcu::lfht_iteration_adapter( + *ua_sess->channels->ht)) { + status = + consumer_snapshot_channel(socket, + ua_chan->key, + output, + 0, + &trace_path[consumer_path_offset], + nb_packets_per_stream); switch (status) { case LTTNG_OK: break; @@ -7154,14 +7191,17 @@ enum lttng_error_code ust_app_snapshot_record( } } - registry = get_session_registry(ua_sess); + registry = ust_app_get_session_registry(ua_sess->get_identifier()); if (!registry) { DBG("Application session is being torn down. 
Skip application."); continue; } status = consumer_snapshot_channel(socket, - registry->_metadata_key, output, 1, - &trace_path[consumer_path_offset], 0); + registry->_metadata_key, + output, + 1, + &trace_path[consumer_path_offset], + 0); switch (status) { case LTTNG_OK: break; @@ -7180,33 +7220,30 @@ enum lttng_error_code ust_app_snapshot_record( error: free(trace_path); - rcu_read_unlock(); return status; } /* * Return the size taken by one more packet per stream. */ -uint64_t ust_app_get_size_one_more_packet_per_stream( - const struct ltt_ust_session *usess, uint64_t cur_nr_packets) +uint64_t ust_app_get_size_one_more_packet_per_stream(const struct ltt_ust_session *usess, + uint64_t cur_nr_packets) { uint64_t tot_size = 0; - struct ust_app *app; - struct lttng_ht_iter iter; LTTNG_ASSERT(usess); switch (usess->buffer_type) { case LTTNG_BUFFER_PER_UID: { - struct buffer_reg_uid *reg; - - cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) { - struct buffer_reg_channel *buf_reg_chan; - - rcu_read_lock(); - cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter, - buf_reg_chan, node.node) { + for (auto *reg : + lttng::urcu::list_iteration_adapter( + usess->buffer_reg_uid_list)) { + for (auto *buf_reg_chan : + lttng::urcu::lfht_iteration_adapter( + *reg->registry->channels->ht)) { if (cur_nr_packets >= buf_reg_chan->num_subbuf) { /* * Don't take channel into account if we @@ -7216,26 +7253,27 @@ uint64_t ust_app_get_size_one_more_packet_per_stream( } tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count; } - rcu_read_unlock(); } break; } case LTTNG_BUFFER_PER_PID: { - rcu_read_lock(); - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { - struct ust_app_channel *ua_chan; - struct ust_app_session *ua_sess; - struct lttng_ht_iter chan_iter; - - ua_sess = lookup_session_by_app(usess, app); + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter(*ust_app_ht->ht)) { + const auto *ua_sess = ust_app_lookup_app_session(usess, app); if (!ua_sess) { /* Session not associated with this app. 
*/ continue; } - cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter, - ua_chan, node.node) { + for (auto *ua_chan : + lttng::urcu::lfht_iteration_adapter( + *ua_sess->channels->ht)) { if (cur_nr_packets >= ua_chan->attr.num_subbuf) { /* * Don't take channel into account if we @@ -7246,7 +7284,6 @@ uint64_t ust_app_get_size_one_more_packet_per_stream( tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count; } } - rcu_read_unlock(); break; } default: @@ -7258,9 +7295,12 @@ uint64_t ust_app_get_size_one_more_packet_per_stream( } int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id, - struct cds_list_head *buffer_reg_uid_list, - struct consumer_output *consumer, uint64_t uchan_id, - int overwrite, uint64_t *discarded, uint64_t *lost) + struct cds_list_head *buffer_reg_uid_list, + struct consumer_output *consumer, + uint64_t uchan_id, + int overwrite, + uint64_t *discarded, + uint64_t *lost) { int ret; uint64_t consumer_chan_key; @@ -7269,7 +7309,7 @@ int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id, *lost = 0; ret = buffer_reg_uid_consumer_channel_key( - buffer_reg_uid_list, uchan_id, &consumer_chan_key); + buffer_reg_uid_list, uchan_id, &consumer_chan_key); if (ret < 0) { /* Not found */ ret = 0; @@ -7277,11 +7317,10 @@ int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id, } if (overwrite) { - ret = consumer_get_lost_packets(ust_session_id, - consumer_chan_key, consumer, lost); + ret = consumer_get_lost_packets(ust_session_id, consumer_chan_key, consumer, lost); } else { - ret = consumer_get_discarded_events(ust_session_id, - consumer_chan_key, consumer, discarded); + ret = consumer_get_discarded_events( + ust_session_id, consumer_chan_key, consumer, discarded); } end: @@ -7289,46 +7328,46 @@ end: } int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess, - struct ltt_ust_channel *uchan, - struct consumer_output *consumer, int overwrite, - uint64_t *discarded, uint64_t *lost) + struct ltt_ust_channel *uchan, + struct consumer_output *consumer, + int overwrite, + uint64_t *discarded, + uint64_t *lost) { int ret = 0; - struct lttng_ht_iter iter; struct lttng_ht_node_str *ua_chan_node; - struct ust_app *app; struct ust_app_session *ua_sess; struct ust_app_channel *ua_chan; *discarded = 0; *lost = 0; - rcu_read_lock(); /* * Iterate over every registered applications. Sum counters for * all applications containing requested session and channel. 
*/ - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { struct lttng_ht_iter uiter; - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { continue; } /* Get channel */ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter); - ua_chan_node = lttng_ht_iter_get_node_str(&uiter); + ua_chan_node = lttng_ht_iter_get_node(&uiter); /* If the session is found for the app, the channel must be there */ LTTNG_ASSERT(ua_chan_node); - ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node); + ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node); if (overwrite) { uint64_t _lost; - ret = consumer_get_lost_packets(usess->id, ua_chan->key, - consumer, &_lost); + ret = consumer_get_lost_packets(usess->id, ua_chan->key, consumer, &_lost); if (ret < 0) { break; } @@ -7336,8 +7375,8 @@ int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess, } else { uint64_t _discarded; - ret = consumer_get_discarded_events(usess->id, - ua_chan->key, consumer, &_discarded); + ret = consumer_get_discarded_events( + usess->id, ua_chan->key, consumer, &_discarded); if (ret < 0) { break; } @@ -7345,43 +7384,34 @@ int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess, } } - rcu_read_unlock(); return ret; } -static -int ust_app_regenerate_statedump(struct ltt_ust_session *usess, - struct ust_app *app) +static int ust_app_regenerate_statedump(struct ltt_ust_session *usess, struct ust_app *app) { int ret = 0; struct ust_app_session *ua_sess; DBG("Regenerating the metadata for ust app pid %d", app->pid); - rcu_read_lock(); + const lttng::urcu::read_lock_guard read_lock; + const auto update_health_code_on_exit = + lttng::make_scope_exit([]() noexcept { health_code_update(); }); - ua_sess = lookup_session_by_app(usess, app); - if (ua_sess == NULL) { + ua_sess = ust_app_lookup_app_session(usess, app); + if (ua_sess == nullptr) { /* The session is in teardown process. Ignore and continue. */ - goto end; + return 0; } - pthread_mutex_lock(&ua_sess->lock); - - if (ua_sess->deleted) { - goto end_unlock; + const auto locked_ua_sess = ua_sess->lock(); + if (locked_ua_sess->deleted) { + return 0; } pthread_mutex_lock(&app->sock_lock); ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle); pthread_mutex_unlock(&app->sock_lock); - -end_unlock: - pthread_mutex_unlock(&ua_sess->lock); - -end: - rcu_read_unlock(); - health_code_update(); return ret; } @@ -7390,28 +7420,19 @@ end: */ int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess) { - int ret = 0; - struct lttng_ht_iter iter; - struct ust_app *app; - DBG("Regenerating the metadata for all UST apps"); - rcu_read_lock(); - - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter( + *ust_app_ht->ht)) { if (!app->compatible) { continue; } - ret = ust_app_regenerate_statedump(usess, app); - if (ret < 0) { - /* Continue to the next app even on error */ - continue; - } + (void) ust_app_regenerate_statedump(usess, app); } - rcu_read_unlock(); - return 0; } @@ -7420,42 +7441,41 @@ int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess) * * Return LTTNG_OK on success or else an LTTng error code. 
*/ -enum lttng_error_code ust_app_rotate_session(struct ltt_session *session) +enum lttng_error_code ust_app_rotate_session(const ltt_session::locked_ref& session) { int ret; enum lttng_error_code cmd_ret = LTTNG_OK; - struct lttng_ht_iter iter; - struct ust_app *app; struct ltt_ust_session *usess = session->ust_session; LTTNG_ASSERT(usess); - rcu_read_lock(); - switch (usess->buffer_type) { case LTTNG_BUFFER_PER_UID: { - struct buffer_reg_uid *reg; - - cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) { - struct buffer_reg_channel *buf_reg_chan; + for (auto *reg : + lttng::urcu::list_iteration_adapter( + usess->buffer_reg_uid_list)) { struct consumer_socket *socket; + const lttng::urcu::read_lock_guard read_lock; /* Get consumer socket to use to push the metadata.*/ socket = consumer_find_socket_by_bitness(reg->bits_per_long, - usess->consumer); + usess->consumer); if (!socket) { cmd_ret = LTTNG_ERR_INVALID; goto error; } /* Rotate the data channels. */ - cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter, - buf_reg_chan, node.node) { + for (auto *buf_reg_chan : + lttng::urcu::lfht_iteration_adapter( + *reg->registry->channels->ht)) { ret = consumer_rotate_channel(socket, - buf_reg_chan->consumer_key, - usess->consumer, - /* is_metadata_channel */ false); + buf_reg_chan->consumer_key, + usess->consumer, + /* is_metadata_channel */ false); if (ret < 0) { cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER; goto error; @@ -7475,12 +7495,15 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session) continue; } - (void) push_metadata(reg->registry->reg.ust, usess->consumer); + { + auto locked_registry = reg->registry->reg.ust->lock(); + (void) push_metadata(locked_registry, usess->consumer); + } ret = consumer_rotate_channel(socket, - reg->registry->reg.ust->_metadata_key, - usess->consumer, - /* is_metadata_channel */ true); + reg->registry->reg.ust->_metadata_key, + usess->consumer, + /* is_metadata_channel */ true); if (ret < 0) { cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER; goto error; @@ -7490,63 +7513,76 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session) } case LTTNG_BUFFER_PER_PID: { - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { + /* Iterate on all apps. */ + for (auto raw_app : + lttng::urcu::lfht_iteration_adapter(*ust_app_ht->ht)) { struct consumer_socket *socket; - struct lttng_ht_iter chan_iter; - struct ust_app_channel *ua_chan; struct ust_app_session *ua_sess; - ust_registry_session *registry; + lsu::registry_session *registry; + bool app_reference_taken; - ua_sess = lookup_session_by_app(usess, app); + app_reference_taken = ust_app_get(*raw_app); + if (!app_reference_taken) { + /* Application unregistered concurrently, skip it. */ + DBG("Could not get application reference as it is being torn down; skipping application"); + continue; + } + + ust_app_reference app(raw_app); + raw_app = nullptr; + + ua_sess = ust_app_lookup_app_session(usess, app.get()); if (!ua_sess) { /* Session not associated with this app. */ continue; } /* Get the right consumer socket for the application. */ - socket = consumer_find_socket_by_bitness(app->bits_per_long, - usess->consumer); + socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, + usess->consumer); if (!socket) { cmd_ret = LTTNG_ERR_INVALID; goto error; } - registry = get_session_registry(ua_sess); - if (!registry) { - DBG("Application session is being torn down. 
Skip application."); - continue; - } + registry = ust_app_get_session_registry(ua_sess->get_identifier()); + LTTNG_ASSERT(registry); /* Rotate the data channels. */ - cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter, - ua_chan, node.node) { + for (auto *ua_chan : + lttng::urcu::lfht_iteration_adapter( + *ua_sess->channels->ht)) { ret = consumer_rotate_channel(socket, - ua_chan->key, - ua_sess->consumer, - /* is_metadata_channel */ false); + ua_chan->key, + ua_sess->consumer, + /* is_metadata_channel */ false); if (ret < 0) { - /* Per-PID buffer and application going away. */ - if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) - continue; cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER; goto error; } } /* Rotate the metadata channel. */ - (void) push_metadata(registry, usess->consumer); + { + auto locked_registry = registry->lock(); + + (void) push_metadata(locked_registry, usess->consumer); + } + ret = consumer_rotate_channel(socket, - registry->_metadata_key, - ua_sess->consumer, - /* is_metadata_channel */ true); + registry->_metadata_key, + ua_sess->consumer, + /* is_metadata_channel */ true); if (ret < 0) { - /* Per-PID buffer and application going away. */ - if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) - continue; cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER; goto error; } } + break; } default: @@ -7557,31 +7593,31 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session) cmd_ret = LTTNG_OK; error: - rcu_read_unlock(); return cmd_ret; } -enum lttng_error_code ust_app_create_channel_subdirectories( - const struct ltt_ust_session *usess) +enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust_session *usess) { enum lttng_error_code ret = LTTNG_OK; - struct lttng_ht_iter iter; enum lttng_trace_chunk_status chunk_status; char *pathname_index; int fmt_ret; LTTNG_ASSERT(usess->current_trace_chunk); - rcu_read_lock(); switch (usess->buffer_type) { case LTTNG_BUFFER_PER_UID: { - struct buffer_reg_uid *reg; + const lttng::urcu::read_lock_guard read_lock; - cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) { + for (auto *reg : + lttng::urcu::list_iteration_adapter( + usess->buffer_reg_uid_list)) { fmt_ret = asprintf(&pathname_index, - DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR, - reg->uid, reg->bits_per_long); + DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH + "/" DEFAULT_INDEX_DIR, + reg->uid, + reg->bits_per_long); if (fmt_ret < 0) { ERR("Failed to format channel index directory"); ret = LTTNG_ERR_CREATE_DIR_FAIL; @@ -7593,8 +7629,7 @@ enum lttng_error_code ust_app_create_channel_subdirectories( * of implicitly creating the channel's path. */ chunk_status = lttng_trace_chunk_create_subdirectory( - usess->current_trace_chunk, - pathname_index); + usess->current_trace_chunk, pathname_index); free(pathname_index); if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { ret = LTTNG_ERR_CREATE_DIR_FAIL; @@ -7605,39 +7640,37 @@ enum lttng_error_code ust_app_create_channel_subdirectories( } case LTTNG_BUFFER_PER_PID: { - struct ust_app *app; - /* * Create the toplevel ust/ directory in case no apps are running. 
*/ - chunk_status = lttng_trace_chunk_create_subdirectory( - usess->current_trace_chunk, - DEFAULT_UST_TRACE_DIR); + chunk_status = lttng_trace_chunk_create_subdirectory(usess->current_trace_chunk, + DEFAULT_UST_TRACE_DIR); if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { ret = LTTNG_ERR_CREATE_DIR_FAIL; goto error; } - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, - pid_n.node) { - struct ust_app_session *ua_sess; - ust_registry_session *registry; - - ua_sess = lookup_session_by_app(usess, app); + /* Iterate on all apps. */ + for (auto *app : + lttng::urcu::lfht_iteration_adapter(*ust_app_ht->ht)) { + const auto ua_sess = ust_app_lookup_app_session(usess, app); if (!ua_sess) { /* Session not associated with this app. */ continue; } - registry = get_session_registry(ua_sess); + const auto registry = + ust_app_get_session_registry(ua_sess->get_identifier()); if (!registry) { DBG("Application session is being torn down. Skip application."); continue; } fmt_ret = asprintf(&pathname_index, - DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR, - ua_sess->path); + DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR, + ua_sess->path); if (fmt_ret < 0) { ERR("Failed to format channel index directory"); ret = LTTNG_ERR_CREATE_DIR_FAIL; @@ -7648,8 +7681,7 @@ enum lttng_error_code ust_app_create_channel_subdirectories( * of implicitly creating the channel's path. */ chunk_status = lttng_trace_chunk_create_subdirectory( - usess->current_trace_chunk, - pathname_index); + usess->current_trace_chunk, pathname_index); free(pathname_index); if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { ret = LTTNG_ERR_CREATE_DIR_FAIL; @@ -7664,7 +7696,6 @@ enum lttng_error_code ust_app_create_channel_subdirectories( ret = LTTNG_OK; error: - rcu_read_unlock(); return ret; } @@ -7673,147 +7704,48 @@ error: * * Return LTTNG_OK on success or else an LTTng error code. */ -enum lttng_error_code ust_app_clear_session(struct ltt_session *session) +enum lttng_error_code ust_app_clear_session(const ltt_session::locked_ref& session) { - int ret; - enum lttng_error_code cmd_ret = LTTNG_OK; - struct lttng_ht_iter iter; - struct ust_app *app; - struct ltt_ust_session *usess = session->ust_session; - - LTTNG_ASSERT(usess); + const ltt_ust_session& usess = *session->ust_session; - rcu_read_lock(); - - if (usess->active) { + if (usess.active) { ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id); - cmd_ret = LTTNG_ERR_FATAL; - goto end; + return LTTNG_ERR_FATAL; } - switch (usess->buffer_type) { - case LTTNG_BUFFER_PER_UID: - { - struct buffer_reg_uid *reg; - - cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) { - struct buffer_reg_channel *buf_reg_chan; - struct consumer_socket *socket; - - /* Get consumer socket to use to push the metadata.*/ - socket = consumer_find_socket_by_bitness(reg->bits_per_long, - usess->consumer); - if (!socket) { - cmd_ret = LTTNG_ERR_INVALID; - goto error_socket; - } + const auto channel_keys = session->user_space_consumer_channel_keys(); + for (auto it = channel_keys.begin(); it != channel_keys.end(); ++it) { + const auto key = *it; - /* Clear the data channels. */ - cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter, - buf_reg_chan, node.node) { - ret = consumer_clear_channel(socket, - buf_reg_chan->consumer_key); - if (ret < 0) { - goto error; - } - } + const auto consumer_socket = consumer_find_socket_by_bitness( + key.bitness == + lttng::sessiond::user_space_consumer_channel_keys:: + consumer_bitness::ABI_32 ? 
+ 32 : + 64, + usess.consumer); - (void) push_metadata(reg->registry->reg.ust, usess->consumer); - - /* - * Clear the metadata channel. - * Metadata channel is not cleared per se but we still need to - * perform a rotation operation on it behind the scene. - */ - ret = consumer_clear_channel(socket, - reg->registry->reg.ust->_metadata_key); - if (ret < 0) { - goto error; - } + if (key.type == + lttng::sessiond::user_space_consumer_channel_keys::channel_type::METADATA) { + (void) push_metadata(it.get_registry_session()->lock(), usess.consumer); } - break; - } - case LTTNG_BUFFER_PER_PID: - { - cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) { - struct consumer_socket *socket; - struct lttng_ht_iter chan_iter; - struct ust_app_channel *ua_chan; - struct ust_app_session *ua_sess; - ust_registry_session *registry; - - ua_sess = lookup_session_by_app(usess, app); - if (!ua_sess) { - /* Session not associated with this app. */ - continue; - } - - /* Get the right consumer socket for the application. */ - socket = consumer_find_socket_by_bitness(app->bits_per_long, - usess->consumer); - if (!socket) { - cmd_ret = LTTNG_ERR_INVALID; - goto error_socket; - } - registry = get_session_registry(ua_sess); - if (!registry) { - DBG("Application session is being torn down. Skip application."); + const auto clean_ret = consumer_clear_channel(consumer_socket, key.key_value); + if (clean_ret < 0) { + if (clean_ret == -LTTCOMM_CONSUMERD_CHAN_NOT_FOUND && + usess.buffer_type == LTTNG_BUFFER_PER_PID) { continue; } - /* Clear the data channels. */ - cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter, - ua_chan, node.node) { - ret = consumer_clear_channel(socket, ua_chan->key); - if (ret < 0) { - /* Per-PID buffer and application going away. */ - if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) { - continue; - } - goto error; - } + if (clean_ret == -LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED) { + return LTTNG_ERR_CLEAR_RELAY_DISALLOWED; } - (void) push_metadata(registry, usess->consumer); - - /* - * Clear the metadata channel. - * Metadata channel is not cleared per se but we still need to - * perform rotation operation on it behind the scene. - */ - ret = consumer_clear_channel(socket, registry->_metadata_key); - if (ret < 0) { - /* Per-PID buffer and application going away. */ - if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) { - continue; - } - goto error; - } + return LTTNG_ERR_CLEAR_FAIL_CONSUMER; } - break; - } - default: - abort(); - break; } - cmd_ret = LTTNG_OK; - goto end; - -error: - switch (-ret) { - case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED: - cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED; - break; - default: - cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER; - } - -error_socket: -end: - rcu_read_unlock(); - return cmd_ret; + return LTTNG_OK; } /* @@ -7832,109 +7764,89 @@ end: * daemon as the same "offset" in a metadata stream will no longer point * to the same content. 
*/ -enum lttng_error_code ust_app_open_packets(struct ltt_session *session) +enum lttng_error_code ust_app_open_packets(const ltt_session::locked_ref& session) { - enum lttng_error_code ret = LTTNG_OK; - struct lttng_ht_iter iter; - struct ltt_ust_session *usess = session->ust_session; - - LTTNG_ASSERT(usess); + const ltt_ust_session& usess = *session->ust_session; - rcu_read_lock(); - - switch (usess->buffer_type) { - case LTTNG_BUFFER_PER_UID: - { - struct buffer_reg_uid *reg; + for (const auto key : session->user_space_consumer_channel_keys()) { + if (key.type != + lttng::sessiond::user_space_consumer_channel_keys::channel_type::DATA) { + continue; + } - cds_list_for_each_entry ( - reg, &usess->buffer_reg_uid_list, lnode) { - struct buffer_reg_channel *buf_reg_chan; - struct consumer_socket *socket; + const auto socket = consumer_find_socket_by_bitness( + key.bitness == + lttng::sessiond::user_space_consumer_channel_keys:: + consumer_bitness::ABI_32 ? + 32 : + 64, + usess.consumer); - socket = consumer_find_socket_by_bitness( - reg->bits_per_long, usess->consumer); - if (!socket) { - ret = LTTNG_ERR_FATAL; - goto error; + const auto open_ret = consumer_open_channel_packets(socket, key.key_value); + if (open_ret < 0) { + /* Per-PID buffer and application going away. */ + if (open_ret == -LTTCOMM_CONSUMERD_CHAN_NOT_FOUND && + usess.buffer_type == LTTNG_BUFFER_PER_PID) { + continue; } - cds_lfht_for_each_entry(reg->registry->channels->ht, - &iter.iter, buf_reg_chan, node.node) { - const int open_ret = - consumer_open_channel_packets( - socket, - buf_reg_chan->consumer_key); - - if (open_ret < 0) { - ret = LTTNG_ERR_UNK; - goto error; - } - } + return LTTNG_ERR_UNK; } - break; } - case LTTNG_BUFFER_PER_PID: - { - struct ust_app *app; - - cds_lfht_for_each_entry ( - ust_app_ht->ht, &iter.iter, app, pid_n.node) { - struct consumer_socket *socket; - struct lttng_ht_iter chan_iter; - struct ust_app_channel *ua_chan; - struct ust_app_session *ua_sess; - ust_registry_session *registry; - ua_sess = lookup_session_by_app(usess, app); - if (!ua_sess) { - /* Session not associated with this app. */ - continue; - } + return LTTNG_OK; +} - /* Get the right consumer socket for the application. */ - socket = consumer_find_socket_by_bitness( - app->bits_per_long, usess->consumer); - if (!socket) { - ret = LTTNG_ERR_FATAL; - goto error; - } +lsu::ctl_field_quirks ust_app::ctl_field_quirks() const +{ + /* + * Application contexts are expressed as variants. LTTng-UST announces + * those by registering an enumeration named `..._tag`. It then registers a + * variant as part of the event context that contains the various possible + * types. + * + * Unfortunately, the names used in the enumeration and variant don't + * match: the enumeration names are all prefixed with an underscore while + * the variant type tag fields aren't. + * + * While the CTF 1.8.3 specification mentions that + * underscores *should* (not *must*) be removed by CTF readers. Babeltrace + * 1.x (and possibly others) expect a perfect match between the names used + * by tags and variants. + * + * When the UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS quirk is enabled, + * the variant's fields are modified to match the mappings of its tag. + * + * From ABI version >= 10.x, the variant fields and tag mapping names + * correctly match, making this quirk unnecessary. + */ + return v_major <= 9 ? 
lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS : + lsu::ctl_field_quirks::NONE; +} - registry = get_session_registry(ua_sess); - if (!registry) { - DBG("Application session is being torn down. Skip application."); - continue; - } +static void ust_app_release(urcu_ref *ref) +{ + auto& app = *lttng::utils::container_of(ref, &ust_app::ref); - cds_lfht_for_each_entry(ua_sess->channels->ht, - &chan_iter.iter, ua_chan, node.node) { - const int open_ret = - consumer_open_channel_packets( - socket, - ua_chan->key); + ust_app_unregister(app); + ust_app_destroy(app); +} - if (open_ret < 0) { - /* - * Per-PID buffer and application going - * away. - */ - if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) { - continue; - } +bool ust_app_get(ust_app& app) +{ + return urcu_ref_get_unless_zero(&app.ref); +} - ret = LTTNG_ERR_UNK; - goto error; - } - } - } - break; - } - default: - abort(); - break; +void ust_app_put(struct ust_app *app) +{ + if (!app) { + return; } -error: - rcu_read_unlock(); - return ret; + urcu_ref_put(&app->ref, ust_app_release); +} + +lttng_ht *ust_app_get_all() +{ + return ust_app_ht; }
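
The tail of the diff introduces explicit reference counting for `struct ust_app` (`ust_app_get()`, `ust_app_put()`, `ust_app_release()`) on top of liburcu's `urcu_ref`, so that an application found under the RCU read-side lock is only used if it is not already being torn down. Below is a minimal, self-contained sketch of that get-unless-zero pattern, assuming only that the liburcu headers are installed; `my_object`, `my_object_release()` and the usage in `main()` are illustrative names, not lttng-tools code.

/*
 * Sketch of the liburcu reference-counting pattern used by
 * ust_app_get()/ust_app_put(): a concurrent user keeps the object only if
 * urcu_ref_get_unless_zero() succeeds, i.e. if tear-down has not started.
 */
#include <urcu/ref.h>

#include <cstdio>

struct my_object {
	/* Must be the first member so the cast in the release helper is valid. */
	struct urcu_ref ref;
	int payload;
};

static void my_object_release(struct urcu_ref *ref)
{
	/* Last reference dropped: reclaim the object. */
	delete reinterpret_cast<my_object *>(ref);
}

int main()
{
	auto *obj = new my_object{};

	urcu_ref_init(&obj->ref); /* refcount = 1 (the "owner" reference). */

	/* A concurrent user only proceeds if the count was non-zero. */
	if (urcu_ref_get_unless_zero(&obj->ref)) {
		std::printf("payload = %d\n", obj->payload);
		urcu_ref_put(&obj->ref, my_object_release);
	}

	/* Drop the initial reference; the release helper runs at zero. */
	urcu_ref_put(&obj->ref, my_object_release);
	return 0;
}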
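
Many of the rewritten functions above (for example `ust_app_regenerate_statedump()`) also drop their goto-based cleanup in favour of RAII guards such as `lttng::urcu::read_lock_guard`, `ua_sess->lock()` and `lttng::make_scope_exit`, so early returns can replace explicit unlock labels. The following is a minimal sketch of that scope-exit idiom in plain standard C++; this `scope_exit`/`make_scope_exit` pair is a local illustration only, not the lttng-tools implementation.

/* Minimal scope-exit guard: the bound action runs on every exit path. */
#include <cstdio>
#include <utility>

template <typename F>
class scope_exit {
public:
	explicit scope_exit(F func) : _func(std::move(func)), _armed(true) {}
	scope_exit(scope_exit&& other) noexcept :
		_func(std::move(other._func)), _armed(other._armed)
	{
		other._armed = false;
	}
	scope_exit(const scope_exit&) = delete;
	scope_exit& operator=(const scope_exit&) = delete;
	scope_exit& operator=(scope_exit&&) = delete;
	~scope_exit()
	{
		if (_armed) {
			_func();
		}
	}

private:
	F _func;
	bool _armed;
};

template <typename F>
scope_exit<F> make_scope_exit(F func)
{
	return scope_exit<F>(std::move(func));
}

static int do_work(bool session_deleted)
{
	/* Mirrors the health_code_update() on-exit action in the diff. */
	const auto update_health_on_exit =
		make_scope_exit([]() noexcept { std::puts("health_code_update()"); });

	if (session_deleted) {
		/* Early return: the scope-exit action still runs. */
		return 0;
	}

	std::puts("regenerate statedump");
	return 0;
}

int main()
{
	do_work(true);
	do_work(false);
	return 0;
}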