#include "lttng-ust-ctl.hpp"
#include "lttng-ust-error.hpp"
#include "notification-thread-commands.hpp"
-#include "rotate.hpp"
#include "session.hpp"
#include "ust-app.hpp"
#include "ust-consumer.hpp"
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;
-static
-int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
+static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
* A registry per UID object MUST exists before calling this function or else
* it LTTNG_ASSERT() if not found. RCU read side lock must be acquired.
*/
-static lsu::registry_session *get_session_registry(
- const struct ust_app_session *ua_sess)
+static lsu::registry_session *get_session_registry(const struct ust_app_session *ua_sess)
{
- lsu::registry_session *registry = NULL;
+ lsu::registry_session *registry = nullptr;
LTTNG_ASSERT(ua_sess);
}
case LTTNG_BUFFER_PER_UID:
{
- struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
- ua_sess->tracing_id, ua_sess->bits_per_long,
- lttng_credentials_get_uid(&ua_sess->real_credentials));
+ struct buffer_reg_uid *reg_uid =
+ buffer_reg_uid_find(ua_sess->tracing_id,
+ ua_sess->bits_per_long,
+ lttng_credentials_get_uid(&ua_sess->real_credentials));
if (!reg_uid) {
goto error;
}
return registry;
}
-lsu::registry_session::locked_ptr
-get_locked_session_registry(const struct ust_app_session *ua_sess)
+lsu::registry_session::locked_ptr get_locked_session_registry(const struct ust_app_session *ua_sess)
{
auto session = get_session_registry(ua_sess);
if (session) {
pthread_mutex_lock(&session->_lock);
}
- return lsu::registry_session::locked_ptr{session};
+ return lsu::registry_session::locked_ptr{ session };
}
} /* namespace */
/*
* Return the incremented value of next_channel_key.
*/
-static uint64_t get_next_channel_key(void)
+static uint64_t get_next_channel_key()
{
uint64_t ret;
/*
* Return the atomically incremented value of next_session_id.
*/
-static uint64_t get_next_session_id(void)
+static uint64_t get_next_session_id()
{
uint64_t ret;
return ret;
}
-static void copy_channel_attr_to_ustctl(
- struct lttng_ust_ctl_consumer_channel_attr *attr,
- struct lttng_ust_abi_channel_attr *uattr)
+static void copy_channel_attr_to_ustctl(struct lttng_ust_ctl_consumer_channel_attr *attr,
+ struct lttng_ust_abi_channel_attr *uattr)
{
/* Copy event attributes since the layout is different. */
attr->subbuf_size = uattr->subbuf_size;
/* Event loglevel. */
if (ev_loglevel_value != key->loglevel_type) {
- if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
- && key->loglevel_type == 0 &&
- ev_loglevel_value == -1) {
+ if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL &&
+ key->loglevel_type == 0 && ev_loglevel_value == -1) {
/*
* Match is accepted. This is because on event creation, the
* loglevel is set to -1 if the event loglevel type is ALL so 0 and
if (key->filter && event->filter) {
/* Both filters exists, check length followed by the bytecode. */
if (event->filter->len != key->filter->len ||
- memcmp(event->filter->data, key->filter->data,
- event->filter->len) != 0) {
+ memcmp(event->filter->data, key->filter->data, event->filter->len) != 0) {
goto no_match;
}
}
if (key->exclusion && event->exclusion) {
/* Both exclusions exists, check count followed by the names. */
if (event->exclusion->count != key->exclusion->count ||
- memcmp(event->exclusion->names, key->exclusion->names,
- event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
+ memcmp(event->exclusion->names,
+ key->exclusion->names,
+ event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
goto no_match;
}
}
-
/* Match. */
return 1;
* Unique add of an ust app event in the given ht. This uses the custom
* ht_match_ust_app_event match function and the event name as hash.
*/
-static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
- struct ust_app_event *event)
+static void add_unique_ust_app_event(struct ust_app_channel *ua_chan, struct ust_app_event *event)
{
struct cds_lfht_node *node_ptr;
struct ust_app_ht_key key;
key.exclusion = event->exclusion;
node_ptr = cds_lfht_add_unique(ht->ht,
- ht->hash_fct(event->node.key, lttng_ht_seed),
- ht_match_ust_app_event, &key, &event->node.node);
+ ht->hash_fct(event->node.key, lttng_ht_seed),
+ ht_match_ust_app_event,
+ &key,
+ &event->node.node);
LTTNG_ASSERT(node_ptr == &event->node.node);
}
* Delete ust context safely. RCU read lock must be held before calling
* this function.
*/
-static
-void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
- struct ust_app *app)
+static void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
int ret;
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
- ua_ctx->obj->handle, ret,
- app->pid, app->sock);
+ ua_ctx->obj->handle,
+ ret,
+ app->pid,
+ app->sock);
}
}
free(ua_ctx->obj);
* Delete ust app event safely. RCU read lock must be held before calling
* this function.
*/
-static
-void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
- struct ust_app *app)
+static void delete_ust_app_event(int sock, struct ust_app_event *ua_event, struct ust_app *app)
{
int ret;
ASSERT_RCU_READ_LOCKED();
free(ua_event->filter);
- if (ua_event->exclusion != NULL)
+ if (ua_event->exclusion != nullptr)
free(ua_event->exclusion);
- if (ua_event->obj != NULL) {
+ if (ua_event->obj != nullptr) {
pthread_mutex_lock(&app->sock_lock);
ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
free(ua_event->obj);
* Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
* through a call_rcu().
*/
-static
-void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
+static void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
- struct ust_app_event_notifier_rule *obj = lttng::utils::container_of(
- head, &ust_app_event_notifier_rule::rcu_head);
+ struct ust_app_event_notifier_rule *obj =
+ lttng::utils::container_of(head, &ust_app_event_notifier_rule::rcu_head);
free(obj);
}
/*
* Delete ust app event notifier rule safely.
*/
-static void delete_ust_app_event_notifier_rule(int sock,
- struct ust_app_event_notifier_rule *ua_event_notifier_rule,
- struct ust_app *app)
+static void delete_ust_app_event_notifier_rule(
+ int sock, struct ust_app_event_notifier_rule *ua_event_notifier_rule, struct ust_app *app)
{
int ret;
LTTNG_ASSERT(ua_event_notifier_rule);
- if (ua_event_notifier_rule->exclusion != NULL) {
+ if (ua_event_notifier_rule->exclusion != nullptr) {
free(ua_event_notifier_rule->exclusion);
}
- if (ua_event_notifier_rule->obj != NULL) {
+ if (ua_event_notifier_rule->obj != nullptr) {
pthread_mutex_lock(&app->sock_lock);
ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
}
lttng_trigger_put(ua_event_notifier_rule->trigger);
- call_rcu(&ua_event_notifier_rule->rcu_head,
- free_ust_app_event_notifier_rule_rcu);
+ call_rcu(&ua_event_notifier_rule->rcu_head, free_ust_app_event_notifier_rule_rcu);
}
/*
*
* Return 0 on success or else a negative value.
*/
-static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
- struct ust_app *app)
+static int release_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app)
{
int ret = 0;
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
lttng_fd_put(LTTNG_FD_APPS, 2);
* Delete ust app stream safely. RCU read lock must be held before calling
* this function.
*/
-static
-void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
- struct ust_app *app)
+static void delete_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app)
{
LTTNG_ASSERT(stream);
ASSERT_RCU_READ_LOCKED();
free(stream);
}
-static
-void delete_ust_app_channel_rcu(struct rcu_head *head)
+static void delete_ust_app_channel_rcu(struct rcu_head *head)
{
struct ust_app_channel *ua_chan =
lttng::utils::container_of(head, &ust_app_channel::rcu_head);
*
* The session list lock must be held by the caller.
*/
-static
-void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
+static void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
uint64_t discarded = 0, lost = 0;
struct ltt_session *session;
return;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
session = session_find_by_id(ua_chan->session->tracing_id);
if (!session || !session->ust_session) {
/*
if (ua_chan->attr.overwrite) {
consumer_get_lost_packets(ua_chan->session->tracing_id,
- ua_chan->key, session->ust_session->consumer,
- &lost);
+ ua_chan->key,
+ session->ust_session->consumer,
+ &lost);
} else {
consumer_get_discarded_events(ua_chan->session->tracing_id,
- ua_chan->key, session->ust_session->consumer,
- &discarded);
+ ua_chan->key,
+ session->ust_session->consumer,
+ &discarded);
}
- uchan = trace_ust_find_channel_by_name(
- session->ust_session->domain_global.channels,
- ua_chan->name);
+ uchan = trace_ust_find_channel_by_name(session->ust_session->domain_global.channels,
+ ua_chan->name);
if (!uchan) {
ERR("Missing UST channel to store discarded counters");
goto end;
uchan->per_pid_closed_app_lost += lost;
end:
- rcu_read_unlock();
if (session) {
session_put(session);
}
* The session list lock must be held by the caller.
*/
static void delete_ust_app_channel(int sock,
- struct ust_app_channel *ua_chan,
- struct ust_app *app,
- const lsu::registry_session::locked_ptr& locked_registry)
+ struct ust_app_channel *ua_chan,
+ struct ust_app *app,
+ const lsu::registry_session::locked_ptr& locked_registry)
{
int ret;
struct lttng_ht_iter iter;
DBG3("UST app deleting channel %s", ua_chan->name);
/* Wipe stream */
- cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
+ cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
cds_list_del(&stream->list);
delete_ust_app_stream(sock, stream, app);
}
/* Wipe context */
- cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
+ cds_lfht_for_each_entry (ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
cds_list_del(&ua_ctx->list);
ret = lttng_ht_del(ua_chan->ctx, &iter);
LTTNG_ASSERT(!ret);
}
/* Wipe events */
- cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
- node.node) {
+ cds_lfht_for_each_entry (ua_chan->events->ht, &iter.iter, ua_event, node.node) {
ret = lttng_ht_del(ua_chan->events, &iter);
LTTNG_ASSERT(!ret);
delete_ust_app_event(sock, ua_event, app);
if (locked_registry) {
try {
locked_registry->remove_channel(ua_chan->key, sock >= 0);
- } catch (const std::exception &ex) {
+ } catch (const std::exception& ex) {
DBG("Could not find channel for removal: %s", ex.what());
}
}
}
}
- if (ua_chan->obj != NULL) {
+ if (ua_chan->obj != nullptr) {
/* Remove channel from application UST object descriptor. */
iter.iter.node = &ua_chan->ust_objd_node.node;
ret = lttng_ht_del(app->ust_objd, &iter);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
- ua_chan->name, app->pid,
- app->sock);
+ ua_chan->name,
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
- ua_chan->name, app->pid,
- app->sock);
+ ua_chan->name,
+ app->pid,
+ app->sock);
} else {
ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
- ua_chan->name, ret, app->pid,
- app->sock);
+ ua_chan->name,
+ ret,
+ app->pid,
+ app->sock);
}
}
lttng_fd_put(LTTNG_FD_APPS, 1);
* terminated concurrently).
*/
ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
- struct consumer_socket *socket,
- int send_zero_data)
+ struct consumer_socket *socket,
+ int send_zero_data)
{
int ret;
- char *metadata_str = NULL;
+ char *metadata_str = nullptr;
size_t len, offset, new_metadata_len_sent;
ssize_t ret_val;
uint64_t metadata_key, metadata_version;
metadata_version = locked_registry->_metadata_version;
if (len == 0) {
DBG3("No metadata to push for metadata key %" PRIu64,
- locked_registry->_metadata_key);
+ locked_registry->_metadata_key);
ret_val = len;
if (send_zero_data) {
DBG("No metadata to push");
* daemon. Those push and pull schemes are performed on two
* different bidirectionnal communication sockets.
*/
- ret = consumer_push_metadata(socket, metadata_key,
- metadata_str, len, offset, metadata_version);
+ ret = consumer_push_metadata(
+ socket, metadata_key, metadata_str, len, offset, metadata_version);
pthread_mutex_lock(&locked_registry->_lock);
if (ret < 0) {
/*
* send.
*/
locked_registry->_metadata_len_sent =
- std::max(locked_registry->_metadata_len_sent,
- new_metadata_len_sent);
+ std::max(locked_registry->_metadata_len_sent, new_metadata_len_sent);
}
free(metadata_str);
return len;
* terminated concurrently).
*/
static int push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
- struct consumer_output *consumer)
+ struct consumer_output *consumer)
{
int ret_val;
ssize_t ret;
}
/* Get consumer socket to use to push the metadata.*/
- socket = consumer_find_socket_by_bitness(locked_registry->abi.bits_per_long,
- consumer);
+ socket = consumer_find_socket_by_bitness(locked_registry->abi.bits_per_long, consumer);
if (!socket) {
ret_val = -1;
goto error;
*
* Return 0 on success else a negative value.
*/
-static int close_metadata(uint64_t metadata_key, unsigned int consumer_bitness,
- struct consumer_output *consumer)
+static int close_metadata(uint64_t metadata_key,
+ unsigned int consumer_bitness,
+ struct consumer_output *consumer)
{
int ret;
struct consumer_socket *socket;
LTTNG_ASSERT(consumer);
/* Get consumer socket to use to push the metadata. */
- socket = consumer_find_socket_by_bitness(consumer_bitness,
- consumer);
+ socket = consumer_find_socket_by_bitness(consumer_bitness, consumer);
if (!socket) {
ret = -1;
goto end;
return ret;
}
-static
-void delete_ust_app_session_rcu(struct rcu_head *head)
+static void delete_ust_app_session_rcu(struct rcu_head *head)
{
struct ust_app_session *ua_sess =
lttng::utils::container_of(head, &ust_app_session::rcu_head);
*
* The session list lock must be held by the caller.
*/
-static
-void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
- struct ust_app *app)
+static void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, struct ust_app *app)
{
int ret;
struct lttng_ht_iter iter;
(void) push_metadata(locked_registry, ua_sess->consumer);
}
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
- node.node) {
+ cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
ret = lttng_ht_del(ua_sess->channels, &iter);
LTTNG_ASSERT(!ret);
delete_ust_app_channel(sock, ua_chan, app, locked_registry);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
* Delete a traceable application structure from the global list. Never call
* this function outside of a call_rcu call.
*/
-static
-void delete_ust_app(struct ust_app *app)
+static void delete_ust_app(struct ust_app *app)
{
int ret, sock;
struct ust_app_session *ua_sess, *tmp_ua_sess;
app->sock = -1;
/* Wipe sessions */
- cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
- teardown_node) {
+ cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
/* Free every object in the session and the session. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
delete_ust_app_session(sock, ua_sess, app);
- rcu_read_unlock();
}
/* Remove the event notifier rules associated with this app. */
- rcu_read_lock();
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &iter.iter, event_notifier_rule, node.node) {
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
- LTTNG_ASSERT(!ret);
+ {
+ lttng::urcu::read_lock_guard read_lock;
- delete_ust_app_event_notifier_rule(
- app->sock, event_notifier_rule, app);
- }
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &iter.iter,
+ event_notifier_rule,
+ node.node) {
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
+ LTTNG_ASSERT(!ret);
- rcu_read_unlock();
+ delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ }
+ }
lttng_ht_destroy(app->sessions);
lttng_ht_destroy(app->ust_sessions_objd);
enum lttng_error_code ret_code;
enum event_notifier_error_accounting_status status;
- const int event_notifier_read_fd = lttng_pipe_get_readfd(
- app->event_notifier_group.event_pipe);
+ const int event_notifier_read_fd =
+ lttng_pipe_get_readfd(app->event_notifier_group.event_pipe);
ret_code = notification_thread_command_remove_tracer_event_source(
- the_notification_thread_handle,
- event_notifier_read_fd);
+ the_notification_thread_handle, event_notifier_read_fd);
if (ret_code != LTTNG_OK) {
ERR("Failed to remove application tracer event source from notification thread");
}
free(app->event_notifier_group.object);
}
- event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
- app->event_notifier_group.event_pipe);
+ event_notifier_write_fd_is_open =
+ lttng_pipe_is_write_open(app->event_notifier_group.event_pipe);
lttng_pipe_destroy(app->event_notifier_group.event_pipe);
/*
* Release the file descriptors reserved for the event notifier pipe.
/*
* URCU intermediate call to delete an UST app.
*/
-static
-void delete_ust_app_rcu(struct rcu_head *head)
+static void delete_ust_app_rcu(struct rcu_head *head)
{
struct lttng_ht_node_ulong *node =
lttng::utils::container_of(head, <tng_ht_node_ulong::head);
- struct ust_app *app =
- lttng::utils::container_of(node, &ust_app::pid_n);
+ struct ust_app *app = lttng::utils::container_of(node, &ust_app::pid_n);
DBG3("Call RCU deleting app PID %d", app->pid);
delete_ust_app(app);
*
* The session list lock must be held by the caller.
*/
-static void destroy_app_session(struct ust_app *app,
- struct ust_app_session *ua_sess)
+static void destroy_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
{
int ret;
struct lttng_ht_iter iter;
/*
* Alloc new UST app session.
*/
-static
-struct ust_app_session *alloc_ust_app_session(void)
+static struct ust_app_session *alloc_ust_app_session()
{
struct ust_app_session *ua_sess;
/* Init most of the default value by allocating and zeroing */
ua_sess = zmalloc<ust_app_session>();
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
PERROR("malloc");
goto error_free;
}
ua_sess->handle = -1;
ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
- pthread_mutex_init(&ua_sess->lock, NULL);
+ pthread_mutex_init(&ua_sess->lock, nullptr);
return ua_sess;
error_free:
- return NULL;
+ return nullptr;
}
/*
* Alloc new UST app channel.
*/
-static
-struct ust_app_channel *alloc_ust_app_channel(const char *name,
- struct ust_app_session *ua_sess,
- struct lttng_ust_abi_channel_attr *attr)
+static struct ust_app_channel *alloc_ust_app_channel(const char *name,
+ struct ust_app_session *ua_sess,
+ struct lttng_ust_abi_channel_attr *attr)
{
struct ust_app_channel *ua_chan;
/* Init most of the default value by allocating and zeroing */
ua_chan = zmalloc<ust_app_channel>();
- if (ua_chan == NULL) {
+ if (ua_chan == nullptr) {
PERROR("malloc");
goto error;
}
strncpy(ua_chan->name, name, sizeof(ua_chan->name));
ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
- ua_chan->enabled = 1;
+ ua_chan->enabled = true;
ua_chan->handle = -1;
ua_chan->session = ua_sess;
ua_chan->key = get_next_channel_key();
return ua_chan;
error:
- return NULL;
+ return nullptr;
}
/*
*
* Return newly allocated stream pointer or NULL on error.
*/
-struct ust_app_stream *ust_app_alloc_stream(void)
+struct ust_app_stream *ust_app_alloc_stream()
{
- struct ust_app_stream *stream = NULL;
+ struct ust_app_stream *stream = nullptr;
stream = zmalloc<ust_app_stream>();
- if (stream == NULL) {
+ if (stream == nullptr) {
PERROR("zmalloc ust app stream");
goto error;
}
/*
* Alloc new UST app event.
*/
-static
-struct ust_app_event *alloc_ust_app_event(char *name,
- struct lttng_ust_abi_event *attr)
+static struct ust_app_event *alloc_ust_app_event(char *name, struct lttng_ust_abi_event *attr)
{
struct ust_app_event *ua_event;
/* Init most of the default value by allocating and zeroing */
ua_event = zmalloc<ust_app_event>();
- if (ua_event == NULL) {
+ if (ua_event == nullptr) {
PERROR("Failed to allocate ust_app_event structure");
goto error;
}
- ua_event->enabled = 1;
+ ua_event->enabled = true;
strncpy(ua_event->name, name, sizeof(ua_event->name));
ua_event->name[sizeof(ua_event->name) - 1] = '\0';
lttng_ht_node_init_str(&ua_event->node, ua_event->name);
return ua_event;
error:
- return NULL;
+ return nullptr;
}
/*
* Allocate a new UST app event notifier rule.
*/
-static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
- struct lttng_trigger *trigger)
+static struct ust_app_event_notifier_rule *
+alloc_ust_app_event_notifier_rule(struct lttng_trigger *trigger)
{
- enum lttng_event_rule_generate_exclusions_status
- generate_exclusion_status;
+ enum lttng_event_rule_generate_exclusions_status generate_exclusion_status;
enum lttng_condition_status cond_status;
struct ust_app_event_notifier_rule *ua_event_notifier_rule;
- struct lttng_condition *condition = NULL;
- const struct lttng_event_rule *event_rule = NULL;
+ struct lttng_condition *condition = nullptr;
+ const struct lttng_event_rule *event_rule = nullptr;
ua_event_notifier_rule = zmalloc<ust_app_event_notifier_rule>();
- if (ua_event_notifier_rule == NULL) {
+ if (ua_event_notifier_rule == nullptr) {
PERROR("Failed to allocate ust_app_event_notifier_rule structure");
goto error;
}
- ua_event_notifier_rule->enabled = 1;
+ ua_event_notifier_rule->enabled = true;
ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
- lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
- ua_event_notifier_rule->token);
+ lttng_ht_node_init_u64(&ua_event_notifier_rule->node, ua_event_notifier_rule->token);
condition = lttng_trigger_get_condition(trigger);
LTTNG_ASSERT(condition);
LTTNG_ASSERT(lttng_condition_get_type(condition) ==
- LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+ LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
- cond_status = lttng_condition_event_rule_matches_get_rule(
- condition, &event_rule);
+ cond_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
LTTNG_ASSERT(event_rule);
ua_event_notifier_rule->error_counter_index =
- lttng_condition_event_rule_matches_get_error_counter_index(condition);
+ lttng_condition_event_rule_matches_get_error_counter_index(condition);
/* Acquire the event notifier's reference to the trigger. */
lttng_trigger_get(trigger);
ua_event_notifier_rule->trigger = trigger;
ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
generate_exclusion_status = lttng_event_rule_generate_exclusions(
- event_rule, &ua_event_notifier_rule->exclusion);
+ event_rule, &ua_event_notifier_rule->exclusion);
switch (generate_exclusion_status) {
case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
}
DBG3("UST app event notifier rule allocated: token = %" PRIu64,
- ua_event_notifier_rule->token);
+ ua_event_notifier_rule->token);
return ua_event_notifier_rule;
lttng_trigger_put(trigger);
error:
free(ua_event_notifier_rule);
- return NULL;
+ return nullptr;
}
/*
* Alloc new UST app context.
*/
-static
-struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
+static struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
struct ust_app_ctx *ua_ctx;
ua_ctx = zmalloc<ust_app_ctx>();
- if (ua_ctx == NULL) {
+ if (ua_ctx == nullptr) {
goto error;
}
if (uctx) {
memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
- char *provider_name = NULL, *ctx_name = NULL;
+ char *provider_name = nullptr, *ctx_name = nullptr;
provider_name = strdup(uctx->u.app_ctx.provider_name);
ctx_name = strdup(uctx->u.app_ctx.ctx_name);
return ua_ctx;
error:
free(ua_ctx);
- return NULL;
+ return nullptr;
}
/*
*
* Return allocated filter or NULL on error.
*/
-static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
- const struct lttng_bytecode *orig_f)
+static struct lttng_ust_abi_filter_bytecode *
+create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
- struct lttng_ust_abi_filter_bytecode *filter = NULL;
+ struct lttng_ust_abi_filter_bytecode *filter = nullptr;
/* Copy filter bytecode. */
filter = zmalloc<lttng_ust_abi_filter_bytecode>(sizeof(*filter) + orig_f->len);
if (!filter) {
- PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
+ PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32
+ " bytes",
+ orig_f->len);
goto error;
}
- LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
- sizeof(struct lttng_ust_abi_filter_bytecode));
+ LTTNG_ASSERT(sizeof(struct lttng_bytecode) == sizeof(struct lttng_ust_abi_filter_bytecode));
memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
return filter;
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
- struct lttng_ust_abi_capture_bytecode *capture = NULL;
+ struct lttng_ust_abi_capture_bytecode *capture = nullptr;
/* Copy capture bytecode. */
capture = zmalloc<lttng_ust_abi_capture_bytecode>(sizeof(*capture) + orig_f->len);
if (!capture) {
- PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
+ PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32
+ " bytes",
+ orig_f->len);
goto error;
}
LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
- sizeof(struct lttng_ust_abi_capture_bytecode));
+ sizeof(struct lttng_ust_abi_capture_bytecode));
memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
return capture;
ASSERT_RCU_READ_LOCKED();
- lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
+ lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
DBG2("UST app find by sock %d not found", sock);
goto error;
}
return lttng::utils::container_of(node, &ust_app::sock_n);
error:
- return NULL;
+ return nullptr;
}
/*
ASSERT_RCU_READ_LOCKED();
- lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
- &iter);
+ lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *) ((unsigned long) sock), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
DBG2("UST app find by notify sock %d not found", sock);
goto error;
}
return lttng::utils::container_of(node, &ust_app::notify_sock_n);
error:
- return NULL;
+ return nullptr;
}
/*
* Return an ust_app_event object or NULL on error.
*/
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
- const char *name, const struct lttng_bytecode *filter,
- int loglevel_value,
- const struct lttng_event_exclusion *exclusion)
+ const char *name,
+ const struct lttng_bytecode *filter,
+ int loglevel_value,
+ const struct lttng_event_exclusion *exclusion)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_str *node;
- struct ust_app_event *event = NULL;
+ struct ust_app_event *event = nullptr;
struct ust_app_ht_key key;
LTTNG_ASSERT(name);
key.exclusion = exclusion;
/* Lookup using the event name as hash and a custom match fct. */
- cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
- ht_match_ust_app_event, &key, &iter.iter);
+ cds_lfht_lookup(ht->ht,
+ ht->hash_fct((void *) name, lttng_ht_seed),
+ ht_match_ust_app_event,
+ &key,
+ &iter.iter);
node = lttng_ht_iter_get_node_str(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
goto end;
}
* Must be called with the RCU read lock held.
* Return an ust_app_event_notifier_rule object or NULL on error.
*/
-static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
- struct lttng_ht *ht, uint64_t token)
+static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(struct lttng_ht *ht,
+ uint64_t token)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_u64 *node;
- struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
+ struct ust_app_event_notifier_rule *event_notifier_rule = nullptr;
LTTNG_ASSERT(ht);
ASSERT_RCU_READ_LOCKED();
lttng_ht_lookup(ht, &token, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
- if (node == NULL) {
- DBG2("UST app event notifier rule token not found: token = %" PRIu64,
- token);
+ if (node == nullptr) {
+ DBG2("UST app event notifier rule token not found: token = %" PRIu64, token);
goto end;
}
- event_notifier_rule = lttng::utils::container_of(
- node, &ust_app_event_notifier_rule::node);
+ event_notifier_rule = lttng::utils::container_of(node, &ust_app_event_notifier_rule::node);
end:
return event_notifier_rule;
}
*
* Called with UST app session lock held.
*/
-static
-int create_ust_channel_context(struct ust_app_channel *ua_chan,
- struct ust_app_ctx *ua_ctx, struct ust_app *app)
+static int create_ust_channel_context(struct ust_app_channel *ua_chan,
+ struct ust_app_ctx *ua_ctx,
+ struct ust_app *app)
{
int ret;
health_code_update();
pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
- ua_chan->obj, &ua_ctx->obj);
+ ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx, ua_chan->obj, &ua_ctx->obj);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
goto error;
}
ua_ctx->handle = ua_ctx->obj->handle;
DBG2("UST app context handle %d created successfully for channel %s",
- ua_ctx->handle, ua_chan->name);
+ ua_ctx->handle,
+ ua_chan->name);
error:
health_code_update();
* Set the filter on the tracer.
*/
static int set_ust_object_filter(struct ust_app *app,
- const struct lttng_bytecode *bytecode,
- struct lttng_ust_abi_object_data *ust_object)
+ const struct lttng_bytecode *bytecode,
+ struct lttng_ust_abi_object_data *ust_object)
{
int ret;
- struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
+ struct lttng_ust_abi_filter_bytecode *ust_bytecode = nullptr;
health_code_update();
goto error;
}
pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
- ust_object);
+ ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode, ust_object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
- ret, app->pid, app->sock, ust_object);
+ ret,
+ app->pid,
+ app->sock,
+ ust_object);
}
goto error;
}
* the captured payloads.
*/
static int set_ust_capture(struct ust_app *app,
- const struct lttng_bytecode *bytecode,
- unsigned int capture_seqnum,
- struct lttng_ust_abi_object_data *ust_object)
+ const struct lttng_bytecode *bytecode,
+ unsigned int capture_seqnum,
+ struct lttng_ust_abi_object_data *ust_object)
{
int ret;
- struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
+ struct lttng_ust_abi_capture_bytecode *ust_bytecode = nullptr;
health_code_update();
ust_bytecode->seqnum = capture_seqnum;
pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
- ust_object);
+ ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode, ust_object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
- ret, app->pid,
- app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
goto error;
return ret;
}
-static
-struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
- const struct lttng_event_exclusion *exclusion)
+static struct lttng_ust_abi_event_exclusion *
+create_ust_exclusion_from_exclusion(const struct lttng_event_exclusion *exclusion)
{
- struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
+ struct lttng_ust_abi_event_exclusion *ust_exclusion = nullptr;
size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
}
LTTNG_ASSERT(sizeof(struct lttng_event_exclusion) ==
- sizeof(struct lttng_ust_abi_event_exclusion));
+ sizeof(struct lttng_ust_abi_event_exclusion));
memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
return ust_exclusion;
* Set event exclusions on the tracer.
*/
static int set_ust_object_exclusions(struct ust_app *app,
- const struct lttng_event_exclusion *exclusions,
- struct lttng_ust_abi_object_data *ust_object)
+ const struct lttng_event_exclusion *exclusions,
+ struct lttng_ust_abi_object_data *ust_object)
{
int ret;
- struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
+ struct lttng_ust_abi_event_exclusion *ust_exclusions = nullptr;
LTTNG_ASSERT(exclusions && exclusions->count > 0);
health_code_update();
- ust_exclusions = create_ust_exclusion_from_exclusion(
- exclusions);
+ ust_exclusions = create_ust_exclusion_from_exclusion(exclusions);
if (!ust_exclusions) {
ret = -LTTNG_ERR_NOMEM;
goto error;
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app event exclusion failed. Communication time out(pid: %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
- ret, app->pid, app->sock, ust_object);
+ ret,
+ app->pid,
+ app->sock,
+ ust_object);
}
goto error;
}
/*
* Disable the specified event on to UST tracer for the UST session.
*/
-static int disable_ust_object(struct ust_app *app,
- struct lttng_ust_abi_object_data *object)
+static int disable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *object)
{
int ret;
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
- ret, app->pid, app->sock, object);
+ ret,
+ app->pid,
+ app->sock,
+ object);
}
goto error;
}
- DBG2("UST app object %p disabled successfully for app: pid = %d",
- object, app->pid);
+ DBG2("UST app object %p disabled successfully for app: pid = %d", object, app->pid);
error:
health_code_update();
* Disable the specified channel on to UST tracer for the UST session.
*/
static int disable_ust_channel(struct ust_app *app,
- struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan)
{
int ret;
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
- ua_chan->name, ua_sess->handle, ret,
- app->pid, app->sock);
+ ua_chan->name,
+ ua_sess->handle,
+ ret,
+ app->pid,
+ app->sock);
}
goto error;
}
- DBG2("UST app channel %s disabled successfully for app: pid = %d",
- ua_chan->name, app->pid);
+ DBG2("UST app channel %s disabled successfully for app: pid = %d", ua_chan->name, app->pid);
error:
health_code_update();
* Enable the specified channel on to UST tracer for the UST session.
*/
static int enable_ust_channel(struct ust_app *app,
- struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan)
{
int ret;
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
- ua_chan->name, app->pid, app->sock);
+ ua_chan->name,
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
- ua_chan->name, app->pid, app->sock);
+ ua_chan->name,
+ app->pid,
+ app->sock);
} else {
ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
- ua_chan->name, ua_sess->handle, ret,
- app->pid, app->sock);
+ ua_chan->name,
+ ua_sess->handle,
+ ret,
+ app->pid,
+ app->sock);
}
goto error;
}
- ua_chan->enabled = 1;
+ ua_chan->enabled = true;
- DBG2("UST app channel %s enabled successfully for app: pid = %d",
- ua_chan->name, app->pid);
+ DBG2("UST app channel %s enabled successfully for app: pid = %d", ua_chan->name, app->pid);
error:
health_code_update();
/*
* Enable the specified event on to UST tracer for the UST session.
*/
-static int enable_ust_object(
- struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
+static int enable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
{
int ret;
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
- ret, app->pid, app->sock, ust_object);
+ ret,
+ app->pid,
+ app->sock,
+ ust_object);
}
goto error;
}
- DBG2("UST app object %p enabled successfully for app: pid = %d",
- ust_object, app->pid);
+ DBG2("UST app object %p enabled successfully for app: pid = %d", ust_object, app->pid);
error:
health_code_update();
* Return 0 on success. On error, a negative value is returned.
*/
static int send_channel_pid_to_ust(struct ust_app *app,
- struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan)
{
int ret;
struct ust_app_stream *stream, *stmp;
health_code_update();
- DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
- app->sock);
+ DBG("UST app sending channel %s to UST app sock %d", ua_chan->name, app->sock);
/* Send channel to the application. */
ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- ret = -ENOTCONN; /* Caused by app exiting. */
+ ret = -ENOTCONN; /* Caused by app exiting. */
goto error;
} else if (ret == -EAGAIN) {
/* Caused by timeout. */
- WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
- app->pid, ua_chan->name, ua_sess->tracing_id);
+ WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64
+ "\".",
+ app->pid,
+ ua_chan->name,
+ ua_sess->tracing_id);
/* Treat this the same way as an application that is exiting. */
ret = -ENOTCONN;
goto error;
health_code_update();
/* Send all streams to application. */
- cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
+ cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = -ENOTCONN; /* Caused by app exiting. */
goto error;
} else if (ret == -EAGAIN) {
/* Caused by timeout. */
- WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
- app->pid, stream->name, ua_chan->name,
- ua_sess->tracing_id);
+ WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64
+ "\".",
+ app->pid,
+ stream->name,
+ ua_chan->name,
+ ua_sess->tracing_id);
/*
* Treat this the same way as an application that is
* exiting.
*
* Should be called with session mutex held.
*/
-static
-int create_ust_event(struct ust_app *app,
- struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
+static int create_ust_event(struct ust_app *app,
+ struct ust_app_channel *ua_chan,
+ struct ust_app_event *ua_event)
{
int ret = 0;
/* Create UST event on tracer */
pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
- &ua_event->obj);
+ ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj, &ua_event->obj);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
- ua_event->attr.name, ret, app->pid,
- app->sock);
+ ua_event->attr.name,
+ ret,
+ app->pid,
+ app->sock);
}
goto error;
}
ua_event->handle = ua_event->obj->handle;
DBG2("UST app event %s created successfully for pid:%d object = %p",
- ua_event->attr.name, app->pid, ua_event->obj);
+ ua_event->attr.name,
+ app->pid,
+ ua_event->obj);
health_code_update();
return ret;
}
-static int init_ust_event_notifier_from_event_rule(
- const struct lttng_event_rule *rule,
- struct lttng_ust_abi_event_notifier *event_notifier)
+static int
+init_ust_event_notifier_from_event_rule(const struct lttng_event_rule *rule,
+ struct lttng_ust_abi_event_notifier *event_notifier)
{
enum lttng_event_rule_status status;
enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
int loglevel = -1, ret = 0;
const char *pattern;
-
memset(event_notifier, 0, sizeof(*event_notifier));
if (lttng_event_rule_targets_agent_domain(rule)) {
* attached later on.
* Set the default values for the agent event.
*/
- pattern = event_get_default_agent_ust_name(
- lttng_event_rule_get_domain_type(rule));
+ pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
loglevel = 0;
ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
} else {
const struct lttng_log_level_rule *log_level_rule;
LTTNG_ASSERT(lttng_event_rule_get_type(rule) ==
- LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);
+ LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);
status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
if (status != LTTNG_EVENT_RULE_STATUS_OK) {
abort();
}
- status = lttng_event_rule_user_tracepoint_get_log_level_rule(
- rule, &log_level_rule);
+ status = lttng_event_rule_user_tracepoint_get_log_level_rule(rule, &log_level_rule);
if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
} else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
switch (lttng_log_level_rule_get_type(log_level_rule)) {
case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
- llr_status = lttng_log_level_rule_exactly_get_level(
- log_level_rule, &loglevel);
+ llr_status = lttng_log_level_rule_exactly_get_level(log_level_rule,
+ &loglevel);
break;
case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
- log_level_rule, &loglevel);
+ log_level_rule, &loglevel);
break;
default:
abort();
}
event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
- ret = lttng_strncpy(event_notifier->event.name, pattern,
- sizeof(event_notifier->event.name));
+ ret = lttng_strncpy(
+ event_notifier->event.name, pattern, sizeof(event_notifier->event.name));
if (ret) {
- ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
- pattern);
+ ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ", pattern);
goto end;
}
* given application.
*/
static int create_ust_event_notifier(struct ust_app *app,
- struct ust_app_event_notifier_rule *ua_event_notifier_rule)
+ struct ust_app_event_notifier_rule *ua_event_notifier_rule)
{
int ret = 0;
enum lttng_condition_status condition_status;
- const struct lttng_condition *condition = NULL;
+ const struct lttng_condition *condition = nullptr;
struct lttng_ust_abi_event_notifier event_notifier;
- const struct lttng_event_rule *event_rule = NULL;
+ const struct lttng_event_rule *event_rule = nullptr;
unsigned int capture_bytecode_count = 0, i;
enum lttng_condition_status cond_status;
enum lttng_event_rule_type event_rule_type;
health_code_update();
LTTNG_ASSERT(app->event_notifier_group.object);
- condition = lttng_trigger_get_const_condition(
- ua_event_notifier_rule->trigger);
+ condition = lttng_trigger_get_const_condition(ua_event_notifier_rule->trigger);
LTTNG_ASSERT(condition);
LTTNG_ASSERT(lttng_condition_get_type(condition) ==
- LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
+ LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
- condition_status = lttng_condition_event_rule_matches_get_rule(
- condition, &event_rule);
+ condition_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
LTTNG_ASSERT(event_rule);
event_rule_type = lttng_event_rule_get_type(event_rule);
LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
- event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
- event_rule_type ==
- LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
- event_rule_type ==
- LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
+ event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
+ event_rule_type == LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
+ event_rule_type == LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
event_notifier.event.token = ua_event_notifier_rule->token;
/* Create UST event notifier against the tracer. */
pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
- app->event_notifier_group.object,
- &ua_event_notifier_rule->obj);
+ ret = lttng_ust_ctl_create_event_notifier(app->sock,
+ &event_notifier,
+ app->event_notifier_group.object,
+ &ua_event_notifier_rule->obj);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
- event_notifier.event.name, ret, app->pid,
- app->sock);
+ event_notifier.event.name,
+ ret,
+ app->pid,
+ app->sock);
}
goto error;
}
ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d, object = %p",
- event_notifier.event.name, app->name, app->pid,
- ua_event_notifier_rule->obj);
+ event_notifier.event.name,
+ app->name,
+ app->pid,
+ ua_event_notifier_rule->obj);
health_code_update();
/* Set filter if one is present. */
if (ua_event_notifier_rule->filter) {
- ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
- ua_event_notifier_rule->obj);
+ ret = set_ust_object_filter(
+ app, ua_event_notifier_rule->filter, ua_event_notifier_rule->obj);
if (ret < 0) {
goto error;
}
/* Set exclusions for the event. */
if (ua_event_notifier_rule->exclusion) {
- ret = set_ust_object_exclusions(app,
- ua_event_notifier_rule->exclusion,
- ua_event_notifier_rule->obj);
+ ret = set_ust_object_exclusions(
+ app, ua_event_notifier_rule->exclusion, ua_event_notifier_rule->obj);
if (ret < 0) {
goto error;
}
/* Set the capture bytecodes. */
cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
- condition, &capture_bytecode_count);
+ condition, &capture_bytecode_count);
LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
for (i = 0; i < capture_bytecode_count; i++) {
const struct lttng_bytecode *capture_bytecode =
- lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
- condition, i);
+ lttng_condition_event_rule_matches_get_capture_bytecode_at_index(condition,
+ i);
- ret = set_ust_capture(app, capture_bytecode, i,
- ua_event_notifier_rule->obj);
+ ret = set_ust_capture(app, capture_bytecode, i, ua_event_notifier_rule->obj);
if (ret < 0) {
goto error;
}
/*
* Copy data between an UST app event and a LTT event.
*/
-static void shadow_copy_event(struct ust_app_event *ua_event,
- struct ltt_ust_event *uevent)
+static void shadow_copy_event(struct ust_app_event *ua_event, struct ltt_ust_event *uevent)
{
size_t exclusion_alloc_size;
/* Copy exclusion data */
if (uevent->exclusion) {
exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
- LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
+ LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
ua_event->exclusion = zmalloc<lttng_event_exclusion>(exclusion_alloc_size);
- if (ua_event->exclusion == NULL) {
+ if (ua_event->exclusion == nullptr) {
PERROR("malloc");
} else {
- memcpy(ua_event->exclusion, uevent->exclusion,
- exclusion_alloc_size);
+ memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size);
}
}
}
/*
* Copy data between an UST app channel and a LTT channel.
*/
-static void shadow_copy_channel(struct ust_app_channel *ua_chan,
- struct ltt_ust_channel *uchan)
+static void shadow_copy_channel(struct ust_app_channel *ua_chan, struct ltt_ust_channel *uchan)
{
DBG2("UST app shadow copy of channel %s started", ua_chan->name);
* Copy data between a UST app session and a regular LTT session.
*/
static void shadow_copy_session(struct ust_app_session *ua_sess,
- struct ltt_ust_session *usess, struct ust_app *app)
+ struct ltt_ust_session *usess,
+ struct ust_app *app)
{
struct tm *timeinfo;
char datetime[16];
ua_sess->output_traces = usess->output_traces;
ua_sess->live_timer_interval = usess->live_timer_interval;
- copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
- &usess->metadata_attr);
+ copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &usess->metadata_attr);
switch (ua_sess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
- ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
- DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
- datetime);
+ ret = snprintf(ua_sess->path,
+ sizeof(ua_sess->path),
+ DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
+ app->name,
+ app->pid,
+ datetime);
break;
case LTTNG_BUFFER_PER_UID:
- ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
- DEFAULT_UST_TRACE_UID_PATH,
- lttng_credentials_get_uid(&ua_sess->real_credentials),
- app->abi.bits_per_long);
+ ret = snprintf(ua_sess->path,
+ sizeof(ua_sess->path),
+ DEFAULT_UST_TRACE_UID_PATH,
+ lttng_credentials_get_uid(&ua_sess->real_credentials),
+ app->abi.bits_per_long);
break;
default:
abort();
goto error;
}
- strncpy(ua_sess->root_shm_path, usess->root_shm_path,
- sizeof(ua_sess->root_shm_path));
+ strncpy(ua_sess->root_shm_path, usess->root_shm_path, sizeof(ua_sess->root_shm_path));
ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
- strncpy(ua_sess->shm_path, usess->shm_path,
- sizeof(ua_sess->shm_path));
+ strncpy(ua_sess->shm_path, usess->shm_path, sizeof(ua_sess->shm_path));
ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
if (ua_sess->shm_path[0]) {
switch (ua_sess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
- ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
- "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
- app->name, app->pid, datetime);
+ ret = snprintf(tmp_shm_path,
+ sizeof(tmp_shm_path),
+ "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
+ app->name,
+ app->pid,
+ datetime);
break;
case LTTNG_BUFFER_PER_UID:
- ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
- "/" DEFAULT_UST_TRACE_UID_PATH,
- app->uid, app->abi.bits_per_long);
+ ret = snprintf(tmp_shm_path,
+ sizeof(tmp_shm_path),
+ "/" DEFAULT_UST_TRACE_UID_PATH,
+ app->uid,
+ app->abi.bits_per_long);
break;
default:
abort();
abort();
goto error;
}
- strncat(ua_sess->shm_path, tmp_shm_path,
+ strncat(ua_sess->shm_path,
+ tmp_shm_path,
sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
}
/*
* Lookup sesison wrapper.
*/
-static
-void __lookup_session_by_app(const struct ltt_ust_session *usess,
- struct ust_app *app, struct lttng_ht_iter *iter)
+static void __lookup_session_by_app(const struct ltt_ust_session *usess,
+ struct ust_app *app,
+ struct lttng_ht_iter *iter)
{
/* Get right UST app session from app */
lttng_ht_lookup(app->sessions, &usess->id, iter);
* Return ust app session from the app session hashtable using the UST session
* id.
*/
-static struct ust_app_session *lookup_session_by_app(
- const struct ltt_ust_session *usess, struct ust_app *app)
+static struct ust_app_session *lookup_session_by_app(const struct ltt_ust_session *usess,
+ struct ust_app *app)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_u64 *node;
__lookup_session_by_app(usess, app, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
goto error;
}
return lttng::utils::container_of(node, &ust_app_session::node);
error:
- return NULL;
+ return nullptr;
}
/*
* Return 0 on success or else a negative value.
*/
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
- struct ust_app *app, struct buffer_reg_pid **regp)
+ struct ust_app *app,
+ struct buffer_reg_pid **regp)
{
int ret = 0;
struct buffer_reg_pid *reg_pid;
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
reg_pid = buffer_reg_pid_find(ua_sess->id);
if (!reg_pid) {
* This is the create channel path meaning that if there is NO
* registry available, we have to create one for this session.
*/
- ret = buffer_reg_pid_create(ua_sess->id, ®_pid,
- ua_sess->root_shm_path, ua_sess->shm_path);
+ ret = buffer_reg_pid_create(
+ ua_sess->id, ®_pid, ua_sess->root_shm_path, ua_sess->shm_path);
if (ret < 0) {
goto error;
}
}
/* Initialize registry. */
- reg_pid->registry->reg.ust = ust_registry_session_per_pid_create(app, app->abi,
- app->version.major, app->version.minor, reg_pid->root_shm_path,
- reg_pid->shm_path,
- lttng_credentials_get_uid(&ua_sess->effective_credentials),
- lttng_credentials_get_gid(&ua_sess->effective_credentials),
- ua_sess->tracing_id);
+ reg_pid->registry->reg.ust = ust_registry_session_per_pid_create(
+ app,
+ app->abi,
+ app->version.major,
+ app->version.minor,
+ reg_pid->root_shm_path,
+ reg_pid->shm_path,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ ua_sess->tracing_id);
if (!reg_pid->registry->reg.ust) {
/*
* reg_pid->registry->reg.ust is NULL upon error, so we need to
*regp = reg_pid;
}
error:
- rcu_read_unlock();
return ret;
}
* Return 0 on success or else a negative value.
*/
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
- struct ust_app *app, struct buffer_reg_uid **regp)
+ struct ust_app_session *ua_sess,
+ struct ust_app *app,
+ struct buffer_reg_uid **regp)
{
int ret = 0;
struct buffer_reg_uid *reg_uid;
LTTNG_ASSERT(usess);
LTTNG_ASSERT(app);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
if (!reg_uid) {
* This is the create channel path meaning that if there is NO
* registry available, we have to create one for this session.
*/
- ret = buffer_reg_uid_create(usess->id, app->abi.bits_per_long, app->uid,
- LTTNG_DOMAIN_UST, ®_uid, ua_sess->root_shm_path,
- ua_sess->shm_path);
+ ret = buffer_reg_uid_create(usess->id,
+ app->abi.bits_per_long,
+ app->uid,
+ LTTNG_DOMAIN_UST,
+ ®_uid,
+ ua_sess->root_shm_path,
+ ua_sess->shm_path);
if (ret < 0) {
goto error;
}
/* Initialize registry. */
reg_uid->registry->reg.ust = ust_registry_session_per_uid_create(app->abi,
- app->version.major, app->version.minor, reg_uid->root_shm_path,
- reg_uid->shm_path, usess->uid, usess->gid, ua_sess->tracing_id, app->uid);
+ app->version.major,
+ app->version.minor,
+ reg_uid->root_shm_path,
+ reg_uid->shm_path,
+ usess->uid,
+ usess->gid,
+ ua_sess->tracing_id,
+ app->uid);
if (!reg_uid->registry->reg.ust) {
/*
* reg_uid->registry->reg.ust is NULL upon error, so we need to
* that if the buffer registry can be found, its ust registry is
* non-NULL.
*/
- buffer_reg_uid_destroy(reg_uid, NULL);
+ buffer_reg_uid_destroy(reg_uid, nullptr);
goto error;
}
*regp = reg_uid;
}
error:
- rcu_read_unlock();
return ret;
}
* -ENOTCONN which is the default code if the lttng_ust_ctl_create_session fails.
*/
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
- struct ust_app *app, struct ust_app_session **ua_sess_ptr,
- int *is_created)
+ struct ust_app *app,
+ struct ust_app_session **ua_sess_ptr,
+ int *is_created)
{
int ret, created = 0;
struct ust_app_session *ua_sess;
health_code_update();
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
- app->pid, usess->id);
+ app->pid,
+ usess->id);
ua_sess = alloc_ust_app_session();
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
/* Only malloc can failed so something is really wrong */
ret = -ENOMEM;
goto error;
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
/* Init local registry. */
- ret = setup_buffer_reg_pid(ua_sess, app, NULL);
+ ret = setup_buffer_reg_pid(ua_sess, app, nullptr);
if (ret < 0) {
delete_ust_app_session(-1, ua_sess, app);
goto error;
break;
case LTTNG_BUFFER_PER_UID:
/* Look for a global registry. If none exists, create one. */
- ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
+ ret = setup_buffer_reg_uid(usess, ua_sess, app, nullptr);
if (ret < 0) {
delete_ust_app_session(-1, ua_sess, app);
goto error;
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
ret = 0;
} else if (ret == -EAGAIN) {
DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
ret = 0;
} else {
ERR("UST app creating session failed with ret %d: pid = %d, sock =%d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
delete_ust_app_session(-1, ua_sess, app);
if (ret != -ENOMEM) {
ua_sess->handle = ret;
/* Add ust app session to app's HT */
- lttng_ht_node_init_u64(&ua_sess->node,
- ua_sess->tracing_id);
+ lttng_ht_node_init_u64(&ua_sess->node, ua_sess->tracing_id);
lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
- lttng_ht_add_unique_ulong(app->ust_sessions_objd,
- &ua_sess->ust_objd_node);
+ lttng_ht_add_unique_ulong(app->ust_sessions_objd, &ua_sess->ust_objd_node);
DBG2("UST app session created successfully with handle %d", ret);
}
goto no_match;
}
- switch(key->ctx) {
+ switch (key->ctx) {
case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
if (strncmp(key->u.perf_counter.name,
- ctx->ctx.u.perf_counter.name,
- sizeof(key->u.perf_counter.name))) {
+ ctx->ctx.u.perf_counter.name,
+ sizeof(key->u.perf_counter.name)) != 0) {
goto no_match;
}
break;
case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
- if (strcmp(key->u.app_ctx.provider_name,
- ctx->ctx.u.app_ctx.provider_name) ||
- strcmp(key->u.app_ctx.ctx_name,
- ctx->ctx.u.app_ctx.ctx_name)) {
+ if (strcmp(key->u.app_ctx.provider_name, ctx->ctx.u.app_ctx.provider_name) != 0 ||
+ strcmp(key->u.app_ctx.ctx_name, ctx->ctx.u.app_ctx.ctx_name) != 0) {
goto no_match;
}
break;
* Must be called while holding RCU read side lock.
* Return an ust_app_ctx object or NULL on error.
*/
-static
-struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
- struct lttng_ust_context_attr *uctx)
+static struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
+ struct lttng_ust_context_attr *uctx)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_ulong *node;
- struct ust_app_ctx *app_ctx = NULL;
+ struct ust_app_ctx *app_ctx = nullptr;
LTTNG_ASSERT(uctx);
LTTNG_ASSERT(ht);
ASSERT_RCU_READ_LOCKED();
/* Lookup using the lttng_ust_context_type and a custom match fct. */
- cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
- ht_match_ust_app_ctx, uctx, &iter.iter);
+ cds_lfht_lookup(ht->ht,
+ ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
+ ht_match_ust_app_ctx,
+ uctx,
+ &iter.iter);
node = lttng_ht_iter_get_node_ulong(&iter);
if (!node) {
goto end;
*
* Called with UST app session lock held and a RCU read side lock.
*/
-static
-int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
- struct lttng_ust_context_attr *uctx,
- struct ust_app *app)
+static int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
+ struct lttng_ust_context_attr *uctx,
+ struct ust_app *app)
{
int ret = 0;
struct ust_app_ctx *ua_ctx;
}
ua_ctx = alloc_ust_app_ctx(uctx);
- if (ua_ctx == NULL) {
+ if (ua_ctx == nullptr) {
/* malloc failed */
ret = -ENOMEM;
goto error;
*
* Called with UST app session lock held.
*/
-static
-int enable_ust_app_event(struct ust_app_event *ua_event,
- struct ust_app *app)
+static int enable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app)
{
int ret;
goto error;
}
- ua_event->enabled = 1;
+ ua_event->enabled = true;
error:
return ret;
/*
* Disable on the tracer side a ust app event for the session and channel.
*/
-static int disable_ust_app_event(struct ust_app_event *ua_event,
- struct ust_app *app)
+static int disable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app)
{
int ret;
goto error;
}
- ua_event->enabled = 0;
+ ua_event->enabled = false;
error:
return ret;
/*
* Lookup ust app channel for session and disable it on the tracer side.
*/
-static
-int disable_ust_app_channel(struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan, struct ust_app *app)
+static int disable_ust_app_channel(struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan,
+ struct ust_app *app)
{
int ret;
goto error;
}
- ua_chan->enabled = 0;
+ ua_chan->enabled = false;
error:
return ret;
* MUST be called with a RCU read side lock acquired.
*/
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
- struct ltt_ust_channel *uchan, struct ust_app *app)
+ struct ltt_ust_channel *uchan,
+ struct ust_app *app)
{
int ret = 0;
struct lttng_ht_iter iter;
ASSERT_RCU_READ_LOCKED();
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
ua_chan_node = lttng_ht_iter_get_node_str(&iter);
- if (ua_chan_node == NULL) {
+ if (ua_chan_node == nullptr) {
DBG2("Unable to find channel %s in ust session id %" PRIu64,
- uchan->name, ua_sess->tracing_id);
+ uchan->name,
+ ua_sess->tracing_id);
goto error;
}
* Return 0 on success or else a negative value.
*/
static int do_consumer_create_channel(struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
- int bitness, lsu::registry_session *registry)
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan,
+ int bitness,
+ lsu::registry_session *registry)
{
int ret;
unsigned int nb_fd = 0;
LTTNG_ASSERT(ua_chan);
LTTNG_ASSERT(registry);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
health_code_update();
/* Get the right consumer socket for the application. */
* Ask consumer to create channel. The consumer will return the number of
* stream we have to expect.
*/
- ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
- registry, usess->current_trace_chunk);
+ ret = ust_consumer_ask_channel(
+ ua_sess, ua_chan, usess->consumer, socket, registry, usess->current_trace_chunk);
if (ret < 0) {
goto error_ask;
}
}
}
- rcu_read_unlock();
return 0;
error_destroy:
lttng_fd_put(LTTNG_FD_APPS, 1);
error:
health_code_update();
- rcu_read_unlock();
return ret;
}
* Return 0 on success or else a negative value.
*/
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
- struct ust_app_stream *stream)
+ struct ust_app_stream *stream)
{
int ret;
}
/* Duplicate object for stream once the original is in the registry. */
- ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
- reg_stream->obj.ust);
+ ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj, reg_stream->obj.ust);
if (ret < 0) {
ERR("Duplicate stream obj from %p to %p failed with ret %d",
- reg_stream->obj.ust, stream->obj, ret);
+ reg_stream->obj.ust,
+ stream->obj,
+ ret);
lttng_fd_put(LTTNG_FD_APPS, 2);
goto error;
}
* Return 0 on success or else a negative value.
*/
static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
- struct ust_app_channel *ua_chan)
+ struct ust_app_channel *ua_chan)
{
int ret;
ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
if (ret < 0) {
ERR("Duplicate channel obj from %p to %p failed with ret: %d",
- buf_reg_chan->obj.ust, ua_chan->obj, ret);
+ buf_reg_chan->obj.ust,
+ ua_chan->obj,
+ ret);
goto error;
}
ua_chan->handle = ua_chan->obj->handle;
* Return 0 on success or else a negative value.
*/
static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
- struct ust_app_channel *ua_chan,
- struct ust_app *app)
+ struct ust_app_channel *ua_chan,
+ struct ust_app *app)
{
int ret = 0;
struct ust_app_stream *stream, *stmp;
DBG2("UST app setup buffer registry stream");
/* Send all streams to application. */
- cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
+ cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
struct buffer_reg_stream *reg_stream;
ret = buffer_reg_stream_create(®_stream);
* stream call does not release the object.
*/
reg_stream->obj.ust = stream->obj;
- stream->obj = NULL;
+ stream->obj = nullptr;
buffer_reg_stream_add(reg_stream, buf_reg_chan);
/* We don't need the streams anymore. */
* Return 0 on success else a negative value.
*/
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
- struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
+ struct ust_app_channel *ua_chan,
+ struct buffer_reg_channel **regp)
{
int ret;
- struct buffer_reg_channel *buf_reg_chan = NULL;
+ struct buffer_reg_channel *buf_reg_chan = nullptr;
LTTNG_ASSERT(reg_sess);
LTTNG_ASSERT(ua_chan);
try {
reg_sess->reg.ust->add_channel(ua_chan->tracing_channel_id);
} catch (const std::exception& ex) {
- ERR("Failed to add a channel registry to userspace registry session: %s", ex.what());
+ ERR("Failed to add a channel registry to userspace registry session: %s",
+ ex.what());
ret = -1;
goto error;
}
* Return 0 on success else a negative value.
*/
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
- struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
- struct ust_app *app)
+ struct ust_app_channel *ua_chan,
+ struct buffer_reg_channel *buf_reg_chan,
+ struct ust_app *app)
{
int ret;
}
buf_reg_chan->obj.ust = ua_chan->obj;
- ua_chan->obj = NULL;
+ ua_chan->obj = nullptr;
return 0;
* Return 0 on success else a negative value.
*/
static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
- struct ust_app *app, struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan)
+ struct ust_app *app,
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan)
{
int ret;
struct buffer_reg_stream *reg_stream;
/* Send channel to the application. */
ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- ret = -ENOTCONN; /* Caused by app exiting. */
+ ret = -ENOTCONN; /* Caused by app exiting. */
goto error;
} else if (ret == -EAGAIN) {
/* Caused by timeout. */
- WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
- app->pid, ua_chan->name, ua_sess->tracing_id);
+ WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64
+ "\".",
+ app->pid,
+ ua_chan->name,
+ ua_sess->tracing_id);
/* Treat this the same way as an application that is exiting. */
ret = -ENOTCONN;
goto error;
/* Send all streams to application. */
pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
- cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
+ cds_list_for_each_entry (reg_stream, &buf_reg_chan->streams, lnode) {
struct ust_app_stream stream = {};
ret = duplicate_stream_object(reg_stream, &stream);
* Treat this the same way as an application
* that is exiting.
*/
- WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64 "\".",
- app->pid,
- ua_chan->name,
- ua_sess->tracing_id);
+ WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64
+ "\".",
+ app->pid,
+ ua_chan->name,
+ ua_sess->tracing_id);
ret = -ENOTCONN;
}
(void) release_ust_app_stream(-1, &stream, app);
* Return 0 on success else a negative value.
*/
static int create_channel_per_uid(struct ust_app *app,
- struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan)
+ struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan)
{
int ret;
struct buffer_reg_uid *reg_uid;
struct buffer_reg_channel *buf_reg_chan;
- struct ltt_session *session = NULL;
+ struct ltt_session *session = nullptr;
enum lttng_error_code notification_ret;
LTTNG_ASSERT(app);
*/
LTTNG_ASSERT(reg_uid);
- buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
- reg_uid);
+ buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id, reg_uid);
if (buf_reg_chan) {
goto send_channel;
}
/* Create the buffer registry channel object. */
ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
if (ret < 0) {
- ERR("Error creating the UST channel \"%s\" registry instance",
- ua_chan->name);
+ ERR("Error creating the UST channel \"%s\" registry instance", ua_chan->name);
goto error;
}
* Create the buffers on the consumer side. This call populates the
* ust app channel object with all streams and data object.
*/
- ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->abi.bits_per_long, reg_uid->registry->reg.ust);
+ ret = do_consumer_create_channel(
+ usess, ua_sess, ua_chan, app->abi.bits_per_long, reg_uid->registry->reg.ust);
if (ret < 0) {
- ERR("Error creating UST channel \"%s\" on the consumer daemon",
- ua_chan->name);
+ ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name);
/*
* Let's remove the previously created buffer registry channel so
auto locked_registry = reg_uid->registry->reg.ust->lock();
try {
locked_registry->remove_channel(ua_chan->tracing_channel_id, false);
- } catch (const std::exception &ex) {
+ } catch (const std::exception& ex) {
DBG("Could not find channel for removal: %s", ex.what());
}
buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
/*
* Setup the streams and add it to the session registry.
*/
- ret = setup_buffer_reg_channel(reg_uid->registry,
- ua_chan, buf_reg_chan, app);
+ ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, buf_reg_chan, app);
if (ret < 0) {
ERR("Error setting up UST channel \"%s\"", ua_chan->name);
goto error;
{
auto locked_registry = reg_uid->registry->reg.ust->lock();
- auto& ust_reg_chan = locked_registry->get_channel(ua_chan->tracing_channel_id);
+ auto& ust_reg_chan = locked_registry->channel(ua_chan->tracing_channel_id);
ust_reg_chan._consumer_key = ua_chan->key;
}
/* Notify the notification subsystem of the channel's creation. */
notification_ret = notification_thread_command_add_channel(
- the_notification_thread_handle, session->name,
- lttng_credentials_get_uid(
- &ua_sess->effective_credentials),
- lttng_credentials_get_gid(
- &ua_sess->effective_credentials),
- ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
- ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+ the_notification_thread_handle,
+ session->id,
+ ua_chan->name,
+ ua_chan->key,
+ LTTNG_DOMAIN_UST,
+ ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
if (notification_ret != LTTNG_OK) {
- ret = - (int) notification_ret;
+ ret = -(int) notification_ret;
ERR("Failed to add channel to notification thread");
goto error;
}
* Return 0 on success else a negative value.
*/
static int create_channel_per_pid(struct ust_app *app,
- struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan)
+ struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan)
{
int ret;
lsu::registry_session *registry;
enum lttng_error_code cmd_ret;
- struct ltt_session *session = NULL;
+ struct ltt_session *session = nullptr;
uint64_t chan_reg_key;
LTTNG_ASSERT(app);
DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
registry = get_session_registry(ua_sess);
/* The UST app session lock is held, registry shall not be null. */
try {
registry->add_channel(ua_chan->key);
} catch (const std::exception& ex) {
- ERR("Error creating the UST channel \"%s\" registry instance: %s", ua_chan->name,
- ex.what());
+ ERR("Error creating the UST channel \"%s\" registry instance: %s",
+ ua_chan->name,
+ ex.what());
ret = -1;
goto error;
}
ASSERT_SESSION_LIST_LOCKED();
/* Create and get channel on the consumer side. */
- ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->abi.bits_per_long, registry);
+ ret = do_consumer_create_channel(usess, ua_sess, ua_chan, app->abi.bits_per_long, registry);
if (ret < 0) {
- ERR("Error creating UST channel \"%s\" on the consumer daemon",
- ua_chan->name);
+ ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name);
goto error_remove_from_registry;
}
{
auto locked_registry = registry->lock();
- auto& ust_reg_chan = locked_registry->get_channel(chan_reg_key);
+ auto& ust_reg_chan = locked_registry->channel(chan_reg_key);
ust_reg_chan._consumer_key = ua_chan->key;
}
- cmd_ret = notification_thread_command_add_channel(
- the_notification_thread_handle, session->name,
- lttng_credentials_get_uid(
- &ua_sess->effective_credentials),
- lttng_credentials_get_gid(
- &ua_sess->effective_credentials),
- ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
- ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+ cmd_ret = notification_thread_command_add_channel(the_notification_thread_handle,
+ session->id,
+ ua_chan->name,
+ ua_chan->key,
+ LTTNG_DOMAIN_UST,
+ ua_chan->attr.subbuf_size *
+ ua_chan->attr.num_subbuf);
if (cmd_ret != LTTNG_OK) {
- ret = - (int) cmd_ret;
+ ret = -(int) cmd_ret;
ERR("Failed to add channel to notification thread");
goto error_remove_from_registry;
}
}
}
error:
- rcu_read_unlock();
if (session) {
session_put(session);
}
* the application exited concurrently.
*/
static int ust_app_channel_send(struct ust_app *app,
- struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan)
+ struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app_channel *ua_chan)
{
int ret;
* Return 0 on success or else a negative value.
*/
static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
- struct ltt_ust_channel *uchan,
- enum lttng_ust_abi_chan_type type,
- struct ltt_ust_session *usess __attribute__((unused)),
- struct ust_app_channel **ua_chanp)
+ struct ltt_ust_channel *uchan,
+ enum lttng_ust_abi_chan_type type,
+ struct ltt_ust_session *usess __attribute__((unused)),
+ struct ust_app_channel **ua_chanp)
{
int ret = 0;
struct lttng_ht_iter iter;
ASSERT_RCU_READ_LOCKED();
/* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
ua_chan_node = lttng_ht_iter_get_node_str(&iter);
- if (ua_chan_node != NULL) {
+ if (ua_chan_node != nullptr) {
ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
goto end;
}
ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
- if (ua_chan == NULL) {
+ if (ua_chan == nullptr) {
/* Only malloc can fail here */
ret = -ENOMEM;
goto error;
* Must be called with the RCU read side lock held.
* Called with ust app session mutex held.
*/
-static
-int create_ust_app_event(struct ust_app_channel *ua_chan,
- struct ltt_ust_event *uevent,
- struct ust_app *app)
+static int create_ust_app_event(struct ust_app_channel *ua_chan,
+ struct ltt_ust_event *uevent,
+ struct ust_app *app)
{
int ret = 0;
struct ust_app_event *ua_event;
ASSERT_RCU_READ_LOCKED();
ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
- if (ua_event == NULL) {
+ if (ua_event == nullptr) {
/* Only failure mode of alloc_ust_app_event(). */
ret = -ENOMEM;
goto end;
*/
if (ret == -LTTNG_UST_ERR_EXIST) {
ERR("Tracer for application reported that an event being created already existed: "
- "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
- uevent->attr.name,
- app->pid, app->ppid, app->uid,
- app->gid);
+ "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
+ uevent->attr.name,
+ app->pid,
+ app->ppid,
+ app->uid,
+ app->gid);
}
goto error;
}
add_unique_ust_app_event(ua_chan, ua_event);
- DBG2("UST app create event completed: app = '%s' pid = %d",
- app->name, app->pid);
+ DBG2("UST app create event completed: app = '%s' pid = %d", app->name, app->pid);
end:
return ret;
* Must be called with the RCU read side lock held.
* Called with ust app session mutex held.
*/
-static
-int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
- struct ust_app *app)
+static int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger, struct ust_app *app)
{
int ret = 0;
struct ust_app_event_notifier_rule *ua_event_notifier_rule;
ASSERT_RCU_READ_LOCKED();
ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
- if (ua_event_notifier_rule == NULL) {
+ if (ua_event_notifier_rule == nullptr) {
ret = -ENOMEM;
goto end;
}
*/
if (ret == -LTTNG_UST_ERR_EXIST) {
ERR("Tracer for application reported that an event notifier being created already exists: "
- "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
- lttng_trigger_get_tracer_token(trigger),
- app->pid, app->ppid, app->uid,
- app->gid);
+ "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
+ lttng_trigger_get_tracer_token(trigger),
+ app->pid,
+ app->ppid,
+ app->uid,
+ app->gid);
}
goto error;
}
lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
- &ua_event_notifier_rule->node);
+ &ua_event_notifier_rule->node);
DBG2("UST app create token event rule completed: app = '%s', pid = %d, token = %" PRIu64,
- app->name, app->pid, lttng_trigger_get_tracer_token(trigger));
+ app->name,
+ app->pid,
+ lttng_trigger_get_tracer_token(trigger));
goto end;
* Called with UST app session lock held and RCU read side lock.
*/
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
- struct ust_app *app, struct consumer_output *consumer)
+ struct ust_app *app,
+ struct consumer_output *consumer)
{
int ret = 0;
struct ust_app_channel *metadata;
struct consumer_socket *socket;
- struct ltt_session *session = NULL;
+ struct ltt_session *session = nullptr;
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
}
/* Allocate UST metadata */
- metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
+ metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, nullptr);
if (!metadata) {
/* malloc() failed */
ret = -ENOMEM;
* never added or monitored until we do a first push metadata to the
* consumer.
*/
- ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
- locked_registry.get(), session->current_trace_chunk);
+ ret = ust_consumer_ask_channel(ua_sess,
+ metadata,
+ consumer,
+ socket,
+ locked_registry.get(),
+ session->current_trace_chunk);
if (ret < 0) {
/* Nullify the metadata key so we don't try to close it later on. */
locked_registry->_metadata_key = 0;
goto error_consumer;
}
- DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
- metadata->key, app->pid);
+ DBG2("UST metadata with key %" PRIu64 " created for app pid %d", metadata->key, app->pid);
error_consumer:
lttng_fd_put(LTTNG_FD_APPS, 1);
*/
struct ust_app *ust_app_find_by_pid(pid_t pid)
{
- struct ust_app *app = NULL;
+ struct ust_app *app = nullptr;
struct lttng_ht_node_ulong *node;
struct lttng_ht_iter iter;
- lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
+ lttng_ht_lookup(ust_app_ht, (void *) ((unsigned long) pid), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
DBG2("UST app no found with pid %d", pid);
goto error;
}
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
int ret;
- struct ust_app *lta = NULL;
- struct lttng_pipe *event_notifier_event_source_pipe = NULL;
+ struct ust_app *lta = nullptr;
+ struct lttng_pipe *event_notifier_event_source_pipe = nullptr;
LTTNG_ASSERT(msg);
LTTNG_ASSERT(sock >= 0);
DBG3("UST app creating application for socket %d", sock);
- if ((msg->bits_per_long == 64 &&
- (uatomic_read(&the_ust_consumerd64_fd) ==
- -EINVAL)) ||
- (msg->bits_per_long == 32 &&
- (uatomic_read(&the_ust_consumerd32_fd) ==
- -EINVAL))) {
+ if ((msg->bits_per_long == 64 && (uatomic_read(&the_ust_consumerd64_fd) == -EINVAL)) ||
+ (msg->bits_per_long == 32 && (uatomic_read(&the_ust_consumerd32_fd) == -EINVAL))) {
ERR("Registration failed: application \"%s\" (pid: %d) has "
- "%d-bit long, but no consumerd for this size is available.\n",
- msg->name, msg->pid, msg->bits_per_long);
+ "%d-bit long, but no consumerd for this size is available.\n",
+ msg->name,
+ msg->pid,
+ msg->bits_per_long);
goto error;
}
ret = lttng_fd_get(LTTNG_FD_APPS, 2);
if (ret) {
ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
- msg->name, (int) msg->pid);
+ msg->name,
+ (int) msg->pid);
goto error;
}
event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
if (!event_notifier_event_source_pipe) {
PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
- msg->name, msg->pid);
+ msg->name,
+ msg->pid);
goto error;
}
lta = zmalloc<ust_app>();
- if (lta == NULL) {
+ if (lta == nullptr) {
PERROR("malloc");
goto error_free_pipe;
}
.uint32_t_alignment = msg->uint32_t_alignment,
.uint64_t_alignment = msg->uint64_t_alignment,
.byte_order = msg->byte_order == LITTLE_ENDIAN ?
- lttng::sessiond::trace::byte_order::LITTLE_ENDIAN_ :
- lttng::sessiond::trace::byte_order::BIG_ENDIAN_,
+ lttng::sessiond::trace::byte_order::LITTLE_ENDIAN_ :
+ lttng::sessiond::trace::byte_order::BIG_ENDIAN_,
};
lta->v_major = msg->major;
lta->pid = msg->pid;
lttng_ht_node_init_ulong(<a->pid_n, (unsigned long) lta->pid);
lta->sock = sock;
- pthread_mutex_init(<a->sock_lock, NULL);
+ pthread_mutex_init(<a->sock_lock, nullptr);
lttng_ht_node_init_ulong(<a->sock_n, (unsigned long) lta->sock);
CDS_INIT_LIST_HEAD(<a->teardown_head);
lttng_pipe_destroy(event_notifier_event_source_pipe);
lttng_fd_put(LTTNG_FD_APPS, 2);
error:
- return NULL;
+ return nullptr;
}
/*
LTTNG_ASSERT(app);
LTTNG_ASSERT(app->notify_sock >= 0);
- app->registration_time = time(NULL);
+ app->registration_time = time(nullptr);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* On a re-registration, we want to kick out the previous registration of
lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
- "notify_sock =%d (version %d.%d)", app->pid, app->ppid, app->uid,
- app->gid, app->sock, app->name, app->notify_sock, app->v_major,
- app->v_minor);
-
- rcu_read_unlock();
+ "notify_sock =%d (version %d.%d)",
+ app->pid,
+ app->ppid,
+ app->uid,
+ app->gid,
+ app->sock,
+ app->name,
+ app->notify_sock,
+ app->v_major,
+ app->v_minor);
}
/*
if (ret < 0) {
if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app version failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
{
int ret;
int event_pipe_write_fd;
- struct lttng_ust_abi_object_data *event_notifier_group = NULL;
+ struct lttng_ust_abi_object_data *event_notifier_group = nullptr;
enum lttng_error_code lttng_ret;
enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
}
/* Get the write side of the pipe. */
- event_pipe_write_fd = lttng_pipe_get_writefd(
- app->event_notifier_group.event_pipe);
+ event_pipe_write_fd = lttng_pipe_get_writefd(app->event_notifier_group.event_pipe);
pthread_mutex_lock(&app->sock_lock);
- ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
- event_pipe_write_fd, &event_notifier_group);
+ ret = lttng_ust_ctl_create_event_notifier_group(
+ app->sock, event_pipe_write_fd, &event_notifier_group);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = 0;
DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
ret = 0;
WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
- ret, app->pid, app->sock, event_pipe_write_fd);
+ ret,
+ app->pid,
+ app->sock,
+ event_pipe_write_fd);
}
goto error;
}
ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
if (ret) {
ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
- app->name, app->pid);
+ app->name,
+ app->pid);
goto error;
}
lttng_fd_put(LTTNG_FD_APPS, 1);
lttng_ret = notification_thread_command_add_tracer_event_source(
- the_notification_thread_handle,
- lttng_pipe_get_readfd(
- app->event_notifier_group.event_pipe),
- LTTNG_DOMAIN_UST);
+ the_notification_thread_handle,
+ lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
+ LTTNG_DOMAIN_UST);
if (lttng_ret != LTTNG_OK) {
ERR("Failed to add tracer event source to notification thread");
- ret = - 1;
+ ret = -1;
goto error;
}
/* Assign handle only when the complete setup is valid. */
app->event_notifier_group.object = event_notifier_group;
- event_notifier_error_accounting_status =
- event_notifier_error_accounting_register_app(app);
+ event_notifier_error_accounting_status = event_notifier_error_accounting_register_app(app);
switch (event_notifier_error_accounting_status) {
case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
break;
case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
- app->sock, app->name, (int) app->pid);
+ app->sock,
+ app->name,
+ (int) app->pid);
ret = 0;
goto error_accounting;
case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
- app->sock, app->name, (int) app->pid);
+ app->sock,
+ app->name,
+ (int) app->pid);
ret = 0;
goto error_accounting;
default:
error_accounting:
lttng_ret = notification_thread_command_remove_tracer_event_source(
- the_notification_thread_handle,
- lttng_pipe_get_readfd(
- app->event_notifier_group.event_pipe));
+ the_notification_thread_handle,
+ lttng_pipe_get_readfd(app->event_notifier_group.event_pipe));
if (lttng_ret != LTTNG_OK) {
ERR("Failed to remove application tracer event source from notification thread");
}
error:
lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
free(app->event_notifier_group.object);
- app->event_notifier_group.object = NULL;
+ app->event_notifier_group.object = nullptr;
return ret;
}
struct ust_app_session *ua_sess;
int ret;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/* Get the node reference for a call_rcu */
- lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
+ lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &ust_app_sock_iter);
node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
LTTNG_ASSERT(node);
* ensuring proper behavior of data_pending check.
* Remove sessions so they are not visible during deletion.
*/
- cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
- node.node) {
+ cds_lfht_for_each_entry (lta->sessions->ht, &iter.iter, ua_sess, node.node) {
ret = lttng_ht_del(lta->sessions, &iter);
if (ret) {
/* The session was already removed so scheduled for teardown. */
locked_registry->_metadata_closed = true;
}
- /* Release lock before communication, see comments in close_metadata(). */
+ /* Release lock before communication, see comments in
+ * close_metadata(). */
locked_registry.reset();
- (void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer);
+ (void) close_metadata(
+ metadata_key, consumer_bitness, ua_sess->consumer);
} else {
locked_registry.reset();
}
iter.iter.node = <a->pid_n.node;
ret = lttng_ht_del(ust_app_ht, &iter);
if (ret) {
- DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
- lta->pid);
+ DBG3("Unregister app by PID %d failed. This can happen on pid reuse", lta->pid);
}
/* Free memory */
call_rcu(<a->pid_n.head, delete_ust_app_rcu);
- rcu_read_unlock();
return;
}
nbmem = UST_APP_EVENT_LIST_SIZE;
tmp_event = calloc<lttng_event>(nbmem);
- if (tmp_event == NULL) {
+ if (tmp_event == nullptr) {
PERROR("zmalloc ust app events");
ret = -ENOMEM;
goto error;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_tracepoint_iter uiter;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ust_abi_tracepoint_iter uiter;
- health_code_update();
+ health_code_update();
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list events getting handle failed for app pid %d",
- app->pid);
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
}
- pthread_mutex_unlock(&app->sock_lock);
- continue;
- }
- while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
- &uiter)) != -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
- int release_ret;
-
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list get failed for app %d with ret %d",
- app->sock, ret);
- } else {
- DBG3("UST app tp list get failed. Application is dead");
- break;
- }
- free(tmp_event);
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
- if (release_ret < 0 &&
- release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list events getting handle failed for app pid %d",
+ app->pid);
}
pthread_mutex_unlock(&app->sock_lock);
- goto rcu_error;
+ continue;
}
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event list from %zu to %zu entries",
- nbmem, new_nbmem);
- new_tmp_event = (lttng_event *) realloc(tmp_event,
- new_nbmem * sizeof(struct lttng_event));
- if (new_tmp_event == NULL) {
+ while ((ret = lttng_ust_ctl_tracepoint_list_get(
+ app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
int release_ret;
- PERROR("realloc ust app events");
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list get failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list get failed. Application is dead");
+ break;
+ }
+
free(tmp_event);
- ret = -ENOMEM;
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ release_ret =
+ lttng_ust_ctl_release_handle(app->sock, handle);
if (release_ret < 0 &&
- release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
}
+
pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
- /* Zero the new memory */
- memset(new_tmp_event + nbmem, 0,
- (new_nbmem - nbmem) * sizeof(struct lttng_event));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
+
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event *) realloc(
+ tmp_event, new_nbmem * sizeof(struct lttng_event));
+ if (new_tmp_event == nullptr) {
+ int release_ret;
+
+ PERROR("realloc ust app events");
+ free(tmp_event);
+ ret = -ENOMEM;
+ release_ret = lttng_ust_ctl_release_handle(
+ app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
+ }
+
+ pthread_mutex_unlock(&app->sock_lock);
+ goto rcu_error;
+ }
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) * sizeof(struct lttng_event));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
+
+ memcpy(tmp_event[count].name,
+ uiter.name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].loglevel = uiter.loglevel;
+ tmp_event[count].type =
+ (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
+ tmp_event[count].pid = app->pid;
+ tmp_event[count].enabled = -1;
+ count++;
}
- memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].loglevel = uiter.loglevel;
- tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
- tmp_event[count].pid = app->pid;
- tmp_event[count].enabled = -1;
- count++;
- }
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0) {
- if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
- app->pid, app->sock);
- } else if (ret == -EAGAIN) {
- WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
- } else {
- ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else if (ret == -EAGAIN) {
+ WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else {
+ ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
+ ret,
+ app->pid,
+ app->sock);
+ }
}
}
}
DBG2("UST app list events done (%zu events)", count);
rcu_error:
- rcu_read_unlock();
error:
health_code_update();
return ret;
nbmem = UST_APP_EVENT_LIST_SIZE;
tmp_event = calloc<lttng_event_field>(nbmem);
- if (tmp_event == NULL) {
+ if (tmp_event == nullptr) {
PERROR("zmalloc ust app event fields");
ret = -ENOMEM;
goto error;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_field_iter uiter;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ust_abi_field_iter uiter;
- health_code_update();
+ health_code_update();
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list field getting handle failed for app pid %d",
- app->pid);
+ if (!app->compatible) {
+ /*
+				 * TODO: In time, we should notify the caller of this error by
+				 * telling them that this is a version error.
+ */
+ continue;
}
- pthread_mutex_unlock(&app->sock_lock);
- continue;
- }
-
- while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
- &uiter)) != -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
- int release_ret;
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list field failed for app %d with ret %d",
- app->sock, ret);
- } else {
- DBG3("UST app tp list field failed. Application is dead");
- break;
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list field getting handle failed for app pid %d",
+ app->pid);
}
- free(tmp_event);
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- if (release_ret < 0 &&
- release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
- }
- goto rcu_error;
+ continue;
}
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event_field *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event field list from %zu to %zu entries",
- nbmem, new_nbmem);
- new_tmp_event = (lttng_event_field *) realloc(tmp_event,
- new_nbmem * sizeof(struct lttng_event_field));
- if (new_tmp_event == NULL) {
+ while ((ret = lttng_ust_ctl_tracepoint_field_list_get(
+ app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
int release_ret;
- PERROR("realloc ust app event fields");
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list field failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list field failed. Application is dead");
+ break;
+ }
+
free(tmp_event);
- ret = -ENOMEM;
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ release_ret =
+ lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- if (release_ret &&
- release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
}
+
goto rcu_error;
}
- /* Zero the new memory */
- memset(new_tmp_event + nbmem, 0,
- (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
- }
- memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- /* Mapping between these enums matches 1 to 1. */
- tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
- tmp_event[count].nowrite = uiter.nowrite;
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event_field *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event field list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event_field *) realloc(
+ tmp_event,
+ new_nbmem * sizeof(struct lttng_event_field));
+ if (new_tmp_event == nullptr) {
+ int release_ret;
+
+ PERROR("realloc ust app event fields");
+ free(tmp_event);
+ ret = -ENOMEM;
+ release_ret = lttng_ust_ctl_release_handle(
+ app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (release_ret &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
+ }
+
+ goto rcu_error;
+ }
- memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].event.loglevel = uiter.loglevel;
- tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
- tmp_event[count].event.pid = app->pid;
- tmp_event[count].event.enabled = -1;
- count++;
- }
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0 &&
- ret != -LTTNG_UST_ERR_EXITING &&
- ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) *
+ sizeof(struct lttng_event_field));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
+
+ memcpy(tmp_event[count].field_name,
+ uiter.field_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ /* Mapping between these enums matches 1 to 1. */
+ tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
+ tmp_event[count].nowrite = uiter.nowrite;
+
+ memcpy(tmp_event[count].event.name,
+ uiter.event_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].event.loglevel = uiter.loglevel;
+ tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
+ tmp_event[count].event.pid = app->pid;
+ tmp_event[count].event.enabled = -1;
+ count++;
+ }
+
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ ret);
+ }
}
}
DBG2("UST app list event fields done (%zu events)", count);
rcu_error:
- rcu_read_unlock();
error:
health_code_update();
return ret;
/*
* Free and clean all traceable apps of the global list.
*/
-void ust_app_clean_list(void)
+void ust_app_clean_list()
{
int ret;
struct ust_app *app;
DBG2("UST app cleaning registered apps hash table");
- rcu_read_lock();
-
/* Cleanup notify socket hash table */
if (ust_app_ht_by_notify_sock) {
- cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
- notify_sock_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ ust_app_ht_by_notify_sock->ht, &iter.iter, app, notify_sock_n.node) {
/*
* Assert that all notifiers are gone as all triggers
* are unregistered prior to this clean-up.
}
if (ust_app_ht) {
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ret = lttng_ht_del(ust_app_ht, &iter);
LTTNG_ASSERT(!ret);
call_rcu(&app->pid_n.head, delete_ust_app_rcu);
/* Cleanup socket hash table */
if (ust_app_ht_by_sock) {
- cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
- sock_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht_by_sock->ht, &iter.iter, app, sock_n.node) {
ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
LTTNG_ASSERT(!ret);
}
}
- rcu_read_unlock();
-
/* Destroy is done only when the ht is empty */
if (ust_app_ht) {
lttng_ht_destroy(ust_app_ht);
/*
* Init UST app hash table.
*/
-int ust_app_ht_alloc(void)
+int ust_app_ht_alloc()
{
ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
if (!ust_app_ht) {
/*
* For a specific UST session, disable the channel for all registered apps.
*/
-int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan)
+int ust_app_disable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
{
int ret = 0;
struct lttng_ht_iter iter;
LTTNG_ASSERT(usess->active);
DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
- uchan->name, usess->id);
+ uchan->name,
+ usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For every registered applications */
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ht_iter uiter;
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- continue;
- }
+ /* For every registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ht_iter uiter;
+ if (!app->compatible) {
+ /*
+				 * TODO: In time, we should notify the caller of this error by
+				 * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Get channel */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the session if found for the app, the channel must be there */
- LTTNG_ASSERT(ua_chan_node);
+ /* Get channel */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+			/* If the session is found for the app, the channel must be there */
+ LTTNG_ASSERT(ua_chan_node);
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- /* The channel must not be already disabled */
- LTTNG_ASSERT(ua_chan->enabled == 1);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* The channel must not be already disabled */
+ LTTNG_ASSERT(ua_chan->enabled);
- /* Disable channel onto application */
- ret = disable_ust_app_channel(ua_sess, ua_chan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
+ /* Disable channel onto application */
+ ret = disable_ust_app_channel(ua_sess, ua_chan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
/*
* For a specific UST session, enable the channel for all registered apps.
*/
-int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan)
+int ust_app_enable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
{
int ret = 0;
struct lttng_ht_iter iter;
LTTNG_ASSERT(usess->active);
DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
- uchan->name, usess->id);
+ uchan->name,
+ usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For every registered applications */
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- continue;
- }
+ /* For every registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+				 * TODO: In time, we should notify the caller of this error by
+				 * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Enable channel onto application */
- ret = enable_ust_app_channel(ua_sess, uchan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
+ /* Enable channel onto application */
+ ret = enable_ust_app_channel(ua_sess, uchan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
* Disable an event in a channel and for a specific session.
*/
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+ struct ltt_ust_channel *uchan,
+ struct ltt_ust_event *uevent)
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
LTTNG_ASSERT(usess->active);
DBG("UST app disabling event %s for all apps in channel "
- "%s for session id %" PRIu64,
- uevent->attr.name, uchan->name, usess->id);
+ "%s for session id %" PRIu64,
+ uevent->attr.name,
+ uchan->name,
+ usess->id);
- rcu_read_lock();
-
- /* For all registered applications */
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- /* Next app */
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == NULL) {
- DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
- "Skipping", uchan->name, usess->id, app->pid);
- continue;
- }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+				 * TODO: In time, we should notify the caller of this error by
+				 * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ /* Next app */
+ continue;
+ }
- ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
- uevent->filter, uevent->attr.loglevel,
- uevent->exclusion);
- if (ua_event == NULL) {
- DBG2("Event %s not found in channel %s for app pid %d."
- "Skipping", uevent->attr.name, uchan->name, app->pid);
- continue;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == nullptr) {
+ DBG2("Channel %s not found in session id %" PRIu64
+ " for app pid %d."
+ "Skipping",
+ uchan->name,
+ usess->id,
+ app->pid);
+ continue;
+ }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ ua_event = find_ust_app_event(ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG2("Event %s not found in channel %s for app pid %d."
+ "Skipping",
+ uevent->attr.name,
+ uchan->name,
+ app->pid);
+ continue;
+ }
- ret = disable_ust_app_event(ua_event, app);
- if (ret < 0) {
- /* XXX: Report error someday... */
- continue;
+ ret = disable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ /* XXX: Report error someday... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
/* The ua_sess lock must be held by the caller. */
-static
-int ust_app_channel_create(struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
- struct ltt_ust_channel *uchan, struct ust_app *app,
- struct ust_app_channel **_ua_chan)
+static int ust_app_channel_create(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ltt_ust_channel *uchan,
+ struct ust_app *app,
+ struct ust_app_channel **_ua_chan)
{
int ret = 0;
- struct ust_app_channel *ua_chan = NULL;
+ struct ust_app_channel *ua_chan = nullptr;
LTTNG_ASSERT(ua_sess);
ASSERT_LOCKED(ua_sess->lock);
- if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
- sizeof(uchan->name))) {
- copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
- &uchan->attr);
+ if (!strncmp(uchan->name, DEFAULT_METADATA_NAME, sizeof(uchan->name))) {
+ copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
ret = 0;
} else {
- struct ltt_ust_context *uctx = NULL;
+ struct ltt_ust_context *uctx = nullptr;
/*
* Create channel onto application and synchronize its
* configuration.
*/
- ret = ust_app_channel_allocate(ua_sess, uchan,
- LTTNG_UST_ABI_CHAN_PER_CPU, usess,
- &ua_chan);
+ ret = ust_app_channel_allocate(
+ ua_sess, uchan, LTTNG_UST_ABI_CHAN_PER_CPU, usess, &ua_chan);
if (ret < 0) {
goto error;
}
- ret = ust_app_channel_send(app, usess,
- ua_sess, ua_chan);
+ ret = ust_app_channel_send(app, usess, ua_sess, ua_chan);
if (ret) {
goto error;
}
/* Add contexts. */
- cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
- ret = create_ust_app_channel_context(ua_chan,
- &uctx->ctx, app);
+ cds_list_for_each_entry (uctx, &uchan->ctx_list, list) {
+ ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
if (ret) {
goto error;
}
* or a timeout on it. We can't inform the caller that for a
* specific app, the session failed so lets continue here.
*/
- ret = 0; /* Not an error. */
+ ret = 0; /* Not an error. */
break;
case -ENOMEM:
default:
* Enable event for a specific session and channel on the tracer.
*/
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+ struct ltt_ust_channel *uchan,
+ struct ltt_ust_event *uevent)
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
LTTNG_ASSERT(usess->active);
DBG("UST app enabling event %s for all apps for session id %" PRIu64,
- uevent->attr.name, usess->id);
+ uevent->attr.name,
+ usess->id);
/*
* NOTE: At this point, this function is called only if the session and
* tracer also.
*/
- rcu_read_lock();
-
- /* For all registered applications */
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- pthread_mutex_lock(&ua_sess->lock);
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+				 * TODO: In time, we should notify the caller of this error by
+				 * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has problem or is probably dead. */
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ pthread_mutex_lock(&ua_sess->lock);
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /*
- * It is possible that the channel cannot be found is
- * the channel/event creation occurs concurrently with
- * an application exit.
- */
- if (!ua_chan_node) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /*
+			 * It is possible that the channel cannot be found if
+ * the channel/event creation occurs concurrently with
+ * an application exit.
+ */
+ if (!ua_chan_node) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- /* Get event node */
- ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
- uevent->filter, uevent->attr.loglevel, uevent->exclusion);
- if (ua_event == NULL) {
- DBG3("UST app enable event %s not found for app PID %d."
- "Skipping app", uevent->attr.name, app->pid);
- goto next_app;
- }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ /* Get event node */
+ ua_event = find_ust_app_event(ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG3("UST app enable event %s not found for app PID %d."
+ "Skipping app",
+ uevent->attr.name,
+ app->pid);
+ goto next_app;
+ }
- ret = enable_ust_app_event(ua_event, app);
- if (ret < 0) {
+ ret = enable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto error;
+ }
+ next_app:
pthread_mutex_unlock(&ua_sess->lock);
- goto error;
}
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
}
-
error:
- rcu_read_unlock();
return ret;
}
* all registered apps.
*/
int ust_app_create_event_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+ struct ltt_ust_channel *uchan,
+ struct ltt_ust_event *uevent)
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
LTTNG_ASSERT(usess->active);
DBG("UST app creating event %s for all apps for session id %" PRIu64,
- uevent->attr.name, usess->id);
+ uevent->attr.name,
+ usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For all registered applications */
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+				 * TODO: In time, we should notify the caller of this error by
+				 * telling them that this is a version error.
+ */
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has problem or is probably dead. */
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ pthread_mutex_lock(&ua_sess->lock);
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the channel is not found, there is a code flow error */
- LTTNG_ASSERT(ua_chan_node);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the channel is not found, there is a code flow error */
+ LTTNG_ASSERT(ua_chan_node);
- ret = create_ust_app_event(ua_chan, uevent, app);
- pthread_mutex_unlock(&ua_sess->lock);
- if (ret < 0) {
- if (ret != -LTTNG_UST_ERR_EXIST) {
- /* Possible value at this point: -ENOMEM. If so, we stop! */
- break;
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ ret = create_ust_app_event(ua_chan, uevent, app);
+ pthread_mutex_unlock(&ua_sess->lock);
+ if (ret < 0) {
+ if (ret != -LTTNG_UST_ERR_EXIST) {
+ /* Possible value at this point: -ENOMEM. If so, we stop! */
+ break;
+ }
+
+ DBG2("UST app event %s already exist on app PID %d",
+ uevent->attr.name,
+ app->pid);
+ continue;
}
- DBG2("UST app event %s already exist on app PID %d",
- uevent->attr.name, app->pid);
- continue;
}
}
- rcu_read_unlock();
return ret;
}
* Called with UST app session lock held.
*
*/
-static
-int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
+static int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
int ret = 0;
struct ust_app_session *ua_sess;
DBG("Starting tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end;
}
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
/* The session is in teardown process. Ignore and continue. */
goto end;
}
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
pthread_mutex_unlock(&ua_sess->lock);
goto end;
} else if (ret == -EAGAIN) {
WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
pthread_mutex_unlock(&ua_sess->lock);
goto end;
} else {
ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
goto error_unlock;
}
/* Indicate that the session has been started once */
- ua_sess->started = 1;
- ua_sess->enabled = 1;
+ ua_sess->started = true;
+ ua_sess->enabled = true;
pthread_mutex_unlock(&ua_sess->lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app wait quiescent failed with ret %d: pid %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
end:
- rcu_read_unlock();
health_code_update();
return 0;
error_unlock:
pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
health_code_update();
return -1;
}
/*
* Stop tracing for a specific UST session and app.
*/
-static
-int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
+static int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
int ret = 0;
struct ust_app_session *ua_sess;
DBG("Stopping tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end_no_session;
}
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
goto end_no_session;
}
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
goto end_unlock;
} else if (ret == -EAGAIN) {
WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
goto end_unlock;
} else {
ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
goto error_rcu_unlock;
}
health_code_update();
- ua_sess->enabled = 0;
+ ua_sess->enabled = false;
/* Quiescent wait after stopping trace */
pthread_mutex_lock(&app->sock_lock);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
end_unlock:
pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
- rcu_read_unlock();
health_code_update();
return 0;
error_rcu_unlock:
pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
health_code_update();
return -1;
}
-static
-int ust_app_flush_app_session(struct ust_app *app,
- struct ust_app_session *ua_sess)
+static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
{
int ret, retval = 0;
struct lttng_ht_iter iter;
DBG("Flushing app session buffers for ust app pid %d", app->pid);
- rcu_read_lock();
-
if (!app->compatible) {
goto end_not_compatible;
}
health_code_update();
/* Flushing buffers */
- socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
- ua_sess->consumer);
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
/* Flush buffers and push metadata. */
switch (ua_sess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
- node.node) {
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
health_code_update();
ret = consumer_flush_channel(socket, ua_chan->key);
if (ret) {
continue;
}
}
+
break;
+ }
case LTTNG_BUFFER_PER_UID:
default:
abort();
pthread_mutex_unlock(&ua_sess->lock);
end_not_compatible:
- rcu_read_unlock();
health_code_update();
return retval;
}
* Flush buffers for all applications for a specific UST session.
* Called with UST session lock held.
*/
-static
-int ust_app_flush_session(struct ltt_ust_session *usess)
+static int ust_app_flush_session(struct ltt_ust_session *usess)
{
int ret = 0;
DBG("Flushing session buffers for all ust apps");
- rcu_read_lock();
-
/* Flush buffers and push metadata. */
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
struct lttng_ht_iter iter;
/* Flush all per UID buffers associated to that session. */
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
+ lttng::urcu::read_lock_guard read_lock;
lsu::registry_session *ust_session_reg;
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
+ usess->consumer);
if (!socket) {
/* Ignore request if no consumer is found for the session. */
continue;
}
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- buf_reg_chan, node.node) {
+ cds_lfht_for_each_entry (
+ reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
/*
* The following call will print error values so the return
* code is of little importance because whatever happens, we
auto locked_registry = ust_session_reg->lock();
(void) push_metadata(locked_registry, usess->consumer);
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
struct ust_app_session *ua_sess;
struct lttng_ht_iter iter;
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
continue;
}
+
(void) ust_app_flush_app_session(app, ua_sess);
}
+
break;
}
default:
break;
}
- rcu_read_unlock();
health_code_update();
return ret;
}
-static
-int ust_app_clear_quiescent_app_session(struct ust_app *app,
- struct ust_app_session *ua_sess)
+static int ust_app_clear_quiescent_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
{
int ret = 0;
struct lttng_ht_iter iter;
DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end_not_compatible;
health_code_update();
- socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
- ua_sess->consumer);
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
if (!socket) {
- ERR("Failed to find consumer (%" PRIu32 ") socket",
- app->abi.bits_per_long);
+ ERR("Failed to find consumer (%" PRIu32 ") socket", app->abi.bits_per_long);
ret = -1;
goto end_unlock;
}
/* Clear quiescent state. */
switch (ua_sess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
- ua_chan, node.node) {
+ cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
health_code_update();
- ret = consumer_clear_quiescent_channel(socket,
- ua_chan->key);
+ ret = consumer_clear_quiescent_channel(socket, ua_chan->key);
if (ret) {
ERR("Error clearing quiescent state for consumer channel");
ret = -1;
pthread_mutex_unlock(&ua_sess->lock);
end_not_compatible:
- rcu_read_unlock();
health_code_update();
return ret;
}
* specific UST session.
* Called with UST session lock held.
*/
-static
-int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
+static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
{
int ret = 0;
DBG("Clearing stream quiescent state for all ust apps");
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
* Clear quiescent for all per UID buffers associated to
* that session.
*/
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct consumer_socket *socket;
struct buffer_reg_channel *buf_reg_chan;
+ lttng::urcu::read_lock_guard read_lock;
/* Get associated consumer socket.*/
- socket = consumer_find_socket_by_bitness(
- reg->bits_per_long, usess->consumer);
+ socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+ usess->consumer);
if (!socket) {
/*
* Ignore request if no consumer is found for
continue;
}
- cds_lfht_for_each_entry(reg->registry->channels->ht,
- &iter.iter, buf_reg_chan, node.node) {
+ cds_lfht_for_each_entry (
+ reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
/*
* The following call will print error values so
* the return code is of little importance
* all.
*/
(void) consumer_clear_quiescent_channel(socket,
- buf_reg_chan->consumer_key);
+ buf_reg_chan->consumer_key);
}
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
struct ust_app_session *ua_sess;
struct lttng_ht_iter iter;
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
- pid_n.node) {
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
continue;
}
- (void) ust_app_clear_quiescent_app_session(app,
- ua_sess);
+ (void) ust_app_clear_quiescent_app_session(app, ua_sess);
}
+
break;
}
default:
break;
}
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Destroy tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end;
__lookup_session_by_app(usess, app, &iter);
node = lttng_ht_iter_get_node_u64(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
/* Session is being or is deleted. */
goto end;
}
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
}
end:
- rcu_read_unlock();
health_code_update();
return 0;
}
* Even though the start trace might fail, flag this session active so
* other application coming in are started by default.
*/
- usess->active = 1;
-
- rcu_read_lock();
+ usess->active = true;
/*
* In a start-stop-start use-case, we need to clear the quiescent state
*/
(void) ust_app_clear_quiescent_session(usess);
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- rcu_read_unlock();
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
+ }
return 0;
}
* Even though the stop trace might fail, flag this session inactive so
* other application coming in are not started by default.
*/
- usess->active = 0;
+ usess->active = false;
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_stop_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = ust_app_stop_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to next apps even on error */
+ continue;
+ }
}
}
(void) ust_app_flush_session(usess);
- rcu_read_unlock();
-
return 0;
}
DBG("Destroy all UST traces");
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = destroy_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = destroy_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to next apps even on error */
+ continue;
+ }
}
}
- rcu_read_unlock();
-
return 0;
}
/* The ua_sess lock must be held by the caller. */
-static
-int find_or_create_ust_app_channel(
- struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
- struct ust_app *app,
- struct ltt_ust_channel *uchan,
- struct ust_app_channel **ua_chan)
+static int find_or_create_ust_app_channel(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app *app,
+ struct ltt_ust_channel *uchan,
+ struct ust_app_channel **ua_chan)
{
int ret = 0;
struct lttng_ht_iter iter;
lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
ua_chan_node = lttng_ht_iter_get_node_str(&iter);
if (ua_chan_node) {
- *ua_chan = caa_container_of(ua_chan_node,
- struct ust_app_channel, node);
+ *ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
goto end;
}
return ret;
}
-static
-int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
- struct ltt_ust_event *uevent,
- struct ust_app *app)
+static int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
+ struct ltt_ust_event *uevent,
+ struct ust_app *app)
{
int ret = 0;
- struct ust_app_event *ua_event = NULL;
+ struct ust_app_event *ua_event = nullptr;
- ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
- uevent->filter, uevent->attr.loglevel, uevent->exclusion);
+ ua_event = find_ust_app_event(ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ uevent->attr.loglevel,
+ uevent->exclusion);
if (!ua_event) {
ret = create_ust_app_event(ua_chan, uevent, app);
if (ret < 0) {
}
} else {
if (ua_event->enabled != uevent->enabled) {
- ret = uevent->enabled ?
- enable_ust_app_event(ua_event, app) :
- disable_ust_app_event(ua_event, app);
+ ret = uevent->enabled ? enable_ust_app_event(ua_event, app) :
+ disable_ust_app_event(ua_event, app);
}
}
}
/* Called with RCU read-side lock held. */
-static
-void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
+static void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
{
int ret = 0;
enum lttng_error_code ret_code;
enum lttng_trigger_status t_status;
struct lttng_ht_iter app_trigger_iter;
- struct lttng_triggers *triggers = NULL;
+ struct lttng_triggers *triggers = nullptr;
struct ust_app_event_notifier_rule *event_notifier_rule;
unsigned int count, i;
/* Get all triggers using uid 0 (root) */
ret_code = notification_thread_command_list_triggers(
- the_notification_thread_handle, 0, &triggers);
+ the_notification_thread_handle, 0, &triggers);
if (ret_code != LTTNG_OK) {
goto end;
}
condition = lttng_trigger_get_condition(trigger);
if (lttng_condition_get_type(condition) !=
- LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
+ LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
/* Does not apply */
continue;
}
- condition_status =
- lttng_condition_event_rule_matches_borrow_rule_mutable(
- condition, &event_rule);
+ condition_status = lttng_condition_event_rule_matches_borrow_rule_mutable(
+ condition, &event_rule);
LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
* explicitly acquiring it here.
*/
looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
- app->token_to_event_notifier_rule_ht, token);
+ app->token_to_event_notifier_rule_ht, token);
if (!looked_up_event_notifier_rule) {
ret = create_ust_app_event_notifier_rule(trigger, app);
if (ret < 0) {
}
}
- rcu_read_lock();
- /* Remove all unknown event sources from the app. */
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &app_trigger_iter.iter, event_notifier_rule,
- node.node) {
- const uint64_t app_token = event_notifier_rule->token;
- bool found = false;
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /*
- * Check if the app event trigger still exists on the
- * notification side.
- */
- for (i = 0; i < count; i++) {
- uint64_t notification_thread_token;
- const struct lttng_trigger *trigger =
- lttng_triggers_get_at_index(
- triggers, i);
+ /* Remove all unknown event sources from the app. */
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &app_trigger_iter.iter,
+ event_notifier_rule,
+ node.node) {
+ const uint64_t app_token = event_notifier_rule->token;
+ bool found = false;
+
+ /*
+ * Check if the app event trigger still exists on the
+ * notification side.
+ */
+ for (i = 0; i < count; i++) {
+ uint64_t notification_thread_token;
+ const struct lttng_trigger *trigger =
+ lttng_triggers_get_at_index(triggers, i);
- LTTNG_ASSERT(trigger);
+ LTTNG_ASSERT(trigger);
- notification_thread_token =
- lttng_trigger_get_tracer_token(trigger);
+ notification_thread_token = lttng_trigger_get_tracer_token(trigger);
- if (notification_thread_token == app_token) {
- found = true;
- break;
+ if (notification_thread_token == app_token) {
+ found = true;
+ break;
+ }
}
- }
- if (found) {
- /* Still valid. */
- continue;
- }
+ if (found) {
+ /* Still valid. */
+ continue;
+ }
- /*
- * This trigger was unregistered, disable it on the tracer's
- * side.
- */
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
- &app_trigger_iter);
- LTTNG_ASSERT(ret == 0);
+ /*
+ * This trigger was unregistered, disable it on the tracer's
+ * side.
+ */
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
+ LTTNG_ASSERT(ret == 0);
- /* Callee logs errors. */
- (void) disable_ust_object(app, event_notifier_rule->obj);
+ /* Callee logs errors. */
+ (void) disable_ust_object(app, event_notifier_rule->obj);
- delete_ust_app_event_notifier_rule(
- app->sock, event_notifier_rule, app);
+ delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ }
}
- rcu_read_unlock();
-
end:
lttng_triggers_destroy(triggers);
return;
/*
* RCU read lock must be held by the caller.
*/
-static
-void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
- struct ust_app_session *ua_sess,
- struct ust_app *app)
+static void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app *app)
{
int ret = 0;
struct cds_lfht_iter uchan_iter;
LTTNG_ASSERT(app);
ASSERT_RCU_READ_LOCKED();
- cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
- uchan, node.node) {
+ cds_lfht_for_each_entry (usess->domain_global.channels->ht, &uchan_iter, uchan, node.node) {
struct ust_app_channel *ua_chan;
struct cds_lfht_iter uevent_iter;
struct ltt_ust_event *uevent;
* allocated (if necessary) and sent to the application, and
* all enabled contexts will be added to the channel.
*/
- ret = find_or_create_ust_app_channel(usess, ua_sess,
- app, uchan, &ua_chan);
+ ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan, &ua_chan);
if (ret) {
/* Tracer is probably gone or ENOMEM. */
goto end;
continue;
}
- cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
- node.node) {
- ret = ust_app_channel_synchronize_event(ua_chan,
- uevent, app);
+ cds_lfht_for_each_entry (uchan->events->ht, &uevent_iter, uevent, node.node) {
+ ret = ust_app_channel_synchronize_event(ua_chan, uevent, app);
if (ret) {
goto end;
}
}
if (ua_chan->enabled != uchan->enabled) {
- ret = uchan->enabled ?
- enable_ust_app_channel(ua_sess, uchan, app) :
- disable_ust_app_channel(ua_sess, ua_chan, app);
+ ret = uchan->enabled ? enable_ust_app_channel(ua_sess, uchan, app) :
+ disable_ust_app_channel(ua_sess, ua_chan, app);
if (ret) {
goto end;
}
* The caller must ensure that the application is compatible and is tracked
* by the process attribute trackers.
*/
-static
-void ust_app_synchronize(struct ltt_ust_session *usess,
- struct ust_app *app)
+static void ust_app_synchronize(struct ltt_ust_session *usess, struct ust_app *app)
{
int ret = 0;
- struct ust_app_session *ua_sess = NULL;
+ struct ust_app_session *ua_sess = nullptr;
/*
* The application's configuration should only be synchronized for
*/
LTTNG_ASSERT(usess->active);
- ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
+ ret = find_or_create_ust_app_session(usess, app, &ua_sess, nullptr);
if (ret < 0) {
/* Tracer is probably gone or ENOMEM. */
if (ua_sess) {
goto deleted_session;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- ust_app_synchronize_all_channels(usess, ua_sess, app);
+ ust_app_synchronize_all_channels(usess, ua_sess, app);
- /*
- * Create the metadata for the application. This returns gracefully if a
- * metadata was already set for the session.
- *
- * The metadata channel must be created after the data channels as the
- * consumer daemon assumes this ordering. When interacting with a relay
- * daemon, the consumer will use this assumption to send the
- * "STREAMS_SENT" message to the relay daemon.
- */
- ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
- if (ret < 0) {
- ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
- app->sock, usess->id);
+ /*
+ * Create the metadata for the application. This returns gracefully if a
+ * metadata was already set for the session.
+ *
+ * The metadata channel must be created after the data channels as the
+ * consumer daemon assumes this ordering. When interacting with a relay
+ * daemon, the consumer will use this assumption to send the
+ * "STREAMS_SENT" message to the relay daemon.
+ */
+ ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
+ if (ret < 0) {
+ ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
+ app->sock,
+ usess->id);
+ }
}
- rcu_read_unlock();
-
deleted_session:
pthread_mutex_unlock(&ua_sess->lock);
end:
return;
}
-static
-void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
+static void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
struct ust_app_session *ua_sess;
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
return;
}
destroy_app_session(app, ua_sess);
LTTNG_ASSERT(usess->active);
ASSERT_RCU_READ_LOCKED();
- DBG2("UST app global update for app sock %d for session id %" PRIu64,
- app->sock, usess->id);
+ DBG2("UST app global update for app sock %d for session id %" PRIu64, app->sock, usess->id);
if (!app->compatible) {
return;
}
- if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
- usess, app->pid) &&
- trace_ust_id_tracker_lookup(
- LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
- usess, app->uid) &&
- trace_ust_id_tracker_lookup(
- LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
- usess, app->gid)) {
+ if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID, usess, app->pid) &&
+ trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID, usess, app->uid) &&
+ trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID, usess, app->gid)) {
/*
* Synchronize the application's internal tracing configuration
* and start tracing.
ASSERT_RCU_READ_LOCKED();
DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
- app->name, app->pid);
+ app->name,
+ app->pid);
if (!app->compatible || !ust_app_supports_notifiers(app)) {
return;
}
- if (app->event_notifier_group.object == NULL) {
+ if (app->event_notifier_group.object == nullptr) {
WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
- app->name, app->pid);
+ app->name,
+ app->pid);
return;
}
struct lttng_ht_iter iter;
struct ust_app *app;
- rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
}
- rcu_read_unlock();
}
-void ust_app_global_update_all_event_notifier_rules(void)
+void ust_app_global_update_all_event_notifier_rules()
{
struct lttng_ht_iter iter;
struct ust_app *app;
- rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ust_app_global_update_event_notifier_rules(app);
}
-
- rcu_read_unlock();
}
/*
* Add context to a specific channel for global UST domain.
*/
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
+ struct ltt_ust_channel *uchan,
+ struct ltt_ust_context *uctx)
{
int ret = 0;
struct lttng_ht_node_str *ua_chan_node;
struct lttng_ht_iter iter, uiter;
- struct ust_app_channel *ua_chan = NULL;
+ struct ust_app_channel *ua_chan = nullptr;
struct ust_app_session *ua_sess;
struct ust_app *app;
LTTNG_ASSERT(usess->active);
- rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ pthread_mutex_lock(&ua_sess->lock);
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == NULL) {
- goto next_app;
- }
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
- node);
- ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
- if (ret < 0) {
- goto next_app;
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == nullptr) {
+ goto next_app;
+ }
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
+ if (ret < 0) {
+ goto next_app;
+ }
+ next_app:
+ pthread_mutex_unlock(&ua_sess->lock);
}
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
}
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(msg);
- ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
- &pid, &ppid, &uid, &gid,
- &msg->bits_per_long,
- &msg->uint8_t_alignment,
- &msg->uint16_t_alignment,
- &msg->uint32_t_alignment,
- &msg->uint64_t_alignment,
- &msg->long_alignment,
- &msg->byte_order,
- msg->name);
+ ret = lttng_ust_ctl_recv_reg_msg(sock,
+ &msg->type,
+ &msg->major,
+ &msg->minor,
+ &pid,
+ &ppid,
+ &uid,
+ &gid,
+ &msg->bits_per_long,
+ &msg->uint8_t_alignment,
+ &msg->uint16_t_alignment,
+ &msg->uint32_t_alignment,
+ &msg->uint64_t_alignment,
+ &msg->long_alignment,
+ &msg->byte_order,
+ msg->name);
if (ret < 0) {
switch (-ret) {
case EPIPE:
break;
case LTTNG_UST_ERR_UNSUP_MAJOR:
ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
- msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
- LTTNG_UST_ABI_MINOR_VERSION);
+ msg->major,
+ msg->minor,
+ LTTNG_UST_ABI_MAJOR_VERSION,
+ LTTNG_UST_ABI_MINOR_VERSION);
break;
default:
ERR("UST app recv reg message failed with ret %d", ret);
* Return a ust app session object using the application object and the
* session object descriptor has a key. If not found, NULL is returned.
* A RCU read side lock MUST be acquired when calling this function.
-*/
-static struct ust_app_session *find_session_by_objd(struct ust_app *app,
- int objd)
+ */
+static struct ust_app_session *find_session_by_objd(struct ust_app *app, int objd)
{
struct lttng_ht_node_ulong *node;
struct lttng_ht_iter iter;
- struct ust_app_session *ua_sess = NULL;
+ struct ust_app_session *ua_sess = nullptr;
LTTNG_ASSERT(app);
ASSERT_RCU_READ_LOCKED();
- lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
+ lttng_ht_lookup(app->ust_sessions_objd, (void *) ((unsigned long) objd), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
DBG2("UST app session find by objd %d not found", objd);
goto error;
}
* object descriptor has a key. If not found, NULL is returned. A RCU read side
* lock MUST be acquired before calling this function.
*/
-static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
- int objd)
+static struct ust_app_channel *find_channel_by_objd(struct ust_app *app, int objd)
{
struct lttng_ht_node_ulong *node;
struct lttng_ht_iter iter;
- struct ust_app_channel *ua_chan = NULL;
+ struct ust_app_channel *ua_chan = nullptr;
LTTNG_ASSERT(app);
ASSERT_RCU_READ_LOCKED();
- lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
+ lttng_ht_lookup(app->ust_objd, (void *) ((unsigned long) objd), &iter);
node = lttng_ht_iter_get_node_ulong(&iter);
- if (node == NULL) {
+ if (node == nullptr) {
DBG2("UST app channel find by objd %d not found", objd);
goto error;
}
* On success 0 is returned else a negative value.
*/
static int handle_app_register_channel_notification(int sock,
- int cobjd,
- struct lttng_ust_ctl_field *raw_context_fields,
- size_t context_field_count)
+ int cobjd,
+ struct lttng_ust_ctl_field *raw_context_fields,
+ size_t context_field_count)
{
int ret, ret_code = 0;
uint32_t chan_id;
struct ust_app *app;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
- auto ust_ctl_context_fields = lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(
- raw_context_fields);
+ auto ust_ctl_context_fields =
+ lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(raw_context_fields);
lttng::urcu::read_lock_guard read_lock_guard;
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
- DBG("Application socket %d is being torn down. Abort event notify",
- sock);
+ DBG("Application socket %d is being torn down. Abort event notify", sock);
return -1;
}
chan_reg_key = ua_chan->key;
}
- auto& ust_reg_chan = locked_registry_session->get_channel(chan_reg_key);
+ auto& ust_reg_chan = locked_registry_session->channel(chan_reg_key);
/* Channel id is set during the object creation. */
chan_id = ust_reg_chan.id;
* that all apps provide the same typing for the context fields as a
* sanity check.
*/
- lst::type::cuptr context_fields = lttng::make_unique<lst::structure_type>(0,
- lsu::create_trace_fields_from_ust_ctl_fields(*locked_registry_session,
- ust_ctl_context_fields.get(), context_field_count));
+ try {
+ auto app_context_fields = lsu::create_trace_fields_from_ust_ctl_fields(
+ *locked_registry_session,
+ ust_ctl_context_fields.get(),
+ context_field_count,
+ lst::field_location::root::EVENT_RECORD_COMMON_CONTEXT,
+ lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS);
+
+ if (!ust_reg_chan.is_registered()) {
+ lst::type::cuptr event_context = app_context_fields.size() ?
+ lttng::make_unique<lst::structure_type>(
+ 0, std::move(app_context_fields)) :
+ nullptr;
+
+ ust_reg_chan.event_context(std::move(event_context));
+ } else {
+ /*
+ * Validate that the context fields match between
+ * registry and newcoming application.
+ */
+ bool context_fields_match;
+ const auto *previous_event_context = ust_reg_chan.event_context();
- if (!ust_reg_chan.is_registered()) {
- ust_reg_chan.set_context(std::move(context_fields));
- } else {
- /*
- * Validate that the context fields match between
- * registry and newcoming application.
- */
- if (ust_reg_chan.get_context() != *context_fields) {
- ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
- app->pid, app->sock);
- ret_code = -EINVAL;
- goto reply;
- }
- }
+ if (!previous_event_context) {
+ context_fields_match = app_context_fields.size() == 0;
+ } else {
+ const lst::structure_type app_event_context_struct(
+ 0, std::move(app_context_fields));
+
+ context_fields_match = *previous_event_context ==
+ app_event_context_struct;
+ }
- /* Append to metadata */
- if (!ust_reg_chan._metadata_dumped) {
- /*ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);*/
- if (ret_code) {
- ERR("Error appending channel metadata (errno = %d)", ret_code);
- goto reply;
+ if (!context_fields_match) {
+ ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ ret_code = -EINVAL;
+ goto reply;
+ }
}
+ } catch (const std::exception& ex) {
+ ERR("Failed to handle application context: %s", ex.what());
+ ret_code = -EINVAL;
+ goto reply;
}
reply:
- DBG3("UST app replying to register channel key %" PRIu64
- " with id %u, ret = %d", chan_reg_key, chan_id,
- ret_code);
-
- ret = lttng_ust_ctl_reply_register_channel(sock, chan_id,
- ust_reg_chan.header_type_ == lst::stream_class::header_type::COMPACT ?
- LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT :
- LTTNG_UST_CTL_CHANNEL_HEADER_LARGE,
- ret_code);
+ DBG3("UST app replying to register channel key %" PRIu64 " with id %u, ret = %d",
+ chan_reg_key,
+ chan_id,
+ ret_code);
+
+ ret = lttng_ust_ctl_reply_register_channel(
+ sock,
+ chan_id,
+ ust_reg_chan.header_type_ == lst::stream_class::header_type::COMPACT ?
+ LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT :
+ LTTNG_UST_CTL_CHANNEL_HEADER_LARGE,
+ ret_code);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
return ret;
*
* On success 0 is returned else a negative value.
*/
-static int add_event_ust_registry(int sock, int sobjd, int cobjd, const char *name,
- char *raw_signature, size_t nr_fields, struct lttng_ust_ctl_field *raw_fields,
- int loglevel_value, char *raw_model_emf_uri)
+static int add_event_ust_registry(int sock,
+ int sobjd,
+ int cobjd,
+ const char *name,
+ char *raw_signature,
+ size_t nr_fields,
+ struct lttng_ust_ctl_field *raw_fields,
+ int loglevel_value,
+ char *raw_model_emf_uri)
{
int ret, ret_code;
uint32_t event_id = 0;
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
- DBG("Application socket %d is being torn down. Abort event notify",
- sock);
+ DBG("Application socket %d is being torn down. Abort event notify", sock);
return -1;
}
* These three variables MUST NOT be read/write after this.
*/
try {
- auto& channel = locked_registry->get_channel(chan_reg_key);
+ auto& channel = locked_registry->channel(chan_reg_key);
/* event_id is set on success. */
- channel.add_event(sobjd, cobjd, name, signature.get(),
- lsu::create_trace_fields_from_ust_ctl_fields(
- *locked_registry, fields.get(),
- nr_fields),
- loglevel_value,
- model_emf_uri.get() ?
- nonstd::optional<std::string>(
- model_emf_uri.get()) :
- nonstd::nullopt,
- ua_sess->buffer_type, *app, event_id);
+ channel.add_event(
+ sobjd,
+ cobjd,
+ name,
+ signature.get(),
+ lsu::create_trace_fields_from_ust_ctl_fields(
+ *locked_registry,
+ fields.get(),
+ nr_fields,
+ lst::field_location::root::EVENT_RECORD_PAYLOAD,
+ lsu::ctl_field_quirks::
+ UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS),
+ loglevel_value,
+ model_emf_uri.get() ?
+ nonstd::optional<std::string>(model_emf_uri.get()) :
+ nonstd::nullopt,
+ ua_sess->buffer_type,
+ *app,
+ event_id);
ret_code = 0;
} catch (const std::exception& ex) {
- ERR("Failed to add event `%s` to registry session: %s", name,
- ex.what());
+ ERR("Failed to add event `%s` to registry session: %s",
+ name,
+ ex.what());
/* Inform the application of the error; don't return directly. */
ret_code = -EINVAL;
}
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
/*
* No need to wipe the create event since the application socket will
return ret;
}
- DBG3("UST registry event %s with id %" PRId32 " added successfully",
- name, event_id);
+ DBG3("UST registry event %s with id %" PRId32 " added successfully", name, event_id);
return ret;
}
*
* On success 0 is returned else a negative value.
*/
-static int add_enum_ust_registry(int sock, int sobjd, const char *name,
- struct lttng_ust_ctl_enum_entry *raw_entries, size_t nr_entries)
+static int add_enum_ust_registry(int sock,
+ int sobjd,
+ const char *name,
+ struct lttng_ust_ctl_enum_entry *raw_entries,
+ size_t nr_entries)
{
int ret = 0;
struct ust_app *app;
uint64_t enum_id = -1ULL;
lttng::urcu::read_lock_guard read_lock_guard;
auto entries = lttng::make_unique_wrapper<struct lttng_ust_ctl_enum_entry, lttng::free>(
- raw_entries);
+ raw_entries);
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
/* Return an error code even though this is expected: the app is being torn down. */
- DBG("Application socket %d is being torn down. Aborting enum registration",
- sock);
+ DBG("Application socket %d is being torn down. Aborting enum registration", sock);
return -1;
}
int application_reply_code;
try {
locked_registry->create_or_find_enum(
- sobjd, name, entries.release(), nr_entries, &enum_id);
+ sobjd, name, entries.release(), nr_entries, &enum_id);
application_reply_code = 0;
} catch (const std::exception& ex) {
- ERR("%s: %s", fmt::format("Failed to create or find enumeration provided by application: app = {}, enumeration name = {}",
- *app, name).c_str(), ex.what());
+ ERR("%s: %s",
+ fmt::format(
+ "Failed to create or find enumeration provided by application: app = {}, enumeration name = {}",
+ *app,
+ name)
+ .c_str(),
+ ex.what());
application_reply_code = -1;
}
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else if (ret == -EAGAIN) {
WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
- app->pid, app->sock);
+ app->pid,
+ app->sock);
} else {
ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
- ret, app->pid, app->sock);
+ ret,
+ app->pid,
+ app->sock);
}
/*
* No need to wipe the create enum since the application socket will
ret = lttng_ust_ctl_recv_notify(sock, &cmd);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- DBG3("UST app recv notify failed. Application died: sock = %d",
- sock);
+ DBG3("UST app recv notify failed. Application died: sock = %d", sock);
} else if (ret == -EAGAIN) {
- WARN("UST app recv notify failed. Communication time out: sock = %d",
- sock);
+ WARN("UST app recv notify failed. Communication time out: sock = %d", sock);
} else {
- ERR("UST app recv notify failed with ret %d: sock = %d",
- ret, sock);
+ ERR("UST app recv notify failed with ret %d: sock = %d", ret, sock);
}
goto error;
}
DBG2("UST app ustctl register event received");
- ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel_value,
- &sig, &nr_fields, &fields, &model_emf_uri);
+ ret = lttng_ust_ctl_recv_register_event(sock,
+ &sobjd,
+ &cobjd,
+ name,
+ &loglevel_value,
+ &sig,
+ &nr_fields,
+ &fields,
+ &model_emf_uri);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app recv event failed. Application died: sock = %d",
- sock);
+ sock);
} else if (ret == -EAGAIN) {
WARN("UST app recv event failed. Communication time out: sock = %d",
- sock);
+ sock);
} else {
- ERR("UST app recv event failed with ret %d: sock = %d",
- ret, sock);
+ ERR("UST app recv event failed with ret %d: sock = %d", ret, sock);
}
goto error;
}
lttng::urcu::read_lock_guard rcu_lock;
const struct ust_app *app = find_app_by_notify_sock(sock);
if (!app) {
- DBG("Application socket %d is being torn down. Abort event notify", sock);
+ DBG("Application socket %d is being torn down. Abort event notify",
+ sock);
ret = -1;
goto error;
}
if ((!fields && nr_fields > 0) || (fields && nr_fields == 0)) {
ERR("Invalid return value from lttng_ust_ctl_recv_register_event: fields = %p, nr_fields = %zu",
- fields, nr_fields);
+ fields,
+ nr_fields);
ret = -1;
free(fields);
goto error;
* code path loses the ownership of these variables and transfers them
* to this function.
*/
- ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
- fields, loglevel_value, model_emf_uri);
+ ret = add_event_ust_registry(sock,
+ sobjd,
+ cobjd,
+ name,
+ sig,
+ nr_fields,
+ fields,
+ loglevel_value,
+ model_emf_uri);
if (ret < 0) {
goto error;
}
DBG2("UST app ustctl register channel received");
ret = lttng_ust_ctl_recv_register_channel(
- sock, &sobjd, &cobjd, &field_count, &context_fields);
+ sock, &sobjd, &cobjd, &field_count, &context_fields);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
DBG3("UST app recv channel failed. Application died: sock = %d",
- sock);
+ sock);
} else if (ret == -EAGAIN) {
WARN("UST app recv channel failed. Communication time out: sock = %d",
- sock);
+ sock);
} else {
- ERR("UST app recv channel failed with ret %d: sock = %d", ret,
- sock);
+ ERR("UST app recv channel failed with ret %d: sock = %d",
+ ret,
+ sock);
}
goto error;
}
* that if needed it will be freed. After this, it's invalid to access
* fields or clean them up.
*/
- ret = handle_app_register_channel_notification(sock, cobjd, context_fields, field_count);
+ ret = handle_app_register_channel_notification(
+ sock, cobjd, context_fields, field_count);
if (ret < 0) {
goto error;
}
DBG2("UST app ustctl register enum received");
- ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
- &entries, &nr_entries);
+ ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name, &entries, &nr_entries);
if (ret < 0) {
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- DBG3("UST app recv enum failed. Application died: sock = %d",
- sock);
+ DBG3("UST app recv enum failed. Application died: sock = %d", sock);
} else if (ret == -EAGAIN) {
WARN("UST app recv enum failed. Communication time out: sock = %d",
- sock);
+ sock);
} else {
- ERR("UST app recv enum failed with ret %d: sock = %d",
- ret, sock);
+ ERR("UST app recv enum failed with ret %d: sock = %d", ret, sock);
}
goto error;
}
/* Callee assumes ownership of entries. */
- ret = add_enum_ust_registry(sock, sobjd, name,
- entries, nr_entries);
+ ret = add_enum_ust_registry(sock, sobjd, name, entries, nr_entries);
if (ret < 0) {
goto error;
}
LTTNG_ASSERT(sock >= 0);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
obj = zmalloc<ust_app_notify_sock_obj>();
if (!obj) {
(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
close_socket:
- rcu_read_unlock();
/*
* Close socket after a grace period to avoid the socket being reused
*
* Returns LTTNG_OK on success or a LTTNG_ERR error code.
*/
-enum lttng_error_code ust_app_snapshot_record(
- const struct ltt_ust_session *usess,
- const struct consumer_output *output,
- uint64_t nb_packets_per_stream)
+enum lttng_error_code ust_app_snapshot_record(const struct ltt_ust_session *usess,
+ const struct consumer_output *output,
+ uint64_t nb_packets_per_stream)
{
int ret = 0;
enum lttng_error_code status = LTTNG_OK;
struct lttng_ht_iter iter;
struct ust_app *app;
- char *trace_path = NULL;
+ char *trace_path = nullptr;
LTTNG_ASSERT(usess);
LTTNG_ASSERT(output);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
char pathname[PATH_MAX];
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
+ usess->consumer);
if (!socket) {
status = LTTNG_ERR_INVALID;
goto error;
}
memset(pathname, 0, sizeof(pathname));
- ret = snprintf(pathname, sizeof(pathname),
- DEFAULT_UST_TRACE_UID_PATH,
- reg->uid, reg->bits_per_long);
+ ret = snprintf(pathname,
+ sizeof(pathname),
+ DEFAULT_UST_TRACE_UID_PATH,
+ reg->uid,
+ reg->bits_per_long);
if (ret < 0) {
PERROR("snprintf snapshot path");
status = LTTNG_ERR_INVALID;
}
/* Free path allowed on previous iteration. */
free(trace_path);
- trace_path = setup_channel_trace_path(usess->consumer, pathname,
- &consumer_path_offset);
+ trace_path = setup_channel_trace_path(
+ usess->consumer, pathname, &consumer_path_offset);
if (!trace_path) {
status = LTTNG_ERR_INVALID;
goto error;
}
/* Add the UST default trace dir to path. */
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- buf_reg_chan, node.node) {
- status = consumer_snapshot_channel(socket,
- buf_reg_chan->consumer_key,
- output, 0, &trace_path[consumer_path_offset],
- nb_packets_per_stream);
+ cds_lfht_for_each_entry (
+ reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ status =
+ consumer_snapshot_channel(socket,
+ buf_reg_chan->consumer_key,
+ output,
+ 0,
+ &trace_path[consumer_path_offset],
+ nb_packets_per_stream);
if (status != LTTNG_OK) {
goto error;
}
}
status = consumer_snapshot_channel(socket,
- reg->registry->reg.ust->_metadata_key, output, 1,
- &trace_path[consumer_path_offset], 0);
+ reg->registry->reg.ust->_metadata_key,
+ output,
+ 1,
+ &trace_path[consumer_path_offset],
+ 0);
if (status != LTTNG_OK) {
goto error;
}
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
{
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
}
/* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
- output);
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, output);
if (!socket) {
status = LTTNG_ERR_INVALID;
goto error;
/* Add the UST default trace dir to path. */
memset(pathname, 0, sizeof(pathname));
- ret = snprintf(pathname, sizeof(pathname), "%s",
- ua_sess->path);
+ ret = snprintf(pathname, sizeof(pathname), "%s", ua_sess->path);
if (ret < 0) {
status = LTTNG_ERR_INVALID;
PERROR("snprintf snapshot path");
}
/* Free path allowed on previous iteration. */
free(trace_path);
- trace_path = setup_channel_trace_path(usess->consumer, pathname,
- &consumer_path_offset);
+ trace_path = setup_channel_trace_path(
+ usess->consumer, pathname, &consumer_path_offset);
if (!trace_path) {
status = LTTNG_ERR_INVALID;
goto error;
}
- cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
- ua_chan, node.node) {
- status = consumer_snapshot_channel(socket,
- ua_chan->key, output, 0,
- &trace_path[consumer_path_offset],
- nb_packets_per_stream);
+ cds_lfht_for_each_entry (
+ ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
+ status =
+ consumer_snapshot_channel(socket,
+ ua_chan->key,
+ output,
+ 0,
+ &trace_path[consumer_path_offset],
+ nb_packets_per_stream);
switch (status) {
case LTTNG_OK:
break;
continue;
}
status = consumer_snapshot_channel(socket,
- registry->_metadata_key, output, 1,
- &trace_path[consumer_path_offset], 0);
+ registry->_metadata_key,
+ output,
+ 1,
+ &trace_path[consumer_path_offset],
+ 0);
switch (status) {
case LTTNG_OK:
break;
error:
free(trace_path);
- rcu_read_unlock();
return status;
}
/*
* Return the size taken by one more packet per stream.
*/
-uint64_t ust_app_get_size_one_more_packet_per_stream(
- const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
+uint64_t ust_app_get_size_one_more_packet_per_stream(const struct ltt_ust_session *usess,
+ uint64_t cur_nr_packets)
{
uint64_t tot_size = 0;
struct ust_app *app;
{
struct buffer_reg_uid *reg;
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
- rcu_read_lock();
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- buf_reg_chan, node.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (
+ reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
/*
* Don't take channel into account if we
}
tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
}
- rcu_read_unlock();
}
break;
}
case LTTNG_BUFFER_PER_PID:
{
- rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
struct lttng_ht_iter chan_iter;
continue;
}
- cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
- ua_chan, node.node) {
+ cds_lfht_for_each_entry (
+ ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
/*
* Don't take channel into account if we
tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
}
}
- rcu_read_unlock();
break;
}
default:
}
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
- struct cds_list_head *buffer_reg_uid_list,
- struct consumer_output *consumer, uint64_t uchan_id,
- int overwrite, uint64_t *discarded, uint64_t *lost)
+ struct cds_list_head *buffer_reg_uid_list,
+ struct consumer_output *consumer,
+ uint64_t uchan_id,
+ int overwrite,
+ uint64_t *discarded,
+ uint64_t *lost)
{
int ret;
uint64_t consumer_chan_key;
*lost = 0;
ret = buffer_reg_uid_consumer_channel_key(
- buffer_reg_uid_list, uchan_id, &consumer_chan_key);
+ buffer_reg_uid_list, uchan_id, &consumer_chan_key);
if (ret < 0) {
/* Not found */
ret = 0;
}
if (overwrite) {
- ret = consumer_get_lost_packets(ust_session_id,
- consumer_chan_key, consumer, lost);
+ ret = consumer_get_lost_packets(ust_session_id, consumer_chan_key, consumer, lost);
} else {
- ret = consumer_get_discarded_events(ust_session_id,
- consumer_chan_key, consumer, discarded);
+ ret = consumer_get_discarded_events(
+ ust_session_id, consumer_chan_key, consumer, discarded);
}
end:
}
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan,
- struct consumer_output *consumer, int overwrite,
- uint64_t *discarded, uint64_t *lost)
+ struct ltt_ust_channel *uchan,
+ struct consumer_output *consumer,
+ int overwrite,
+ uint64_t *discarded,
+ uint64_t *lost)
{
int ret = 0;
struct lttng_ht_iter iter;
*discarded = 0;
*lost = 0;
- rcu_read_lock();
/*
* Iterate over every registered applications. Sum counters for
* all applications containing requested session and channel.
*/
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct lttng_ht_iter uiter;
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
continue;
}
if (overwrite) {
uint64_t _lost;
- ret = consumer_get_lost_packets(usess->id, ua_chan->key,
- consumer, &_lost);
+ ret = consumer_get_lost_packets(usess->id, ua_chan->key, consumer, &_lost);
if (ret < 0) {
break;
}
} else {
uint64_t _discarded;
- ret = consumer_get_discarded_events(usess->id,
- ua_chan->key, consumer, &_discarded);
+ ret = consumer_get_discarded_events(
+ usess->id, ua_chan->key, consumer, &_discarded);
if (ret < 0) {
break;
}
}
}
- rcu_read_unlock();
return ret;
}
-static
-int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
- struct ust_app *app)
+static int ust_app_regenerate_statedump(struct ltt_ust_session *usess, struct ust_app *app)
{
int ret = 0;
struct ust_app_session *ua_sess;
DBG("Regenerating the metadata for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
+ if (ua_sess == nullptr) {
/* The session is in teardown process. Ignore and continue. */
goto end;
}
pthread_mutex_unlock(&ua_sess->lock);
end:
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Regenerating the metadata for all UST apps");
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
if (!app->compatible) {
continue;
}
}
}
- rcu_read_unlock();
-
return 0;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
+ lttng::urcu::read_lock_guard read_lock;
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
+ usess->consumer);
if (!socket) {
cmd_ret = LTTNG_ERR_INVALID;
goto error;
}
/* Rotate the data channels. */
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- buf_reg_chan, node.node) {
+ cds_lfht_for_each_entry (
+ reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
ret = consumer_rotate_channel(socket,
- buf_reg_chan->consumer_key,
- usess->consumer,
- /* is_metadata_channel */ false);
+ buf_reg_chan->consumer_key,
+ usess->consumer,
+ /* is_metadata_channel */ false);
if (ret < 0) {
cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
ret = consumer_rotate_channel(socket,
- reg->registry->reg.ust->_metadata_key,
- usess->consumer,
- /* is_metadata_channel */ true);
+ reg->registry->reg.ust->_metadata_key,
+ usess->consumer,
+ /* is_metadata_channel */ true);
if (ret < 0) {
cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
case LTTNG_BUFFER_PER_PID:
{
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
/* Get the right consumer socket for the application. */
socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
- usess->consumer);
+ usess->consumer);
if (!socket) {
cmd_ret = LTTNG_ERR_INVALID;
goto error;
}
/* Rotate the data channels. */
- cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
- ua_chan, node.node) {
+ cds_lfht_for_each_entry (
+ ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
ret = consumer_rotate_channel(socket,
- ua_chan->key,
- ua_sess->consumer,
- /* is_metadata_channel */ false);
+ ua_chan->key,
+ ua_sess->consumer,
+ /* is_metadata_channel */ false);
if (ret < 0) {
/* Per-PID buffer and application going away. */
if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
(void) push_metadata(locked_registry, usess->consumer);
}
ret = consumer_rotate_channel(socket,
- registry->_metadata_key,
- ua_sess->consumer,
- /* is_metadata_channel */ true);
+ registry->_metadata_key,
+ ua_sess->consumer,
+ /* is_metadata_channel */ true);
if (ret < 0) {
/* Per-PID buffer and application going away. */
if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
cmd_ret = LTTNG_OK;
error:
- rcu_read_unlock();
return cmd_ret;
}
-enum lttng_error_code ust_app_create_channel_subdirectories(
- const struct ltt_ust_session *usess)
+enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust_session *usess)
{
enum lttng_error_code ret = LTTNG_OK;
struct lttng_ht_iter iter;
int fmt_ret;
LTTNG_ASSERT(usess->current_trace_chunk);
- rcu_read_lock();
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
fmt_ret = asprintf(&pathname_index,
- DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
- reg->uid, reg->bits_per_long);
+ DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH
+ "/" DEFAULT_INDEX_DIR,
+ reg->uid,
+ reg->bits_per_long);
if (fmt_ret < 0) {
ERR("Failed to format channel index directory");
ret = LTTNG_ERR_CREATE_DIR_FAIL;
* of implicitly creating the channel's path.
*/
chunk_status = lttng_trace_chunk_create_subdirectory(
- usess->current_trace_chunk,
- pathname_index);
+ usess->current_trace_chunk, pathname_index);
free(pathname_index);
if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
ret = LTTNG_ERR_CREATE_DIR_FAIL;
case LTTNG_BUFFER_PER_PID:
{
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
/*
* Create the toplevel ust/ directory in case no apps are running.
*/
- chunk_status = lttng_trace_chunk_create_subdirectory(
- usess->current_trace_chunk,
- DEFAULT_UST_TRACE_DIR);
+ chunk_status = lttng_trace_chunk_create_subdirectory(usess->current_trace_chunk,
+ DEFAULT_UST_TRACE_DIR);
if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
ret = LTTNG_ERR_CREATE_DIR_FAIL;
goto error;
}
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
- pid_n.node) {
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct ust_app_session *ua_sess;
lsu::registry_session *registry;
}
fmt_ret = asprintf(&pathname_index,
- DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
- ua_sess->path);
+ DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
+ ua_sess->path);
if (fmt_ret < 0) {
ERR("Failed to format channel index directory");
ret = LTTNG_ERR_CREATE_DIR_FAIL;
* of implicitly creating the channel's path.
*/
chunk_status = lttng_trace_chunk_create_subdirectory(
- usess->current_trace_chunk,
- pathname_index);
+ usess->current_trace_chunk, pathname_index);
free(pathname_index);
if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
ret = LTTNG_ERR_CREATE_DIR_FAIL;
ret = LTTNG_OK;
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
if (usess->active) {
ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
cmd_ret = LTTNG_ERR_FATAL;
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
+ usess->consumer);
if (!socket) {
cmd_ret = LTTNG_ERR_INVALID;
goto error_socket;
}
/* Clear the data channels. */
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- buf_reg_chan, node.node) {
- ret = consumer_clear_channel(socket,
- buf_reg_chan->consumer_key);
+ cds_lfht_for_each_entry (
+ reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ ret = consumer_clear_channel(socket, buf_reg_chan->consumer_key);
if (ret < 0) {
goto error;
}
* Metadata channel is not cleared per se but we still need to
* perform a rotation operation on it behind the scene.
*/
- ret = consumer_clear_channel(socket,
- reg->registry->reg.ust->_metadata_key);
+ ret = consumer_clear_channel(socket, reg->registry->reg.ust->_metadata_key);
if (ret < 0) {
goto error;
}
}
case LTTNG_BUFFER_PER_PID:
{
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
/* Get the right consumer socket for the application. */
socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
- usess->consumer);
+ usess->consumer);
if (!socket) {
cmd_ret = LTTNG_ERR_INVALID;
goto error_socket;
}
/* Clear the data channels. */
- cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
- ua_chan, node.node) {
+ cds_lfht_for_each_entry (
+ ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
ret = consumer_clear_channel(socket, ua_chan->key);
if (ret < 0) {
/* Per-PID buffer and application going away. */
error_socket:
end:
- rcu_read_unlock();
return cmd_ret;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
- cds_list_for_each_entry (
- reg, &usess->buffer_reg_uid_list, lnode) {
+ cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
+ lttng::urcu::read_lock_guard read_lock;
- socket = consumer_find_socket_by_bitness(
- reg->bits_per_long, usess->consumer);
+ socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+ usess->consumer);
if (!socket) {
ret = LTTNG_ERR_FATAL;
goto error;
}
- cds_lfht_for_each_entry(reg->registry->channels->ht,
- &iter.iter, buf_reg_chan, node.node) {
- const int open_ret =
- consumer_open_channel_packets(
- socket,
- buf_reg_chan->consumer_key);
+ cds_lfht_for_each_entry (
+ reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
+ const int open_ret = consumer_open_channel_packets(
+ socket, buf_reg_chan->consumer_key);
if (open_ret < 0) {
ret = LTTNG_ERR_UNK;
case LTTNG_BUFFER_PER_PID:
{
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (
- ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
}
/* Get the right consumer socket for the application. */
- socket = consumer_find_socket_by_bitness(
- app->abi.bits_per_long, usess->consumer);
+ socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
+ usess->consumer);
if (!socket) {
ret = LTTNG_ERR_FATAL;
goto error;
continue;
}
- cds_lfht_for_each_entry(ua_sess->channels->ht,
- &chan_iter.iter, ua_chan, node.node) {
+ cds_lfht_for_each_entry (
+ ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
const int open_ret =
- consumer_open_channel_packets(
- socket,
- ua_chan->key);
+ consumer_open_channel_packets(socket, ua_chan->key);
if (open_ret < 0) {
/*
}
error:
- rcu_read_unlock();
return ret;
}
+
+lsu::ctl_field_quirks ust_app::ctl_field_quirks() const
+{
+ /*
+ * Application contexts are expressed as variants. LTTng-UST announces
+ * those by registering an enumeration named `..._tag`. It then registers a
+ * variant as part of the event context that contains the various possible
+ * types.
+ *
+ * Unfortunately, the names used in the enumeration and variant don't
+ * match: the enumeration names are all prefixed with an underscore while
+ * the variant type tag fields aren't.
+ *
+ * While the CTF 1.8.3 specification mentions that underscores
+ * *should* (not *must*) be removed by CTF readers, Babeltrace 1.x
+ * (and possibly others) expects a perfect match between the names used
+ * by tags and variants.
+ *
+ * When the UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS quirk is enabled,
+ * the variant's fields are modified to match the mappings of its tag.
+ *
+ * From ABI version >= 10.x, the variant fields and tag mapping names
+ * correctly match, making this quirk unnecessary.
+ */
+ return v_major <= 9 ? lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS :
+ lsu::ctl_field_quirks::NONE;
+}