/*
* Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2017 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2 only,
#include <unistd.h>
#include <urcu/list.h>
#include <signal.h>
+#include <stdbool.h>
#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;
-extern volatile int consumer_quit;
/*
* Free channel object and all streams associated with it. This MUST be used
cds_list_del(&stream->send_node);
ustctl_destroy_stream(stream->ustream);
+ lttng_trace_chunk_put(stream->trace_chunk);
free(stream);
}
* Allocate and return a consumer channel object.
*/
static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
- const char *pathname, const char *name, uid_t uid, gid_t gid,
+ const uint64_t *chunk_id, const char *pathname, const char *name,
uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
uint64_t tracefile_size, uint64_t tracefile_count,
uint64_t session_id_per_pid, unsigned int monitor,
assert(pathname);
assert(name);
- return consumer_allocate_channel(key, session_id, pathname, name, uid,
- gid, relayd_id, output, tracefile_size,
+ return consumer_allocate_channel(key, session_id, chunk_id, pathname,
+ name, relayd_id, output, tracefile_size,
tracefile_count, session_id_per_pid, monitor,
live_timer_interval, root_shm_path, shm_path);
}
stream = consumer_allocate_stream(channel->key,
key,
- LTTNG_CONSUMER_ACTIVE_STREAM,
channel->name,
- channel->uid,
- channel->gid,
channel->relayd_id,
channel->session_id,
+ channel->trace_chunk,
cpu,
&alloc_ret,
channel->type,
goto error;
}
+ consumer_stream_update_channel_attributes(stream, channel);
stream->chan = channel;
error:
/* Get the right pipe where the stream will be sent. */
if (stream->metadata_flag) {
- ret = consumer_add_metadata_stream(stream);
- if (ret) {
- ERR("Consumer add metadata stream %" PRIu64 " failed.",
- stream->key);
- goto error;
- }
+ consumer_add_metadata_stream(stream);
stream_pipe = ctx->consumer_metadata_pipe;
} else {
- ret = consumer_add_data_stream(stream);
- if (ret) {
- ERR("Consumer add stream %" PRIu64 " failed.",
- stream->key);
- goto error;
- }
+ consumer_add_data_stream(stream);
stream_pipe = ctx->consumer_data_pipe;
}
/*
* From this point on, the stream's ownership has been moved away from
- * the channel and becomes globally visible.
+ * the channel and it becomes globally visible. Hence, remove it from
+ * the local stream list to prevent the stream from being both local and
+ * global.
*/
stream->globally_visible = 1;
+ cds_list_del(&stream->send_node);
ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
if (ret < 0) {
} else {
consumer_del_stream_for_data(stream);
}
+ goto error;
}
+
error:
return ret;
}
/*
* Create streams for the given channel using liblttng-ust-ctl.
+ * The channel lock must be acquired by the caller.
*
* Return 0 on success else a negative value.
*/
int ret, cpu = 0;
struct ustctl_consumer_stream *ustream;
struct lttng_consumer_stream *stream;
+ pthread_mutex_t *current_stream_lock = NULL;
assert(channel);
assert(ctx);
uatomic_inc(&stream->chan->refcount);
}
+ pthread_mutex_lock(&stream->lock);
+ current_stream_lock = &stream->lock;
/*
* Order is important this is why a list is used. On error, the caller
* should clean this list.
sizeof(ust_metadata_pipe));
}
}
+ pthread_mutex_unlock(&stream->lock);
+ current_stream_lock = NULL;
}
return 0;
error:
error_alloc:
+ if (current_stream_lock) {
+ pthread_mutex_unlock(current_stream_lock);
+ }
return ret;
}
return -1;
}
-static int open_ust_stream_fd(struct lttng_consumer_channel *channel,
- struct ustctl_consumer_channel_attr *attr,
- int cpu)
+static int open_ust_stream_fd(struct lttng_consumer_channel *channel, int cpu,
+ const struct lttng_credentials *session_credentials)
{
char shm_path[PATH_MAX];
int ret;
}
return run_as_open(shm_path,
O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
- channel->uid, channel->gid);
+ session_credentials->uid, session_credentials->gid);
error_shm_path:
return -1;
assert(channel);
assert(attr);
assert(ust_chanp);
+ assert(channel->buffer_credentials.is_set);
DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
goto error_alloc;
}
for (i = 0; i < nr_stream_fds; i++) {
- stream_fds[i] = open_ust_stream_fd(channel, attr, i);
+ stream_fds[i] = open_ust_stream_fd(channel, i,
+ &channel->buffer_credentials.value);
if (stream_fds[i] < 0) {
ret = -1;
goto error_open;
ERR("Cannot get stream shm path");
}
closeret = run_as_unlink(shm_path,
- channel->uid, channel->gid);
+ channel->buffer_credentials.value.uid,
+ channel->buffer_credentials.value.gid);
if (closeret) {
PERROR("unlink %s", shm_path);
}
}
/* Try to rmdir all directories under shm_path root. */
if (channel->root_shm_path[0]) {
- (void) run_as_recursive_rmdir(channel->root_shm_path,
- channel->uid, channel->gid);
+ (void) run_as_rmdir_recursive(channel->root_shm_path,
+ channel->buffer_credentials.value.uid,
+ channel->buffer_credentials.value.gid,
+ LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
}
free(stream_fds);
error_alloc:
}
/*
- * Send channel to sessiond.
+ * Send channel to sessiond and relayd if applicable.
*
* Return 0 on success or else a negative value.
*/
-static int send_sessiond_channel(int sock,
+static int send_channel_to_sessiond_and_relayd(int sock,
struct lttng_consumer_channel *channel,
struct lttng_consumer_local_data *ctx, int *relayd_error)
{
health_code_update();
/* Try to send the stream to the relayd if one is available. */
+ DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd",
+ stream->key, channel->name);
ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
if (ret < 0) {
/*
* Return 0 on success or else, a negative value is returned and the channel
* MUST be destroyed by consumer_del_channel().
*/
-static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
+static int ask_channel(struct lttng_consumer_local_data *ctx,
struct lttng_consumer_channel *channel,
struct ustctl_consumer_channel_attr *attr)
{
}
/* Open all streams for this channel. */
+ pthread_mutex_lock(&channel->lock);
ret = create_ust_streams(channel, ctx);
+ pthread_mutex_unlock(&channel->lock);
if (ret < 0) {
goto end;
}
* If we are unable to send the stream to the thread, there is
* a big problem so just stop everything.
*/
- /* Remove node from the channel stream list. */
- cds_list_del(&stream->send_node);
goto error;
}
-
- /* Remove node from the channel stream list. */
- cds_list_del(&stream->send_node);
-
}
error:
health_code_update();
- ustctl_flush_buffer(stream->ustream, 1);
+ pthread_mutex_lock(&stream->lock);
+
+ /*
+ * Protect against concurrent teardown of a stream.
+ */
+ if (cds_lfht_is_node_deleted(&stream->node.node)) {
+ goto next;
+ }
+
+ if (!stream->quiescent) {
+ ustctl_flush_buffer(stream->ustream, 0);
+ stream->quiescent = true;
+ }
+next:
+ pthread_mutex_unlock(&stream->lock);
+ }
+error:
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Clear quiescent state from channel's streams using the given key to
+ * retrieve the channel.
+ *
+ * Invoked on reception of the LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL
+ * command (e.g. when tracing is (re)started for a session).
+ *
+ * Return 0 on success else an LTTng error code.
+ */
+static int clear_quiescent_channel(uint64_t chan_key)
+{
+	int ret = 0;
+	struct lttng_consumer_channel *channel;
+	struct lttng_consumer_stream *stream;
+	struct lttng_ht *ht;
+	struct lttng_ht_iter iter;
+
+	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);
+
+	rcu_read_lock();
+	channel = consumer_find_channel(chan_key);
+	if (!channel) {
+		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
+		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+		goto error;
+	}
+
+	ht = consumer_data.stream_per_chan_id_ht;
+
+	/* For each stream of the channel id, clear quiescent state. */
+	cds_lfht_for_each_entry_duplicate(ht->ht,
+			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
+			&channel->key, &iter.iter, stream, node_channel_id.node) {
+
+		health_code_update();
+
+		/*
+		 * The stream lock serializes against the flush paths that
+		 * test-and-set 'quiescent' under the same lock.
+		 */
+		pthread_mutex_lock(&stream->lock);
+		stream->quiescent = false;
+		pthread_mutex_unlock(&stream->lock);
+	}
}
error:
rcu_read_unlock();
/*
* Close metadata stream wakeup_fd using the given key to retrieve the channel.
- * RCU read side lock MUST be acquired before calling this function.
*
* Return 0 on success else an LTTng error code.
*/
{
int ret = 0;
struct lttng_consumer_channel *channel;
+ unsigned int channel_monitor;
DBG("UST consumer close metadata key %" PRIu64, chan_key);
pthread_mutex_lock(&consumer_data.lock);
pthread_mutex_lock(&channel->lock);
-
+ channel_monitor = channel->monitor;
if (cds_lfht_is_node_deleted(&channel->node.node)) {
goto error_unlock;
}
lttng_ustconsumer_close_metadata(channel);
+ pthread_mutex_unlock(&channel->lock);
+ pthread_mutex_unlock(&consumer_data.lock);
+
+ /*
+ * The ownership of a metadata channel depends on the type of
+ * session to which it belongs. In effect, the monitor flag is checked
+ * to determine if this metadata channel is in "snapshot" mode or not.
+ *
+ * In the non-snapshot case, the metadata channel is created along with
+ * a single stream which will remain present until the metadata channel
+ * is destroyed (on the destruction of its session). In this case, the
+	 * metadata stream is "monitored" by the metadata poll thread and holds
+ * the ownership of its channel.
+ *
+ * Closing the metadata will cause the metadata stream's "metadata poll
+ * pipe" to be closed. Closing this pipe will wake-up the metadata poll
+	 * thread which will tear down the metadata stream which, in turn,
+ * deletes the metadata channel.
+ *
+ * In the snapshot case, the metadata stream is created and destroyed
+ * on every snapshot record. Since the channel doesn't have an owner
+ * other than the session daemon, it is safe to destroy it immediately
+ * on reception of the CLOSE_METADATA command.
+ */
+ if (!channel_monitor) {
+ /*
+ * The channel and consumer_data locks must be
+ * released before this call since consumer_del_channel
+ * re-acquires the channel and consumer_data locks to teardown
+ * the channel and queue its reclamation by the "call_rcu"
+ * worker thread.
+ */
+ consumer_del_channel(channel);
+ }
+ return ret;
error_unlock:
pthread_mutex_unlock(&channel->lock);
pthread_mutex_unlock(&consumer_data.lock);
}
}
+ /*
+ * Ownership of metadata stream is passed along. Freeing is handled by
+ * the callee.
+ */
ret = send_streams_to_thread(metadata, ctx);
if (ret < 0) {
/*
* a big problem so just stop everything.
*/
ret = LTTCOMM_CONSUMERD_FATAL;
- goto error;
+ goto send_streams_error;
}
/* List MUST be empty after or else it could be reused. */
assert(cds_list_empty(&metadata->streams.head));
consumer_stream_destroy(metadata->metadata_stream, NULL);
cds_list_del(&metadata->metadata_stream->send_node);
metadata->metadata_stream = NULL;
+send_streams_error:
error_no_stream:
end:
return ret;
/*
* Snapshot the whole metadata.
+ * RCU read-side lock must be held by the caller.
*
* Returns 0 on success, < 0 on error
*/
-static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
+static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
+ uint64_t key, char *path, uint64_t relayd_id,
struct lttng_consumer_local_data *ctx)
{
int ret = 0;
- struct lttng_consumer_channel *metadata_channel;
struct lttng_consumer_stream *metadata_stream;
assert(path);
rcu_read_lock();
- metadata_channel = consumer_find_channel(key);
- if (!metadata_channel) {
- ERR("UST snapshot metadata channel not found for key %" PRIu64,
- key);
- ret = -1;
- goto error;
- }
assert(!metadata_channel->monitor);
health_code_update();
metadata_stream = metadata_channel->metadata_stream;
assert(metadata_stream);
+ pthread_mutex_lock(&metadata_stream->lock);
if (relayd_id != (uint64_t) -1ULL) {
metadata_stream->net_seq_idx = relayd_id;
ret = consumer_send_relayd_stream(metadata_stream, path);
- if (ret < 0) {
- goto error_stream;
- }
} else {
- ret = utils_create_stream_file(path, metadata_stream->name,
- metadata_stream->chan->tracefile_size,
- metadata_stream->tracefile_count_current,
- metadata_stream->uid, metadata_stream->gid, NULL);
- if (ret < 0) {
- goto error_stream;
- }
- metadata_stream->out_fd = ret;
- metadata_stream->tracefile_size_current = 0;
+ ret = consumer_stream_create_output_files(metadata_stream,
+ false);
+ }
+ pthread_mutex_unlock(&metadata_stream->lock);
+ if (ret < 0) {
+ goto error_stream;
}
do {
/*
* Take a snapshot of all the stream of a channel.
+ * RCU read-side lock and the channel lock must be held by the caller.
*
* Returns 0 on success, < 0 on error
*/
-static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
- uint64_t nb_packets_per_stream, struct lttng_consumer_local_data *ctx)
+static int snapshot_channel(struct lttng_consumer_channel *channel,
+ uint64_t key, char *path, uint64_t relayd_id,
+ uint64_t nb_packets_per_stream,
+ struct lttng_consumer_local_data *ctx)
{
int ret;
unsigned use_relayd = 0;
unsigned long consumed_pos, produced_pos;
- struct lttng_consumer_channel *channel;
struct lttng_consumer_stream *stream;
assert(path);
use_relayd = 1;
}
- channel = consumer_find_channel(key);
- if (!channel) {
- ERR("UST snapshot channel not found for key %" PRIu64, key);
- ret = -1;
- goto error;
- }
assert(!channel->monitor);
DBG("UST consumer snapshot channel %" PRIu64, key);
cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
- /* Are we at a position _before_ the first available packet ? */
- bool before_first_packet = true;
-
health_code_update();
/* Lock stream because we are about to change its state. */
pthread_mutex_lock(&stream->lock);
+ assert(channel->trace_chunk);
+ if (!lttng_trace_chunk_get(channel->trace_chunk)) {
+ /*
+ * Can't happen barring an internal error as the channel
+ * holds a reference to the trace chunk.
+ */
+ ERR("Failed to acquire reference to channel's trace chunk");
+ ret = -1;
+ goto error_unlock;
+ }
+ assert(!stream->trace_chunk);
+ stream->trace_chunk = channel->trace_chunk;
+
stream->net_seq_idx = relayd_id;
if (use_relayd) {
goto error_unlock;
}
} else {
- ret = utils_create_stream_file(path, stream->name,
- stream->chan->tracefile_size,
- stream->tracefile_count_current,
- stream->uid, stream->gid, NULL);
- if (ret < 0) {
- goto error_unlock;
- }
- stream->out_fd = ret;
- stream->tracefile_size_current = 0;
-
- DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
- stream->name, stream->key);
- }
- if (relayd_id != -1ULL) {
- ret = consumer_send_relayd_streams_sent(relayd_id);
+ ret = consumer_stream_create_output_files(stream,
+ false);
if (ret < 0) {
goto error_unlock;
}
+ DBG("UST consumer snapshot stream (%" PRIu64 ")",
+ stream->key);
}
- ustctl_flush_buffer(stream->ustream, 1);
+ /*
+ * If tracing is active, we want to perform a "full" buffer flush.
+ * Else, if quiescent, it has already been done by the prior stop.
+ */
+ if (!stream->quiescent) {
+ ustctl_flush_buffer(stream->ustream, 0);
+ }
ret = lttng_ustconsumer_take_snapshot(stream);
if (ret < 0) {
produced_pos, nb_packets_per_stream,
stream->max_sb_size);
- while (consumed_pos < produced_pos) {
+ while ((long) (consumed_pos - produced_pos) < 0) {
ssize_t read_len;
unsigned long len, padded_len;
- int lost_packet = 0;
health_code_update();
}
DBG("UST consumer get subbuf failed. Skipping it.");
consumed_pos += stream->max_sb_size;
-
- /*
- * Start accounting lost packets only when we
- * already have extracted packets (to match the
- * content of the final snapshot).
- */
- if (!before_first_packet) {
- lost_packet = 1;
- }
+ stream->chan->lost_packets++;
continue;
}
goto error_close_stream;
}
consumed_pos += stream->max_sb_size;
-
- /*
- * Only account lost packets located between
- * succesfully extracted packets (do not account before
- * and after since they are not visible in the
- * resulting snapshot).
- */
- stream->chan->lost_packets += lost_packet;
- lost_packet = 0;
- before_first_packet = false;
}
/* Simply close the stream so we can use it on the next snapshot. */
consumer_stream_close(stream);
error_unlock:
pthread_mutex_unlock(&stream->lock);
-error:
rcu_read_unlock();
return ret;
}
case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
{
/* Session daemon status message are handled in the following call. */
- ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
+ consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
msg.u.relayd_sock.relayd_session_id);
{
int ret;
struct ustctl_consumer_channel_attr attr;
+ const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value;
+ const struct lttng_credentials buffer_credentials = {
+ .uid = msg.u.ask_channel.buffer_credentials.uid,
+ .gid = msg.u.ask_channel.buffer_credentials.gid,
+ };
/* Create a plain object and reserve a channel key. */
channel = allocate_channel(msg.u.ask_channel.session_id,
- msg.u.ask_channel.pathname, msg.u.ask_channel.name,
- msg.u.ask_channel.uid, msg.u.ask_channel.gid,
- msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
+ msg.u.ask_channel.chunk_id.is_set ?
+ &chunk_id : NULL,
+ msg.u.ask_channel.pathname,
+ msg.u.ask_channel.name,
+ msg.u.ask_channel.relayd_id,
+ msg.u.ask_channel.key,
(enum lttng_event_output) msg.u.ask_channel.output,
msg.u.ask_channel.tracefile_size,
msg.u.ask_channel.tracefile_count,
goto end_channel_error;
}
+ LTTNG_OPTIONAL_SET(&channel->buffer_credentials,
+ buffer_credentials);
+
/*
* Assign UST application UID to the channel. This value is ignored for
* per PID buffers. This is specific to UST thus setting this after the
attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
attr.chan_id = msg.u.ask_channel.chan_id;
memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
+ attr.blocking_timeout= msg.u.ask_channel.blocking_timeout;
/* Match channel buffer type to the UST abi. */
switch (msg.u.ask_channel.output) {
health_code_update();
- ret = ask_channel(ctx, sock, channel, &attr);
+ ret = ask_channel(ctx, channel, &attr);
if (ret < 0) {
goto end_channel_error;
}
consumer_timer_switch_start(channel, attr.switch_timer_interval);
attr.switch_timer_interval = 0;
} else {
+ int monitor_start_ret;
+
consumer_timer_live_start(channel,
msg.u.ask_channel.live_timer_interval);
+ monitor_start_ret = consumer_timer_monitor_start(
+ channel,
+ msg.u.ask_channel.monitor_timer_interval);
+ if (monitor_start_ret < 0) {
+ ERR("Starting channel monitoring timer failed");
+ goto end_channel_error;
+ }
}
health_code_update();
if (channel->live_timer_enabled == 1) {
consumer_timer_live_stop(channel);
}
+ if (channel->monitor_timer_enabled == 1) {
+ consumer_timer_monitor_stop(channel);
+ }
goto end_channel_error;
}
if (!channel) {
ERR("UST consumer get channel key %" PRIu64 " not found", key);
ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
- goto end_msg_sessiond;
+ goto end_get_channel;
}
health_code_update();
- /* Send everything to sessiond. */
- ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
+ /* Send the channel to sessiond (and relayd, if applicable). */
+ ret = send_channel_to_sessiond_and_relayd(sock, channel, ctx,
+ &relayd_err);
if (ret < 0) {
if (relayd_err) {
/*
* and the consumer can continue its work. The above call
* has sent the error status message to the sessiond.
*/
- goto end_nosignal;
+ goto end_get_channel_nosignal;
}
/*
* The communicaton was broken hence there is a bad state between
* the consumer and sessiond so stop everything.
*/
- goto error_fatal;
+ goto error_get_channel_fatal;
}
health_code_update();
* so don't send them to the data thread.
*/
if (!channel->monitor) {
- goto end_msg_sessiond;
+ goto end_get_channel;
}
ret = send_streams_to_thread(channel, ctx);
* If we are unable to send the stream to the thread, there is
* a big problem so just stop everything.
*/
- goto error_fatal;
+ goto error_get_channel_fatal;
}
/* List MUST be empty after or else it could be reused. */
assert(cds_list_empty(&channel->streams.head));
+end_get_channel:
goto end_msg_sessiond;
+error_get_channel_fatal:
+ goto error_fatal;
+end_get_channel_nosignal:
+ goto end_nosignal;
}
case LTTNG_CONSUMER_DESTROY_CHANNEL:
{
goto end_msg_sessiond;
}
+ case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
+ {
+ int ret;
+
+ ret = clear_quiescent_channel(
+ msg.u.clear_quiescent_channel.key);
+ if (ret != 0) {
+ ret_code = ret;
+ }
+
+ goto end_msg_sessiond;
+ }
case LTTNG_CONSUMER_PUSH_METADATA:
{
int ret;
*/
DBG("UST consumer push metadata %" PRIu64 " not found", key);
ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
- goto end_msg_sessiond;
+ goto end_push_metadata_msg_sessiond;
}
health_code_update();
* checked whether the channel can be found.
*/
ret_code = LTTCOMM_CONSUMERD_SUCCESS;
- goto end_msg_sessiond;
+ goto end_push_metadata_msg_sessiond;
}
/* Tell session daemon we are ready to receive the metadata. */
ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
if (ret < 0) {
/* Somehow, the session daemon is not responding anymore. */
- goto error_fatal;
+ goto error_push_metadata_fatal;
}
health_code_update();
ret = lttng_consumer_poll_socket(consumer_sockpoll);
health_poll_exit();
if (ret) {
- goto error_fatal;
+ goto error_push_metadata_fatal;
}
health_code_update();
len, version, channel, 0, 1);
if (ret < 0) {
/* error receiving from sessiond */
- goto error_fatal;
+ goto error_push_metadata_fatal;
} else {
ret_code = ret;
- goto end_msg_sessiond;
+ goto end_push_metadata_msg_sessiond;
}
+end_push_metadata_msg_sessiond:
+ goto end_msg_sessiond;
+error_push_metadata_fatal:
+ goto error_fatal;
}
case LTTNG_CONSUMER_SETUP_METADATA:
{
}
case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
{
- if (msg.u.snapshot_channel.metadata) {
- ret = snapshot_metadata(msg.u.snapshot_channel.key,
- msg.u.snapshot_channel.pathname,
- msg.u.snapshot_channel.relayd_id,
- ctx);
- if (ret < 0) {
- ERR("Snapshot metadata failed");
- ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
- }
+ struct lttng_consumer_channel *channel;
+ uint64_t key = msg.u.snapshot_channel.key;
+
+ channel = consumer_find_channel(key);
+ if (!channel) {
+ DBG("UST snapshot channel not found for key %" PRIu64, key);
+ ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
} else {
- ret = snapshot_channel(msg.u.snapshot_channel.key,
- msg.u.snapshot_channel.pathname,
- msg.u.snapshot_channel.relayd_id,
- msg.u.snapshot_channel.nb_packets_per_stream,
- ctx);
- if (ret < 0) {
- ERR("Snapshot channel failed");
- ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
+ if (msg.u.snapshot_channel.metadata) {
+ ret = snapshot_metadata(channel, key,
+ msg.u.snapshot_channel.pathname,
+ msg.u.snapshot_channel.relayd_id,
+ ctx);
+ if (ret < 0) {
+ ERR("Snapshot metadata failed");
+ ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
+ }
+ } else {
+ ret = snapshot_channel(channel, key,
+ msg.u.snapshot_channel.pathname,
+ msg.u.snapshot_channel.relayd_id,
+ msg.u.snapshot_channel.nb_packets_per_stream,
+ ctx);
+ if (ret < 0) {
+ ERR("Snapshot channel failed");
+ ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
+ }
}
}
-
health_code_update();
ret = consumer_send_status_msg(sock, ret_code);
if (ret < 0) {
}
case LTTNG_CONSUMER_DISCARDED_EVENTS:
{
- uint64_t ret;
+ int ret = 0;
+ uint64_t discarded_events;
struct lttng_ht_iter iter;
struct lttng_ht *ht;
struct lttng_consumer_stream *stream;
* found (no events are dropped if the channel is not yet in
* use).
*/
- ret = 0;
+ discarded_events = 0;
cds_lfht_for_each_entry_duplicate(ht->ht,
ht->hash_fct(&id, lttng_ht_seed),
ht->match_fct, &id,
&iter.iter, stream, node_session_id.node) {
if (stream->chan->key == key) {
- ret = stream->chan->discarded_events;
+ discarded_events = stream->chan->discarded_events;
break;
}
}
health_code_update();
/* Send back returned value to session daemon */
- ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret));
+ ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
if (ret < 0) {
PERROR("send discarded events");
goto error_fatal;
break;
}
- default:
+ case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
+ {
+ int channel_monitor_pipe;
+
+ ret_code = LTTCOMM_CONSUMERD_SUCCESS;
+ /* Successfully received the command's type. */
+ ret = consumer_send_status_msg(sock, ret_code);
+ if (ret < 0) {
+ goto error_fatal;
+ }
+
+ ret = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe,
+ 1);
+ if (ret != sizeof(channel_monitor_pipe)) {
+ ERR("Failed to receive channel monitor pipe");
+ goto error_fatal;
+ }
+
+ DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
+ ret = consumer_timer_thread_set_channel_monitor_pipe(
+ channel_monitor_pipe);
+ if (!ret) {
+ int flags;
+
+ ret_code = LTTCOMM_CONSUMERD_SUCCESS;
+ /* Set the pipe as non-blocking. */
+ ret = fcntl(channel_monitor_pipe, F_GETFL, 0);
+ if (ret == -1) {
+ PERROR("fcntl get flags of the channel monitoring pipe");
+ goto error_fatal;
+ }
+ flags = ret;
+
+ ret = fcntl(channel_monitor_pipe, F_SETFL,
+ flags | O_NONBLOCK);
+ if (ret == -1) {
+ PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
+ goto error_fatal;
+ }
+ DBG("Channel monitor pipe set as non-blocking");
+ } else {
+ ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
+ }
+ goto end_msg_sessiond;
+ }
+ case LTTNG_CONSUMER_ROTATE_CHANNEL:
+ {
+ struct lttng_consumer_channel *channel;
+ uint64_t key = msg.u.rotate_channel.key;
+
+ channel = consumer_find_channel(key);
+ if (!channel) {
+ DBG("Channel %" PRIu64 " not found", key);
+ ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
+ } else {
+ /*
+ * Sample the rotate position of all the streams in
+ * this channel.
+ */
+ ret = lttng_consumer_rotate_channel(channel, key,
+ msg.u.rotate_channel.relayd_id,
+ msg.u.rotate_channel.metadata,
+ ctx);
+ if (ret < 0) {
+ ERR("Rotate channel failed");
+ ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
+ }
+
+ health_code_update();
+ }
+ ret = consumer_send_status_msg(sock, ret_code);
+ if (ret < 0) {
+ /* Somehow, the session daemon is not responding anymore. */
+ goto end_rotate_channel_nosignal;
+ }
+
+ /*
+ * Rotate the streams that are ready right now.
+ * FIXME: this is a second consecutive iteration over the
+ * streams in a channel, there is probably a better way to
+ * handle this, but it needs to be after the
+ * consumer_send_status_msg() call.
+ */
+ if (channel) {
+ ret = lttng_consumer_rotate_ready_streams(
+ channel, key, ctx);
+ if (ret < 0) {
+ ERR("Rotate channel failed");
+ }
+ }
break;
+end_rotate_channel_nosignal:
+ goto end_nosignal;
}
+ case LTTNG_CONSUMER_INIT:
+ {
+ ret_code = lttng_consumer_init_command(ctx,
+ msg.u.init.sessiond_uuid);
+ health_code_update();
+ ret = consumer_send_status_msg(sock, ret_code);
+ if (ret < 0) {
+ /* Somehow, the session daemon is not responding anymore. */
+ goto end_nosignal;
+ }
+ break;
+ }
+ case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
+ {
+ const struct lttng_credentials credentials = {
+ .uid = msg.u.create_trace_chunk.credentials.value.uid,
+ .gid = msg.u.create_trace_chunk.credentials.value.gid,
+ };
+ const bool is_local_trace =
+ !msg.u.create_trace_chunk.relayd_id.is_set;
+ const uint64_t relayd_id =
+ msg.u.create_trace_chunk.relayd_id.value;
+ const char *chunk_override_name =
+ *msg.u.create_trace_chunk.override_name ?
+ msg.u.create_trace_chunk.override_name :
+ NULL;
+ LTTNG_OPTIONAL(struct lttng_directory_handle) chunk_directory_handle =
+ LTTNG_OPTIONAL_INIT;
-end_nosignal:
- rcu_read_unlock();
+ /*
+ * The session daemon will only provide a chunk directory file
+ * descriptor for local traces.
+ */
+ if (is_local_trace) {
+ int chunk_dirfd;
- health_code_update();
+			/* Acknowledge the reception of the command. */
+ ret = consumer_send_status_msg(sock,
+ LTTCOMM_CONSUMERD_SUCCESS);
+ if (ret < 0) {
+ /* Somehow, the session daemon is not responding anymore. */
+ goto end_nosignal;
+ }
+
+ ret = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
+ if (ret != sizeof(chunk_dirfd)) {
+ ERR("Failed to receive trace chunk directory file descriptor");
+ goto error_fatal;
+ }
+
+ DBG("Received trace chunk directory fd (%d)",
+ chunk_dirfd);
+ ret = lttng_directory_handle_init_from_dirfd(
+ &chunk_directory_handle.value,
+ chunk_dirfd);
+ if (ret) {
+ ERR("Failed to initialize chunk directory handle from directory file descriptor");
+ if (close(chunk_dirfd)) {
+ PERROR("Failed to close chunk directory file descriptor");
+ }
+ goto error_fatal;
+ }
+ chunk_directory_handle.is_set = true;
+ }
+
+ ret_code = lttng_consumer_create_trace_chunk(
+ !is_local_trace ? &relayd_id : NULL,
+ msg.u.create_trace_chunk.session_id,
+ msg.u.create_trace_chunk.chunk_id,
+ (time_t) msg.u.create_trace_chunk
+ .creation_timestamp,
+ chunk_override_name,
+ msg.u.create_trace_chunk.credentials.is_set ?
+ &credentials :
+ NULL,
+ chunk_directory_handle.is_set ?
+ &chunk_directory_handle.value :
+ NULL);
+
+ if (chunk_directory_handle.is_set) {
+ lttng_directory_handle_fini(
+ &chunk_directory_handle.value);
+ }
+ goto end_msg_sessiond;
+ }
+ case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
+ {
+ enum lttng_trace_chunk_command_type close_command =
+ msg.u.close_trace_chunk.close_command.value;
+ const uint64_t relayd_id =
+ msg.u.close_trace_chunk.relayd_id.value;
+ struct lttcomm_consumer_close_trace_chunk_reply reply;
+ char closed_trace_chunk_path[LTTNG_PATH_MAX];
+ int ret;
+
+ ret_code = lttng_consumer_close_trace_chunk(
+ msg.u.close_trace_chunk.relayd_id.is_set ?
+ &relayd_id :
+ NULL,
+ msg.u.close_trace_chunk.session_id,
+ msg.u.close_trace_chunk.chunk_id,
+ (time_t) msg.u.close_trace_chunk.close_timestamp,
+ msg.u.close_trace_chunk.close_command.is_set ?
+ &close_command :
+ NULL, closed_trace_chunk_path);
+ reply.ret_code = ret_code;
+ reply.path_length = strlen(closed_trace_chunk_path) + 1;
+ ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
+ if (ret != sizeof(reply)) {
+ goto error_fatal;
+ }
+ ret = lttcomm_send_unix_sock(sock, closed_trace_chunk_path,
+ reply.path_length);
+ if (ret != reply.path_length) {
+ goto error_fatal;
+ }
+ goto end_nosignal;
+ }
+ case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
+ {
+ const uint64_t relayd_id =
+ msg.u.trace_chunk_exists.relayd_id.value;
+
+ ret_code = lttng_consumer_trace_chunk_exists(
+ msg.u.trace_chunk_exists.relayd_id.is_set ?
+ &relayd_id : NULL,
+ msg.u.trace_chunk_exists.session_id,
+ msg.u.trace_chunk_exists.chunk_id);
+ goto end_msg_sessiond;
+ }
+ default:
+ break;
+ }
+end_nosignal:
/*
* Return 1 to indicate success since the 0 value can be a socket
* shutdown during the recv() or send() call.
*/
- return 1;
+ ret = 1;
+ goto end;
end_msg_sessiond:
/*
if (ret < 0) {
goto error_fatal;
}
- rcu_read_unlock();
-
- health_code_update();
+ ret = 1;
+ goto end;
- return 1;
end_channel_error:
if (channel) {
/*
/* Stop everything if session daemon can not be notified. */
goto error_fatal;
}
- rcu_read_unlock();
-
- health_code_update();
+ ret = 1;
+ goto end;
- return 1;
error_fatal:
- rcu_read_unlock();
/* This will issue a consumer stop. */
- return -1;
+ ret = -1;
+ goto end;
+
+end:
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
}
/*
return ustctl_get_mmap_base(stream->ustream);
}
+/*
+ * Flush a stream's ring-buffer through ustctl.
+ *
+ * 'producer_active' is forwarded verbatim to ustctl_flush_buffer();
+ * see the ustctl API for its exact semantics.
+ */
+void lttng_ustctl_flush_buffer(struct lttng_consumer_stream *stream,
+		int producer_active)
+{
+	assert(stream);
+	assert(stream->ustream);
+
+	ustctl_flush_buffer(stream->ustream, producer_active);
+}
+
/*
- * Take a snapshot for a specific fd
+ * Take a snapshot for a specific stream.
*
* Returns 0 on success, < 0 on error
*/
return ustctl_snapshot(stream->ustream);
}
+/*
+ * Sample consumed and produced positions for a specific stream.
+ *
+ * Thin wrapper over ustctl_snapshot_sample_positions(); does not
+ * dereference any position itself.
+ *
+ * Returns 0 on success, < 0 on error.
+ */
+int lttng_ustconsumer_sample_snapshot_positions(
+		struct lttng_consumer_stream *stream)
+{
+	assert(stream);
+	assert(stream->ustream);
+
+	return ustctl_snapshot_sample_positions(stream->ustream);
+}
+
/*
* Get the produced position
*
}
/*
- * Called when the stream signal the consumer that it has hang up.
+ * Called when the stream signals the consumer that it has hung up.
+ *
+ * Flushes any pending data once and marks the stream quiescent so that
+ * concurrent/subsequent flush attempts are skipped; finally records that
+ * the hang-up flush has been performed.
 */
void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
{
	assert(stream);
	assert(stream->ustream);
-	ustctl_flush_buffer(stream->ustream, 0);
+	/* stream->lock protects the quiescent flag. */
+	pthread_mutex_lock(&stream->lock);
+	if (!stream->quiescent) {
+		/*
+		 * Flush only once per quiescent period; NOTE(review): the
+		 * flag is presumably cleared where new data is produced —
+		 * confirm in the stream write path.
+		 */
+		ustctl_flush_buffer(stream->ustream, 0);
+		stream->quiescent = true;
+	}
+	pthread_mutex_unlock(&stream->lock);
	stream->hangup_flush_done = 1;
}
assert(chan);
assert(chan->uchan);
+ assert(chan->buffer_credentials.is_set);
if (chan->switch_timer_enabled == 1) {
consumer_timer_switch_stop(chan);
if (ret) {
ERR("Cannot get stream shm path");
}
- ret = run_as_unlink(shm_path, chan->uid, chan->gid);
+ ret = run_as_unlink(shm_path,
+ chan->buffer_credentials.value.uid,
+ chan->buffer_credentials.value.gid);
if (ret) {
PERROR("unlink %s", shm_path);
}
{
assert(chan);
assert(chan->uchan);
+ assert(chan->buffer_credentials.is_set);
consumer_metadata_cache_destroy(chan);
ustctl_destroy_channel(chan->uchan);
/* Try to rmdir all directories under shm_path root. */
if (chan->root_shm_path[0]) {
- (void) run_as_recursive_rmdir(chan->root_shm_path,
- chan->uid, chan->gid);
+ (void) run_as_rmdir_recursive(chan->root_shm_path,
+ chan->buffer_credentials.value.uid,
+ chan->buffer_credentials.value.gid,
+ LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
}
free(chan->stream_fds);
}
struct ustctl_consumer_stream *ustream)
{
int ret;
+ uint64_t packet_size, content_size, timestamp_begin, timestamp_end,
+ events_discarded, stream_id, stream_instance_id,
+ packet_seq_num;
- ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
+	ret = ustctl_get_timestamp_begin(ustream, &timestamp_begin);
if (ret < 0) {
PERROR("ustctl_get_timestamp_begin");
goto error;
}
- index->timestamp_begin = htobe64(index->timestamp_begin);
- ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
+	ret = ustctl_get_timestamp_end(ustream, &timestamp_end);
if (ret < 0) {
PERROR("ustctl_get_timestamp_end");
goto error;
}
- index->timestamp_end = htobe64(index->timestamp_end);
- ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
+ ret = ustctl_get_events_discarded(ustream, &events_discarded);
if (ret < 0) {
PERROR("ustctl_get_events_discarded");
goto error;
}
- index->events_discarded = htobe64(index->events_discarded);
- ret = ustctl_get_content_size(ustream, &index->content_size);
+ ret = ustctl_get_content_size(ustream, &content_size);
if (ret < 0) {
PERROR("ustctl_get_content_size");
goto error;
}
- index->content_size = htobe64(index->content_size);
- ret = ustctl_get_packet_size(ustream, &index->packet_size);
+ ret = ustctl_get_packet_size(ustream, &packet_size);
if (ret < 0) {
PERROR("ustctl_get_packet_size");
goto error;
}
- index->packet_size = htobe64(index->packet_size);
- ret = ustctl_get_stream_id(ustream, &index->stream_id);
+ ret = ustctl_get_stream_id(ustream, &stream_id);
if (ret < 0) {
PERROR("ustctl_get_stream_id");
goto error;
}
- index->stream_id = htobe64(index->stream_id);
- ret = ustctl_get_instance_id(ustream, &index->stream_instance_id);
+ ret = ustctl_get_instance_id(ustream, &stream_instance_id);
if (ret < 0) {
PERROR("ustctl_get_instance_id");
goto error;
}
- index->stream_instance_id = htobe64(index->stream_instance_id);
- ret = ustctl_get_sequence_number(ustream, &index->packet_seq_num);
+ ret = ustctl_get_sequence_number(ustream, &packet_seq_num);
if (ret < 0) {
PERROR("ustctl_get_sequence_number");
goto error;
}
- index->packet_seq_num = htobe64(index->packet_seq_num);
+
+ *index = (typeof(*index)) {
+ .offset = index->offset,
+ .packet_size = htobe64(packet_size),
+ .content_size = htobe64(content_size),
+ .timestamp_begin = htobe64(timestamp_begin),
+ .timestamp_end = htobe64(timestamp_end),
+ .events_discarded = htobe64(events_discarded),
+ .stream_id = htobe64(stream_id),
+ .stream_instance_id = htobe64(stream_instance_id),
+ .packet_seq_num = htobe64(packet_seq_num),
+ };
error:
return ret;
stream->ust_metadata_pushed);
ret = write_len;
+ /*
+ * Switch packet (but don't open the next one) on every commit of
+ * a metadata packet. Since the subbuffer is fully filled (with padding,
+ * if needed), the stream is "quiescent" after this commit.
+ */
+ ustctl_flush_buffer(stream->ustream, 1);
+ stream->quiescent = true;
end:
pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
return ret;
* because we locked the metadata thread.
*/
ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
+ pthread_mutex_lock(&metadata->lock);
if (ret < 0) {
goto end;
}
- pthread_mutex_lock(&metadata->lock);
ret = commit_one_metadata_packet(metadata);
if (ret <= 0) {
retry = 1;
}
- ustctl_flush_buffer(metadata->ustream, 1);
ret = ustctl_snapshot(metadata->ustream);
if (ret < 0) {
if (errno != EAGAIN) {
}
if (discarded < stream->last_discarded_events) {
/*
- * Overflow has occured. We assume only one wrap-around
- * has occured.
+ * Overflow has occurred. We assume only one wrap-around
+ * has occurred.
*/
stream->chan->discarded_events +=
(1ULL << (CAA_BITS_PER_LONG - 1)) -
/*
* Read subbuffer from the given stream.
*
- * Stream lock MUST be acquired.
+ * Stream and channel locks MUST be acquired by the caller.
*
* Return 0 on success else a negative value.
*/
struct lttng_consumer_local_data *ctx)
{
unsigned long len, subbuf_size, padding;
- int err, write_index = 1;
+ int err, write_index = 1, rotation_ret;
long ret = 0;
struct ustctl_consumer_stream *ustream;
struct ctf_packet_index index;
readlen = lttng_read(stream->wait_fd, &dummy, 1);
if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
ret = readlen;
- goto end;
+ goto error;
+ }
+ }
+
+ /*
+ * If the stream was flagged to be ready for rotation before we extract the
+ * next packet, rotate it now.
+ */
+ if (stream->rotate_ready) {
+ DBG("Rotate stream before extracting data");
+ rotation_ret = lttng_consumer_rotate_stream(ctx, stream);
+ if (rotation_ret < 0) {
+ ERR("Stream rotation error");
+ ret = -1;
+ goto error;
}
}
if (stream->metadata_flag) {
ret = commit_one_metadata_packet(stream);
if (ret <= 0) {
- goto end;
+ goto error;
}
- ustctl_flush_buffer(stream->ustream, 1);
goto retry;
}
*/
DBG("Reserving sub buffer failed (everything is normal, "
"it is due to concurrency) [ret: %d]", err);
- goto end;
+ goto error;
}
assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
index.offset = htobe64(stream->out_fd_offset);
ret = get_index_values(&index, ustream);
if (ret < 0) {
- goto end;
+ err = ustctl_put_subbuf(ustream);
+ assert(err == 0);
+ goto error;
}
/* Update the stream's sequence and discarded events count. */
ret = update_stream_stats(stream);
if (ret < 0) {
PERROR("kernctl_get_events_discarded");
- goto end;
+ err = ustctl_put_subbuf(ustream);
+ assert(err == 0);
+ goto error;
}
} else {
write_index = 0;
assert(len >= subbuf_size);
padding = len - subbuf_size;
+
/* write the subbuffer to the tracefile */
ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding, &index);
/*
if (!stream->metadata_flag) {
ret = notify_if_more_data(stream, ctx);
if (ret < 0) {
- goto end;
+ goto error;
}
}
/* Write index if needed. */
if (!write_index) {
- goto end;
+ goto rotate;
}
if (stream->chan->live_timer_interval && !stream->metadata_flag) {
}
if (err < 0) {
- goto end;
+ goto error;
}
}
assert(!stream->metadata_flag);
err = consumer_stream_write_index(stream, &index);
if (err < 0) {
- goto end;
+ goto error;
}
-end:
+rotate:
+ /*
+ * After extracting the packet, we check if the stream is now ready to be
+ * rotated and perform the action immediately.
+ */
+ rotation_ret = lttng_consumer_stream_is_rotate_ready(stream);
+ if (rotation_ret == 1) {
+ rotation_ret = lttng_consumer_rotate_stream(ctx, stream);
+ if (rotation_ret < 0) {
+ ERR("Stream rotation error");
+ ret = -1;
+ goto error;
+ }
+ } else if (rotation_ret < 0) {
+ ERR("Checking if stream is ready to rotate");
+ ret = -1;
+ goto error;
+ }
+error:
return ret;
}
assert(stream);
- /* Don't create anything if this is set for streaming. */
- if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
- ret = utils_create_stream_file(stream->chan->pathname, stream->name,
- stream->chan->tracefile_size, stream->tracefile_count_current,
- stream->uid, stream->gid, NULL);
- if (ret < 0) {
+ /*
+ * Don't create anything if this is set for streaming or if there is
+ * no current trace chunk on the parent channel.
+ */
+ if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
+ stream->chan->trace_chunk) {
+ ret = consumer_stream_create_output_files(stream, true);
+ if (ret) {
goto error;
}
- stream->out_fd = ret;
- stream->tracefile_size_current = 0;
-
- if (!stream->metadata_flag) {
- ret = index_create_file(stream->chan->pathname,
- stream->name, stream->uid, stream->gid,
- stream->chan->tracefile_size,
- stream->tracefile_count_current);
- if (ret < 0) {
- goto error;
- }
- stream->index_fd = ret;
- }
}
ret = 0;
* Stop a given metadata channel timer if enabled and close the wait fd which
* is the poll pipe of the metadata stream.
*
- * This MUST be called with the metadata channel acquired.
+ * This MUST be called with the metadata channel lock acquired.
*/
void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
{
request.key = channel->key;
DBG("Sending metadata request to sessiond, session id %" PRIu64
- ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
+ ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
request.session_id, request.session_id_per_pid, request.uid,
request.key);