#include <common/relayd/relayd.h>
#include <common/compat/fcntl.h>
#include <common/consumer-metadata-cache.h>
+#include <common/consumer-stream.h>
#include <common/consumer-timer.h>
#include <common/utils.h>
*/
static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
const char *pathname, const char *name, uid_t uid, gid_t gid,
- int relayd_id, uint64_t key, enum lttng_event_output output,
- uint64_t tracefile_size, uint64_t tracefile_count)
+ uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
+ uint64_t tracefile_size, uint64_t tracefile_count,
+ uint64_t session_id_per_pid, unsigned int monitor)
{
assert(pathname);
assert(name);
- return consumer_allocate_channel(key, session_id, pathname, name, uid, gid,
- relayd_id, output, tracefile_size, tracefile_count);
+ return consumer_allocate_channel(key, session_id, pathname, name, uid,
+ gid, relayd_id, output, tracefile_size,
+ tracefile_count, session_id_per_pid, monitor);
}
/*
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
struct lttng_consumer_local_data *ctx)
{
- int ret, stream_pipe;
+ int ret;
+ struct lttng_pipe *stream_pipe;
/* Get the right pipe where the stream will be sent. */
if (stream->metadata_flag) {
- stream_pipe = ctx->consumer_metadata_pipe[1];
+ stream_pipe = ctx->consumer_metadata_pipe;
} else {
- stream_pipe = ctx->consumer_data_pipe[1];
+ stream_pipe = ctx->consumer_data_pipe;
}
- do {
- ret = write(stream_pipe, &stream, sizeof(stream));
- } while (ret < 0 && errno == EINTR);
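+ /* Write the stream pointer to the pipe; the receiving thread takes over its monitoring. */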
+ ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
if (ret < 0) {
- PERROR("Consumer write %s stream to pipe %d",
- stream->metadata_flag ? "metadata" : "data", stream_pipe);
+ ERR("Consumer write %s stream to pipe %d",
+ stream->metadata_flag ? "metadata" : "data",
+ lttng_pipe_get_writefd(stream_pipe));
}
return ret;
*/
stream->wait_fd = wait_fd;
+ /*
+ * Increment channel refcount since the channel reference has now been
+ * assigned in the allocation process above.
+ */
+ if (stream->chan->monitor) {
+ uatomic_inc(&stream->chan->refcount);
+ }
+
/*
* Order is important this is why a list is used. On error, the caller
* should clean this list.
struct lttng_consumer_channel *channel,
struct lttng_consumer_local_data *ctx, int *relayd_error)
{
- int ret;
+ int ret, ret_code = LTTNG_OK;
struct lttng_consumer_stream *stream;
assert(channel);
DBG("UST consumer sending channel %s to sessiond", channel->name);
- /* Send channel to sessiond. */
- ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
- if (ret < 0) {
- goto error;
- }
-
- ret = ustctl_channel_close_wakeup_fd(channel->uchan);
- if (ret < 0) {
- goto error;
- }
-
- /* The channel was sent successfully to the sessiond at this point. */
cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
/* Try to send the stream to the relayd if one is available. */
ret = send_stream_to_relayd(stream);
if (relayd_error) {
*relayd_error = 1;
}
- goto error;
+ ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
}
+ }
+
+ /* Inform sessiond that we are about to send channel and streams. */
+ ret = consumer_send_status_msg(sock, ret_code);
+ if (ret < 0 || ret_code != LTTNG_OK) {
+ /*
+ * Either the session daemon is not responding or the relayd died, so we
+ * stop now.
+ */
+ goto error;
+ }
+
+ /* Send channel to sessiond. */
+ ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
+ if (ret < 0) {
+ goto error;
+ }
+
+ ret = ustctl_channel_close_wakeup_fd(channel->uchan);
+ if (ret < 0) {
+ goto error;
+ }
+ /* The channel was sent successfully to the sessiond at this point. */
+ cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
/* Send stream to session daemon. */
ret = send_sessiond_stream(sock, stream);
if (ret < 0) {
return 0;
error:
+ if (ret_code != LTTNG_OK) {
+ ret = -1;
+ }
return ret;
}
/* The reply msg status is handled in the following call. */
ret = create_ust_channel(attr, &channel->uchan);
if (ret < 0) {
- goto error;
+ goto end;
}
channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
- if (ret < 0) {
- goto error;
+ /*
+ * For the snapshots (no monitor), we create the metadata streams
+ * on demand, not during the channel creation.
+ */
+ if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
+ ret = 0;
+ goto end;
}
/* Open all streams for this channel. */
ret = create_ust_streams(channel, ctx);
if (ret < 0) {
- goto error;
+ goto end;
}
-error:
+end:
return ret;
}
DBG("UST consumer writing metadata to channel %s", metadata->name);
+ if (!metadata->metadata_stream) {
+ ret = 0;
+ goto error;
+ }
+
assert(target_offset <= metadata->metadata_cache->max_offset);
ret = ustctl_write_metadata_to_channel(metadata->uchan,
metadata_str + target_offset, len);
DBG("UST consumer flush channel key %" PRIu64, chan_key);
+ rcu_read_lock();
channel = consumer_find_channel(chan_key);
if (!channel) {
ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
ht = consumer_data.stream_per_chan_id_ht;
/* For each stream of the channel id, flush it. */
- rcu_read_lock();
cds_lfht_for_each_entry_duplicate(ht->ht,
ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
&channel->key, &iter.iter, stream, node_channel_id.node) {
ustctl_flush_buffer(stream->ustream, 1);
}
- rcu_read_unlock();
-
error:
+ rcu_read_unlock();
return ret;
}
/*
* Close metadata stream wakeup_fd using the given key to retrieve the channel.
+ * RCU read side lock MUST be acquired before calling this function.
*
* Return 0 on success else an LTTng error code.
*/
static int close_metadata(uint64_t chan_key)
{
- int ret;
+ int ret = 0;
struct lttng_consumer_channel *channel;
DBG("UST consumer close metadata key %" PRIu64, chan_key);
channel = consumer_find_channel(chan_key);
if (!channel) {
- ERR("UST consumer close metadata %" PRIu64 " not found", chan_key);
+ /*
+ * This is possible if the metadata thread has issued a delete because
+ * the endpoint of the stream hung up. There is no way the session
+ * daemon can know about it, thus use a DBG instead of an actual error.
+ */
+ DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
goto error;
}
- ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
- if (ret < 0) {
- ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
- ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
- goto error;
+ pthread_mutex_lock(&consumer_data.lock);
+
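+ /* The channel could have been deleted concurrently; if its node is gone, there is nothing to close. */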
+ if (cds_lfht_is_node_deleted(&channel->node.node)) {
+ goto error_unlock;
}
+
if (channel->switch_timer_enabled == 1) {
DBG("Deleting timer on metadata channel");
consumer_timer_switch_stop(channel);
}
- consumer_metadata_cache_destroy(channel);
+ if (channel->metadata_stream) {
+ ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
+ if (ret < 0) {
+ ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
+ ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+ goto error_unlock;
+ }
+ }
+
+error_unlock:
+ pthread_mutex_unlock(&consumer_data.lock);
error:
return ret;
}
if (!metadata) {
ERR("UST consumer push metadata %" PRIu64 " not found", key);
ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
- goto error;
+ goto end;
+ }
+
+ /*
+ * In no monitor mode, the metadata channel has no stream(s) so skip the
+ * ownership transfer to the metadata thread.
+ */
+ if (!metadata->monitor) {
+ DBG("Metadata channel in no monitor");
+ ret = 0;
+ goto end;
}
/*
assert(cds_list_empty(&metadata->streams.head));
ret = 0;
+ goto end;
error:
+ /*
+ * Delete metadata channel on error. At this point, the metadata stream
+ * can NOT be monitored by the metadata thread, so we have the guarantee
+ * that the stream is still in the channel's local stream list. This
+ * call will make sure to clean that list.
+ */
+ consumer_del_channel(metadata);
+end:
+ return ret;
+}
+
+/*
+ * Snapshot the whole metadata.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
+ struct lttng_consumer_local_data *ctx)
+{
+ int ret = 0;
+ ssize_t write_len;
+ uint64_t total_len = 0;
+ struct lttng_consumer_channel *metadata_channel;
+ struct lttng_consumer_stream *metadata_stream;
+
+ assert(path);
+ assert(ctx);
+
+ DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
+ key, path);
+
+ rcu_read_lock();
+
+ metadata_channel = consumer_find_channel(key);
+ if (!metadata_channel) {
+ ERR("UST snapshot metadata channel not found for key %lu", key);
+ ret = -1;
+ goto error;
+ }
+ assert(!metadata_channel->monitor);
+
+ /*
+ * Ask the sessiond if we have new metadata waiting and update the
+ * consumer metadata cache.
+ */
+ ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /*
+ * In no monitor mode, the metadata stream is NOT created when the
+ * channel is created on a sessiond ask channel command, so create it
+ * now.
+ */
+ ret = create_ust_streams(metadata_channel, ctx);
+ if (ret < 0) {
+ goto error;
+ }
+
+ metadata_stream = metadata_channel->metadata_stream;
+ assert(metadata_stream);
+
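+ /* A relayd id of (uint64_t) -1ULL means the stream is written to a local file. */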
+ if (relayd_id != (uint64_t) -1ULL) {
+ metadata_stream->net_seq_idx = relayd_id;
+ ret = consumer_send_relayd_stream(metadata_stream, path);
+ if (ret < 0) {
+ goto error_stream;
+ }
+ } else {
+ ret = utils_create_stream_file(path, metadata_stream->name,
+ metadata_stream->chan->tracefile_size,
+ metadata_stream->tracefile_count_current,
+ metadata_stream->uid, metadata_stream->gid);
+ if (ret < 0) {
+ goto error_stream;
+ }
+ metadata_stream->out_fd = ret;
+ metadata_stream->tracefile_size_current = 0;
+ }
+
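+ /* Hold the metadata cache lock while draining the cache into the channel. */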
+ pthread_mutex_lock(&metadata_channel->metadata_cache->lock);
+ while (total_len < metadata_channel->metadata_cache->total_bytes_written) {
+ /*
+ * Write at most one packet of metadata into the channel
+ * to avoid blocking here.
+ */
+ write_len = ustctl_write_one_packet_to_channel(metadata_channel->uchan,
+ metadata_channel->metadata_cache->data,
+ metadata_channel->metadata_cache->total_bytes_written);
+ if (write_len < 0) {
+ ERR("UST consumer snapshot writing metadata packet");
+ ret = -1;
+ goto error_unlock;
+ }
+ total_len += write_len;
+
+ DBG("Written %" PRIu64 " bytes to metadata (left: %" PRIu64 ")",
+ write_len,
+ metadata_channel->metadata_cache->total_bytes_written - write_len);
+ ustctl_flush_buffer(metadata_stream->ustream, 1);
+ ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
+ if (ret < 0) {
+ goto error_unlock;
+ }
+ }
+
+error_unlock:
+ pthread_mutex_unlock(&metadata_channel->metadata_cache->lock);
+
+error_stream:
+ /*
+ * Clean up the stream completely because the next snapshot will use a
+ * new metadata stream.
+ */
+ cds_list_del(&metadata_stream->send_node);
+ consumer_stream_destroy(metadata_stream, NULL);
+ metadata_channel->metadata_stream = NULL;
+
+error:
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Take a snapshot of all the streams of a channel.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
+ struct lttng_consumer_local_data *ctx)
+{
+ int ret;
+ unsigned use_relayd = 0;
+ unsigned long consumed_pos, produced_pos;
+ struct lttng_consumer_channel *channel;
+ struct lttng_consumer_stream *stream;
+
+ assert(path);
+ assert(ctx);
+
+ rcu_read_lock();
+
+ if (relayd_id != (uint64_t) -1ULL) {
+ use_relayd = 1;
+ }
+
+ channel = consumer_find_channel(key);
+ if (!channel) {
+ ERR("UST snapshot channel not found for key %lu", key);
+ ret = -1;
+ goto error;
+ }
+ assert(!channel->monitor);
+ DBG("UST consumer snapshot channel %lu", key);
+
+ cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+ /* Lock stream because we are about to change its state. */
+ pthread_mutex_lock(&stream->lock);
+ stream->net_seq_idx = relayd_id;
+
+ if (use_relayd) {
+ ret = consumer_send_relayd_stream(stream, path);
+ if (ret < 0) {
+ goto error_unlock;
+ }
+ } else {
+ ret = utils_create_stream_file(path, stream->name,
+ stream->chan->tracefile_size,
+ stream->tracefile_count_current,
+ stream->uid, stream->gid);
+ if (ret < 0) {
+ goto error_unlock;
+ }
+ stream->out_fd = ret;
+ stream->tracefile_size_current = 0;
+
+ DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
+ stream->name, stream->key);
+ }
+
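+ /* Flush buffered data so the produced position below accounts for it. */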
+ ustctl_flush_buffer(stream->ustream, 1);
+
+ ret = lttng_ustconsumer_take_snapshot(stream);
+ if (ret < 0) {
+ ERR("Taking UST snapshot");
+ goto error_unlock;
+ }
+
+ ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
+ if (ret < 0) {
+ ERR("Produced UST snapshot position");
+ goto error_unlock;
+ }
+
+ ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
+ if (ret < 0) {
+ ERR("Consumerd UST snapshot position");
+ goto error_unlock;
+ }
+
+ while (consumed_pos < produced_pos) {
+ ssize_t read_len;
+ unsigned long len, padded_len;
+
+ DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
+
+ ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
+ if (ret < 0) {
+ if (ret != -EAGAIN) {
+ PERROR("ustctl_get_subbuf snapshot");
+ goto error_close_stream;
+ }
+ DBG("UST consumer get subbuf failed. Skipping it.");
+ consumed_pos += stream->max_sb_size;
+ continue;
+ }
+
+ ret = ustctl_get_subbuf_size(stream->ustream, &len);
+ if (ret < 0) {
+ ERR("Snapshot ustctl_get_subbuf_size");
+ goto error_put_subbuf;
+ }
+
+ ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
+ if (ret < 0) {
+ ERR("Snapshot ustctl_get_padded_subbuf_size");
+ goto error_put_subbuf;
+ }
+
+ read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
+ padded_len - len);
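+ /*
+ * A relayd receives only the sub-buffer payload while a local file
+ * keeps the padding, hence the different expected lengths.
+ */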
+ if (use_relayd) {
+ if (read_len != len) {
+ ret = -1;
+ goto error_put_subbuf;
+ }
+ } else {
+ if (read_len != padded_len) {
+ ret = -1;
+ goto error_put_subbuf;
+ }
+ }
+
+ ret = ustctl_put_subbuf(stream->ustream);
+ if (ret < 0) {
+ ERR("Snapshot ustctl_put_subbuf");
+ goto error_close_stream;
+ }
+ consumed_pos += stream->max_sb_size;
+ }
+
+ /* Simply close the stream so we can use it on the next snapshot. */
+ consumer_stream_close(stream);
+ pthread_mutex_unlock(&stream->lock);
+ }
+
+ rcu_read_unlock();
+ return 0;
+
+error_put_subbuf:
+ if (ustctl_put_subbuf(stream->ustream) < 0) {
+ ERR("Snapshot ustctl_put_subbuf");
+ }
+error_close_stream:
+ consumer_stream_close(stream);
+error_unlock:
+ pthread_mutex_unlock(&stream->lock);
+error:
+ rcu_read_unlock();
return ret;
}
goto end_free;
}
+ /*
+ * XXX: The consumer data lock is acquired before calling the metadata
+ * cache write, which calls push metadata; that path MUST be protected
+ * by the consumer lock in order to check the validity of the channel's
+ * metadata stream.
+ *
+ * Note that this is subject to change in favour of finer grained
+ * locking, ultimately getting rid of this global consumer data lock.
+ */
+ pthread_mutex_lock(&consumer_data.lock);
+
pthread_mutex_lock(&channel->metadata_cache->lock);
ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
if (ret < 0) {
/* Unable to handle metadata. Notify session daemon. */
ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
+ /*
+ * Skip metadata flush on write error since the offset and len might
+ * not have been updated which could create an infinite loop below when
+ * waiting for the metadata cache to be flushed.
+ */
+ pthread_mutex_unlock(&channel->metadata_cache->lock);
+ pthread_mutex_unlock(&consumer_data.lock);
+ goto end_free;
}
pthread_mutex_unlock(&channel->metadata_cache->lock);
+ pthread_mutex_unlock(&consumer_data.lock);
while (consumer_metadata_cache_flushed(channel, offset + len)) {
DBG("Waiting for metadata to be flushed");
* The ret value might be 0, meaning an orderly shutdown, but this is ok
* since the caller handles this.
*/
+ if (ret > 0) {
+ ret = -1;
+ }
return ret;
}
if (msg.cmd_type == LTTNG_CONSUMER_STOP) {
sizeof(is_data_pending));
if (ret < 0) {
DBG("Error when sending the data pending ret code: %d", ret);
+ goto error_fatal;
}
/*
msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
(enum lttng_event_output) msg.u.ask_channel.output,
msg.u.ask_channel.tracefile_size,
- msg.u.ask_channel.tracefile_count);
+ msg.u.ask_channel.tracefile_count,
+ msg.u.ask_channel.session_id_per_pid,
+ msg.u.ask_channel.monitor);
if (!channel) {
goto end_channel_error;
}
attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
attr.chan_id = msg.u.ask_channel.chan_id;
+ attr.output = msg.u.ask_channel.output;
memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
- /* Translate the event output type to UST. */
- switch (channel->output) {
- case LTTNG_EVENT_SPLICE:
- /* Splice not supported so fallback on mmap(). */
- case LTTNG_EVENT_MMAP:
- default:
- attr.output = CONSUMER_CHANNEL_MMAP;
- break;
- };
-
/* Translate and save channel type. */
switch (msg.u.ask_channel.type) {
case LTTNG_UST_CHAN_PER_CPU:
channel->type = CONSUMER_CHANNEL_TYPE_DATA;
attr.type = LTTNG_UST_CHAN_PER_CPU;
+ /*
+ * Set refcount to 1 for owner. Below, we will
+ * pass ownership to the
+ * consumer_thread_channel_poll() thread.
+ */
+ channel->refcount = 1;
break;
case LTTNG_UST_CHAN_METADATA:
channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
goto end_channel_error;
}
+ if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
+ ret = consumer_metadata_cache_allocate(channel);
+ if (ret < 0) {
+ ERR("Allocating metadata cache");
+ goto end_channel_error;
+ }
+ consumer_timer_switch_start(channel, attr.switch_timer_interval);
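+ /* The consumer-side timer now drives the switches; disable the tracer-side timer. */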
+ attr.switch_timer_interval = 0;
+ }
+
/*
* Add the channel to the internal state AFTER all streams were created
* and successfully sent to session daemon. This way, all streams must
* be ready before this channel is visible to the threads.
+ * If add_channel succeeds, ownership of the channel is
+ * passed to consumer_thread_channel_poll().
*/
ret = add_channel(channel, ctx);
if (ret < 0) {
+ if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
+ if (channel->switch_timer_enabled == 1) {
+ consumer_timer_switch_stop(channel);
+ }
+ consumer_metadata_cache_destroy(channel);
+ }
goto end_channel_error;
}
-
/*
* Channel and streams are now created. Inform the session daemon that
* everything went well and should wait to receive the channel and
ret = consumer_send_status_channel(sock, channel);
if (ret < 0) {
/*
- * There is probably a problem on the socket so the poll will get
- * it and clean everything up.
+ * There is probably a problem on the socket.
*/
- goto end_nosignal;
- }
-
- if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
- ret = consumer_metadata_cache_allocate(channel);
- if (ret < 0) {
- ERR("Allocating metadata cache");
- goto end_channel_error;
- }
- consumer_timer_switch_start(channel, attr.switch_timer_interval);
- attr.switch_timer_interval = 0;
+ goto error_fatal;
}
break;
goto end_msg_sessiond;
}
- /* Inform sessiond that we are about to send channel and streams. */
- ret = consumer_send_status_msg(sock, LTTNG_OK);
- if (ret < 0) {
- /* Somehow, the session daemon is not responding anymore. */
- goto end_nosignal;
- }
-
/* Send everything to sessiond. */
ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
if (ret < 0) {
/*
* We were unable to send to the relayd the stream so avoid
* sending back a fatal error to the thread since this is OK
- * and the consumer can continue its work.
+ * and the consumer can continue its work. The above call
+ * has sent the error status message to the sessiond.
*/
- ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
- goto end_msg_sessiond;
+ goto end_nosignal;
}
/*
* The communication was broken hence there is a bad state between
goto error_fatal;
}
+ /*
+ * In no monitor mode, the streams ownership is kept inside the channel
+ * so don't send them to the data thread.
+ */
+ if (!channel->monitor) {
+ goto end_msg_sessiond;
+ }
+
ret = send_streams_to_thread(channel, ctx);
if (ret < 0) {
/*
}
/* List MUST be empty after or else it could be reused. */
assert(cds_list_empty(&channel->streams.head));
-
goto end_msg_sessiond;
}
case LTTNG_CONSUMER_DESTROY_CHANNEL:
{
uint64_t key = msg.u.destroy_channel.key;
- struct lttng_consumer_channel *channel;
-
- channel = consumer_find_channel(key);
- if (!channel) {
- ERR("UST consumer get channel key %" PRIu64 " not found", key);
- ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
- goto end_msg_sessiond;
- }
-
- destroy_channel(channel);
+ /*
+ * Only called if streams have not been sent to the stream
+ * manager thread. However, the channel has been sent to the
+ * channel manager thread.
+ */
+ notify_thread_del_channel(ctx, key);
goto end_msg_sessiond;
}
case LTTNG_CONSUMER_CLOSE_METADATA:
if (!channel) {
ERR("UST consumer push metadata %" PRIu64 " not found", key);
ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+ goto end_msg_sessiond;
}
/* Tell session daemon we are ready to receive the metadata. */
/* Wait for more data. */
if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
- goto end_nosignal;
+ goto error_fatal;
}
ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
len, channel);
if (ret < 0) {
/* error receiving from sessiond */
- goto end_nosignal;
+ goto error_fatal;
} else {
ret_code = ret;
goto end_msg_sessiond;
}
goto end_msg_sessiond;
}
+ case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
+ {
+ if (msg.u.snapshot_channel.metadata) {
+ ret = snapshot_metadata(msg.u.snapshot_channel.key,
+ msg.u.snapshot_channel.pathname,
+ msg.u.snapshot_channel.relayd_id,
+ ctx);
+ if (ret < 0) {
+ ERR("Snapshot metadata failed");
+ ret_code = LTTNG_ERR_UST_META_FAIL;
+ }
+ } else {
+ ret = snapshot_channel(msg.u.snapshot_channel.key,
+ msg.u.snapshot_channel.pathname,
+ msg.u.snapshot_channel.relayd_id,
+ ctx);
+ if (ret < 0) {
+ ERR("Snapshot channel failed");
+ ret_code = LTTNG_ERR_UST_CHAN_FAIL;
+ }
+ }
+
+ ret = consumer_send_status_msg(sock, ret_code);
+ if (ret < 0) {
+ /* Somehow, the session daemon is not responding anymore. */
+ goto end_nosignal;
+ }
+ break;
+ }
default:
break;
}
* the caller because the session daemon socket management is done
* elsewhere. Returning a negative code or 0 will shutdown the consumer.
*/
- (void) consumer_send_status_msg(sock, ret_code);
+ ret = consumer_send_status_msg(sock, ret_code);
+ if (ret < 0) {
+ goto error_fatal;
+ }
rcu_read_unlock();
return 1;
end_channel_error:
return ustctl_snapshot_get_produced(stream->ustream, pos);
}
+/*
+ * Get the consumed position
+ *
+ * Returns 0 on success, < 0 on error
+ */
+int lttng_ustconsumer_get_consumed_snapshot(
+ struct lttng_consumer_stream *stream, unsigned long *pos)
+{
+ assert(stream);
+ assert(stream->ustream);
+ assert(pos);
+
+ return ustctl_snapshot_get_consumed(stream->ustream, pos);
+}
+
/*
* Called when the stream signal the consumer that it has hang up.
*/
assert(chan);
assert(chan->uchan);
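+ /* Stop the switch timer and free the metadata cache before tearing down the UST channel. */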
+ if (chan->switch_timer_enabled == 1) {
+ consumer_timer_switch_stop(chan);
+ }
+ consumer_metadata_cache_destroy(chan);
ustctl_destroy_channel(chan->uchan);
}
assert(stream);
assert(stream->ustream);
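+ /* Stop the channel's switch timer before its stream is destroyed. */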
+ if (stream->chan->switch_timer_enabled == 1) {
+ consumer_timer_switch_stop(stream->chan);
+ }
ustctl_destroy_stream(stream->ustream);
}
{
int ret;
+ assert(stream);
+
- /* Don't create anything if this is set for streaming. */
+ /*
+ * Don't create anything if this is set for streaming or if the stream
+ * is not monitored; in no monitor mode, stream files are created on
+ * demand at snapshot time.
+ */
- if (stream->net_seq_idx == (uint64_t) -1ULL) {
+ if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
ret = utils_create_stream_file(stream->chan->pathname, stream->name,
stream->chan->tracefile_size, stream->tracefile_count_current,
stream->uid, stream->gid);
}
request.session_id = channel->session_id;
+ request.session_id_per_pid = channel->session_id_per_pid;
request.uid = channel->uid;
request.key = channel->key;
- DBG("Sending metadata request to sessiond, session %" PRIu64,
- channel->session_id);
+ DBG("Sending metadata request to sessiond, session id %" PRIu64
+ ", per-pid %" PRIu64,
+ channel->session_id,
+ channel->session_id_per_pid);
ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
sizeof(request));
ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
key, offset, len, channel);
- (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
+ if (ret_code >= 0) {
+ /*
+ * Only send the status msg if the sessiond is alive, meaning a
+ * non-negative ret code.
+ */
+ (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
+ }
ret = 0;
end: