Fix: correctly close metadata on sessiond thread shutdown
diff --git a/src/common/ust-consumer/ust-consumer.c b/src/common/ust-consumer/ust-consumer.c
index 442925754c634ec4d067382b5cdd7ed8f35b385e..6a692b9ba5a8bffd7826bd164dfc3f1de9f8c9df 100644
--- a/src/common/ust-consumer/ust-consumer.c
+++ b/src/common/ust-consumer/ust-consumer.c
 #include <inttypes.h>
 #include <unistd.h>
 #include <urcu/list.h>
+#include <signal.h>
 
 #include <common/common.h>
 #include <common/sessiond-comm/sessiond-comm.h>
 #include <common/relayd/relayd.h>
 #include <common/compat/fcntl.h>
+#include <common/consumer-metadata-cache.h>
+#include <common/consumer-stream.h>
+#include <common/consumer-timer.h>
+#include <common/utils.h>
 
 #include "ust-consumer.h"
 
@@ -88,17 +93,17 @@ static int add_channel(struct lttng_consumer_channel *channel,
        if (ctx->on_recv_channel != NULL) {
                ret = ctx->on_recv_channel(channel);
                if (ret == 0) {
-                       ret = consumer_add_channel(channel);
+                       ret = consumer_add_channel(channel, ctx);
                } else if (ret < 0) {
                        /* Most likely an ENOMEM. */
                        lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
                        goto error;
                }
        } else {
-               ret = consumer_add_channel(channel);
+               ret = consumer_add_channel(channel, ctx);
        }
 
-       DBG("UST consumer channel added (key: %u)", channel->key);
+       DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
 
 error:
        return ret;
@@ -109,13 +114,16 @@ error:
  */
 static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
                const char *pathname, const char *name, uid_t uid, gid_t gid,
-               int relayd_id, unsigned long key, enum lttng_event_output output)
+               uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
+               uint64_t tracefile_size, uint64_t tracefile_count,
+               uint64_t session_id_per_pid, unsigned int monitor)
 {
        assert(pathname);
        assert(name);
 
-       return consumer_allocate_channel(key, session_id, pathname, name, uid, gid,
-                       relayd_id, output);
+       return consumer_allocate_channel(key, session_id, pathname, name, uid,
+                       gid, relayd_id, output, tracefile_size,
+                       tracefile_count, session_id_per_pid, monitor);
 }
 
 /*
@@ -144,7 +152,8 @@ static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
                        channel->session_id,
                        cpu,
                        &alloc_ret,
-                       channel->type);
+                       channel->type,
+                       channel->monitor);
        if (stream == NULL) {
                switch (alloc_ret) {
                case -ENOENT:
@@ -180,60 +189,54 @@ error:
 static int send_stream_to_thread(struct lttng_consumer_stream *stream,
                struct lttng_consumer_local_data *ctx)
 {
-       int ret, stream_pipe;
+       int ret;
+       struct lttng_pipe *stream_pipe;
 
        /* Get the right pipe where the stream will be sent. */
        if (stream->metadata_flag) {
-               stream_pipe = ctx->consumer_metadata_pipe[1];
+               ret = consumer_add_metadata_stream(stream);
+               if (ret) {
+                       ERR("Consumer add metadata stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
+               stream_pipe = ctx->consumer_metadata_pipe;
        } else {
-               stream_pipe = ctx->consumer_data_pipe[1];
+               ret = consumer_add_data_stream(stream);
+               if (ret) {
+                       ERR("Consumer add stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
+               stream_pipe = ctx->consumer_data_pipe;
        }
 
-       do {
-               ret = write(stream_pipe, &stream, sizeof(stream));
-       } while (ret < 0 && errno == EINTR);
+       /*
+        * From this point on, the stream's ownership has been moved away from
+        * the channel and it becomes globally visible.
+        */
+       stream->globally_visible = 1;
+
+       ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
        if (ret < 0) {
-               PERROR("Consumer write %s stream to pipe %d",
-                               stream->metadata_flag ? "metadata" : "data", stream_pipe);
+               ERR("Consumer write %s stream to pipe %d",
+                               stream->metadata_flag ? "metadata" : "data",
+                               lttng_pipe_get_writefd(stream_pipe));
+               if (stream->metadata_flag) {
+                       consumer_del_stream_for_metadata(stream);
+               } else {
+                       consumer_del_stream_for_data(stream);
+               }
        }
-
+error:
        return ret;
 }
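
The hunk above replaces the raw pipe write with lttng_pipe_write() and makes the hand-off explicit: once the pointer crosses the pipe, the receiving thread owns the stream, and a failed write must undo the earlier registration. A minimal, generic sketch of that pointer-through-a-pipe ownership transfer (names here are illustrative, not the lttng-tools API):

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

struct work { int id; };

/*
 * Publish a heap-allocated object to another thread by writing its pointer
 * through a pipe. On failure the object is reclaimed here, since the reader
 * will never see it.
 */
static int hand_off(int pipe_wfd, struct work *w)
{
        ssize_t ret;

        do {
                ret = write(pipe_wfd, &w, sizeof(w));
        } while (ret < 0 && errno == EINTR);

        if (ret != sizeof(w)) {
                free(w);
                return -1;
        }
        return 0;
}
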
 
 /*
- * Search for a relayd object related to the stream. If found, send the stream
- * to the relayd.
+ * Create streams for the given channel using liblttng-ust-ctl.
  *
- * On success, returns 0 else a negative value.
+ * Return 0 on success else a negative value.
  */
-static int send_stream_to_relayd(struct lttng_consumer_stream *stream)
-{
-       int ret = 0;
-       struct consumer_relayd_sock_pair *relayd;
-
-       assert(stream);
-
-       relayd = consumer_find_relayd(stream->net_seq_idx);
-       if (relayd != NULL) {
-               pthread_mutex_lock(&relayd->ctrl_sock_mutex);
-               /* Add stream on the relayd */
-               ret = relayd_add_stream(&relayd->control_sock, stream->name,
-                               stream->chan->pathname, &stream->relayd_stream_id);
-               pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
-               if (ret < 0) {
-                       goto error;
-               }
-       } else if (stream->net_seq_idx != -1) {
-               ERR("Network sequence index %d unknown. Not adding stream.",
-                               stream->net_seq_idx);
-               ret = -1;
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
 static int create_ust_streams(struct lttng_consumer_channel *channel,
                struct lttng_consumer_local_data *ctx)
 {
@@ -250,8 +253,18 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
         */
        while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
                int wait_fd;
+               int ust_metadata_pipe[2];
 
-               wait_fd = ustctl_get_wait_fd(ustream);
+               if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
+                       ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
+                       if (ret < 0) {
+                               ERR("Create ust metadata poll pipe");
+                               goto error;
+                       }
+                       wait_fd = ust_metadata_pipe[0];
+               } else {
+                       wait_fd = ustctl_stream_get_wait_fd(ustream);
+               }
 
                /* Allocate consumer stream object. */
                stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
@@ -266,6 +279,14 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
                 */
                stream->wait_fd = wait_fd;
 
+               /*
+                * Increment channel refcount since the channel reference has now been
+                * assigned in the allocation process above.
+                */
+               if (stream->chan->monitor) {
+                       uatomic_inc(&stream->chan->refcount);
+               }
+
                /*
                 * Order is important this is why a list is used. On error, the caller
                 * should clean this list.
@@ -288,11 +309,18 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
                        }
                }
 
-               DBG("UST consumer add stream %s (key: %d) with relayd id %" PRIu64,
+               DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
                                stream->name, stream->key, stream->relayd_stream_id);
 
                /* Set next CPU stream. */
                channel->streams.count = ++cpu;
+
+               /* Keep stream reference when creating metadata. */
+               if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
+                       channel->metadata_stream = stream;
+                       stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
+                       stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
+               }
        }
 
        return 0;
@@ -338,6 +366,11 @@ error_create:
        return ret;
 }
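
For monitored metadata channels, the stream's wait fd is now the read side of a dedicated pipe rather than the ring-buffer wakeup fd. A plausible implementation of a close-on-exec, non-blocking pipe helper such as utils_create_pipe_cloexec_nonblock() (the real helper may differ) is simply:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Create a pipe with O_CLOEXEC and O_NONBLOCK set on both ends. */
static int create_pipe_cloexec_nonblock(int fds[2])
{
        return pipe2(fds, O_CLOEXEC | O_NONBLOCK);      /* 0 on success, -1 with errno on error */
}
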
 
+/*
+ * Send a single given stream to the session daemon using the sock.
+ *
+ * Return 0 on success else a negative value.
+ */
 static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
 {
        int ret;
@@ -345,7 +378,7 @@ static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
        assert(stream);
        assert(sock >= 0);
 
-       DBG2("UST consumer sending stream %d to sessiond", stream->key);
+       DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
 
        /* Send stream to session daemon. */
        ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
@@ -353,11 +386,6 @@ static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
                goto error;
        }
 
-       ret = ustctl_stream_close_wakeup_fd(stream->ustream);
-       if (ret < 0) {
-               goto error;
-       }
-
 error:
        return ret;
 }
@@ -365,14 +393,13 @@ error:
 /*
  * Send channel to sessiond.
  *
- * Return 0 on success or else a negative value. On error, the channel is
- * destroy using ustctl.
+ * Return 0 on success or else a negative value.
  */
 static int send_sessiond_channel(int sock,
                struct lttng_consumer_channel *channel,
                struct lttng_consumer_local_data *ctx, int *relayd_error)
 {
-       int ret;
+       int ret, ret_code = LTTNG_OK;
        struct lttng_consumer_stream *stream;
 
        assert(channel);
@@ -381,27 +408,46 @@ static int send_sessiond_channel(int sock,
 
        DBG("UST consumer sending channel %s to sessiond", channel->name);
 
+       if (channel->relayd_id != (uint64_t) -1ULL) {
+               cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+                       /* Try to send the stream to the relayd if one is available. */
+                       ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
+                       if (ret < 0) {
+                               /*
+                                * Flag that the relayd was the problem here, probably due to a
+                                * communication error on the socket.
+                                */
+                               if (relayd_error) {
+                                       *relayd_error = 1;
+                               }
+                               ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
+                       }
+               }
+       }
+
+       /* Inform sessiond that we are about to send channel and streams. */
+       ret = consumer_send_status_msg(sock, ret_code);
+       if (ret < 0 || ret_code != LTTNG_OK) {
+               /*
+                * Either the session daemon is not responding or the relayd died so we
+                * stop now.
+                */
+               goto error;
+       }
+
        /* Send channel to sessiond. */
        ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
        if (ret < 0) {
                goto error;
        }
 
+       ret = ustctl_channel_close_wakeup_fd(channel->uchan);
+       if (ret < 0) {
+               goto error;
+       }
+
        /* The channel was sent successfully to the sessiond at this point. */
        cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
-               /* Try to send the stream to the relayd if one is available. */
-               ret = send_stream_to_relayd(stream);
-               if (ret < 0) {
-                       /*
-                        * Flag that the relayd was the problem here probably due to a
-                        * communicaton error on the socket.
-                        */
-                       if (relayd_error) {
-                               *relayd_error = 1;
-                       }
-                       goto error;
-               }
-
                /* Send stream to session daemon. */
                ret = send_sessiond_stream(sock, stream);
                if (ret < 0) {
@@ -420,6 +466,9 @@ static int send_sessiond_channel(int sock,
        return 0;
 
 error:
+       if (ret_code != LTTNG_OK) {
+               ret = -1;
+       }
        return ret;
 }
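
The reordering in this hunk sends every stream to the relayd first, folds any failure into a single status code, and reports that status to the session daemon before the channel and streams are put on the socket. A condensed sketch of the two-phase pattern (helpers and codes are placeholders):

enum status { STATUS_OK = 0, STATUS_RELAYD_FAIL = 1 };

static int send_all_with_status(int sock, int nr_streams,
                int (*send_one_to_relayd)(int idx),
                int (*report_status)(int sock, enum status code))
{
        enum status code = STATUS_OK;
        int i;

        /* Phase 1: try every stream, remember that at least one failed. */
        for (i = 0; i < nr_streams; i++) {
                if (send_one_to_relayd(i) < 0) {
                        code = STATUS_RELAYD_FAIL;
                }
        }

        /* Phase 2: tell the peer what happened before sending anything else. */
        if (report_status(sock, code) < 0 || code != STATUS_OK) {
                return -1;
        }
        return 0;
}
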
 
@@ -456,20 +505,569 @@ static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
         */
        channel->nb_init_stream_left = 0;
 
-       /* The reply msg status is handled in the following call. */
-       ret = create_ust_channel(attr, &channel->uchan);
+       /* The reply msg status is handled in the following call. */
+       ret = create_ust_channel(attr, &channel->uchan);
+       if (ret < 0) {
+               goto end;
+       }
+
+       channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
+
+       /*
+        * For the snapshots (no monitor), we create the metadata streams
+        * on demand, not during the channel creation.
+        */
+       if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
+               ret = 0;
+               goto end;
+       }
+
+       /* Open all streams for this channel. */
+       ret = create_ust_streams(channel, ctx);
+       if (ret < 0) {
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Send all stream of a channel to the right thread handling it.
+ *
+ * On error, return a negative value else 0 on success.
+ */
+static int send_streams_to_thread(struct lttng_consumer_channel *channel,
+               struct lttng_consumer_local_data *ctx)
+{
+       int ret = 0;
+       struct lttng_consumer_stream *stream, *stmp;
+
+       assert(channel);
+       assert(ctx);
+
+       /* Send streams to the corresponding thread. */
+       cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
+                       send_node) {
+               /* Sending the stream to the thread. */
+               ret = send_stream_to_thread(stream, ctx);
+               if (ret < 0) {
+                       /*
+                        * If we are unable to send the stream to the thread, there is
+                        * a big problem so just stop everything.
+                        */
+                       /* Remove node from the channel stream list. */
+                       cds_list_del(&stream->send_node);
+                       goto error;
+               }
+
+               /* Remove node from the channel stream list. */
+               cds_list_del(&stream->send_node);
+
+       }
+
+error:
+       return ret;
+}
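
send_streams_to_thread() unlinks each stream from the channel's send list as soon as it has been handed over, successful or not, so the same node can never be handed over twice. A small sketch of that drain pattern using the same urcu list primitives:

#include <urcu/list.h>

struct item {
        struct cds_list_head node;
};

/*
 * Hand every queued item to 'consume'. Each attempted item is unlinked
 * before the result is inspected, so a node is never processed twice.
 */
static int drain_send_list(struct cds_list_head *head,
                int (*consume)(struct item *item))
{
        struct item *it, *tmp;

        cds_list_for_each_entry_safe(it, tmp, head, node) {
                int ret = consume(it);

                cds_list_del(&it->node);
                if (ret < 0) {
                        return ret;
                }
        }
        return 0;
}
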
+
+/*
+ * Flush channel's streams using the given key to retrieve the channel.
+ *
+ * Return 0 on success else an LTTng error code.
+ */
+static int flush_channel(uint64_t chan_key)
+{
+       int ret = 0;
+       struct lttng_consumer_channel *channel;
+       struct lttng_consumer_stream *stream;
+       struct lttng_ht *ht;
+       struct lttng_ht_iter iter;
+
+       DBG("UST consumer flush channel key %" PRIu64, chan_key);
+
+       rcu_read_lock();
+       channel = consumer_find_channel(chan_key);
+       if (!channel) {
+               ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
+               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+               goto error;
+       }
+
+       ht = consumer_data.stream_per_chan_id_ht;
+
+       /* For each stream of the channel id, flush it. */
+       cds_lfht_for_each_entry_duplicate(ht->ht,
+                       ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
+                       &channel->key, &iter.iter, stream, node_channel_id.node) {
+               ustctl_flush_buffer(stream->ustream, 1);
+       }
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Close the metadata stream wakeup_fd of the given channel.
+ * RCU read side lock MUST be acquired before calling this function.
+ *
+ * NOTE: This function does NOT take any channel nor stream lock.
+ *
+ * Return 0 on success else LTTng error code.
+ */
+static int _close_metadata(struct lttng_consumer_channel *channel)
+{
+       int ret = LTTNG_OK;
+
+       assert(channel);
+       assert(channel->type == CONSUMER_CHANNEL_TYPE_METADATA);
+
+       if (channel->switch_timer_enabled == 1) {
+               DBG("Deleting timer on metadata channel");
+               consumer_timer_switch_stop(channel);
+       }
+
+       if (channel->metadata_stream) {
+               ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
+               if (ret < 0) {
+                       ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
+                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+               }
+
+               if (channel->monitor) {
+                       /* Close the read-side in consumer_del_metadata_stream */
+                       ret = close(channel->metadata_stream->ust_metadata_poll_pipe[1]);
+                       if (ret < 0) {
+                               PERROR("Close UST metadata write-side poll pipe");
+                               ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+                       }
+               }
+       }
+
+       return ret;
+}
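
Closing only the write side of the metadata poll pipe, as _close_metadata() does for monitored channels, is what lets the metadata thread's poll on the read side observe hang-up and delete the stream itself. A generic illustration of that wake-up mechanism, independent of the consumer code:

#include <poll.h>

/* Returns 1 once every write end of the pipe has been closed, 0 otherwise. */
static int writer_has_hung_up(int read_fd)
{
        struct pollfd pfd = { .fd = read_fd, .events = POLLIN };

        if (poll(&pfd, 1, 0) < 0) {
                return -1;
        }
        return (pfd.revents & POLLHUP) ? 1 : 0;
}
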
+
+/*
+ * Close metadata stream wakeup_fd using the given key to retrieve the channel.
+ * RCU read side lock MUST be acquired before calling this function.
+ *
+ * Return 0 on success else an LTTng error code.
+ */
+static int close_metadata(uint64_t chan_key)
+{
+       int ret = 0;
+       struct lttng_consumer_channel *channel;
+
+       DBG("UST consumer close metadata key %" PRIu64, chan_key);
+
+       channel = consumer_find_channel(chan_key);
+       if (!channel) {
+               /*
+                * This is possible if the metadata thread has issued a delete because
+                * the endpoint of the stream hung up. There is no way the
+                * session daemon can know about it, thus use a DBG instead of an actual
+                * error.
+                */
+               DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
+               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+               goto error;
+       }
+
+       pthread_mutex_lock(&consumer_data.lock);
+       pthread_mutex_lock(&channel->lock);
+
+       if (cds_lfht_is_node_deleted(&channel->node.node)) {
+               goto error_unlock;
+       }
+
+       ret = _close_metadata(channel);
+
+error_unlock:
+       pthread_mutex_unlock(&channel->lock);
+       pthread_mutex_unlock(&consumer_data.lock);
+error:
+       return ret;
+}
+
+/*
+ * RCU read side lock MUST be acquired before calling this function.
+ *
+ * Return 0 on success else an LTTng error code.
+ */
+static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
+{
+       int ret;
+       struct lttng_consumer_channel *metadata;
+
+       DBG("UST consumer setup metadata key %" PRIu64, key);
+
+       metadata = consumer_find_channel(key);
+       if (!metadata) {
+               ERR("UST consumer push metadata %" PRIu64 " not found", key);
+               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+               goto end;
+       }
+
+       /*
+        * In no monitor mode, the metadata channel has no stream(s) so skip the
+        * ownership transfer to the metadata thread.
+        */
+       if (!metadata->monitor) {
+               DBG("Metadata channel in no monitor mode");
+               ret = 0;
+               goto end;
+       }
+
+       /*
+        * Send metadata stream to relayd if one is available. Availability is
+        * known if the stream is still in the list of the channel.
+        */
+       if (cds_list_empty(&metadata->streams.head)) {
+               ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
+               ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+               goto error_no_stream;
+       }
+
+       /* Send metadata stream to relayd if needed. */
+       if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
+               ret = consumer_send_relayd_stream(metadata->metadata_stream,
+                               metadata->pathname);
+               if (ret < 0) {
+                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+                       goto error;
+               }
+       }
+
+       ret = send_streams_to_thread(metadata, ctx);
+       if (ret < 0) {
+               /*
+                * If we are unable to send the stream to the thread, there is
+                * a big problem so just stop everything.
+                */
+               ret = LTTCOMM_CONSUMERD_FATAL;
+               goto error;
+       }
+       /* List MUST be empty after or else it could be reused. */
+       assert(cds_list_empty(&metadata->streams.head));
+
+       ret = 0;
+       goto end;
+
+error:
+       /*
+        * Delete metadata channel on error. At this point, the metadata stream can
+        * NOT be monitored by the metadata thread thus having the guarantee that
+        * NOT be monitored by the metadata thread, thus we have the guarantee that
+        * will make sure to clean that list.
+        */
+       cds_list_del(&metadata->metadata_stream->send_node);
+       consumer_stream_destroy(metadata->metadata_stream, NULL);
+error_no_stream:
+end:
+       return ret;
+}
+
+/*
+ * Snapshot the whole metadata.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
+               struct lttng_consumer_local_data *ctx)
+{
+       int ret = 0;
+       struct lttng_consumer_channel *metadata_channel;
+       struct lttng_consumer_stream *metadata_stream;
+
+       assert(path);
+       assert(ctx);
+
+       DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
+                       key, path);
+
+       rcu_read_lock();
+
+       metadata_channel = consumer_find_channel(key);
+       if (!metadata_channel) {
+               ERR("UST snapshot metadata channel not found for key %" PRIu64,
+                       key);
+               ret = -1;
+               goto error;
+       }
+       assert(!metadata_channel->monitor);
+
+       /*
+        * Ask the sessiond if we have new metadata waiting and update the
+        * consumer metadata cache.
+        */
+       ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /*
+        * The metadata stream is NOT created in no monitor mode when the channel
+        * is created on a sessiond ask channel command.
+        */
+       ret = create_ust_streams(metadata_channel, ctx);
+       if (ret < 0) {
+               goto error;
+       }
+
+       metadata_stream = metadata_channel->metadata_stream;
+       assert(metadata_stream);
+
+       if (relayd_id != (uint64_t) -1ULL) {
+               metadata_stream->net_seq_idx = relayd_id;
+               ret = consumer_send_relayd_stream(metadata_stream, path);
+               if (ret < 0) {
+                       goto error_stream;
+               }
+       } else {
+               ret = utils_create_stream_file(path, metadata_stream->name,
+                               metadata_stream->chan->tracefile_size,
+                               metadata_stream->tracefile_count_current,
+                               metadata_stream->uid, metadata_stream->gid);
+               if (ret < 0) {
+                       goto error_stream;
+               }
+               metadata_stream->out_fd = ret;
+               metadata_stream->tracefile_size_current = 0;
+       }
+
+       pthread_mutex_lock(&metadata_channel->metadata_cache->lock);
+
+       do {
+               ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
+               if (ret < 0) {
+                       goto error_unlock;
+               }
+       } while (ret > 0);
+
+error_unlock:
+       pthread_mutex_unlock(&metadata_channel->metadata_cache->lock);
+
+error_stream:
+       /*
+        * Clean up the stream completely because the next snapshot will use a new
+        * metadata stream.
+        */
+       cds_list_del(&metadata_stream->send_node);
+       consumer_stream_destroy(metadata_stream, NULL);
+       metadata_channel->metadata_stream = NULL;
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
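
With the metadata cache lock held, the snapshot above simply calls the read function until it stops reporting progress. The shape of that drain loop, with a placeholder read callback:

/*
 * Keep reading while the callback reports progress (> 0); stop cleanly on 0
 * and propagate any negative error.
 */
static int drain_stream(int (*read_once)(void *arg), void *arg)
{
        int ret;

        do {
                ret = read_once(arg);
        } while (ret > 0);

        return ret < 0 ? ret : 0;
}
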
+
+/*
+ * Take a snapshot of all the stream of a channel.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
+               uint64_t max_stream_size, struct lttng_consumer_local_data *ctx)
+{
+       int ret;
+       unsigned use_relayd = 0;
+       unsigned long consumed_pos, produced_pos;
+       struct lttng_consumer_channel *channel;
+       struct lttng_consumer_stream *stream;
+
+       assert(path);
+       assert(ctx);
+
+       rcu_read_lock();
+
+       if (relayd_id != (uint64_t) -1ULL) {
+               use_relayd = 1;
+       }
+
+       channel = consumer_find_channel(key);
+       if (!channel) {
+               ERR("UST snapshot channel not found for key %" PRIu64, key);
+               ret = -1;
+               goto error;
+       }
+       assert(!channel->monitor);
+       DBG("UST consumer snapshot channel %" PRIu64, key);
+
+       cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+               /* Lock stream because we are about to change its state. */
+               pthread_mutex_lock(&stream->lock);
+               stream->net_seq_idx = relayd_id;
+
+               if (use_relayd) {
+                       ret = consumer_send_relayd_stream(stream, path);
+                       if (ret < 0) {
+                               goto error_unlock;
+                       }
+               } else {
+                       ret = utils_create_stream_file(path, stream->name,
+                                       stream->chan->tracefile_size,
+                                       stream->tracefile_count_current,
+                                       stream->uid, stream->gid);
+                       if (ret < 0) {
+                               goto error_unlock;
+                       }
+                       stream->out_fd = ret;
+                       stream->tracefile_size_current = 0;
+
+                       DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
+                                       stream->name, stream->key);
+               }
+
+               ustctl_flush_buffer(stream->ustream, 1);
+
+               ret = lttng_ustconsumer_take_snapshot(stream);
+               if (ret < 0) {
+                       ERR("Taking UST snapshot");
+                       goto error_unlock;
+               }
+
+               ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
+               if (ret < 0) {
+                       ERR("Produced UST snapshot position");
+                       goto error_unlock;
+               }
+
+               ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
+               if (ret < 0) {
+                       ERR("Consumed UST snapshot position");
+                       goto error_unlock;
+               }
+
+               /*
+                * The original value is sent back if the maximum stream size is larger
+                * than the possible size of the snapshot. Also, we assume that the
+                * session daemon never sends a maximum stream size that is lower than
+                * the subbuffer size.
+                */
+               consumed_pos = consumer_get_consumed_maxsize(consumed_pos,
+                               produced_pos, max_stream_size);
+
+               while (consumed_pos < produced_pos) {
+                       ssize_t read_len;
+                       unsigned long len, padded_len;
+
+                       DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
+
+                       ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
+                       if (ret < 0) {
+                               if (ret != -EAGAIN) {
+                                       PERROR("ustctl_get_subbuf snapshot");
+                                       goto error_close_stream;
+                               }
+                               DBG("UST consumer get subbuf failed. Skipping it.");
+                               consumed_pos += stream->max_sb_size;
+                               continue;
+                       }
+
+                       ret = ustctl_get_subbuf_size(stream->ustream, &len);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_get_subbuf_size");
+                               goto error_put_subbuf;
+                       }
+
+                       ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_get_padded_subbuf_size");
+                               goto error_put_subbuf;
+                       }
+
+                       read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
+                                       padded_len - len);
+                       if (use_relayd) {
+                               if (read_len != len) {
+                                       ret = -EPERM;
+                                       goto error_put_subbuf;
+                               }
+                       } else {
+                               if (read_len != padded_len) {
+                                       ret = -EPERM;
+                                       goto error_put_subbuf;
+                               }
+                       }
+
+                       ret = ustctl_put_subbuf(stream->ustream);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_put_subbuf");
+                               goto error_close_stream;
+                       }
+                       consumed_pos += stream->max_sb_size;
+               }
+
+               /* Simply close the stream so we can use it on the next snapshot. */
+               consumer_stream_close(stream);
+               pthread_mutex_unlock(&stream->lock);
+       }
+
+       rcu_read_unlock();
+       return 0;
+
+error_put_subbuf:
+       if (ustctl_put_subbuf(stream->ustream) < 0) {
+               ERR("Snapshot ustctl_put_subbuf");
+       }
+error_close_stream:
+       consumer_stream_close(stream);
+error_unlock:
+       pthread_mutex_unlock(&stream->lock);
+error:
+       rcu_read_unlock();
+       return ret;
+}
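
The clamp referred to in the comment above keeps the snapshot to at most max_stream_size bytes ending at the produced position; if the requested maximum covers more than what was actually produced, the consumed position is returned unchanged. A sketch of that computation (the real consumer_get_consumed_maxsize() may differ in details):

#include <stdint.h>

/*
 * Start the snapshot at most max_size bytes before the produced position.
 * If max_size is zero (unlimited) or would cover more than what has actually
 * been produced, the original consumed position is kept.
 */
static unsigned long clamp_consumed_pos(unsigned long consumed,
                unsigned long produced, uint64_t max_size)
{
        if (max_size == 0 || max_size >= produced - consumed) {
                return consumed;
        }
        return produced - (unsigned long) max_size;
}
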
+
+/*
+ * Receive the metadata updates from the sessiond.
+ */
+int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
+               uint64_t len, struct lttng_consumer_channel *channel,
+               int timer)
+{
+       int ret, ret_code = LTTNG_OK;
+       char *metadata_str;
+
+       DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
+
+       metadata_str = zmalloc(len * sizeof(char));
+       if (!metadata_str) {
+               PERROR("zmalloc metadata string");
+               ret_code = LTTCOMM_CONSUMERD_ENOMEM;
+               goto end;
+       }
+
+       /* Receive metadata string. */
+       ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
        if (ret < 0) {
-               goto error;
+               /* Session daemon is dead so return gracefully. */
+               ret_code = ret;
+               goto end_free;
        }
 
-       /* Open all streams for this channel. */
-       ret = create_ust_streams(channel, ctx);
+       pthread_mutex_lock(&channel->metadata_cache->lock);
+       ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
        if (ret < 0) {
-               goto error;
+               /* Unable to handle metadata. Notify session daemon. */
+               ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
+               /*
+                * Skip metadata flush on write error since the offset and len might
+                * not have been updated, which could create an infinite loop below when
+                * waiting for the metadata cache to be flushed.
+                */
+               pthread_mutex_unlock(&channel->metadata_cache->lock);
+               goto end_free;
        }
+       pthread_mutex_unlock(&channel->metadata_cache->lock);
 
-error:
-       return ret;
+       while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
+               DBG("Waiting for metadata to be flushed");
+               usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
+       }
+
+end_free:
+       free(metadata_str);
+end:
+       return ret_code;
 }
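
lttng_ustconsumer_recv_metadata() blocks the command socket until the cache has been consumed up to offset + len; a cache write error skips that wait precisely because the target offset might then never be reached. In outline (the helper is a stand-in for consumer_metadata_cache_flushed()):

#include <stdint.h>
#include <unistd.h>

#define WAIT_TIME_US 100000     /* polling interval; arbitrary for this sketch */

/* Block until 'flushed_up_to' reports that at least 'target' bytes were consumed. */
static void wait_metadata_flushed(uint64_t (*flushed_up_to)(void), uint64_t target)
{
        while (flushed_up_to() < target) {
                usleep(WAIT_TIME_US);
        }
}
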
 
 /*
@@ -489,11 +1087,14 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        if (ret != sizeof(msg)) {
                DBG("Consumer received unexpected message size %zd (expects %zu)",
                        ret, sizeof(msg));
-               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
                /*
                 * The ret value might be 0, meaning an orderly shutdown, but this is ok
                 * since the caller handles this.
                 */
+               if (ret > 0) {
+                       lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
+                       ret = -1;
+               }
                return ret;
        }
        if (msg.cmd_type == LTTNG_CONSUMER_STOP) {
@@ -548,13 +1149,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        consumer_flag_relayd_for_destroy(relayd);
                }
 
-               ret = consumer_send_status_msg(sock, ret_code);
-               if (ret < 0) {
-                       /* Somehow, the session daemon is not responding anymore. */
-                       goto end_nosignal;
-               }
-
-               goto end_nosignal;
+               goto end_msg_sessiond;
        }
        case LTTNG_CONSUMER_UPDATE_STREAM:
        {
@@ -575,6 +1170,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                                sizeof(is_data_pending));
                if (ret < 0) {
                        DBG("Error when sending the data pending ret code: %d", ret);
+                       goto error_fatal;
                }
 
                /*
@@ -593,34 +1189,43 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                                msg.u.ask_channel.pathname, msg.u.ask_channel.name,
                                msg.u.ask_channel.uid, msg.u.ask_channel.gid,
                                msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
-                               (enum lttng_event_output) msg.u.ask_channel.output);
+                               (enum lttng_event_output) msg.u.ask_channel.output,
+                               msg.u.ask_channel.tracefile_size,
+                               msg.u.ask_channel.tracefile_count,
+                               msg.u.ask_channel.session_id_per_pid,
+                               msg.u.ask_channel.monitor);
                if (!channel) {
                        goto end_channel_error;
                }
 
+               /*
+                * Assign UST application UID to the channel. This value is ignored for
+                * per-PID buffers. This is specific to UST, hence it is set after the
+                * allocation.
+                */
+               channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;
+
                /* Build channel attributes from received message. */
                attr.subbuf_size = msg.u.ask_channel.subbuf_size;
                attr.num_subbuf = msg.u.ask_channel.num_subbuf;
                attr.overwrite = msg.u.ask_channel.overwrite;
                attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
                attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
+               attr.chan_id = msg.u.ask_channel.chan_id;
+               attr.output = msg.u.ask_channel.output;
                memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
 
-               /* Translate the event output type to UST. */
-               switch (channel->output) {
-               case LTTNG_EVENT_SPLICE:
-                       /* Splice not supported so fallback on mmap(). */
-               case LTTNG_EVENT_MMAP:
-               default:
-                       attr.output = CONSUMER_CHANNEL_MMAP;
-                       break;
-               };
-
                /* Translate and save channel type. */
                switch (msg.u.ask_channel.type) {
                case LTTNG_UST_CHAN_PER_CPU:
                        channel->type = CONSUMER_CHANNEL_TYPE_DATA;
                        attr.type = LTTNG_UST_CHAN_PER_CPU;
+                       /*
+                        * Set refcount to 1 for owner. Below, we will
+                        * pass ownership to the
+                        * consumer_thread_channel_poll() thread.
+                        */
+                       channel->refcount = 1;
                        break;
                case LTTNG_UST_CHAN_METADATA:
                        channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
@@ -636,13 +1241,31 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        goto end_channel_error;
                }
 
+               if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
+                       ret = consumer_metadata_cache_allocate(channel);
+                       if (ret < 0) {
+                               ERR("Allocating metadata cache");
+                               goto end_channel_error;
+                       }
+                       consumer_timer_switch_start(channel, attr.switch_timer_interval);
+                       attr.switch_timer_interval = 0;
+               }
+
                /*
                 * Add the channel to the internal state AFTER all streams were created
                 * and successfully sent to session daemon. This way, all streams must
                 * be ready before this channel is visible to the threads.
+                * If add_channel succeeds, ownership of the channel is
+                * passed to consumer_thread_channel_poll().
                 */
                ret = add_channel(channel, ctx);
                if (ret < 0) {
+                       if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
+                               if (channel->switch_timer_enabled == 1) {
+                                       consumer_timer_switch_stop(channel);
+                               }
+                               consumer_metadata_cache_destroy(channel);
+                       }
                        goto end_channel_error;
                }
 
@@ -654,10 +1277,9 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                ret = consumer_send_status_channel(sock, channel);
                if (ret < 0) {
                        /*
-                        * There is probably a problem on the socket so the poll will get
-                        * it and clean everything up.
+                        * There is probably a problem on the socket.
                         */
-                       goto end_nosignal;
+                       goto error_fatal;
                }
 
                break;
@@ -665,24 +1287,16 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        case LTTNG_CONSUMER_GET_CHANNEL:
        {
                int ret, relayd_err = 0;
-               unsigned long key = msg.u.get_channel.key;
+               uint64_t key = msg.u.get_channel.key;
                struct lttng_consumer_channel *channel;
-               struct lttng_consumer_stream *stream, *stmp;
 
                channel = consumer_find_channel(key);
                if (!channel) {
-                       ERR("UST consumer get channel key %lu not found", key);
+                       ERR("UST consumer get channel key %" PRIu64 " not found", key);
                        ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
                        goto end_msg_sessiond;
                }
 
-               /* Inform sessiond that we are about to send channel and streams. */
-               ret = consumer_send_status_msg(sock, LTTNG_OK);
-               if (ret < 0) {
-                       /* Somehow, the session daemon is not responding anymore. */
-                       goto end_nosignal;
-               }
-
                /* Send everything to sessiond. */
                ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
                if (ret < 0) {
@@ -690,10 +1304,10 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                                /*
                                 * We were unable to send the stream to the relayd so avoid
                                 * sending back a fatal error to the thread since this is OK
-                                * and the consumer can continue its work.
+                                * and the consumer can continue its work. The above call
+                                * has sent the error status message to the sessiond.
                                 */
-                               ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
-                               goto end_msg_sessiond;
+                               goto end_nosignal;
                        }
                        /*
                         * The communication was broken, hence there is a bad state between
@@ -702,58 +1316,139 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        goto error_fatal;
                }
 
-               /* Send streams to the corresponding thread. */
-               cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
-                               send_node) {
-                       /* Sending the stream to the thread. */
-                       ret = send_stream_to_thread(stream, ctx);
-                       if (ret < 0) {
-                               /*
-                                * If we are unable to send the stream to the thread, there is
-                                * a big problem so just stop everything.
-                                */
-                               goto error_fatal;
-                       }
-
-                       /* Remove node from the channel stream list. */
-                       cds_list_del(&stream->send_node);
+               /*
+                * In no monitor mode, the streams' ownership is kept inside the channel
+                * so don't send them to the data thread.
+                */
+               if (!channel->monitor) {
+                       goto end_msg_sessiond;
                }
 
+               ret = send_streams_to_thread(channel, ctx);
+               if (ret < 0) {
+                       /*
+                        * If we are unable to send the stream to the thread, there is
+                        * a big problem so just stop everything.
+                        */
+                       goto error_fatal;
+               }
                /* List MUST be empty after or else it could be reused. */
                assert(cds_list_empty(&channel->streams.head));
+               goto end_msg_sessiond;
+       }
+       case LTTNG_CONSUMER_DESTROY_CHANNEL:
+       {
+               uint64_t key = msg.u.destroy_channel.key;
 
-               /* Inform sessiond that everything is done and OK on our side. */
-               ret = consumer_send_status_msg(sock, LTTNG_OK);
-               if (ret < 0) {
-                       /* Somehow, the session daemon is not responding anymore. */
-                       goto end_nosignal;
+               /*
+                * Only called if streams have not been sent to the stream
+                * manager thread. However, the channel has been sent to the
+                * channel manager thread.
+                */
+               notify_thread_del_channel(ctx, key);
+               goto end_msg_sessiond;
+       }
+       case LTTNG_CONSUMER_CLOSE_METADATA:
+       {
+               int ret;
+
+               ret = close_metadata(msg.u.close_metadata.key);
+               if (ret != 0) {
+                       ret_code = ret;
                }
 
-               break;
+               goto end_msg_sessiond;
        }
-       case LTTNG_CONSUMER_DESTROY_CHANNEL:
+       case LTTNG_CONSUMER_FLUSH_CHANNEL:
+       {
+               int ret;
+
+               ret = flush_channel(msg.u.flush_channel.key);
+               if (ret != 0) {
+                       ret_code = ret;
+               }
+
+               goto end_msg_sessiond;
+       }
+       case LTTNG_CONSUMER_PUSH_METADATA:
        {
                int ret;
-               unsigned long key = msg.u.destroy_channel.key;
+               uint64_t len = msg.u.push_metadata.len;
+               uint64_t key = msg.u.push_metadata.key;
+               uint64_t offset = msg.u.push_metadata.target_offset;
                struct lttng_consumer_channel *channel;
 
-               DBG("UST consumer destroy channel key %lu", key);
+               DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
+                               len);
 
                channel = consumer_find_channel(key);
                if (!channel) {
-                       ERR("UST consumer destroy channel %lu not found", key);
+                       ERR("UST consumer push metadata %" PRIu64 " not found", key);
                        ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-               } else {
-                       /* Protocol error if the stream list is NOT empty. */
-                       assert(!cds_list_empty(&channel->streams.head));
-                       consumer_del_channel(channel);
+                       goto end_msg_sessiond;
                }
 
+               /* Tell session daemon we are ready to receive the metadata. */
                ret = consumer_send_status_msg(sock, LTTNG_OK);
+               if (ret < 0) {
+                       /* Somehow, the session daemon is not responding anymore. */
+                       goto error_fatal;
+               }
+
+               /* Wait for more data. */
+               if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
+                       goto error_fatal;
+               }
+
+               ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
+                               len, channel, 0);
+               if (ret < 0) {
+                       /* error receiving from sessiond */
+                       goto error_fatal;
+               } else {
+                       ret_code = ret;
+                       goto end_msg_sessiond;
+               }
+       }
+       case LTTNG_CONSUMER_SETUP_METADATA:
+       {
+               int ret;
+
+               ret = setup_metadata(ctx, msg.u.setup_metadata.key);
+               if (ret) {
+                       ret_code = ret;
+               }
+               goto end_msg_sessiond;
+       }
+       case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
+       {
+               if (msg.u.snapshot_channel.metadata) {
+                       ret = snapshot_metadata(msg.u.snapshot_channel.key,
+                                       msg.u.snapshot_channel.pathname,
+                                       msg.u.snapshot_channel.relayd_id,
+                                       ctx);
+                       if (ret < 0) {
+                               ERR("Snapshot metadata failed");
+                               ret_code = LTTNG_ERR_UST_META_FAIL;
+                       }
+               } else {
+                       ret = snapshot_channel(msg.u.snapshot_channel.key,
+                                       msg.u.snapshot_channel.pathname,
+                                       msg.u.snapshot_channel.relayd_id,
+                                       msg.u.snapshot_channel.max_stream_size,
+                                       ctx);
+                       if (ret < 0) {
+                               ERR("Snapshot channel failed");
+                               ret_code = LTTNG_ERR_UST_CHAN_FAIL;
+                       }
+               }
+
+               ret = consumer_send_status_msg(sock, ret_code);
                if (ret < 0) {
                        /* Somehow, the session daemon is not responding anymore. */
                        goto end_nosignal;
                }
+               break;
        }
        default:
                break;
@@ -774,7 +1469,10 @@ end_msg_sessiond:
         * the caller because the session daemon socket management is done
         * elsewhere. Returning a negative code or 0 will shutdown the consumer.
         */
-       (void) consumer_send_status_msg(sock, ret_code);
+       ret = consumer_send_status_msg(sock, ret_code);
+       if (ret < 0) {
+               goto error_fatal;
+       }
        rcu_read_unlock();
        return 1;
 end_channel_error:
@@ -852,6 +1550,21 @@ int lttng_ustconsumer_get_produced_snapshot(
        return ustctl_snapshot_get_produced(stream->ustream, pos);
 }
 
+/*
+ * Get the consumed position
+ *
+ * Returns 0 on success, < 0 on error
+ */
+int lttng_ustconsumer_get_consumed_snapshot(
+               struct lttng_consumer_stream *stream, unsigned long *pos)
+{
+       assert(stream);
+       assert(stream->ustream);
+       assert(pos);
+
+       return ustctl_snapshot_get_consumed(stream->ustream, pos);
+}
+
 /*
  * Called when the stream signal the consumer that it has hang up.
  */
@@ -869,6 +1582,10 @@ void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
        assert(chan);
        assert(chan->uchan);
 
+       if (chan->switch_timer_enabled == 1) {
+               consumer_timer_switch_stop(chan);
+       }
+       consumer_metadata_cache_destroy(chan);
        ustctl_destroy_channel(chan->uchan);
 }
 
@@ -877,6 +1594,9 @@ void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
        assert(stream);
        assert(stream->ustream);
 
+       if (stream->chan->switch_timer_enabled == 1) {
+               consumer_timer_switch_stop(stream->chan);
+       }
        ustctl_destroy_stream(stream->ustream);
 }
 
@@ -893,28 +1613,57 @@ int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
        assert(stream->ustream);
        assert(ctx);
 
-       DBG2("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
+       DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
                        stream->name);
 
        /* Ease our life for what's next. */
        ustream = stream->ustream;
 
        /* We can consume the 1 byte written into the wait_fd by UST */
-       if (!stream->hangup_flush_done) {
+       if (stream->monitor && !stream->hangup_flush_done) {
                ssize_t readlen;
 
                do {
                        readlen = read(stream->wait_fd, &dummy, 1);
                } while (readlen == -1 && errno == EINTR);
-               if (readlen == -1) {
+               if (readlen == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
                        ret = readlen;
                        goto end;
                }
        }
 
+retry:
        /* Get the next subbuffer */
        err = ustctl_get_next_subbuf(ustream);
        if (err != 0) {
+               /*
+                * Populate metadata info if the existing info has
+                * already been read.
+                */
+               if (stream->metadata_flag) {
+                       ssize_t write_len;
+
+                       if (stream->chan->metadata_cache->contiguous
+                                       == stream->ust_metadata_pushed) {
+                               ret = 0;
+                               goto end;
+                       }
+
+                       write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
+                                       &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
+                                       stream->chan->metadata_cache->contiguous
+                                               - stream->ust_metadata_pushed);
+                       assert(write_len != 0);
+                       if (write_len < 0) {
+                               ERR("Writing one metadata packet");
+                               ret = -1;
+                               goto end;
+                       }
+                       stream->ust_metadata_pushed += write_len;
+                       ustctl_flush_buffer(stream->ustream, 1);
+                       goto retry;
+               }
+
                ret = err;      /* ustctl_get_next_subbuf returns negative, caller expect positive. */
                /*
                 * This is a debug message even for single-threaded consumer,
@@ -945,8 +1694,8 @@ int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
         * The mmap operation should write subbuf_size amount of data when network
         * streaming or the full padding (len) size when we are _not_ streaming.
         */
-       if ((ret != subbuf_size && stream->net_seq_idx != -1) ||
-                       (ret != len && stream->net_seq_idx == -1)) {
+       if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
+                       (ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
                /*
                 * Display the error but continue processing to try to release the
                 * subbuffer. This is a DBG statement since any unexpected kill or
@@ -956,46 +1705,39 @@ int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
                 * happen and it is OK with the code flow.
                 */
                DBG("Error writing to tracefile "
-                               "(ret: %zd != len: %lu != subbuf_size: %lu)",
+                               "(ret: %ld != len: %lu != subbuf_size: %lu)",
                                ret, len, subbuf_size);
        }
        err = ustctl_put_next_subbuf(ustream);
        assert(err == 0);
+
 end:
        return ret;
 }
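
The retry label added above keeps the metadata ring buffer fed from the cache: when no sub-buffer is ready but un-pushed cache data remains, one packet is written into the channel and the get is attempted again. A stripped-down sketch of that control flow (all helpers are stand-ins; push_one_packet() is expected to advance 'pushed'):

#include <stdint.h>

struct meta_cache { uint64_t contiguous, pushed; };

static int get_subbuf_or_refill(struct meta_cache *c,
                int (*get_next_subbuf)(void),
                long (*push_one_packet)(struct meta_cache *c))
{
        for (;;) {
                if (get_next_subbuf() == 0) {
                        return 1;       /* a sub-buffer is ready to be read */
                }
                if (c->contiguous == c->pushed) {
                        return 0;       /* cache fully pushed: genuinely no data */
                }
                if (push_one_packet(c) < 0) {
                        return -1;      /* write error */
                }
                /* A packet was produced; retry the get. */
        }
}
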
 
 /*
  * Called when a stream is created.
+ *
+ * Return 0 on success or else a negative value.
  */
 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
 {
        int ret;
-       char full_path[PATH_MAX];
-
-       /* Opening the tracefile in write mode */
-       if (stream->net_seq_idx != -1) {
-               goto end;
-       }
 
-       ret = snprintf(full_path, sizeof(full_path), "%s/%s",
-                       stream->chan->pathname, stream->name);
-       if (ret < 0) {
-               PERROR("snprintf on_recv_stream");
-               goto error;
-       }
+       assert(stream);
 
-       ret = run_as_open(full_path, O_WRONLY | O_CREAT | O_TRUNC,
-                       S_IRWXU | S_IRWXG | S_IRWXO, stream->uid, stream->gid);
-       if (ret < 0) {
-               PERROR("open stream path %s", full_path);
-               goto error;
+       /* Don't create anything if this is set for streaming. */
+       if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
+               ret = utils_create_stream_file(stream->chan->pathname, stream->name,
+                               stream->chan->tracefile_size, stream->tracefile_count_current,
+                               stream->uid, stream->gid);
+               if (ret < 0) {
+                       goto error;
+               }
+               stream->out_fd = ret;
+               stream->tracefile_size_current = 0;
        }
-       stream->out_fd = ret;
-
-end:
-       /* we return 0 to let the library handle the FD internally */
-       return 0;
+       ret = 0;
 
 error:
        return ret;
@@ -1018,18 +1760,215 @@ int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
 
        DBG("UST consumer checking data pending");
 
-       ret = ustctl_get_next_subbuf(stream->ustream);
-       if (ret == 0) {
-               /* There is still data so let's put back this subbuffer. */
-               ret = ustctl_put_subbuf(stream->ustream);
-               assert(ret == 0);
-               ret = 1;  /* Data is pending */
+       if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
+               ret = 0;
                goto end;
        }
 
+       if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
+               uint64_t contiguous, pushed;
+
+               /* Ease our life a bit. */
+               contiguous = stream->chan->metadata_cache->contiguous;
+               pushed = stream->ust_metadata_pushed;
+
+               /*
+                * We can simply check whether all contiguously available data
+                * has been pushed to the ring buffer, since the push operation
+                * is performed within get_next_subbuf(), and because both
+                * get_next_subbuf() and put_next_subbuf() are issued atomically
+                * thanks to the stream lock within
+                * lttng_ustconsumer_read_subbuffer(). This basically means that
+                * whenever ust_metadata_pushed is incremented, the associated
+                * metadata has been consumed from the metadata stream.
+                */
+               DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
+                               contiguous, pushed);
+               assert(((int64_t) contiguous - pushed) >= 0);
+               if ((contiguous != pushed) ||
+                               (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
+                       ret = 1;        /* Data is pending */
+                       goto end;
+               }
+       } else {
+               ret = ustctl_get_next_subbuf(stream->ustream);
+               if (ret == 0) {
+                       /*
+                        * There is still data so let's put back this
+                        * subbuffer.
+                        */
+                       ret = ustctl_put_subbuf(stream->ustream);
+                       assert(ret == 0);
+                       ret = 1;        /* Data is pending */
+                       goto end;
+               }
+       }
+
        /* Data is NOT pending so ready to be read. */
        ret = 0;
 
 end:
        return ret;
 }
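
For the metadata path, the pending test above boils down to comparing two counters from the channel's metadata cache. A self-contained sketch of that invariant; the helper name is hypothetical and the condition is the one from the hunk, simplified under the stated invariant:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper illustrating the metadata pending check above. */
static int metadata_cache_is_pending(uint64_t contiguous, uint64_t pushed)
{
	/* Same invariant as the assert above: pushed never passes the cache. */
	assert(contiguous >= pushed);

	/*
	 * Pending if some contiguously cached metadata has not been pushed to
	 * the ring buffer yet, or if nothing has been cached at all.
	 */
	return (contiguous != pushed) || contiguous == 0;
}
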
+
+/*
+ * Close every metadata stream wait fd of the metadata hash table. This
+ * function MUST be used very carefully so as not to run into a race between
+ * the metadata thread handling streams and this function closing their wait fd.
+ *
+ * For UST, this is used when the session daemon hangs up. It's the metadata
+ * producer, so calling this is safe because we are assured that no state change
+ * can occur in the metadata thread for the streams in the hash table.
+ */
+void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_consumer_stream *stream;
+
+       assert(metadata_ht);
+       assert(metadata_ht->ht);
+
+       DBG("UST consumer closing all metadata streams");
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
+                       node.node) {
+               pthread_mutex_lock(&stream->chan->lock);
+               /*
+                * Whatever returned value, we must continue to try to close everything
+                * so ignore it.
+                */
+               (void) _close_metadata(stream->chan);
+               DBG("Metadata wait fd %d and poll pipe fd %d closed", stream->wait_fd,
+                               stream->ust_metadata_poll_pipe[1]);
+               pthread_mutex_unlock(&stream->chan->lock);
+
+       }
+       rcu_read_unlock();
+}
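
As a usage illustration only, here is a hypothetical call site (not part of this hunk): a thread that notices the session daemon hung up can tear down every metadata wait fd in one pass.

/* Hypothetical call site, assuming the caller owns metadata_ht. */
static void on_sessiond_hangup(struct lttng_ht *metadata_ht)
{
	/*
	 * Safe per the comment above: the sessiond is the metadata producer,
	 * so once it is gone no state change can race with closing the
	 * wait fds.
	 */
	lttng_ustconsumer_close_metadata(metadata_ht);
}
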
+
+void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
+{
+       int ret;
+
+       ret = ustctl_stream_close_wakeup_fd(stream->ustream);
+       if (ret < 0) {
+               ERR("Unable to close wakeup fd");
+       }
+}
+
+/*
+ * Please refer to consumer-timer.c before adding any lock within this
+ * function or any of its callees. Timers have a very strict locking
+ * semantic with respect to teardown. Failure to respect this semantic
+ * introduces deadlocks.
+ */
+int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
+               struct lttng_consumer_channel *channel, int timer)
+{
+       struct lttcomm_metadata_request_msg request;
+       struct lttcomm_consumer_msg msg;
+       enum lttng_error_code ret_code = LTTNG_OK;
+       uint64_t len, key, offset;
+       int ret;
+
+       assert(channel);
+       assert(channel->metadata_cache);
+
+       /* Send the metadata request to sessiond. */
+       switch (consumer_data.type) {
+       case LTTNG_CONSUMER64_UST:
+               request.bits_per_long = 64;
+               break;
+       case LTTNG_CONSUMER32_UST:
+               request.bits_per_long = 32;
+               break;
+       default:
+               request.bits_per_long = 0;
+               break;
+       }
+
+       request.session_id = channel->session_id;
+       request.session_id_per_pid = channel->session_id_per_pid;
+       /*
+        * Request the application UID here so the metadata of that application can
+        * be sent back. The channel UID corresponds to the user UID of the session
+        * used for the rights on the stream file(s).
+        */
+       request.uid = channel->ust_app_uid;
+       request.key = channel->key;
+
+       DBG("Sending metadata request to sessiond, session id %" PRIu64
+                       ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
+                       request.session_id, request.session_id_per_pid, request.uid,
+                       request.key);
+
+       pthread_mutex_lock(&ctx->metadata_socket_lock);
+       ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
+                       sizeof(request));
+       if (ret < 0) {
+               ERR("Asking metadata to sessiond");
+               goto end;
+       }
+
+       /* Receive the metadata from sessiond */
+       ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
+                       sizeof(msg));
+       if (ret != sizeof(msg)) {
+               DBG("Consumer received unexpected message size %d (expects %zu)",
+                       ret, sizeof(msg));
+               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
+               /*
+                * The ret value might be 0, meaning an orderly shutdown, but this is OK
+                * since the caller handles this.
+                */
+               goto end;
+       }
+
+       if (msg.cmd_type == LTTNG_ERR_UND) {
+               /* No registry found */
+               (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
+                               ret_code);
+               ret = 0;
+               goto end;
+       } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
+               ERR("Unexpected cmd_type received %d", msg.cmd_type);
+               ret = -1;
+               goto end;
+       }
+
+       len = msg.u.push_metadata.len;
+       key = msg.u.push_metadata.key;
+       offset = msg.u.push_metadata.target_offset;
+
+       assert(key == channel->key);
+       if (len == 0) {
+               DBG("No new metadata to receive for key %" PRIu64, key);
+       }
+
+       /* Tell session daemon we are ready to receive the metadata. */
+       ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
+                       LTTNG_OK);
+       if (ret < 0 || len == 0) {
+               /*
+                * Somehow, the session daemon is not responding anymore or there is
+                * nothing to receive.
+                */
+               goto end;
+       }
+
+       ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
+                       key, offset, len, channel, timer);
+       if (ret_code >= 0) {
+               /*
+                * Only send the status msg if the sessiond is alive, meaning a
+                * positive ret code.
+                */
+               (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
+       }
+       ret = 0;
+
+end:
+       pthread_mutex_unlock(&ctx->metadata_socket_lock);
+       return ret;
+}
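
A hedged caller sketch follows: how a timer-driven path might issue the request, passing timer = 1 to flag timer context so the locking rules referenced above apply. The function name and surrounding control flow are assumptions, not part of this patch; ERR and PRIu64 come from the headers this file already includes.

/* Hypothetical timer-context caller; only the request call is from this patch. */
static void metadata_request_from_timer(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel)
{
	int ret;

	/* timer = 1: running in timer context, strict lock ordering applies. */
	ret = lttng_ustconsumer_request_metadata(ctx, channel, 1);
	if (ret < 0) {
		ERR("Failed to request metadata for channel key %" PRIu64,
				channel->key);
	}
}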