Fix: free metadata cache after grace period in consumer
diff --git a/src/common/consumer.c b/src/common/consumer.c
index 300fd2a2fc896108ac60c43ce920a5f633be111b..f2ccf9536b147b72574638046135da3e425b771a 100644
--- a/src/common/consumer.c
+++ b/src/common/consumer.c
@@ -18,6 +18,7 @@
  */
 
 #define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include <assert.h>
 #include <poll.h>
 #include <pthread.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <inttypes.h>
+#include <signal.h>
 
+#include <bin/lttng-consumerd/health-consumerd.h>
 #include <common/common.h>
 #include <common/utils.h>
 #include <common/compat/poll.h>
+#include <common/compat/endian.h>
+#include <common/index/index.h>
 #include <common/kernel-ctl/kernel-ctl.h>
 #include <common/sessiond-comm/relayd.h>
 #include <common/sessiond-comm/sessiond-comm.h>
 #include <common/kernel-consumer/kernel-consumer.h>
 #include <common/relayd/relayd.h>
 #include <common/ust-consumer/ust-consumer.h>
+#include <common/consumer-timer.h>
 
 #include "consumer.h"
+#include "consumer-stream.h"
+#include "consumer-testpoint.h"
+#include "align.h"
 
 struct lttng_consumer_global_data consumer_data = {
        .stream_count = 0,
@@ -49,12 +58,14 @@ struct lttng_consumer_global_data consumer_data = {
 
 enum consumer_channel_action {
        CONSUMER_CHANNEL_ADD,
+       CONSUMER_CHANNEL_DEL,
        CONSUMER_CHANNEL_QUIT,
 };
 
 struct consumer_channel_msg {
        enum consumer_channel_action action;
-       struct lttng_consumer_channel *chan;
+       struct lttng_consumer_channel *chan;    /* add */
+       uint64_t key;                           /* del */
 };
 
 /*
@@ -74,49 +85,97 @@ static struct lttng_ht *metadata_ht;
 static struct lttng_ht *data_ht;
 
 /*
- * Notify a thread pipe to poll back again. This usually means that some global
- * state has changed so we just send back the thread in a poll wait call.
+ * Notify a thread lttng pipe to poll back again. This usually means that some
+ * global state has changed and the thread has to go back to its poll wait to
+ * pick it up.
  */
-static void notify_thread_pipe(int wpipe)
+static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
 {
-       int ret;
+       struct lttng_consumer_stream *null_stream = NULL;
+
+       assert(pipe);
+
+       (void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
+}
 
-       do {
-               struct lttng_consumer_stream *null_stream = NULL;
+static void notify_health_quit_pipe(int *pipe)
+{
+       ssize_t ret;
 
-               ret = write(wpipe, &null_stream, sizeof(null_stream));
-       } while (ret < 0 && errno == EINTR);
+       ret = lttng_write(pipe[1], "4", 1);
+       if (ret < 1) {
+               PERROR("write consumer health quit");
+       }
 }
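/*
 * Editor's note: illustrative sketch only, not part of this patch. The
 * converted call sites above assume an lttng_write()-style helper (defined
 * elsewhere in the tree, not shown in this diff) that hides EINTR and short
 * writes and returns the number of bytes written, so callers only have to
 * check `ret < count`. A minimal version of such a helper could look like:
 */
static ssize_t example_write_all(int fd, const void *buf, size_t count)
{
        const char *ptr = buf;
        size_t left = count;

        while (left > 0) {
                ssize_t ret = write(fd, ptr, left);

                if (ret < 0) {
                        if (errno == EINTR) {
                                continue;       /* Interrupted, simply retry. */
                        }
                        /* Hard error: report what was written so far, if anything. */
                        return left == count ? -1 : (ssize_t) (count - left);
                }
                ptr += ret;
                left -= (size_t) ret;
        }
        return (ssize_t) count;
}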
 
 static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
                struct lttng_consumer_channel *chan,
+               uint64_t key,
                enum consumer_channel_action action)
 {
        struct consumer_channel_msg msg;
-       int ret;
+       ssize_t ret;
+
+       memset(&msg, 0, sizeof(msg));
 
        msg.action = action;
        msg.chan = chan;
-       do {
-               ret = write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
-       } while (ret < 0 && errno == EINTR);
+       msg.key = key;
+       ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
+       if (ret < sizeof(msg)) {
+               PERROR("notify_channel_pipe write error");
+       }
+}
+
+void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
+               uint64_t key)
+{
+       notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
 }
 
 static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
                struct lttng_consumer_channel **chan,
+               uint64_t *key,
                enum consumer_channel_action *action)
 {
        struct consumer_channel_msg msg;
-       int ret;
+       ssize_t ret;
 
-       do {
-               ret = read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
-       } while (ret < 0 && errno == EINTR);
-       if (ret > 0) {
-               *action = msg.action;
-               *chan = msg.chan;
+       ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
+       if (ret < sizeof(msg)) {
+               ret = -1;
+               goto error;
+       }
+       *action = msg.action;
+       *chan = msg.chan;
+       *key = msg.key;
+error:
+       return (int) ret;
+}
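/*
 * Editor's note: sketch only, not part of this patch. It shows how a consumer
 * thread could dispatch the messages carried over consumer_channel_pipe now
 * that CONSUMER_CHANNEL_DEL exists: an add carries the channel pointer, a
 * delete only carries the key. The real dispatcher is the channel thread
 * further down in this file; names and locking details here are simplified.
 */
static void example_dispatch_channel_msg(struct lttng_consumer_local_data *ctx)
{
        enum consumer_channel_action action;
        struct lttng_consumer_channel *chan = NULL;
        uint64_t key = 0;

        if (read_channel_pipe(ctx, &chan, &key, &action) < 0) {
                return; /* Nothing valid was read from the pipe. */
        }

        switch (action) {
        case CONSUMER_CHANNEL_ADD:
                /* 'chan' is valid: start monitoring its wait fd. */
                break;
        case CONSUMER_CHANNEL_DEL:
                /* Only 'key' is meaningful: look the channel up before acting. */
                rcu_read_lock();
                chan = consumer_find_channel(key);
                if (chan) {
                        /* The real thread does more tear-down work than this. */
                        consumer_del_channel(chan);
                }
                rcu_read_unlock();
                break;
        case CONSUMER_CHANNEL_QUIT:
                /* Ask the thread to exit its poll loop. */
                break;
        }
}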
+
+/*
+ * Clean up the stream list of a channel. Those streams are not yet globally
+ * visible.
+ */
+static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
+{
+       struct lttng_consumer_stream *stream, *stmp;
+
+       assert(channel);
+
+       /* Delete streams that might have been left in the stream list. */
+       cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
+                       send_node) {
+               cds_list_del(&stream->send_node);
+               /*
+                * Once a stream is added to this list, the buffers were created so we
+                * have a guarantee that this call will succeed. Setting the monitor
+                * mode to 0 so we don't lock nor try to delete the stream from the
+                * global hash table.
+                */
+               stream->monitor = 0;
+               consumer_stream_destroy(stream, NULL);
        }
-       return ret;
 }
 
 /*
@@ -150,20 +209,20 @@ static struct lttng_consumer_stream *find_stream(uint64_t key,
        return stream;
 }
 
-static void steal_stream_key(int key, struct lttng_ht *ht)
+static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
 {
        struct lttng_consumer_stream *stream;
 
        rcu_read_lock();
        stream = find_stream(key, ht);
        if (stream) {
-               stream->key = -1ULL;
+               stream->key = (uint64_t) -1ULL;
                /*
                 * We don't want the lookup to match, but we still need
                 * to iterate on this stream when iterating over the hash table. Just
                 * change the node key.
                 */
-               stream->node.key = -1ULL;
+               stream->node.key = (uint64_t) -1ULL;
        }
        rcu_read_unlock();
 }
@@ -194,14 +253,30 @@ struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
        return channel;
 }
 
-static void free_stream_rcu(struct rcu_head *head)
+/*
+ * The session daemon may close a channel and recreate one with the same key
+ * before the consumer has had time to clean the old one up. Thus, on a
+ * channel add with an already existing key, we know for sure that the old
+ * channel will eventually get cleaned up once all of its streams are closed.
+ *
+ * This function just nullifies the key of the already existing channel.
+ */
+static void steal_channel_key(uint64_t key)
 {
-       struct lttng_ht_node_u64 *node =
-               caa_container_of(head, struct lttng_ht_node_u64, head);
-       struct lttng_consumer_stream *stream =
-               caa_container_of(node, struct lttng_consumer_stream, node);
+       struct lttng_consumer_channel *channel;
 
-       free(stream);
+       rcu_read_lock();
+       channel = consumer_find_channel(key);
+       if (channel) {
+               channel->key = (uint64_t) -1ULL;
+               /*
+                * We don't want the lookup to match, but we still need to iterate on
+                * this channel when iterating over the hash table. Just change the
+                * node key.
+                */
+               channel->node.key = (uint64_t) -1ULL;
+       }
+       rcu_read_unlock();
 }
 
 static void free_channel_rcu(struct rcu_head *head)
@@ -211,6 +286,17 @@ static void free_channel_rcu(struct rcu_head *head)
        struct lttng_consumer_channel *channel =
                caa_container_of(node, struct lttng_consumer_channel, node);
 
+       switch (consumer_data.type) {
+       case LTTNG_CONSUMER_KERNEL:
+               break;
+       case LTTNG_CONSUMER32_UST:
+       case LTTNG_CONSUMER64_UST:
+               lttng_ustconsumer_free_channel(channel);
+               break;
+       default:
+               ERR("Unknown consumer_data type");
+               abort();
+       }
        free(channel);
 }
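/*
 * Editor's note: illustrative sketch of the unlink-then-defer pattern that
 * free_channel_rcu() above is the callback side of (consumer_del_channel()
 * below is the real user). The node is first removed from the RCU hash table,
 * so new lookups cannot find it, and the actual free is postponed with
 * call_rcu() until every pre-existing RCU reader is done with it.
 */
static void example_unlink_and_defer_free(struct lttng_consumer_channel *channel)
{
        int ret;
        struct lttng_ht_iter iter;

        rcu_read_lock();
        iter.iter.node = &channel->node.node;
        ret = lttng_ht_del(consumer_data.channel_ht, &iter);
        assert(!ret);
        rcu_read_unlock();

        /* Runs free_channel_rcu() only after a full RCU grace period. */
        call_rcu(&channel->node.head, free_channel_rcu);
}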
 
@@ -240,10 +326,8 @@ static void free_relayd_rcu(struct rcu_head *head)
 
 /*
  * Destroy and free relayd socket pair object.
- *
- * This function MUST be called with the consumer_data lock acquired.
  */
-static void destroy_relayd(struct consumer_relayd_sock_pair *relayd)
+void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
 {
        int ret;
        struct lttng_ht_iter iter;
@@ -277,6 +361,14 @@ void consumer_del_channel(struct lttng_consumer_channel *channel)
        DBG("Consumer delete channel key %" PRIu64, channel->key);
 
        pthread_mutex_lock(&consumer_data.lock);
+       pthread_mutex_lock(&channel->lock);
+
+       /* Destroy streams that might have been left in the stream list. */
+       clean_channel_stream_list(channel);
+
+       if (channel->live_timer_enabled == 1) {
+               consumer_timer_live_stop(channel);
+       }
 
        switch (consumer_data.type) {
        case LTTNG_CONSUMER_KERNEL:
@@ -299,6 +391,7 @@ void consumer_del_channel(struct lttng_consumer_channel *channel)
 
        call_rcu(&channel->node.head, free_channel_rcu);
 end:
+       pthread_mutex_unlock(&channel->lock);
        pthread_mutex_unlock(&consumer_data.lock);
 }
 
@@ -315,12 +408,12 @@ static void cleanup_relayd_ht(void)
 
        cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
                        node.node) {
-               destroy_relayd(relayd);
+               consumer_destroy_relayd(relayd);
        }
 
-       lttng_ht_destroy(consumer_data.relayd_ht);
-
        rcu_read_unlock();
+
+       lttng_ht_destroy(consumer_data.relayd_ht);
 }
 
 /*
@@ -330,13 +423,13 @@ static void cleanup_relayd_ht(void)
  * It's atomically set without having the stream mutex locked which is fine
  * because we handle the write/read race with a pipe wakeup for each thread.
  */
-static void update_endpoint_status_by_netidx(int net_seq_idx,
+static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
                enum consumer_endpoint_status status)
 {
        struct lttng_ht_iter iter;
        struct lttng_consumer_stream *stream;
 
-       DBG("Consumer set delete flag on stream by idx %d", net_seq_idx);
+       DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);
 
        rcu_read_lock();
 
@@ -369,7 +462,7 @@ static void update_endpoint_status_by_netidx(int net_seq_idx,
 static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
                struct lttng_consumer_local_data *ctx)
 {
-       int netidx;
+       uint64_t netidx;
 
        assert(relayd);
 
@@ -382,7 +475,7 @@ static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
         * Delete the relayd from the relayd hash table, close the sockets and free
         * the object in a RCU call.
         */
-       destroy_relayd(relayd);
+       consumer_destroy_relayd(relayd);
 
        /* Set inactive endpoint to all streams */
        update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);
@@ -394,8 +487,8 @@ static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
         * read of this status which happens AFTER receiving this notify.
         */
        if (ctx) {
-               notify_thread_pipe(ctx->consumer_data_pipe[1]);
-               notify_thread_pipe(ctx->consumer_metadata_pipe[1]);
+               notify_thread_lttng_pipe(ctx->consumer_data_pipe);
+               notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
        }
 }
 
@@ -414,124 +507,33 @@ void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
 
        /* Destroy the relayd if refcount is 0 */
        if (uatomic_read(&relayd->refcount) == 0) {
-               destroy_relayd(relayd);
+               consumer_destroy_relayd(relayd);
        }
 }
 
 /*
- * Remove a stream from the global list protected by a mutex. This
- * function is also responsible for freeing its data structures.
+ * Completely destroy a stream from every visible data structure and the given
+ * hash table, if any.
+ *
+ * Once this call returns, the stream object is no longer usable nor visible.
  */
 void consumer_del_stream(struct lttng_consumer_stream *stream,
                struct lttng_ht *ht)
 {
-       int ret;
-       struct lttng_ht_iter iter;
-       struct lttng_consumer_channel *free_chan = NULL;
-       struct consumer_relayd_sock_pair *relayd;
-
-       assert(stream);
-
-       DBG("Consumer del stream %d", stream->wait_fd);
-
-       if (ht == NULL) {
-               /* Means the stream was allocated but not successfully added */
-               goto free_stream_rcu;
-       }
-
-       pthread_mutex_lock(&consumer_data.lock);
-       pthread_mutex_lock(&stream->lock);
-
-       switch (consumer_data.type) {
-       case LTTNG_CONSUMER_KERNEL:
-               if (stream->mmap_base != NULL) {
-                       ret = munmap(stream->mmap_base, stream->mmap_len);
-                       if (ret != 0) {
-                               PERROR("munmap");
-                       }
-               }
-               break;
-       case LTTNG_CONSUMER32_UST:
-       case LTTNG_CONSUMER64_UST:
-               lttng_ustconsumer_del_stream(stream);
-               break;
-       default:
-               ERR("Unknown consumer_data type");
-               assert(0);
-               goto end;
-       }
-
-       rcu_read_lock();
-       iter.iter.node = &stream->node.node;
-       ret = lttng_ht_del(ht, &iter);
-       assert(!ret);
-
-       iter.iter.node = &stream->node_channel_id.node;
-       ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
-       assert(!ret);
-
-       iter.iter.node = &stream->node_session_id.node;
-       ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
-       assert(!ret);
-       rcu_read_unlock();
-
-       assert(consumer_data.stream_count > 0);
-       consumer_data.stream_count--;
-
-       if (stream->out_fd >= 0) {
-               ret = close(stream->out_fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       /* Check and cleanup relayd */
-       rcu_read_lock();
-       relayd = consumer_find_relayd(stream->net_seq_idx);
-       if (relayd != NULL) {
-               uatomic_dec(&relayd->refcount);
-               assert(uatomic_read(&relayd->refcount) >= 0);
-
-               /* Closing streams requires to lock the control socket. */
-               pthread_mutex_lock(&relayd->ctrl_sock_mutex);
-               ret = relayd_send_close_stream(&relayd->control_sock,
-                               stream->relayd_stream_id,
-                               stream->next_net_seq_num - 1);
-               pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
-               if (ret < 0) {
-                       DBG("Unable to close stream on the relayd. Continuing");
-                       /*
-                        * Continue here. There is nothing we can do for the relayd.
-                        * Chances are that the relayd has closed the socket so we just
-                        * continue cleaning up.
-                        */
-               }
-
-               /* Both conditions are met, we destroy the relayd. */
-               if (uatomic_read(&relayd->refcount) == 0 &&
-                               uatomic_read(&relayd->destroy_flag)) {
-                       destroy_relayd(relayd);
-               }
-       }
-       rcu_read_unlock();
-
-       uatomic_dec(&stream->chan->refcount);
-       if (!uatomic_read(&stream->chan->refcount)
-                       && !uatomic_read(&stream->chan->nb_init_stream_left)) {
-               free_chan = stream->chan;
-       }
-
-end:
-       consumer_data.need_update = 1;
-       pthread_mutex_unlock(&stream->lock);
-       pthread_mutex_unlock(&consumer_data.lock);
+       consumer_stream_destroy(stream, ht);
+}
 
-       if (free_chan) {
-               consumer_del_channel(free_chan);
-       }
+/*
+ * XXX naming of del vs destroy is all mixed up.
+ */
+void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
+{
+       consumer_stream_destroy(stream, data_ht);
+}
 
-free_stream_rcu:
-       call_rcu(&stream->node.head, free_stream_rcu);
+void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
+{
+       consumer_stream_destroy(stream, metadata_ht);
 }
 
 struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
@@ -540,11 +542,12 @@ struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
                const char *channel_name,
                uid_t uid,
                gid_t gid,
-               int relayd_id,
+               uint64_t relayd_id,
                uint64_t session_id,
                int cpu,
                int *alloc_ret,
-               enum consumer_channel_type type)
+               enum consumer_channel_type type,
+               unsigned int monitor)
 {
        int ret;
        struct lttng_consumer_stream *stream;
@@ -561,18 +564,26 @@ struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
        stream->key = stream_key;
        stream->out_fd = -1;
        stream->out_fd_offset = 0;
+       stream->output_written = 0;
        stream->state = state;
        stream->uid = uid;
        stream->gid = gid;
        stream->net_seq_idx = relayd_id;
        stream->session_id = session_id;
+       stream->monitor = monitor;
+       stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
+       stream->index_fd = -1;
        pthread_mutex_init(&stream->lock, NULL);
+       pthread_mutex_init(&stream->metadata_timer_lock, NULL);
 
        /* If channel is the metadata, flag this stream as metadata. */
        if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
                stream->metadata_flag = 1;
                /* Metadata is flat out. */
                strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
+               /* Live rendez-vous point. */
+               pthread_cond_init(&stream->metadata_rdv, NULL);
+               pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
        } else {
                /* Format stream name to <channel_name>_<cpu_number> */
                ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
@@ -592,8 +603,10 @@ struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
        /* Init session id node with the stream session id */
        lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);
 
-       DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64 " relayd_id %" PRIu64 ", session_id %" PRIu64,
-                       stream->name, stream->key, channel_key, stream->net_seq_idx, stream->session_id);
+       DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
+                       " relayd_id %" PRIu64 ", session_id %" PRIu64,
+                       stream->name, stream->key, channel_key,
+                       stream->net_seq_idx, stream->session_id);
 
        rcu_read_unlock();
        return stream;
@@ -611,11 +624,10 @@ end:
 /*
  * Add a stream to the global list protected by a mutex.
  */
-static int add_stream(struct lttng_consumer_stream *stream,
-               struct lttng_ht *ht)
+int consumer_add_data_stream(struct lttng_consumer_stream *stream)
 {
+       struct lttng_ht *ht = data_ht;
        int ret = 0;
-       struct consumer_relayd_sock_pair *relayd;
 
        assert(stream);
        assert(ht);
@@ -623,6 +635,8 @@ static int add_stream(struct lttng_consumer_stream *stream,
        DBG3("Adding consumer stream %" PRIu64, stream->key);
 
        pthread_mutex_lock(&consumer_data.lock);
+       pthread_mutex_lock(&stream->chan->lock);
+       pthread_mutex_lock(&stream->chan->timer_lock);
        pthread_mutex_lock(&stream->lock);
        rcu_read_lock();
 
@@ -641,15 +655,6 @@ static int add_stream(struct lttng_consumer_stream *stream,
         */
        lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);
 
-       /* Check and cleanup relayd */
-       relayd = consumer_find_relayd(stream->net_seq_idx);
-       if (relayd != NULL) {
-               uatomic_inc(&relayd->refcount);
-       }
-
-       /* Update channel refcount once added without error(s). */
-       uatomic_inc(&stream->chan->refcount);
-
        /*
         * When nb_init_stream_left reaches 0, we don't need to trigger any action
         * in terms of destroying the associated channel, because the action that
@@ -658,6 +663,8 @@ static int add_stream(struct lttng_consumer_stream *stream,
         * stream.
         */
        if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
+               /* Increment refcount before decrementing nb_init_stream_left */
+               cmm_smp_wmb();
                uatomic_dec(&stream->chan->nb_init_stream_left);
        }
 
@@ -667,11 +674,18 @@ static int add_stream(struct lttng_consumer_stream *stream,
 
        rcu_read_unlock();
        pthread_mutex_unlock(&stream->lock);
+       pthread_mutex_unlock(&stream->chan->timer_lock);
+       pthread_mutex_unlock(&stream->chan->lock);
        pthread_mutex_unlock(&consumer_data.lock);
 
        return ret;
 }
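/*
 * Editor's note: sketch, not part of this patch, of the kind of tear-down
 * check that motivates the write barrier above (the real version now lives in
 * the consumer-stream code, which this hunk does not show). A channel may only
 * be reclaimed once no stream holds a reference on it anymore AND every stream
 * announced at creation time has been added, i.e. nb_init_stream_left is 0.
 */
static int example_channel_is_reclaimable(struct lttng_consumer_channel *chan)
{
        /* Drop the reference the departing stream holds on its channel. */
        uatomic_dec(&chan->refcount);

        return !uatomic_read(&chan->refcount)
                        && !uatomic_read(&chan->nb_init_stream_left);
}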
 
+void consumer_del_data_stream(struct lttng_consumer_stream *stream)
+{
+       consumer_del_stream(stream, data_ht);
+}
+
 /*
  * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
  * be acquired before calling this.
@@ -700,12 +714,12 @@ end:
  * Allocate and return a consumer relayd socket.
  */
 struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
-               int net_seq_idx)
+               uint64_t net_seq_idx)
 {
        struct consumer_relayd_sock_pair *obj = NULL;
 
-       /* Negative net sequence index is a failure */
-       if (net_seq_idx < 0) {
+       /* net sequence index of -1 is a failure */
+       if (net_seq_idx == (uint64_t) -1ULL) {
                goto error;
        }
 
@@ -718,6 +732,8 @@ struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
        obj->net_seq_idx = net_seq_idx;
        obj->refcount = 0;
        obj->destroy_flag = 0;
+       obj->control_sock.sock.fd = -1;
+       obj->data_sock.sock.fd = -1;
        lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
        pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);
 
@@ -754,6 +770,106 @@ error:
        return relayd;
 }
 
+/*
+ * Find a relayd and send the stream
+ *
+ * Returns 0 on success, < 0 on error
+ */
+int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
+               char *path)
+{
+       int ret = 0;
+       struct consumer_relayd_sock_pair *relayd;
+
+       assert(stream);
+       assert(stream->net_seq_idx != -1ULL);
+       assert(path);
+
+       /* The stream is not metadata. Get relayd reference if exists. */
+       rcu_read_lock();
+       relayd = consumer_find_relayd(stream->net_seq_idx);
+       if (relayd != NULL) {
+               /* Add stream on the relayd */
+               pthread_mutex_lock(&relayd->ctrl_sock_mutex);
+               ret = relayd_add_stream(&relayd->control_sock, stream->name,
+                               path, &stream->relayd_stream_id,
+                               stream->chan->tracefile_size, stream->chan->tracefile_count);
+               pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
+               if (ret < 0) {
+                       goto end;
+               }
+
+               uatomic_inc(&relayd->refcount);
+               stream->sent_to_relayd = 1;
+       } else {
+               ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
+                               stream->key, stream->net_seq_idx);
+               ret = -1;
+               goto end;
+       }
+
+       DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
+                       stream->name, stream->key, stream->net_seq_idx);
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Find a relayd and send the streams sent message
+ *
+ * Returns 0 on success, < 0 on error
+ */
+int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
+{
+       int ret = 0;
+       struct consumer_relayd_sock_pair *relayd;
+
+       assert(net_seq_idx != -1ULL);
+
+       /* The stream is not metadata. Get relayd reference if exists. */
+       rcu_read_lock();
+       relayd = consumer_find_relayd(net_seq_idx);
+       if (relayd != NULL) {
+               /* Add stream on the relayd */
+               pthread_mutex_lock(&relayd->ctrl_sock_mutex);
+               ret = relayd_streams_sent(&relayd->control_sock);
+               pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
+               if (ret < 0) {
+                       goto end;
+               }
+       } else {
+               ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
+                               net_seq_idx);
+               ret = -1;
+               goto end;
+       }
+
+       ret = 0;
+       DBG("All streams sent relayd id %" PRIu64, net_seq_idx);
+
+end:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Find a relayd and close the stream
+ */
+void close_relayd_stream(struct lttng_consumer_stream *stream)
+{
+       struct consumer_relayd_sock_pair *relayd;
+
+       /* The stream is not metadata. Get relayd reference if exists. */
+       rcu_read_lock();
+       relayd = consumer_find_relayd(stream->net_seq_idx);
+       if (relayd) {
+               consumer_stream_relayd_close(stream, relayd);
+       }
+       rcu_read_unlock();
+}
+
 /*
  * Handle stream for relayd transmission if the stream applies for network
  * streaming where the net sequence index is set.
@@ -782,7 +898,7 @@ static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
                }
 
                /* Metadata are always sent on the control socket. */
-               outfd = relayd->control_sock.fd;
+               outfd = relayd->control_sock.sock.fd;
        } else {
                /* Set header with stream information */
                data_hdr.stream_id = htobe64(stream->relayd_stream_id);
@@ -807,7 +923,7 @@ static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
                ++stream->next_net_seq_num;
 
                /* Set to go on data socket */
-               outfd = relayd->data_sock.fd;
+               outfd = relayd->data_sock.sock.fd;
        }
 
 error:
@@ -820,14 +936,21 @@ error:
  *
  * On error, return NULL.
  */
-struct lttng_consumer_channel *consumer_allocate_channel(unsigned long key,
+struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
                uint64_t session_id,
                const char *pathname,
                const char *name,
                uid_t uid,
                gid_t gid,
-               int relayd_id,
-               enum lttng_event_output output)
+               uint64_t relayd_id,
+               enum lttng_event_output output,
+               uint64_t tracefile_size,
+               uint64_t tracefile_count,
+               uint64_t session_id_per_pid,
+               unsigned int monitor,
+               unsigned int live_timer_interval,
+               const char *root_shm_path,
+               const char *shm_path)
 {
        struct lttng_consumer_channel *channel;
 
@@ -840,10 +963,43 @@ struct lttng_consumer_channel *consumer_allocate_channel(unsigned long key,
        channel->key = key;
        channel->refcount = 0;
        channel->session_id = session_id;
+       channel->session_id_per_pid = session_id_per_pid;
        channel->uid = uid;
        channel->gid = gid;
        channel->relayd_id = relayd_id;
-       channel->output = output;
+       channel->tracefile_size = tracefile_size;
+       channel->tracefile_count = tracefile_count;
+       channel->monitor = monitor;
+       channel->live_timer_interval = live_timer_interval;
+       pthread_mutex_init(&channel->lock, NULL);
+       pthread_mutex_init(&channel->timer_lock, NULL);
+
+       switch (output) {
+       case LTTNG_EVENT_SPLICE:
+               channel->output = CONSUMER_CHANNEL_SPLICE;
+               break;
+       case LTTNG_EVENT_MMAP:
+               channel->output = CONSUMER_CHANNEL_MMAP;
+               break;
+       default:
+               assert(0);
+               free(channel);
+               channel = NULL;
+               goto end;
+       }
+
+       /*
+        * When the channel is not in monitor mode, its streams are put in a
+        * special list ONLY owned by this channel, so the refcount is set to 1
+        * here, meaning that the channel itself holds a reference on its streams.
+        *
+        * On channel deletion, once the channel is no longer visible, the
+        * refcount is decremented and checked for zero to delete it. With streams
+        * in no monitor mode, it is then safe to destroy the channel.
+        */
+       if (!channel->monitor) {
+               channel->refcount = 1;
+       }
 
        strncpy(channel->pathname, pathname, sizeof(channel->pathname));
        channel->pathname[sizeof(channel->pathname) - 1] = '\0';
@@ -851,6 +1007,15 @@ struct lttng_consumer_channel *consumer_allocate_channel(unsigned long key,
        strncpy(channel->name, name, sizeof(channel->name));
        channel->name[sizeof(channel->name) - 1] = '\0';
 
+       if (root_shm_path) {
+               strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
+               channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
+       }
+       if (shm_path) {
+               strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
+               channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
+       }
+
        lttng_ht_node_init_u64(&channel->node, channel->key);
 
        channel->wait_fd = -1;
@@ -865,39 +1030,36 @@ end:
 
 /*
  * Add a channel to the global list protected by a mutex.
+ *
+ * Always return 0 indicating success.
  */
 int consumer_add_channel(struct lttng_consumer_channel *channel,
                struct lttng_consumer_local_data *ctx)
 {
-       int ret = 0;
-       struct lttng_ht_node_u64 *node;
-       struct lttng_ht_iter iter;
-
        pthread_mutex_lock(&consumer_data.lock);
-       rcu_read_lock();
+       pthread_mutex_lock(&channel->lock);
+       pthread_mutex_lock(&channel->timer_lock);
 
-       lttng_ht_lookup(consumer_data.channel_ht,
-                       &channel->key, &iter);
-       node = lttng_ht_iter_get_node_u64(&iter);
-       if (node != NULL) {
-               /* Channel already exist. Ignore the insertion */
-               ERR("Consumer add channel key %" PRIu64 " already exists!",
-                       channel->key);
-               ret = -1;
-               goto end;
-       }
+       /*
+        * This gives us a guarantee that the channel we are about to add to the
+        * channel hash table will be unique. See the comment of steal_channel_key()
+        * for why we need to steal the channel key at this stage.
+        */
+       steal_channel_key(channel->key);
 
+       rcu_read_lock();
        lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);
-
-end:
        rcu_read_unlock();
+
+       pthread_mutex_unlock(&channel->timer_lock);
+       pthread_mutex_unlock(&channel->lock);
        pthread_mutex_unlock(&consumer_data.lock);
 
-       if (!ret && channel->wait_fd != -1 &&
-                       channel->metadata_stream == NULL) {
-               notify_channel_pipe(ctx, channel, CONSUMER_CHANNEL_ADD);
+       if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
+               notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
        }
-       return ret;
+
+       return 0;
 }
 
 /*
@@ -936,7 +1098,12 @@ static int update_poll_array(struct lttng_consumer_local_data *ctx,
                                stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
                        continue;
                }
-               DBG("Active FD %d", stream->wait_fd);
+               /*
+                * This clobbers the debug output way too much. Uncomment it if you
+                * need it for debugging purposes.
+                *
+                * DBG("Active FD %d", stream->wait_fd);
+                */
                (*pollfd)[i].fd = stream->wait_fd;
                (*pollfd)[i].events = POLLIN | POLLPRI;
                local_stream[i] = stream;
@@ -948,14 +1115,17 @@ static int update_poll_array(struct lttng_consumer_local_data *ctx,
         * Insert the consumer_data_pipe at the end of the array and don't
         * increment i so nb_fd is the number of real FD.
         */
-       (*pollfd)[i].fd = ctx->consumer_data_pipe[0];
+       (*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
        (*pollfd)[i].events = POLLIN | POLLPRI;
+
+       (*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
+       (*pollfd)[i + 1].events = POLLIN | POLLPRI;
        return i;
 }
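/*
 * Editor's note: caller-side view of the array layout built above, as a
 * sketch only (the real data thread is further down in this file). Slots
 * [0, nb_fd) hold stream FDs, slot nb_fd is consumer_data_pipe and slot
 * nb_fd + 1 is the new consumer_wakeup_pipe, hence a poll on nb_fd + 2
 * entries.
 */
static void example_wait_on_poll_array(struct lttng_consumer_local_data *ctx,
                struct pollfd *pollfd, int nb_fd)
{
        int num_rdy;

        num_rdy = poll(pollfd, nb_fd + 2, -1);
        if (num_rdy < 0) {
                PERROR("Poll error");
                return;
        }

        /* Wakeup pipe readable: drain one byte and go back to polling. */
        if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
                char dummy;

                (void) lttng_read(lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe),
                                &dummy, 1);
        }
}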
 
 /*
- * Poll on the should_quit pipe and the command socket return -1 on error and
- * should exit, 0 if data is available on the command socket
+ * Poll on the should_quit pipe and the command socket. Return -1 on error,
+ * 1 if the thread should exit, 0 if data is available on the command socket.
  */
 int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
 {
@@ -971,17 +1141,14 @@ restart:
                        goto restart;
                }
                PERROR("Poll error");
-               goto exit;
+               return -1;
        }
        if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
                DBG("consumer_should_quit wake up");
-               goto exit;
+               return 1;
        }
        return 0;
-
-exit:
-       return -1;
-}
+}
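/*
 * Editor's note: sketch of the calling convention implied by the new return
 * values, simplified from the command-handling code elsewhere in the consumer.
 * Callers no longer have to conflate "poll error" with "asked to quit": a
 * positive return now means a clean shutdown request, while -1 stays an error.
 */
static void example_command_loop(struct lttng_consumer_local_data *ctx,
                struct pollfd *consumer_sockpoll)
{
        for (;;) {
                int ret = lttng_consumer_poll_socket(consumer_sockpoll);

                if (ret > 0) {
                        /* should_quit pipe fired: clean shutdown, not an error. */
                        break;
                }
                if (ret < 0) {
                        /* Hard poll error: report it and stop. */
                        lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
                        break;
                }

                /*
                 * ret == 0: a command is pending on the command socket; the real
                 * thread receives and dispatches it here.
                 */
        }
}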
 
 /*
  * Set the error socket.
@@ -1052,12 +1219,11 @@ void lttng_consumer_cleanup(void)
  */
 void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
 {
-       int ret;
+       ssize_t ret;
+
        consumer_quit = 1;
-       do {
-               ret = write(ctx->consumer_should_quit[1], "4", 1);
-       } while (ret < 0 && errno == EINTR);
-       if (ret < 0 || ret != 1) {
+       ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
+       if (ret < 1) {
                PERROR("write consumer quit");
        }
 
@@ -1121,7 +1287,7 @@ struct lttng_consumer_local_data *lttng_consumer_create(
                        struct lttng_consumer_local_data *ctx),
                int (*recv_channel)(struct lttng_consumer_channel *channel),
                int (*recv_stream)(struct lttng_consumer_stream *stream),
-               int (*update_stream)(int stream_key, uint32_t state))
+               int (*update_stream)(uint64_t stream_key, uint32_t state))
 {
        int ret;
        struct lttng_consumer_local_data *ctx;
@@ -1137,30 +1303,22 @@ struct lttng_consumer_local_data *lttng_consumer_create(
        }
 
        ctx->consumer_error_socket = -1;
+       ctx->consumer_metadata_socket = -1;
+       pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
        /* assign the callbacks */
        ctx->on_buffer_ready = buffer_ready;
        ctx->on_recv_channel = recv_channel;
        ctx->on_recv_stream = recv_stream;
        ctx->on_update_stream = update_stream;
 
-       ret = pipe(ctx->consumer_data_pipe);
-       if (ret < 0) {
-               PERROR("Error creating poll pipe");
+       ctx->consumer_data_pipe = lttng_pipe_open(0);
+       if (!ctx->consumer_data_pipe) {
                goto error_poll_pipe;
        }
 
-       /* set read end of the pipe to non-blocking */
-       ret = fcntl(ctx->consumer_data_pipe[0], F_SETFL, O_NONBLOCK);
-       if (ret < 0) {
-               PERROR("fcntl O_NONBLOCK");
-               goto error_poll_fcntl;
-       }
-
-       /* set write end of the pipe to non-blocking */
-       ret = fcntl(ctx->consumer_data_pipe[1], F_SETFL, O_NONBLOCK);
-       if (ret < 0) {
-               PERROR("fcntl O_NONBLOCK");
-               goto error_poll_fcntl;
+       ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
+       if (!ctx->consumer_wakeup_pipe) {
+               goto error_wakeup_pipe;
        }
 
        ret = pipe(ctx->consumer_should_quit);
@@ -1169,47 +1327,84 @@ struct lttng_consumer_local_data *lttng_consumer_create(
                goto error_quit_pipe;
        }
 
-       ret = pipe(ctx->consumer_thread_pipe);
-       if (ret < 0) {
-               PERROR("Error creating thread pipe");
-               goto error_thread_pipe;
-       }
-
        ret = pipe(ctx->consumer_channel_pipe);
        if (ret < 0) {
                PERROR("Error creating channel pipe");
                goto error_channel_pipe;
        }
 
-       ret = utils_create_pipe(ctx->consumer_metadata_pipe);
-       if (ret < 0) {
+       ctx->consumer_metadata_pipe = lttng_pipe_open(0);
+       if (!ctx->consumer_metadata_pipe) {
                goto error_metadata_pipe;
        }
 
-       ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
-       if (ret < 0) {
-               goto error_splice_pipe;
-       }
-
        return ctx;
 
-error_splice_pipe:
-       utils_close_pipe(ctx->consumer_metadata_pipe);
 error_metadata_pipe:
        utils_close_pipe(ctx->consumer_channel_pipe);
 error_channel_pipe:
-       utils_close_pipe(ctx->consumer_thread_pipe);
-error_thread_pipe:
        utils_close_pipe(ctx->consumer_should_quit);
-error_poll_fcntl:
 error_quit_pipe:
-       utils_close_pipe(ctx->consumer_data_pipe);
+       lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
+error_wakeup_pipe:
+       lttng_pipe_destroy(ctx->consumer_data_pipe);
 error_poll_pipe:
        free(ctx);
 error:
        return NULL;
 }
 
+/*
+ * Iterate over all streams of the hashtable and free them properly.
+ */
+static void destroy_data_stream_ht(struct lttng_ht *ht)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_consumer_stream *stream;
+
+       if (ht == NULL) {
+               return;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
+               /*
+                * Ignore return value since we are currently cleaning up so any error
+                * can't be handled.
+                */
+               (void) consumer_del_stream(stream, ht);
+       }
+       rcu_read_unlock();
+
+       lttng_ht_destroy(ht);
+}
+
+/*
+ * Iterate over all streams of the metadata hashtable and free them
+ * properly.
+ */
+static void destroy_metadata_stream_ht(struct lttng_ht *ht)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_consumer_stream *stream;
+
+       if (ht == NULL) {
+               return;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
+               /*
+                * Ignore return value since we are currently cleaning up so any error
+                * can't be handled.
+                */
+               (void) consumer_del_metadata_stream(stream, ht);
+       }
+       rcu_read_unlock();
+
+       lttng_ht_destroy(ht);
+}
+
 /*
  * Close all fds associated with the instance and free the context.
  */
@@ -1219,15 +1414,26 @@ void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
 
        DBG("Consumer destroying it. Closing everything.");
 
+       if (!ctx) {
+               return;
+       }
+
+       destroy_data_stream_ht(data_ht);
+       destroy_metadata_stream_ht(metadata_ht);
+
        ret = close(ctx->consumer_error_socket);
        if (ret) {
                PERROR("close");
        }
-       utils_close_pipe(ctx->consumer_thread_pipe);
+       ret = close(ctx->consumer_metadata_socket);
+       if (ret) {
+               PERROR("close");
+       }
        utils_close_pipe(ctx->consumer_channel_pipe);
-       utils_close_pipe(ctx->consumer_data_pipe);
+       lttng_pipe_destroy(ctx->consumer_data_pipe);
+       lttng_pipe_destroy(ctx->consumer_metadata_pipe);
+       lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
        utils_close_pipe(ctx->consumer_should_quit);
-       utils_close_pipe(ctx->consumer_splice_metadata_pipe);
 
        unlink(ctx->consumer_command_sock_path);
        free(ctx);
@@ -1240,17 +1446,15 @@ static int write_relayd_metadata_id(int fd,
                struct lttng_consumer_stream *stream,
                struct consumer_relayd_sock_pair *relayd, unsigned long padding)
 {
-       int ret;
+       ssize_t ret;
        struct lttcomm_relayd_metadata_payload hdr;
 
        hdr.stream_id = htobe64(stream->relayd_stream_id);
        hdr.padding_size = htobe32(padding);
-       do {
-               ret = write(fd, (void *) &hdr, sizeof(hdr));
-       } while (ret < 0 && errno == EINTR);
-       if (ret < 0 || ret != sizeof(hdr)) {
+       ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
+       if (ret < sizeof(hdr)) {
                /*
-                * This error means that the fd's end is closed so ignore the perror
+                * This error means that the fd's end is closed so ignore the PERROR
                 * not to clobber the error output since this can happen in a normal
                 * code path.
                 */
@@ -1270,7 +1474,7 @@ static int write_relayd_metadata_id(int fd,
                        stream->relayd_stream_id, padding);
 
 end:
-       return ret;
+       return (int) ret;
 }
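/*
 * Editor's note: the wire framing produced above, restated as a decoding
 * sketch for clarity (the actual receiving side lives in the relay daemon,
 * not in this file). The header travels in network byte order and is
 * immediately followed by the metadata payload, whose trailing padding_size
 * bytes are padding.
 */
static void example_decode_metadata_header(
                const struct lttcomm_relayd_metadata_payload *hdr,
                uint64_t *stream_id, uint32_t *padding_size)
{
        *stream_id = be64toh(hdr->stream_id);
        *padding_size = be32toh(hdr->padding_size);
}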
 
 /*
@@ -1287,11 +1491,12 @@ end:
 ssize_t lttng_consumer_on_read_subbuffer_mmap(
                struct lttng_consumer_local_data *ctx,
                struct lttng_consumer_stream *stream, unsigned long len,
-               unsigned long padding)
+               unsigned long padding,
+               struct ctf_packet_index *index)
 {
        unsigned long mmap_offset;
        void *mmap_base;
-       ssize_t ret = 0, written = 0;
+       ssize_t ret = 0;
        off_t orig_offset = stream->out_fd_offset;
        /* Default is on the disk */
        int outfd = stream->out_fd;
@@ -1302,9 +1507,10 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap(
        rcu_read_lock();
 
        /* Flag that the current stream if set for network streaming. */
-       if (stream->net_seq_idx != -1) {
+       if (stream->net_seq_idx != (uint64_t) -1ULL) {
                relayd = consumer_find_relayd(stream->net_seq_idx);
                if (relayd == NULL) {
+                       ret = -EPIPE;
                        goto end;
                }
        }
@@ -1314,27 +1520,31 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap(
        case LTTNG_CONSUMER_KERNEL:
                mmap_base = stream->mmap_base;
                ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
+               if (ret < 0) {
+                       ret = -errno;
+                       PERROR("tracer ctl get_mmap_read_offset");
+                       goto end;
+               }
                break;
        case LTTNG_CONSUMER32_UST:
        case LTTNG_CONSUMER64_UST:
                mmap_base = lttng_ustctl_get_mmap_base(stream);
                if (!mmap_base) {
                        ERR("read mmap get mmap base for stream %s", stream->name);
-                       written = -1;
+                       ret = -EPERM;
                        goto end;
                }
                ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
+               if (ret != 0) {
+                       PERROR("tracer ctl get_mmap_read_offset");
+                       ret = -EINVAL;
+                       goto end;
+               }
                break;
        default:
                ERR("Unknown consumer_data type");
                assert(0);
        }
-       if (ret != 0) {
-               errno = -ret;
-               PERROR("tracer ctl get_mmap_read_offset");
-               written = ret;
-               goto end;
-       }
 
        /* Handle stream on the relayd if the output is on the network */
        if (relayd) {
@@ -1351,75 +1561,103 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap(
                }
 
                ret = write_relayd_stream_header(stream, netlen, padding, relayd);
-               if (ret >= 0) {
-                       /* Use the returned socket. */
-                       outfd = ret;
+               if (ret < 0) {
+                       relayd_hang_up = 1;
+                       goto write_error;
+               }
+               /* Use the returned socket. */
+               outfd = ret;
 
-                       /* Write metadata stream id before payload */
-                       if (stream->metadata_flag) {
-                               ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
-                               if (ret < 0) {
-                                       written = ret;
-                                       /* Socket operation failed. We consider the relayd dead */
-                                       if (ret == -EPIPE || ret == -EINVAL) {
-                                               relayd_hang_up = 1;
-                                               goto write_error;
-                                       }
-                                       goto end;
-                               }
-                       }
-               } else {
-                       /* Socket operation failed. We consider the relayd dead */
-                       if (ret == -EPIPE || ret == -EINVAL) {
+               /* Write metadata stream id before payload */
+               if (stream->metadata_flag) {
+                       ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
+                       if (ret < 0) {
                                relayd_hang_up = 1;
                                goto write_error;
                        }
-                       /* Else, use the default set before which is the filesystem. */
                }
        } else {
                /* No streaming, we have to set the len with the full padding */
                len += padding;
+
+               /*
+                * Check if we need to change the tracefile before writing the packet.
+                */
+               if (stream->chan->tracefile_size > 0 &&
+                               (stream->tracefile_size_current + len) >
+                               stream->chan->tracefile_size) {
+                       ret = utils_rotate_stream_file(stream->chan->pathname,
+                                       stream->name, stream->chan->tracefile_size,
+                                       stream->chan->tracefile_count, stream->uid, stream->gid,
+                                       stream->out_fd, &(stream->tracefile_count_current),
+                                       &stream->out_fd);
+                       if (ret < 0) {
+                               ERR("Rotating output file");
+                               goto end;
+                       }
+                       outfd = stream->out_fd;
+
+                       if (stream->index_fd >= 0) {
+                               ret = index_create_file(stream->chan->pathname,
+                                               stream->name, stream->uid, stream->gid,
+                                               stream->chan->tracefile_size,
+                                               stream->tracefile_count_current);
+                               if (ret < 0) {
+                                       goto end;
+                               }
+                               stream->index_fd = ret;
+                       }
+
+                       /* Reset current size because we just perform a rotation. */
+                       stream->tracefile_size_current = 0;
+                       stream->out_fd_offset = 0;
+                       orig_offset = 0;
+               }
+               stream->tracefile_size_current += len;
+               if (index) {
+                       index->offset = htobe64(stream->out_fd_offset);
+               }
        }
 
-       while (len > 0) {
-               do {
-                       ret = write(outfd, mmap_base + mmap_offset, len);
-               } while (ret < 0 && errno == EINTR);
-               DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
+       /*
+        * This call guarantees that at most len bytes are returned. It is
+        * impossible to receive a ret value bigger than len.
+        */
+       ret = lttng_write(outfd, mmap_base + mmap_offset, len);
+       DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
+       if (ret < 0 || ((size_t) ret != len)) {
+               /*
+                * Report an error to the caller if nothing was written, else report
+                * the amount that was actually written.
+                */
                if (ret < 0) {
+                       ret = -errno;
+               }
+               relayd_hang_up = 1;
+
+               /* Socket operation failed. We consider the relayd dead */
+               if (errno == EPIPE || errno == EINVAL || errno == EBADF) {
                        /*
-                        * This is possible if the fd is closed on the other side (outfd)
-                        * or any write problem. It can be verbose a bit for a normal
-                        * execution if for instance the relayd is stopped abruptly. This
-                        * can happen so set this to a DBG statement.
+                        * This is possible if the fd is closed on the other side
+                        * (outfd) or any write problem. It can be a bit verbose during
+                        * a normal execution if, for instance, the relayd is stopped
+                        * abruptly, so keep this as a DBG statement.
                         */
-                       DBG("Error in file write mmap");
-                       if (written == 0) {
-                               written = ret;
-                       }
-                       /* Socket operation failed. We consider the relayd dead */
-                       if (errno == EPIPE || errno == EINVAL) {
-                               relayd_hang_up = 1;
-                               goto write_error;
-                       }
-                       goto end;
-               } else if (ret > len) {
-                       PERROR("Error in file write (ret %zd > len %lu)", ret, len);
-                       written += ret;
-                       goto end;
+                       DBG("Consumer mmap write detected relayd hang up");
                } else {
-                       len -= ret;
-                       mmap_offset += ret;
+                       /* Unhandled error, print it and stop function right now. */
+                       PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
                }
+               goto write_error;
+       }
+       stream->output_written += ret;
 
-               /* This call is useless on a socket so better save a syscall. */
-               if (!relayd) {
-                       /* This won't block, but will start writeout asynchronously */
-                       lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
-                                       SYNC_FILE_RANGE_WRITE);
-                       stream->out_fd_offset += ret;
-               }
-               written += ret;
+       /* This call is useless on a socket so better save a syscall. */
+       if (!relayd) {
+               /* This won't block, but will start writeout asynchronously */
+               lttng_sync_file_range(outfd, stream->out_fd_offset, len,
+                               SYNC_FILE_RANGE_WRITE);
+               stream->out_fd_offset += len;
        }
        lttng_consumer_sync_trace_file(stream, orig_offset);
 
@@ -1439,7 +1677,7 @@ end:
        }
 
        rcu_read_unlock();
-       return written;
+       return ret;
 }
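/*
 * Editor's note: the tracefile rotation decision used in both the mmap and
 * splice write paths, isolated here as a sketch for clarity. A per-channel
 * tracefile_size of 0 means "no limit"; otherwise the stream rotates as soon
 * as writing len more bytes would overflow the per-file quota, after which
 * tracefile_size_current and out_fd_offset restart at 0 for the new file.
 */
static int example_must_rotate_tracefile(
                const struct lttng_consumer_stream *stream, unsigned long len)
{
        if (stream->chan->tracefile_size == 0) {
                /* Tracefile rotation disabled for this channel. */
                return 0;
        }
        return (stream->tracefile_size_current + len) >
                        stream->chan->tracefile_size;
}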
 
 /*
@@ -1452,7 +1690,8 @@ end:
 ssize_t lttng_consumer_on_read_subbuffer_splice(
                struct lttng_consumer_local_data *ctx,
                struct lttng_consumer_stream *stream, unsigned long len,
-               unsigned long padding)
+               unsigned long padding,
+               struct ctf_packet_index *index)
 {
        ssize_t ret = 0, written = 0, ret_splice = 0;
        loff_t offset = 0;
@@ -1480,27 +1719,18 @@ ssize_t lttng_consumer_on_read_subbuffer_splice(
        rcu_read_lock();
 
        /* Flag that the current stream if set for network streaming. */
-       if (stream->net_seq_idx != -1) {
+       if (stream->net_seq_idx != (uint64_t) -1ULL) {
                relayd = consumer_find_relayd(stream->net_seq_idx);
                if (relayd == NULL) {
+                       written = -ret;
                        goto end;
                }
        }
-
-       /*
-        * Choose right pipe for splice. Metadata and trace data are handled by
-        * different threads hence the use of two pipes in order not to race or
-        * corrupt the written data.
-        */
-       if (stream->metadata_flag) {
-               splice_pipe = ctx->consumer_splice_metadata_pipe;
-       } else {
-               splice_pipe = ctx->consumer_thread_pipe;
-       }
+       splice_pipe = stream->splice_pipe;
 
        /* Write metadata stream id before payload */
        if (relayd) {
-               int total_len = len;
+               unsigned long total_len = len;
 
                if (stream->metadata_flag) {
                        /*
@@ -1513,34 +1743,62 @@ ssize_t lttng_consumer_on_read_subbuffer_splice(
                                        padding);
                        if (ret < 0) {
                                written = ret;
-                               /* Socket operation failed. We consider the relayd dead */
-                               if (ret == -EBADF) {
-                                       WARN("Remote relayd disconnected. Stopping");
-                                       relayd_hang_up = 1;
-                                       goto write_error;
-                               }
-                               goto end;
+                               relayd_hang_up = 1;
+                               goto write_error;
                        }
 
                        total_len += sizeof(struct lttcomm_relayd_metadata_payload);
                }
 
                ret = write_relayd_stream_header(stream, total_len, padding, relayd);
-               if (ret >= 0) {
-                       /* Use the returned socket. */
-                       outfd = ret;
-               } else {
-                       /* Socket operation failed. We consider the relayd dead */
-                       if (ret == -EBADF) {
-                               WARN("Remote relayd disconnected. Stopping");
-                               relayd_hang_up = 1;
-                               goto write_error;
-                       }
-                       goto end;
+               if (ret < 0) {
+                       written = ret;
+                       relayd_hang_up = 1;
+                       goto write_error;
                }
+               /* Use the returned socket. */
+               outfd = ret;
        } else {
                /* No streaming, we have to set the len with the full padding */
                len += padding;
+
+               /*
+                * Check if we need to change the tracefile before writing the packet.
+                */
+               if (stream->chan->tracefile_size > 0 &&
+                               (stream->tracefile_size_current + len) >
+                               stream->chan->tracefile_size) {
+                       ret = utils_rotate_stream_file(stream->chan->pathname,
+                                       stream->name, stream->chan->tracefile_size,
+                                       stream->chan->tracefile_count, stream->uid, stream->gid,
+                                       stream->out_fd, &(stream->tracefile_count_current),
+                                       &stream->out_fd);
+                       if (ret < 0) {
+                               written = ret;
+                               ERR("Rotating output file");
+                               goto end;
+                       }
+                       outfd = stream->out_fd;
+
+                       if (stream->index_fd >= 0) {
+                               ret = index_create_file(stream->chan->pathname,
+                                               stream->name, stream->uid, stream->gid,
+                                               stream->chan->tracefile_size,
+                                               stream->tracefile_count_current);
+                               if (ret < 0) {
+                                       written = ret;
+                                       goto end;
+                               }
+                               stream->index_fd = ret;
+                       }
+
+                       /* Reset current size because we just performed a rotation. */
+                       stream->tracefile_size_current = 0;
+                       stream->out_fd_offset = 0;
+                       orig_offset = 0;
+               }
+               stream->tracefile_size_current += len;
+               index->offset = htobe64(stream->out_fd_offset);
        }
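
Note (not part of the patch): the local-output branch above introduces per-stream tracefile rotation in splice mode. A minimal sketch of the same check-and-rotate pattern, with the helper name and argument order taken from this hunk; the index file handling (index_create_file) and detailed error reporting are left out:

	static int maybe_rotate_tracefile(struct lttng_consumer_stream *stream,
			unsigned long len, int *outfd)
	{
		int ret;

		if (stream->chan->tracefile_size == 0 ||
				stream->tracefile_size_current + len <=
				stream->chan->tracefile_size) {
			return 0;	/* Packet still fits in the current tracefile. */
		}

		/* Rotate to the next tracefile in the ring; updates out_fd and count. */
		ret = utils_rotate_stream_file(stream->chan->pathname, stream->name,
				stream->chan->tracefile_size, stream->chan->tracefile_count,
				stream->uid, stream->gid, stream->out_fd,
				&stream->tracefile_count_current, &stream->out_fd);
		if (ret < 0) {
			return ret;
		}
		*outfd = stream->out_fd;

		/* Start accounting from zero in the new tracefile. */
		stream->tracefile_size_current = 0;
		stream->out_fd_offset = 0;
		return 0;
	}
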
 
        while (len > 0) {
@@ -1550,57 +1808,51 @@ ssize_t lttng_consumer_on_read_subbuffer_splice(
                                SPLICE_F_MOVE | SPLICE_F_MORE);
                DBG("splice chan to pipe, ret %zd", ret_splice);
                if (ret_splice < 0) {
-                       PERROR("Error in relay splice");
-                       if (written == 0) {
-                               written = ret_splice;
-                       }
                        ret = errno;
+                       written = -ret;
+                       PERROR("Error in relay splice");
                        goto splice_error;
                }
 
                /* Handle stream on the relayd if the output is on the network */
-               if (relayd) {
-                       if (stream->metadata_flag) {
-                               size_t metadata_payload_size =
-                                       sizeof(struct lttcomm_relayd_metadata_payload);
+               if (relayd && stream->metadata_flag) {
+                       size_t metadata_payload_size =
+                               sizeof(struct lttcomm_relayd_metadata_payload);
 
-                               /* Update counter to fit the spliced data */
-                               ret_splice += metadata_payload_size;
-                               len += metadata_payload_size;
-                               /*
-                                * We do this so the return value can match the len passed as
-                                * argument to this function.
-                                */
-                               written -= metadata_payload_size;
-                       }
+                       /* Update counter to fit the spliced data */
+                       ret_splice += metadata_payload_size;
+                       len += metadata_payload_size;
+                       /*
+                        * We do this so the return value can match the len passed as
+                        * argument to this function.
+                        */
+                       written -= metadata_payload_size;
                }
 
                /* Splice data out */
                ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
                                ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
-               DBG("Consumer splice pipe to file, ret %zd", ret_splice);
+               DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
+                               outfd, ret_splice);
                if (ret_splice < 0) {
-                       PERROR("Error in file splice");
-                       if (written == 0) {
-                               written = ret_splice;
-                       }
-                       /* Socket operation failed. We consider the relayd dead */
-                       if (errno == EBADF || errno == EPIPE) {
-                               WARN("Remote relayd disconnected. Stopping");
-                               relayd_hang_up = 1;
-                               goto write_error;
-                       }
                        ret = errno;
-                       goto splice_error;
+                       written = -ret;
+                       relayd_hang_up = 1;
+                       goto write_error;
                } else if (ret_splice > len) {
-                       errno = EINVAL;
-                       PERROR("Wrote more data than requested %zd (len: %lu)",
-                                       ret_splice, len);
-                       written += ret_splice;
+                       /*
+                       * We don't expect this code path to be executed, but you never know,
+                       * so this is an extra protection against a buggy splice().
+                        */
                        ret = errno;
+                       written += ret_splice;
+                       PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
+                                       len);
                        goto splice_error;
+               } else {
+                       /* All good, update current len and continue. */
+                       len -= ret_splice;
                }
-               len -= ret_splice;
 
                /* This call is useless on a socket so better save a syscall. */
                if (!relayd) {
@@ -1609,12 +1861,10 @@ ssize_t lttng_consumer_on_read_subbuffer_splice(
                                        SYNC_FILE_RANGE_WRITE);
                        stream->out_fd_offset += ret_splice;
                }
+               stream->output_written += ret_splice;
                written += ret_splice;
        }
        lttng_consumer_sync_trace_file(stream, orig_offset);
-
-       ret = ret_splice;
-
        goto end;
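
Note (not part of the patch): the loop above is the usual zero-copy relay idiom: one splice() moves a sub-buffer from the tracer fd into an intermediate pipe, a second splice() drains the pipe into the output fd (trace file or relayd socket). A stripped-down sketch of that idiom, ignoring the relayd metadata header and the error bookkeeping done above:

	#include <fcntl.h>	/* splice(); requires _GNU_SOURCE */

	static ssize_t splice_subbuf_once(int tracer_fd, loff_t *offset,
			int splice_pipe[2], int out_fd, size_t len)
	{
		ssize_t in_len, out_len;

		/* Tracer buffer -> pipe, without copying through user space. */
		in_len = splice(tracer_fd, offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		if (in_len <= 0) {
			return in_len;
		}

		/* Pipe -> output file or socket. */
		out_len = splice(splice_pipe[0], NULL, out_fd, NULL, in_len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		return out_len;
	}
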
 
 write_error:
@@ -1708,61 +1958,7 @@ int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        }
 }
 
-/*
- * Iterate over all streams of the hashtable and free them properly.
- *
- * WARNING: *MUST* be used with data stream only.
- */
-static void destroy_data_stream_ht(struct lttng_ht *ht)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_consumer_stream *stream;
-
-       if (ht == NULL) {
-               return;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
-               /*
-                * Ignore return value since we are currently cleaning up so any error
-                * can't be handled.
-                */
-               (void) consumer_del_stream(stream, ht);
-       }
-       rcu_read_unlock();
-
-       lttng_ht_destroy(ht);
-}
-
-/*
- * Iterate over all streams of the hashtable and free them properly.
- *
- * XXX: Should not be only for metadata stream or else use an other name.
- */
-static void destroy_stream_ht(struct lttng_ht *ht)
-{
-       struct lttng_ht_iter iter;
-       struct lttng_consumer_stream *stream;
-
-       if (ht == NULL) {
-               return;
-       }
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
-               /*
-                * Ignore return value since we are currently cleaning up so any error
-                * can't be handled.
-                */
-               (void) consumer_del_metadata_stream(stream, ht);
-       }
-       rcu_read_unlock();
-
-       lttng_ht_destroy(ht);
-}
-
-void lttng_consumer_close_metadata(void)
+void lttng_consumer_close_all_metadata(void)
 {
        switch (consumer_data.type) {
        case LTTNG_CONSUMER_KERNEL:
@@ -1780,7 +1976,7 @@ void lttng_consumer_close_metadata(void)
                 * because at this point we are sure that the metadata producer is
                 * either dead or blocked.
                 */
-               lttng_ustconsumer_close_metadata(metadata_ht);
+               lttng_ustconsumer_close_all_metadata(metadata_ht);
                break;
        default:
                ERR("Unknown consumer_data type");
@@ -1794,10 +1990,7 @@ void lttng_consumer_close_metadata(void)
 void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
                struct lttng_ht *ht)
 {
-       int ret;
-       struct lttng_ht_iter iter;
        struct lttng_consumer_channel *free_chan = NULL;
-       struct consumer_relayd_sock_pair *relayd;
 
        assert(stream);
        /*
@@ -1808,112 +2001,51 @@ void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
 
        DBG3("Consumer delete metadata stream %d", stream->wait_fd);
 
-       if (ht == NULL) {
-               /* Means the stream was allocated but not successfully added */
-               goto free_stream_rcu;
-       }
-
-       pthread_mutex_lock(&consumer_data.lock);
-       pthread_mutex_lock(&stream->lock);
-
-       switch (consumer_data.type) {
-       case LTTNG_CONSUMER_KERNEL:
-               if (stream->mmap_base != NULL) {
-                       ret = munmap(stream->mmap_base, stream->mmap_len);
-                       if (ret != 0) {
-                               PERROR("munmap metadata stream");
-                       }
-               }
-               break;
-       case LTTNG_CONSUMER32_UST:
-       case LTTNG_CONSUMER64_UST:
-               lttng_ustconsumer_del_stream(stream);
-               break;
-       default:
-               ERR("Unknown consumer_data type");
-               assert(0);
-               goto end;
-       }
-
-       rcu_read_lock();
-       iter.iter.node = &stream->node.node;
-       ret = lttng_ht_del(ht, &iter);
-       assert(!ret);
-
-       iter.iter.node = &stream->node_channel_id.node;
-       ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
-       assert(!ret);
-
-       iter.iter.node = &stream->node_session_id.node;
-       ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
-       assert(!ret);
-       rcu_read_unlock();
-
-       if (stream->out_fd >= 0) {
-               ret = close(stream->out_fd);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
-
-       /* Check and cleanup relayd */
-       rcu_read_lock();
-       relayd = consumer_find_relayd(stream->net_seq_idx);
-       if (relayd != NULL) {
-               uatomic_dec(&relayd->refcount);
-               assert(uatomic_read(&relayd->refcount) >= 0);
-
-               /* Closing streams requires to lock the control socket. */
-               pthread_mutex_lock(&relayd->ctrl_sock_mutex);
-               ret = relayd_send_close_stream(&relayd->control_sock,
-                               stream->relayd_stream_id, stream->next_net_seq_num - 1);
-               pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
-               if (ret < 0) {
-                       DBG("Unable to close stream on the relayd. Continuing");
-                       /*
-                        * Continue here. There is nothing we can do for the relayd.
-                        * Chances are that the relayd has closed the socket so we just
-                        * continue cleaning up.
-                        */
-               }
+       pthread_mutex_lock(&consumer_data.lock);
+       pthread_mutex_lock(&stream->chan->lock);
+       pthread_mutex_lock(&stream->lock);
 
-               /* Both conditions are met, we destroy the relayd. */
-               if (uatomic_read(&relayd->refcount) == 0 &&
-                               uatomic_read(&relayd->destroy_flag)) {
-                       destroy_relayd(relayd);
-               }
-       }
-       rcu_read_unlock();
+       /* Remove any reference to that stream. */
+       consumer_stream_delete(stream, ht);
+
+       /* Close down everything including the relayd if one. */
+       consumer_stream_close(stream);
+       /* Destroy tracer buffers of the stream. */
+       consumer_stream_destroy_buffers(stream);
 
        /* Atomically decrement channel refcount since other threads can use it. */
-       uatomic_dec(&stream->chan->refcount);
-       if (!uatomic_read(&stream->chan->refcount)
+       if (!uatomic_sub_return(&stream->chan->refcount, 1)
                        && !uatomic_read(&stream->chan->nb_init_stream_left)) {
                /* Go for channel deletion! */
                free_chan = stream->chan;
        }
 
-end:
+       /*
+        * Nullify the stream reference so it is not used after deletion. The
+        * channel lock MUST be acquired before being able to check for a NULL
+        * pointer value.
+        */
+       stream->chan->metadata_stream = NULL;
+
        pthread_mutex_unlock(&stream->lock);
+       pthread_mutex_unlock(&stream->chan->lock);
        pthread_mutex_unlock(&consumer_data.lock);
 
        if (free_chan) {
                consumer_del_channel(free_chan);
        }
 
-free_stream_rcu:
-       call_rcu(&stream->node.head, free_stream_rcu);
+       consumer_stream_free(stream);
 }
 
 /*
  * Action done with the metadata stream when adding it to the consumer internal
  * data structures to handle it.
  */
-static int add_metadata_stream(struct lttng_consumer_stream *stream,
-               struct lttng_ht *ht)
+int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
 {
+       struct lttng_ht *ht = metadata_ht;
        int ret = 0;
-       struct consumer_relayd_sock_pair *relayd;
        struct lttng_ht_iter iter;
        struct lttng_ht_node_u64 *node;
 
@@ -1923,6 +2055,8 @@ static int add_metadata_stream(struct lttng_consumer_stream *stream,
        DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);
 
        pthread_mutex_lock(&consumer_data.lock);
+       pthread_mutex_lock(&stream->chan->lock);
+       pthread_mutex_lock(&stream->chan->timer_lock);
        pthread_mutex_lock(&stream->lock);
 
        /*
@@ -1940,15 +2074,6 @@ static int add_metadata_stream(struct lttng_consumer_stream *stream,
        node = lttng_ht_iter_get_node_u64(&iter);
        assert(!node);
 
-       /* Find relayd and, if one is found, increment refcount. */
-       relayd = consumer_find_relayd(stream->net_seq_idx);
-       if (relayd != NULL) {
-               uatomic_inc(&relayd->refcount);
-       }
-
-       /* Update channel refcount once added without error(s). */
-       uatomic_inc(&stream->chan->refcount);
-
        /*
         * When nb_init_stream_left reaches 0, we don't need to trigger any action
         * in terms of destroying the associated channel, because the action that
@@ -1957,6 +2082,8 @@ static int add_metadata_stream(struct lttng_consumer_stream *stream,
         * stream.
         */
        if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
+               /* Increment refcount before decrementing nb_init_stream_left */
+               cmm_smp_wmb();
                uatomic_dec(&stream->chan->nb_init_stream_left);
        }
 
@@ -1975,6 +2102,8 @@ static int add_metadata_stream(struct lttng_consumer_stream *stream,
        rcu_read_unlock();
 
        pthread_mutex_unlock(&stream->lock);
+       pthread_mutex_unlock(&stream->chan->lock);
+       pthread_mutex_unlock(&stream->chan->timer_lock);
        pthread_mutex_unlock(&consumer_data.lock);
        return ret;
 }
@@ -2038,7 +2167,7 @@ static void validate_endpoint_status_metadata_stream(
  */
 void *consumer_thread_metadata_poll(void *data)
 {
-       int ret, i, pollfd;
+       int ret, i, pollfd, err = -1;
        uint32_t revents, nb_fd;
        struct lttng_consumer_stream *stream = NULL;
        struct lttng_ht_iter iter;
@@ -2049,12 +2178,14 @@ void *consumer_thread_metadata_poll(void *data)
 
        rcu_register_thread();
 
-       metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       if (!metadata_ht) {
-               /* ENOMEM at this point. Better to bail out. */
-               goto end_ht;
+       health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);
+
+       if (testpoint(consumerd_thread_metadata)) {
+               goto error_testpoint;
        }
 
+       health_code_update();
+
        DBG("Thread metadata poll started");
 
        /* Size is set to 1 for the consumer_metadata pipe */
@@ -2064,7 +2195,8 @@ void *consumer_thread_metadata_poll(void *data)
                goto end_poll;
        }
 
-       ret = lttng_poll_add(&events, ctx->consumer_metadata_pipe[0], LPOLLIN);
+       ret = lttng_poll_add(&events,
+                       lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
        if (ret < 0) {
                goto end;
        }
@@ -2073,59 +2205,60 @@ void *consumer_thread_metadata_poll(void *data)
        DBG("Metadata main loop started");
 
        while (1) {
-               /* Only the metadata pipe is set */
-               if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
-                       goto end;
-               }
-
 restart:
-               DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
+               health_code_update();
+               health_poll_entry();
+               DBG("Metadata poll wait");
                ret = lttng_poll_wait(&events, -1);
+               DBG("Metadata poll return from wait with %d fd(s)",
+                               LTTNG_POLL_GETNB(&events));
+               health_poll_exit();
                DBG("Metadata event catched in thread");
                if (ret < 0) {
                        if (errno == EINTR) {
                                ERR("Poll EINTR catched");
                                goto restart;
                        }
-                       goto error;
+                       if (LTTNG_POLL_GETNB(&events) == 0) {
+                               err = 0;        /* All is OK */
+                       }
+                       goto end;
                }
 
                nb_fd = ret;
 
                /* From here, the event is a metadata wait fd */
                for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
-                       /* Just don't waste time if no returned events for the fd */
                        if (!revents) {
+                               /* No activity for this FD (poll implementation). */
                                continue;
                        }
 
-                       if (pollfd == ctx->consumer_metadata_pipe[0]) {
+                       if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
                                if (revents & (LPOLLERR | LPOLLHUP )) {
                                        DBG("Metadata thread pipe hung up");
                                        /*
                                         * Remove the pipe from the poll set and continue the loop
                                         * since there might be data to consume.
                                         */
-                                       lttng_poll_del(&events, ctx->consumer_metadata_pipe[0]);
-                                       ret = close(ctx->consumer_metadata_pipe[0]);
-                                       if (ret < 0) {
-                                               PERROR("close metadata pipe");
-                                       }
+                                       lttng_poll_del(&events,
+                                                       lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
+                                       lttng_pipe_read_close(ctx->consumer_metadata_pipe);
                                        continue;
                                } else if (revents & LPOLLIN) {
-                                       do {
-                                               /* Get the stream pointer received */
-                                               ret = read(pollfd, &stream, sizeof(stream));
-                                       } while (ret < 0 && errno == EINTR);
-                                       if (ret < 0 ||
-                                                       ret < sizeof(struct lttng_consumer_stream *)) {
+                                       ssize_t pipe_len;
+
+                                       pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
+                                                       &stream, sizeof(stream));
+                                       if (pipe_len < sizeof(stream)) {
                                                PERROR("read metadata stream");
                                                /*
-                                                * Let's continue here and hope we can still work
-                                                * without stopping the consumer. XXX: Should we?
+                                                * Continue here to handle the rest of the streams.
                                                 */
                                                continue;
                                        }
@@ -2140,17 +2273,9 @@ restart:
                                        DBG("Adding metadata stream %d to poll set",
                                                        stream->wait_fd);
 
-                                       ret = add_metadata_stream(stream, metadata_ht);
-                                       if (ret) {
-                                               ERR("Unable to add metadata stream");
-                                               /* Stream was not setup properly. Continuing. */
-                                               consumer_del_metadata_stream(stream, NULL);
-                                               continue;
-                                       }
-
                                        /* Add metadata stream to the global poll events list */
                                        lttng_poll_add(&events, stream->wait_fd,
-                                                       LPOLLIN | LPOLLPRI);
+                                                       LPOLLIN | LPOLLPRI | LPOLLHUP);
                                }
 
                                /* Handle other stream */
@@ -2180,6 +2305,8 @@ restart:
 
                                        /* We just flushed the stream now read it. */
                                        do {
+                                               health_code_update();
+
                                                len = ctx->on_buffer_ready(stream, ctx);
                                                /*
                                                 * We don't check the return value here since if we get
@@ -2201,14 +2328,23 @@ restart:
                                DBG("Metadata available on fd %d", pollfd);
                                assert(stream->wait_fd == pollfd);
 
-                               len = ctx->on_buffer_ready(stream, ctx);
+                               do {
+                                       health_code_update();
+
+                                       len = ctx->on_buffer_ready(stream, ctx);
+                                       /*
+                                        * We don't check the return value here since if we get
+                                        * a negative len, it means an error occurred, thus we
+                                        * simply remove it from the poll set and free the
+                                        * stream.
+                                        */
+                               } while (len > 0);
+
                                /* It's ok to have an unavailable sub-buffer */
                                if (len < 0 && len != -EAGAIN && len != -ENODATA) {
                                        /* Clean up stream from consumer and free it. */
                                        lttng_poll_del(&events, stream->wait_fd);
                                        consumer_del_metadata_stream(stream, metadata_ht);
-                               } else if (len > 0) {
-                                       stream->data_read = 1;
                                }
                        }
 
@@ -2217,14 +2353,19 @@ restart:
                }
        }
 
-error:
+       /* All is OK */
+       err = 0;
 end:
        DBG("Metadata poll thread exiting");
 
        lttng_poll_clean(&events);
 end_poll:
-       destroy_stream_ht(metadata_ht);
-end_ht:
+error_testpoint:
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(health_consumerd);
        rcu_unregister_thread();
        return NULL;
 }
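
Note (not part of the patch): each consumer thread now registers with the health subsystem and brackets its blocking wait with health_poll_entry()/health_poll_exit(), so the health check can tell a thread legitimately sleeping in poll from a stuck one. The skeleton shared by the metadata, data and channel threads, shown with the helpers as they appear in this patch (EINTR handling and fd dispatch elided):

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	while (1) {
		health_code_update();	/* Report liveness between iterations. */

		health_poll_entry();	/* About to block; do not flag this thread. */
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			break;
		}

		/*
		 * ... dispatch on the ready fds, calling health_code_update()
		 * inside long per-fd loops ...
		 */
	}

	health_unregister(health_consumerd);
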
@@ -2235,7 +2376,7 @@ end_ht:
  */
 void *consumer_thread_data_poll(void *data)
 {
-       int num_rdy, num_hup, high_prio, ret, i;
+       int num_rdy, num_hup, high_prio, ret, i, err = -1;
        struct pollfd *pollfd = NULL;
        /* local view of the streams */
        struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
@@ -2246,15 +2387,23 @@ void *consumer_thread_data_poll(void *data)
 
        rcu_register_thread();
 
-       data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
-       if (data_ht == NULL) {
-               /* ENOMEM at this point. Better to bail out. */
-               goto end;
+       health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);
+
+       if (testpoint(consumerd_thread_data)) {
+               goto error_testpoint;
        }
 
-       local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
+       health_code_update();
+
+       local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
+       if (local_stream == NULL) {
+               PERROR("local_stream malloc");
+               goto end;
+       }
 
        while (1) {
+               health_code_update();
+
                high_prio = 0;
                num_hup = 0;
 
@@ -2270,17 +2419,19 @@ void *consumer_thread_data_poll(void *data)
                        free(local_stream);
                        local_stream = NULL;
 
-                       /* allocate for all fds + 1 for the consumer_data_pipe */
-                       pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
+                       /*
+                        * Allocate for all fds, +1 for the consumer_data_pipe and +1 for
+                        * the wakeup pipe.
+                        */
+                       pollfd = zmalloc((consumer_data.stream_count + 2) * sizeof(struct pollfd));
                        if (pollfd == NULL) {
                                PERROR("pollfd malloc");
                                pthread_mutex_unlock(&consumer_data.lock);
                                goto end;
                        }
 
-                       /* allocate for all fds + 1 for the consumer_data_pipe */
-                       local_stream = zmalloc((consumer_data.stream_count + 1) *
-                                       sizeof(struct lttng_consumer_stream));
+                       local_stream = zmalloc((consumer_data.stream_count + 2) *
+                                       sizeof(struct lttng_consumer_stream *));
                        if (local_stream == NULL) {
                                PERROR("local_stream malloc");
                                pthread_mutex_unlock(&consumer_data.lock);
@@ -2301,12 +2452,15 @@ void *consumer_thread_data_poll(void *data)
 
                /* No FDs and consumer_quit, consumer_cleanup the thread */
                if (nb_fd == 0 && consumer_quit == 1) {
+                       err = 0;        /* All is OK */
                        goto end;
                }
                /* poll on the array of fds */
        restart:
-               DBG("polling on %d fd", nb_fd + 1);
-               num_rdy = poll(pollfd, nb_fd + 1, -1);
+               DBG("polling on %d fd", nb_fd + 2);
+               health_poll_entry();
+               num_rdy = poll(pollfd, nb_fd + 2, -1);
+               health_poll_exit();
                DBG("poll num_rdy : %d", num_rdy);
                if (num_rdy == -1) {
                        /*
@@ -2332,13 +2486,10 @@ void *consumer_thread_data_poll(void *data)
                        ssize_t pipe_readlen;
 
                        DBG("consumer_data_pipe wake up");
-                       /* Consume 1 byte of pipe data */
-                       do {
-                               pipe_readlen = read(ctx->consumer_data_pipe[0], &new_stream,
-                                               sizeof(new_stream));
-                       } while (pipe_readlen == -1 && errno == EINTR);
-                       if (pipe_readlen < 0) {
-                               PERROR("read consumer data pipe");
+                       pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
+                                       &new_stream, sizeof(new_stream));
+                       if (pipe_readlen < sizeof(new_stream)) {
+                               PERROR("Consumer data pipe");
                                /* Continue so we can at least handle the current stream(s). */
                                continue;
                        }
@@ -2353,23 +2504,28 @@ void *consumer_thread_data_poll(void *data)
                                continue;
                        }
 
-                       ret = add_stream(new_stream, data_ht);
-                       if (ret) {
-                               ERR("Consumer add stream %" PRIu64 " failed. Continuing",
-                                               new_stream->key);
-                               /*
-                                * At this point, if the add_stream fails, it is not in the
-                                * hash table thus passing the NULL value here.
-                                */
-                               consumer_del_stream(new_stream, NULL);
-                       }
-
                        /* Continue to update the local streams and handle prio ones */
                        continue;
                }
 
+               /* Handle wakeup pipe. */
+               if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
+                       char dummy;
+                       ssize_t pipe_readlen;
+
+                       pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
+                                       sizeof(dummy));
+                       if (pipe_readlen < 0) {
+                               PERROR("Consumer data wakeup pipe");
+                       }
+                       /* We've been awakened to handle stream(s). */
+                       ctx->has_wakeup = 0;
+               }
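
Note (not part of the patch): the block above drains a single dummy byte from consumer_wakeup_pipe and clears ctx->has_wakeup. A hypothetical sketch of the producer side, only to show the intent of the mechanism; the helper name wakeup_data_thread and the exact ordering are assumptions, not taken from this patch:

	/* Hypothetical writer side of the wakeup pipe (assumption, not in this patch). */
	static void wakeup_data_thread(struct lttng_consumer_local_data *ctx)
	{
		char dummy = 'w';

		if (!ctx->has_wakeup) {
			ctx->has_wakeup = 1;
			/* One byte is enough to make poll() return in the data thread. */
			(void) lttng_pipe_write(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
		}
	}
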
+
                /* Take care of high priority channels first. */
                for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
                        if (local_stream[i] == NULL) {
                                continue;
                        }
@@ -2398,11 +2554,14 @@ void *consumer_thread_data_poll(void *data)
 
                /* Take care of low priority channels. */
                for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
                        if (local_stream[i] == NULL) {
                                continue;
                        }
                        if ((pollfd[i].revents & POLLIN) ||
-                                       local_stream[i]->hangup_flush_done) {
+                                       local_stream[i]->hangup_flush_done ||
+                                       local_stream[i]->has_data) {
                                DBG("Normal read on fd %d", pollfd[i].fd);
                                len = ctx->on_buffer_ready(local_stream[i], ctx);
                                /* it's ok to have an unavailable sub-buffer */
@@ -2418,6 +2577,8 @@ void *consumer_thread_data_poll(void *data)
 
                /* Handle hangup and errors */
                for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
                        if (local_stream[i] == NULL) {
                                continue;
                        }
@@ -2463,6 +2624,8 @@ void *consumer_thread_data_poll(void *data)
                        }
                }
        }
+       /* All is OK */
+       err = 0;
 end:
        DBG("polling thread exiting");
        free(pollfd);
@@ -2476,12 +2639,14 @@ end:
         * only tracked fd in the poll set. The thread will take care of closing
         * the read side.
         */
-       ret = close(ctx->consumer_metadata_pipe[1]);
-       if (ret < 0) {
-               PERROR("close data pipe");
-       }
+       (void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);
 
-       destroy_data_stream_ht(data_ht);
+error_testpoint:
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(health_consumerd);
 
        rcu_unregister_thread();
        return NULL;
@@ -2506,22 +2671,36 @@ void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
                        ht->hash_fct(&channel->key, lttng_ht_seed),
                        ht->match_fct, &channel->key,
                        &iter.iter, stream, node_channel_id.node) {
+               /*
+                * Protect against teardown with mutex.
+                */
+               pthread_mutex_lock(&stream->lock);
+               if (cds_lfht_is_node_deleted(&stream->node.node)) {
+                       goto next;
+               }
                switch (consumer_data.type) {
                case LTTNG_CONSUMER_KERNEL:
                        break;
                case LTTNG_CONSUMER32_UST:
                case LTTNG_CONSUMER64_UST:
-                       /*
-                        * Note: a mutex is taken internally within
-                        * liblttng-ust-ctl to protect timer wakeup_fd
-                        * use from concurrent close.
-                        */
-                       lttng_ustconsumer_close_stream_wakeup(stream);
+                       if (stream->metadata_flag) {
+                               /* Safe and protected by the stream lock. */
+                               lttng_ustconsumer_close_metadata(stream->chan);
+                       } else {
+                               /*
+                                * Note: a mutex is taken internally within
+                                * liblttng-ust-ctl to protect timer wakeup_fd
+                                * use from concurrent close.
+                                */
+                               lttng_ustconsumer_close_stream_wakeup(stream);
+                       }
                        break;
                default:
                        ERR("Unknown consumer_data type");
                        assert(0);
                }
+       next:
+               pthread_mutex_unlock(&stream->lock);
        }
        rcu_read_unlock();
 }
@@ -2555,7 +2734,7 @@ static void destroy_channel_ht(struct lttng_ht *ht)
  */
 void *consumer_thread_channel_poll(void *data)
 {
-       int ret, i, pollfd;
+       int ret, i, pollfd, err = -1;
        uint32_t revents, nb_fd;
        struct lttng_consumer_channel *chan = NULL;
        struct lttng_ht_iter iter;
@@ -2566,6 +2745,14 @@ void *consumer_thread_channel_poll(void *data)
 
        rcu_register_thread();
 
+       health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);
+
+       if (testpoint(consumerd_thread_channel)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
+
        channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
        if (!channel_ht) {
                /* ENOMEM at this point. Better to bail out. */
@@ -2590,20 +2777,23 @@ void *consumer_thread_channel_poll(void *data)
        DBG("Channel main loop started");
 
        while (1) {
-               /* Only the channel pipe is set */
-               if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
-                       goto end;
-               }
-
 restart:
-               DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
+               health_code_update();
+               DBG("Channel poll wait");
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
+               DBG("Channel poll return from wait with %d fd(s)",
+                               LTTNG_POLL_GETNB(&events));
+               health_poll_exit();
                DBG("Channel event catched in thread");
                if (ret < 0) {
                        if (errno == EINTR) {
                                ERR("Poll EINTR catched");
                                goto restart;
                        }
+                       if (LTTNG_POLL_GETNB(&events) == 0) {
+                               err = 0;        /* All is OK */
+                       }
                        goto end;
                }
 
@@ -2611,13 +2801,16 @@ restart:
 
                /* From here, the event is a channel wait fd */
                for (i = 0; i < nb_fd; i++) {
+                       health_code_update();
+
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
-                       /* Just don't waste time if no returned events for the fd */
                        if (!revents) {
+                               /* No activity for this FD (poll implementation). */
                                continue;
                        }
+
                        if (pollfd == ctx->consumer_channel_pipe[0]) {
                                if (revents & (LPOLLERR | LPOLLHUP)) {
                                        DBG("Channel thread pipe hung up");
@@ -2629,8 +2822,9 @@ restart:
                                        continue;
                                } else if (revents & LPOLLIN) {
                                        enum consumer_channel_action action;
+                                       uint64_t key;
 
-                                       ret = read_channel_pipe(ctx, &chan, &action);
+                                       ret = read_channel_pipe(ctx, &chan, &key, &action);
                                        if (ret <= 0) {
                                                ERR("Error reading channel pipe");
                                                continue;
@@ -2643,12 +2837,61 @@ restart:
 
                                                lttng_ht_node_init_u64(&chan->wait_fd_node,
                                                        chan->wait_fd);
+                                               rcu_read_lock();
                                                lttng_ht_add_unique_u64(channel_ht,
                                                                &chan->wait_fd_node);
+                                               rcu_read_unlock();
                                                /* Add channel to the global poll events list */
                                                lttng_poll_add(&events, chan->wait_fd,
                                                                LPOLLIN | LPOLLPRI);
                                                break;
+                                       case CONSUMER_CHANNEL_DEL:
+                                       {
+                                               /*
+                                                * This command should never be called if the channel
+                                                * has streams monitored by either the data or metadata
+                                               * thread. The consumer only notifies this thread with a
+                                               * channel delete command if it receives a destroy
+                                               * channel command from the session daemon, which sends it
+                                               * when a command prior to GET_CHANNEL failed.
+                                                */
+
+                                               rcu_read_lock();
+                                               chan = consumer_find_channel(key);
+                                               if (!chan) {
+                                                       rcu_read_unlock();
+                                                       ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
+                                                       break;
+                                               }
+                                               lttng_poll_del(&events, chan->wait_fd);
+                                               iter.iter.node = &chan->wait_fd_node.node;
+                                               ret = lttng_ht_del(channel_ht, &iter);
+                                               assert(ret == 0);
+
+                                               switch (consumer_data.type) {
+                                               case LTTNG_CONSUMER_KERNEL:
+                                                       break;
+                                               case LTTNG_CONSUMER32_UST:
+                                               case LTTNG_CONSUMER64_UST:
+                                                       health_code_update();
+                                                       /* Destroy streams that might have been left in the stream list. */
+                                                       clean_channel_stream_list(chan);
+                                                       break;
+                                               default:
+                                                       ERR("Unknown consumer_data type");
+                                                       assert(0);
+                                               }
+
+                                               /*
+                                                * Release our own refcount. Force channel deletion even if
+                                                * streams were not initialized.
+                                                */
+                                               if (!uatomic_sub_return(&chan->refcount, 1)) {
+                                                       consumer_del_channel(chan);
+                                               }
+                                               rcu_read_unlock();
+                                               goto restart;
+                                       }
                                        case CONSUMER_CHANNEL_QUIT:
                                                /*
                                                 * Remove the pipe from the poll set and continue the loop
@@ -2685,7 +2928,19 @@ restart:
                                lttng_poll_del(&events, chan->wait_fd);
                                ret = lttng_ht_del(channel_ht, &iter);
                                assert(ret == 0);
+
+                               /*
+                                * This will close the wait fd for each stream associated with
+                                * this channel AND monitored by the data/metadata thread, so
+                                * they will be cleaned up by the right thread.
+                                */
                                consumer_close_channel_streams(chan);
+
+                               /* Release our own refcount */
+                               if (!uatomic_sub_return(&chan->refcount, 1)
+                                               && !uatomic_read(&chan->nb_init_stream_left)) {
+                                       consumer_del_channel(chan);
+                               }
                        }
 
                        /* Release RCU lock for the channel looked up */
@@ -2693,23 +2948,58 @@ restart:
                }
        }
 
+       /* All is OK */
+       err = 0;
 end:
        lttng_poll_clean(&events);
 end_poll:
        destroy_channel_ht(channel_ht);
 end_ht:
+error_testpoint:
        DBG("Channel poll thread exiting");
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(health_consumerd);
        rcu_unregister_thread();
        return NULL;
 }
 
+static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
+               struct pollfd *sockpoll, int client_socket)
+{
+       int ret;
+
+       assert(ctx);
+       assert(sockpoll);
+
+       ret = lttng_consumer_poll_socket(sockpoll);
+       if (ret) {
+               goto error;
+       }
+       DBG("Metadata connection on client_socket");
+
+       /* Blocking call, waiting for transmission */
+       ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
+       if (ctx->consumer_metadata_socket < 0) {
+               WARN("On accept metadata");
+               ret = -1;
+               goto error;
+       }
+       ret = 0;
+
+error:
+       return ret;
+}
+
 /*
  * This thread listens on the consumerd socket and receives the file
  * descriptors from the session daemon.
  */
 void *consumer_thread_sessiond_poll(void *data)
 {
-       int sock = -1, client_socket, ret;
+       int sock = -1, client_socket, ret, err = -1;
        /*
         * structure to poll for incoming data on communication socket avoids
         * making blocking sockets.
@@ -2719,6 +3009,14 @@ void *consumer_thread_sessiond_poll(void *data)
 
        rcu_register_thread();
 
+       health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);
+
+       if (testpoint(consumerd_thread_sessiond)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
+
        DBG("Creating command socket %s", ctx->consumer_command_sock_path);
        unlink(ctx->consumer_command_sock_path);
        client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
@@ -2740,19 +3038,18 @@ void *consumer_thread_sessiond_poll(void *data)
                goto end;
        }
 
-       ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
-       if (ret < 0) {
-               PERROR("fcntl O_NONBLOCK");
-               goto end;
-       }
-
        /* prepare the FDs to poll : to client socket and the should_quit pipe */
        consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
        consumer_sockpoll[0].events = POLLIN | POLLPRI;
        consumer_sockpoll[1].fd = client_socket;
        consumer_sockpoll[1].events = POLLIN | POLLPRI;
 
-       if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
+       ret = lttng_consumer_poll_socket(consumer_sockpoll);
+       if (ret) {
+               if (ret > 0) {
+                       /* should exit */
+                       err = 0;
+               }
                goto end;
        }
        DBG("Connection on client_socket");
@@ -2763,9 +3060,17 @@ void *consumer_thread_sessiond_poll(void *data)
                WARN("On accept");
                goto end;
        }
-       ret = fcntl(sock, F_SETFL, O_NONBLOCK);
-       if (ret < 0) {
-               PERROR("fcntl O_NONBLOCK");
+
+       /*
+        * Set up the metadata socket, which is the second socket connection on
+        * the command unix socket.
+        */
+       ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
+       if (ret) {
+               if (ret > 0) {
+                       /* should exit */
+                       err = 0;
+               }
                goto end;
        }
 
@@ -2781,29 +3086,39 @@ void *consumer_thread_sessiond_poll(void *data)
        consumer_sockpoll[1].events = POLLIN | POLLPRI;
 
        while (1) {
-               if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
+               health_code_update();
+
+               health_poll_entry();
+               ret = lttng_consumer_poll_socket(consumer_sockpoll);
+               health_poll_exit();
+               if (ret) {
+                       if (ret > 0) {
+                               /* should exit */
+                               err = 0;
+                       }
                        goto end;
                }
                DBG("Incoming command on sock");
                ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
-               if (ret == -ENOENT) {
-                       DBG("Received STOP command");
-                       goto end;
-               }
                if (ret <= 0) {
                        /*
                         * This could simply be a session daemon quitting. Don't output
                         * ERR() here.
                         */
                        DBG("Communication interrupted on command socket");
+                       err = 0;
                        goto end;
                }
                if (consumer_quit) {
                        DBG("consumer_thread_receive_fds received quit from signal");
+                       err = 0;        /* All is OK */
                        goto end;
                }
                DBG("received command on sock");
        }
+       /* All is OK */
+       err = 0;
+
 end:
        DBG("Consumer thread sessiond poll exiting");
 
@@ -2813,7 +3128,7 @@ end:
         *
         * NOTE: for now, this only applies to the UST tracer.
         */
-       lttng_consumer_close_metadata();
+       lttng_consumer_close_all_metadata();
 
        /*
         * when all fds have hung up, the polling thread
@@ -2825,9 +3140,11 @@ end:
         * Notify the data poll thread to poll back again and test the
         * consumer_quit state that we just set so to quit gracefully.
         */
-       notify_thread_pipe(ctx->consumer_data_pipe[1]);
+       notify_thread_lttng_pipe(ctx->consumer_data_pipe);
+
+       notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);
 
-       notify_channel_pipe(ctx, NULL, CONSUMER_CHANNEL_QUIT);
+       notify_health_quit_pipe(health_quit_pipe);
 
        /* Cleaning up possibly open sockets. */
        if (sock >= 0) {
@@ -2837,12 +3154,19 @@ end:
                }
        }
        if (client_socket >= 0) {
-               ret = close(sock);
+               ret = close(client_socket);
                if (ret < 0) {
                        PERROR("close client_socket sessiond poll");
                }
        }
 
+error_testpoint:
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(health_consumerd);
+
        rcu_unregister_thread();
        return NULL;
 }
@@ -2853,6 +3177,9 @@ ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
        ssize_t ret;
 
        pthread_mutex_lock(&stream->lock);
+       if (stream->metadata_flag) {
+               pthread_mutex_lock(&stream->metadata_rdv_lock);
+       }
 
        switch (consumer_data.type) {
        case LTTNG_CONSUMER_KERNEL:
@@ -2869,6 +3196,10 @@ ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
                break;
        }
 
+       if (stream->metadata_flag) {
+               pthread_cond_broadcast(&stream->metadata_rdv);
+               pthread_mutex_unlock(&stream->metadata_rdv_lock);
+       }
        pthread_mutex_unlock(&stream->lock);
        return ret;
 }
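
Note (not part of the patch): for a metadata stream, the reader above now holds metadata_rdv_lock across the read and broadcasts metadata_rdv once the sub-buffer has been consumed. A waiter that needs to know the metadata was pushed would follow the standard condition-variable pattern sketched below; the predicate metadata_consumed() is a hypothetical placeholder, not taken from this patch:

	pthread_mutex_lock(&stream->metadata_rdv_lock);
	while (!metadata_consumed(stream)) {	/* hypothetical predicate */
		pthread_cond_wait(&stream->metadata_rdv, &stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->metadata_rdv_lock);
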
@@ -2891,12 +3222,42 @@ int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
 /*
  * Allocate and set consumer data hash tables.
  */
-void lttng_consumer_init(void)
+int lttng_consumer_init(void)
 {
        consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!consumer_data.channel_ht) {
+               goto error;
+       }
+
        consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!consumer_data.relayd_ht) {
+               goto error;
+       }
+
        consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!consumer_data.stream_list_ht) {
+               goto error;
+       }
+
        consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!consumer_data.stream_per_chan_id_ht) {
+               goto error;
+       }
+
+       data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!data_ht) {
+               goto error;
+       }
+
+       metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+       if (!metadata_ht) {
+               goto error;
+       }
+
+       return 0;
+
+error:
+       return -1;
 }
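
Note (not part of the patch): lttng_consumer_init() used to return void; it now reports hash table allocation failures, so callers are expected to check the result at start-up. A minimal usage sketch, where the cleanup label in the caller is hypothetical:

	if (lttng_consumer_init() < 0) {
		ERR("Failed to allocate consumer hash tables");
		goto error;	/* hypothetical cleanup label in the caller */
	}
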
 
 /*
@@ -2905,57 +3266,82 @@ void lttng_consumer_init(void)
  * This will create a relayd socket pair and add it to the relayd hash table.
  * The caller MUST acquire a RCU read side lock before calling it.
  */
-int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
+int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
                struct lttng_consumer_local_data *ctx, int sock,
-               struct pollfd *consumer_sockpoll, struct lttcomm_sock *relayd_sock,
-               unsigned int sessiond_id)
+               struct pollfd *consumer_sockpoll,
+               struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
+               uint64_t relayd_session_id)
 {
        int fd = -1, ret = -1, relayd_created = 0;
-       enum lttng_error_code ret_code = LTTNG_OK;
-       struct consumer_relayd_sock_pair *relayd;
+       enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
+       struct consumer_relayd_sock_pair *relayd = NULL;
 
-       DBG("Consumer adding relayd socket (idx: %d)", net_seq_idx);
+       assert(ctx);
+       assert(relayd_sock);
 
-       /* First send a status message before receiving the fds. */
-       ret = consumer_send_status_msg(sock, ret_code);
-       if (ret < 0) {
-               /* Somehow, the session daemon is not responding anymore. */
-               goto error;
-       }
+       DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);
 
        /* Get relayd reference if exists. */
        relayd = consumer_find_relayd(net_seq_idx);
        if (relayd == NULL) {
+               assert(sock_type == LTTNG_STREAM_CONTROL);
                /* Not found. Allocate one. */
                relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
                if (relayd == NULL) {
-                       lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
-                       ret = -1;
+                       ret = -ENOMEM;
+                       ret_code = LTTCOMM_CONSUMERD_ENOMEM;
                        goto error;
+               } else {
+                       relayd->sessiond_session_id = sessiond_id;
+                       relayd_created = 1;
                }
-               relayd->sessiond_session_id = (uint64_t) sessiond_id;
-               relayd_created = 1;
+
+               /*
+                * This code path MUST continue to the consumer send status message so
+                * we can notify the session daemon and continue our work without
+                * killing everything.
+                */
+       } else {
+               /*
+                * A relayd key should never be found for a control socket.
+                */
+               assert(sock_type != LTTNG_STREAM_CONTROL);
+       }
+
+       /* First send a status message before receiving the fds. */
+       ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
+       if (ret < 0) {
+               /* Somehow, the session daemon is not responding anymore. */
+               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
+               goto error_nosignal;
        }
 
        /* Poll on consumer socket. */
-       if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
+       ret = lttng_consumer_poll_socket(consumer_sockpoll);
+       if (ret) {
+               /* Needing to exit in the middle of a command: error. */
+               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
                ret = -EINTR;
-               goto error;
+               goto error_nosignal;
        }
 
        /* Get relayd socket from session daemon */
        ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
        if (ret != sizeof(fd)) {
-               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
                ret = -1;
                fd = -1;        /* Just in case it gets set with an invalid value. */
-               goto error_close;
-       }
 
-       /* We have the fds without error. Send status back. */
-       ret = consumer_send_status_msg(sock, ret_code);
-       if (ret < 0) {
-               /* Somehow, the session daemon is not responding anymore. */
+               /*
+                * Failing to receive FDs might indicate a major problem such as
+                * reaching a fd limit during the receive where the kernel returns a
+                * MSG_CTRUNC and fails to clean up the fd in the queue. In any case, we
+                * don't take any chances and stop everything.
+                *
+                * XXX: Feature request #558 will fix that and avoid this possible
+                * issue when reaching the fd limit.
+                */
+               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
+               ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
                goto error;
        }
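
The fd received above arrives as SCM_RIGHTS ancillary data on the AF_UNIX
socket. As a hedged sketch only, not the lttcomm_recv_fds_unix_sock()
implementation (recv_one_fd() is an illustrative name), this is roughly what
such a receive path looks like and why a truncated control message
(MSG_CTRUNC) means the descriptor is irrecoverably lost:

#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static int recv_one_fd(int sock)
{
        char dummy;
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union {
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg;
        struct cmsghdr *cmsg;
        int fd;
        ssize_t ret;

        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);

        ret = recvmsg(sock, &msg, 0);
        if (ret <= 0) {
                return -1;      /* Error or the peer closed the socket. */
        }
        if (msg.msg_flags & MSG_CTRUNC) {
                /*
                 * The ancillary data was truncated: the kernel dropped the
                 * fd (for instance when the receiver is at its fd limit).
                 * The descriptor cannot be recovered.
                 */
                return -1;
        }
        cmsg = CMSG_FIRSTHDR(&msg);
        if (!cmsg || cmsg->cmsg_level != SOL_SOCKET
                        || cmsg->cmsg_type != SCM_RIGHTS) {
                return -1;      /* No fd attached to this message. */
        }
        memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
        return fd;
}

Since the consumer cannot tell which descriptor was dropped in the MSG_CTRUNC
case, the patch treats the failure as fatal rather than retrying.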
 
@@ -2963,67 +3349,61 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
        switch (sock_type) {
        case LTTNG_STREAM_CONTROL:
                /* Copy received lttcomm socket */
-               lttcomm_copy_sock(&relayd->control_sock, relayd_sock);
-               ret = lttcomm_create_sock(&relayd->control_sock);
-               /* Immediately try to close the created socket if valid. */
-               if (relayd->control_sock.fd >= 0) {
-                       if (close(relayd->control_sock.fd)) {
-                               PERROR("close relayd control socket");
-                       }
-               }
+               lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
+               ret = lttcomm_create_sock(&relayd->control_sock.sock);
                /* Handle create_sock error. */
                if (ret < 0) {
+                       ret_code = LTTCOMM_CONSUMERD_ENOMEM;
                        goto error;
                }
-
-               /* Assign new file descriptor */
-               relayd->control_sock.fd = fd;
-
                /*
-                * Create a session on the relayd and store the returned id. Lock the
-                * control socket mutex if the relayd was NOT created before.
+                * Close the socket created internally by
+                * lttcomm_create_sock so we can replace it with the one
+                * received from the session daemon.
                 */
-               if (!relayd_created) {
-                       pthread_mutex_lock(&relayd->ctrl_sock_mutex);
-               }
-               ret = relayd_create_session(&relayd->control_sock,
-                               &relayd->relayd_session_id);
-               if (!relayd_created) {
-                       pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
-               }
-               if (ret < 0) {
-                       /*
-                        * Close all sockets of a relayd object. It will be freed if it was
-                        * created at the error code path or else it will be garbage
-                        * collect.
-                        */
-                       (void) relayd_close(&relayd->control_sock);
-                       (void) relayd_close(&relayd->data_sock);
-                       goto error;
+               if (close(relayd->control_sock.sock.fd)) {
+                       PERROR("close relayd control socket");
                }
 
+               /* Assign new file descriptor */
+               relayd->control_sock.sock.fd = fd;
+               fd = -1;        /* For error path */
+               /* Assign version values. */
+               relayd->control_sock.major = relayd_sock->major;
+               relayd->control_sock.minor = relayd_sock->minor;
+
+               relayd->relayd_session_id = relayd_session_id;
+
                break;
        case LTTNG_STREAM_DATA:
                /* Copy received lttcomm socket */
-               lttcomm_copy_sock(&relayd->data_sock, relayd_sock);
-               ret = lttcomm_create_sock(&relayd->data_sock);
-               /* Immediately try to close the created socket if valid. */
-               if (relayd->data_sock.fd >= 0) {
-                       if (close(relayd->data_sock.fd)) {
-                               PERROR("close relayd data socket");
-                       }
-               }
+               lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
+               ret = lttcomm_create_sock(&relayd->data_sock.sock);
                /* Handle create_sock error. */
                if (ret < 0) {
+                       ret_code = LTTCOMM_CONSUMERD_ENOMEM;
                        goto error;
                }
+               /*
+                * Close the socket created internally by
+                * lttcomm_create_sock so we can replace it with the one
+                * received from the session daemon.
+                */
+               if (close(relayd->data_sock.sock.fd)) {
+                       PERROR("close relayd data socket");
+               }
 
                /* Assign new file descriptor */
-               relayd->data_sock.fd = fd;
+               relayd->data_sock.sock.fd = fd;
+               fd = -1;        /* For error path */
+               /* Assign version values. */
+               relayd->data_sock.major = relayd_sock->major;
+               relayd->data_sock.minor = relayd_sock->minor;
                break;
        default:
                ERR("Unknown relayd socket type (%d)", sock_type);
                ret = -1;
+               ret_code = LTTCOMM_CONSUMERD_FATAL;
                goto error;
        }
 
@@ -3031,6 +3411,14 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
                        sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
                        relayd->net_seq_idx, fd);
 
+       /* We successfully added the socket. Send status back. */
+       ret = consumer_send_status_msg(sock, ret_code);
+       if (ret < 0) {
+               /* Somehow, the session daemon is not responding anymore. */
+               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
+               goto error_nosignal;
+       }
+
        /*
         * Add relayd socket pair to consumer data hashtable. If object already
         * exists or on error, the function gracefully returns.
@@ -3041,6 +3429,11 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
        return 0;
 
 error:
+       if (consumer_send_status_msg(sock, ret_code) < 0) {
+               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
+       }
+
+error_nosignal:
        /* Close received socket if valid. */
        if (fd >= 0) {
                if (close(fd)) {
@@ -3048,7 +3441,6 @@ error:
                }
        }
 
-error_close:
        if (relayd_created) {
                free(relayd);
        }
@@ -3253,6 +3645,7 @@ int consumer_send_status_msg(int sock, int ret_code)
 {
        struct lttcomm_consumer_status_msg msg;
 
+       memset(&msg, 0, sizeof(msg));
        msg.ret_code = ret_code;
 
        return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
@@ -3270,13 +3663,31 @@ int consumer_send_status_channel(int sock,
 
        assert(sock >= 0);
 
+       memset(&msg, 0, sizeof(msg));
        if (!channel) {
-               msg.ret_code = -LTTNG_ERR_UST_CHAN_FAIL;
+               msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
        } else {
-               msg.ret_code = LTTNG_OK;
+               msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
                msg.key = channel->key;
                msg.stream_count = channel->streams.count;
        }
 
        return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
 }
+
+unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
+               unsigned long produced_pos, uint64_t nb_packets_per_stream,
+               uint64_t max_sb_size)
+{
+       unsigned long start_pos;
+
+       if (!nb_packets_per_stream) {
+               return consumed_pos;    /* Grab everything */
+       }
+       start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
+       start_pos -= max_sb_size * nb_packets_per_stream;
+       if ((long) (start_pos - consumed_pos) < 0) {
+               return consumed_pos;    /* Grab everything */
+       }
+       return start_pos;
+}
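
For illustration, a minimal standalone sketch of the start-position
computation added above, with hypothetical values and a plain modulo standing
in for offset_align_floor() from align.h (equivalent when max_sb_size is a
power of two); start_pos_example() is an illustrative name, not part of the
consumer API.

#include <stdint.h>
#include <stdio.h>

static unsigned long start_pos_example(unsigned long consumed_pos,
                unsigned long produced_pos, uint64_t nb_packets_per_stream,
                uint64_t max_sb_size)
{
        unsigned long start_pos;

        if (!nb_packets_per_stream) {
                return consumed_pos;    /* No packet limit: grab everything. */
        }
        /* Round the produced position down to a sub-buffer boundary. */
        start_pos = produced_pos - (produced_pos % max_sb_size);
        /* Step back by the number of packets allowed per stream. */
        start_pos -= max_sb_size * nb_packets_per_stream;
        if ((long) (start_pos - consumed_pos) < 0) {
                return consumed_pos;    /* Not enough data yet: grab everything. */
        }
        return start_pos;
}

int main(void)
{
        /* 4 KiB sub-buffers, keep only the last 2 packets: prints 8192. */
        printf("%lu\n", start_pos_example(0, 20000, 2, 4096));
        /* Consumer already past that point: falls back to 10000. */
        printf("%lu\n", start_pos_example(10000, 20000, 2, 4096));
        return 0;
}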