Fix: relayd and sessiond: dispatch thread exit is shared across threads
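
The crux of the change: dispatch_thread_exit is written by stop_threads() and read in a loop by the UST registration dispatch thread, so both accesses are now wrapped in liburcu's CMM_STORE_SHARED()/CMM_LOAD_SHARED() accessors instead of plain loads and stores. A minimal sketch of the pattern follows; it is illustrative only, the helper names request_dispatch_exit() and dispatch_loop() are placeholders for stop_threads() and thread_dispatch_ust_registration() in the patch, and it assumes urcu/system.h provides the macros.

#include <urcu/system.h>	/* CMM_LOAD_SHARED() / CMM_STORE_SHARED() */

static int dispatch_thread_exit;	/* flag shared between threads */

/* Writer side: ask the dispatch thread to quit. */
static void request_dispatch_exit(void)
{
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	/* ... then wake the command queue futex so the loop re-reads the flag. */
}

/* Reader side: the dispatch loop re-checks the flag on every iteration. */
static void *dispatch_loop(void *data)
{
	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		/* ... block on the futex, dispatch queued registrations ... */
	}
	return NULL;
}

Roughly speaking, these accessors behave like the kernel's READ_ONCE()/WRITE_ONCE(): they keep the compiler from caching or tearing the flag, while the futex wake in stop_threads() provides the actual wakeup.
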
diff --git a/src/bin/lttng-sessiond/main.c b/src/bin/lttng-sessiond/main.c
index 5cfd57971f7423fe21f69c1d58b2cfafca99a05b..5664b0be1e86c46e4c2d4f4b1e42250fb30cee8e 100644
--- a/src/bin/lttng-sessiond/main.c
+++ b/src/bin/lttng-sessiond/main.c
 #include <common/compat/socket.h>
 #include <common/defaults.h>
 #include <common/kernel-consumer/kernel-consumer.h>
-#include <common/ust-consumer/ust-consumer.h>
 #include <common/futex.h>
+#include <common/relayd/relayd.h>
+#include <common/utils.h>
 
 #include "lttng-sessiond.h"
 #include "channel.h"
+#include "consumer.h"
 #include "context.h"
 #include "event.h"
 #include "kernel.h"
+#include "kernel-consumer.h"
 #include "modprobe.h"
 #include "shm.h"
 #include "ust-ctl.h"
+#include "ust-consumer.h"
 #include "utils.h"
 #include "fd-limit.h"
+#include "filter.h"
+#include "health.h"
 
 #define CONSUMERD_FILE "lttng-consumerd"
 
-struct consumer_data {
-       enum lttng_consumer_type type;
-
-       pthread_t thread;       /* Worker thread interacting with the consumer */
-       sem_t sem;
-
-       /* Mutex to control consumerd pid assignation */
-       pthread_mutex_t pid_mutex;
-       pid_t pid;
-
-       int err_sock;
-       int cmd_sock;
-
-       /* consumer error and command Unix socket path */
-       char err_unix_sock_path[PATH_MAX];
-       char cmd_unix_sock_path[PATH_MAX];
-};
-
 /* Const values */
 const char default_home_dir[] = DEFAULT_HOME_DIR;
 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
@@ -115,6 +103,7 @@ static struct consumer_data ustconsumer32_data = {
        .cmd_sock = -1,
 };
 
+/* Shared between threads */
 static int dispatch_thread_exit;
 
 /* Global application Unix socket path */
@@ -123,6 +112,8 @@ static char apps_unix_sock_path[PATH_MAX];
 static char client_unix_sock_path[PATH_MAX];
 /* global wait shm path for UST */
 static char wait_shm_path[PATH_MAX];
+/* Global health check unix path */
+static char health_unix_sock_path[PATH_MAX];
 
 /* Sockets and FDs */
 static int client_sock = -1;
@@ -148,7 +139,7 @@ static pthread_t reg_apps_thread;
 static pthread_t client_thread;
 static pthread_t kernel_thread;
 static pthread_t dispatch_thread;
-
+static pthread_t health_thread;
 
 /*
  * UST registration command queue. This queue is tied with a futex and uses a N
@@ -213,6 +204,21 @@ enum consumerd_state {
 static enum consumerd_state ust_consumerd_state;
 static enum consumerd_state kernel_consumerd_state;
 
+/*
+ * Used to keep a unique index for each relayd socket created. This value is
+ * associated with the streams on the consumer so that it can match the right
+ * relayd socket to send to.
+ *
+ * This value should be incremented atomically, for safety and for possible
+ * future concurrent access.
+ */
+static unsigned int relayd_net_seq_idx;
+
+/* Used for the health monitoring of the session daemon. See health.h */
+struct health_state health_thread_cmd;
+struct health_state health_thread_app_reg;
+struct health_state health_thread_kernel;
+
 static
 void setup_consumerd_path(void)
 {
@@ -415,7 +421,7 @@ static void stop_threads(void)
        }
 
        /* Dispatch thread */
-       dispatch_thread_exit = 1;
+       CMM_STORE_SHARED(dispatch_thread_exit, 1);
        futex_nto1_wake(&ust_cmd_queue.futex);
 }
 
@@ -424,7 +430,7 @@ static void stop_threads(void)
  */
 static void cleanup(void)
 {
-       int ret, i;
+       int ret;
        char *cmd;
        struct ltt_session *sess, *stmp;
 
@@ -474,34 +480,9 @@ static void cleanup(void)
                DBG("Unloading kernel modules");
                modprobe_remove_lttng_all();
        }
-
-       /*
-        * Closing all pipes used for communication between threads.
-        */
-       for (i = 0; i < 2; i++) {
-               if (kernel_poll_pipe[i] >= 0) {
-                       ret = close(kernel_poll_pipe[i]);
-                       if (ret) {
-                               PERROR("close");
-                       }
-               }
-       }
-       for (i = 0; i < 2; i++) {
-               if (thread_quit_pipe[i] >= 0) {
-                       ret = close(thread_quit_pipe[i]);
-                       if (ret) {
-                               PERROR("close");
-                       }
-               }
-       }
-       for (i = 0; i < 2; i++) {
-               if (apps_cmd_pipe[i] >= 0) {
-                       ret = close(apps_cmd_pipe[i]);
-                       if (ret) {
-                               PERROR("close");
-                       }
-               }
-       }
+       utils_close_pipe(kernel_poll_pipe);
+       utils_close_pipe(thread_quit_pipe);
+       utils_close_pipe(apps_cmd_pipe);
 
        /* <fun> */
        DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
@@ -543,139 +524,6 @@ static void clean_command_ctx(struct command_ctx **cmd_ctx)
        }
 }
 
-/*
- * Send all stream fds of kernel channel to the consumer.
- */
-static int send_kconsumer_channel_streams(struct consumer_data *consumer_data,
-               int sock, struct ltt_kernel_channel *channel,
-               uid_t uid, gid_t gid)
-{
-       int ret;
-       struct ltt_kernel_stream *stream;
-       struct lttcomm_consumer_msg lkm;
-
-       DBG("Sending streams of channel %s to kernel consumer",
-                       channel->channel->name);
-
-       /* Send channel */
-       lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
-       lkm.u.channel.channel_key = channel->fd;
-       lkm.u.channel.max_sb_size = channel->channel->attr.subbuf_size;
-       lkm.u.channel.mmap_len = 0;     /* for kernel */
-       DBG("Sending channel %d to consumer", lkm.u.channel.channel_key);
-       ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
-       if (ret < 0) {
-               PERROR("send consumer channel");
-               goto error;
-       }
-
-       /* Send streams */
-       cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
-               if (!stream->fd) {
-                       continue;
-               }
-               lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
-               lkm.u.stream.channel_key = channel->fd;
-               lkm.u.stream.stream_key = stream->fd;
-               lkm.u.stream.state = stream->state;
-               lkm.u.stream.output = channel->channel->attr.output;
-               lkm.u.stream.mmap_len = 0;      /* for kernel */
-               lkm.u.stream.uid = uid;
-               lkm.u.stream.gid = gid;
-               strncpy(lkm.u.stream.path_name, stream->pathname, PATH_MAX - 1);
-               lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
-               DBG("Sending stream %d to consumer", lkm.u.stream.stream_key);
-               ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
-               if (ret < 0) {
-                       PERROR("send consumer stream");
-                       goto error;
-               }
-               ret = lttcomm_send_fds_unix_sock(sock, &stream->fd, 1);
-               if (ret < 0) {
-                       PERROR("send consumer stream ancillary data");
-                       goto error;
-               }
-       }
-
-       DBG("consumer channel streams sent");
-
-       return 0;
-
-error:
-       return ret;
-}
-
-/*
- * Send all stream fds of the kernel session to the consumer.
- */
-static int send_kconsumer_session_streams(struct consumer_data *consumer_data,
-               struct ltt_kernel_session *session)
-{
-       int ret;
-       struct ltt_kernel_channel *chan;
-       struct lttcomm_consumer_msg lkm;
-       int sock = session->consumer_fd;
-
-       DBG("Sending metadata stream fd");
-
-       /* Extra protection. It's NOT supposed to be set to -1 at this point */
-       if (session->consumer_fd < 0) {
-               session->consumer_fd = consumer_data->cmd_sock;
-       }
-
-       if (session->metadata_stream_fd >= 0) {
-               /* Send metadata channel fd */
-               lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
-               lkm.u.channel.channel_key = session->metadata->fd;
-               lkm.u.channel.max_sb_size = session->metadata->conf->attr.subbuf_size;
-               lkm.u.channel.mmap_len = 0;     /* for kernel */
-               DBG("Sending metadata channel %d to consumer", lkm.u.channel.channel_key);
-               ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
-               if (ret < 0) {
-                       PERROR("send consumer channel");
-                       goto error;
-               }
-
-               /* Send metadata stream fd */
-               lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
-               lkm.u.stream.channel_key = session->metadata->fd;
-               lkm.u.stream.stream_key = session->metadata_stream_fd;
-               lkm.u.stream.state = LTTNG_CONSUMER_ACTIVE_STREAM;
-               lkm.u.stream.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
-               lkm.u.stream.mmap_len = 0;      /* for kernel */
-               lkm.u.stream.uid = session->uid;
-               lkm.u.stream.gid = session->gid;
-               strncpy(lkm.u.stream.path_name, session->metadata->pathname, PATH_MAX - 1);
-               lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
-               DBG("Sending metadata stream %d to consumer", lkm.u.stream.stream_key);
-               ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
-               if (ret < 0) {
-                       PERROR("send consumer stream");
-                       goto error;
-               }
-               ret = lttcomm_send_fds_unix_sock(sock, &session->metadata_stream_fd, 1);
-               if (ret < 0) {
-                       PERROR("send consumer stream");
-                       goto error;
-               }
-       }
-
-       cds_list_for_each_entry(chan, &session->channel_list.head, list) {
-               ret = send_kconsumer_channel_streams(consumer_data, sock, chan,
-                               session->uid, session->gid);
-               if (ret < 0) {
-                       goto error;
-               }
-       }
-
-       DBG("consumer fds (metadata and channel streams) sent");
-
-       return 0;
-
-error:
-       return ret;
-}
-
 /*
  * Notify UST applications using the shm mmap futex.
  */
@@ -816,10 +664,11 @@ static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
                                 * that tracing is started so it is safe to send our updated
                                 * stream fds.
                                 */
-                               if (session->kernel_session->consumer_fds_sent == 1) {
-                                       ret = send_kconsumer_channel_streams(consumer_data,
+                               if (session->kernel_session->consumer_fds_sent == 1 &&
+                                               session->kernel_session->consumer != NULL) {
+                                       ret = kernel_consumer_send_channel_stream(
                                                        session->kernel_session->consumer_fd, channel,
-                                                       session->uid, session->gid);
+                                                       session->kernel_session);
                                        if (ret < 0) {
                                                goto error;
                                        }
@@ -874,6 +723,8 @@ static void *thread_manage_kernel(void *data)
 
        DBG("Thread manage kernel started");
 
+       health_code_update(&health_thread_kernel);
+
        ret = create_thread_poll_set(&events, 2);
        if (ret < 0) {
                goto error_poll_create;
@@ -885,6 +736,8 @@ static void *thread_manage_kernel(void *data)
        }
 
        while (1) {
+               health_code_update(&health_thread_kernel);
+
                if (update_poll_flag == 1) {
                        /*
                         * Reset number of fd in the poll set. Always 2 since there is the thread
@@ -908,7 +761,9 @@ static void *thread_manage_kernel(void *data)
 
                /* Poll infinite value of time */
        restart:
+               health_poll_update(&health_thread_kernel);
                ret = lttng_poll_wait(&events, -1);
+               health_poll_update(&health_thread_kernel);
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -929,6 +784,8 @@ static void *thread_manage_kernel(void *data)
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
+                       health_code_update(&health_thread_kernel);
+
                        /* Thread quit pipe has been closed. Killing thread. */
                        ret = check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
@@ -963,6 +820,7 @@ static void *thread_manage_kernel(void *data)
 error:
        lttng_poll_clean(&events);
 error_poll_create:
+       health_reset(&health_thread_kernel);
        DBG("Kernel thread dying");
        return NULL;
 }
@@ -980,6 +838,8 @@ static void *thread_manage_consumer(void *data)
 
        DBG("[thread] Manage consumer started");
 
+       health_code_update(&consumer_data->health);
+
        ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
        if (ret < 0) {
                goto error_listen;
@@ -1001,9 +861,13 @@ static void *thread_manage_consumer(void *data)
 
        nb_fd = LTTNG_POLL_GETNB(&events);
 
+       health_code_update(&consumer_data->health);
+
        /* Inifinite blocking call, waiting for transmission */
 restart:
+       health_poll_update(&consumer_data->health);
        ret = lttng_poll_wait(&events, -1);
+       health_poll_update(&consumer_data->health);
        if (ret < 0) {
                /*
                 * Restart interrupted system call.
@@ -1019,6 +883,8 @@ restart:
                revents = LTTNG_POLL_GETEV(&events, i);
                pollfd = LTTNG_POLL_GETFD(&events, i);
 
+               health_code_update(&consumer_data->health);
+
                /* Thread quit pipe has been closed. Killing thread. */
                ret = check_thread_quit_pipe(pollfd, revents);
                if (ret) {
@@ -1039,6 +905,8 @@ restart:
                goto error;
        }
 
+       health_code_update(&consumer_data->health);
+
        DBG2("Receiving code from consumer err_sock");
 
        /* Getting status code from kconsumerd */
@@ -1048,6 +916,8 @@ restart:
                goto error;
        }
 
+       health_code_update(&consumer_data->health);
+
        if (code == CONSUMERD_COMMAND_SOCK_READY) {
                consumer_data->cmd_sock =
                        lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
@@ -1076,12 +946,16 @@ restart:
                goto error;
        }
 
+       health_code_update(&consumer_data->health);
+
        /* Update number of fd */
        nb_fd = LTTNG_POLL_GETNB(&events);
 
        /* Inifinite blocking call, waiting for transmission */
 restart_poll:
+       health_poll_update(&consumer_data->health);
        ret = lttng_poll_wait(&events, -1);
+       health_poll_update(&consumer_data->health);
        if (ret < 0) {
                /*
                 * Restart interrupted system call.
@@ -1097,6 +971,8 @@ restart_poll:
                revents = LTTNG_POLL_GETEV(&events, i);
                pollfd = LTTNG_POLL_GETFD(&events, i);
 
+               health_code_update(&consumer_data->health);
+
                /* Thread quit pipe has been closed. Killing thread. */
                ret = check_thread_quit_pipe(pollfd, revents);
                if (ret) {
@@ -1112,6 +988,8 @@ restart_poll:
                }
        }
 
+       health_code_update(&consumer_data->health);
+
        /* Wait for any kconsumerd error */
        ret = lttcomm_recv_unix_sock(sock, &code,
                        sizeof(enum lttcomm_return_code));
@@ -1160,6 +1038,7 @@ error:
        lttng_poll_clean(&events);
 error_poll:
 error_listen:
+       health_reset(&consumer_data->health);
        DBG("consumer thread cleanup completed");
 
        return NULL;
@@ -1180,6 +1059,8 @@ static void *thread_manage_apps(void *data)
        rcu_register_thread();
        rcu_thread_online();
 
+       health_code_update(&health_thread_app_reg);
+
        ret = create_thread_poll_set(&events, 2);
        if (ret < 0) {
                goto error_poll_create;
@@ -1190,6 +1071,8 @@ static void *thread_manage_apps(void *data)
                goto error;
        }
 
+       health_code_update(&health_thread_app_reg);
+
        while (1) {
                /* Zeroed the events structure */
                lttng_poll_reset(&events);
@@ -1200,7 +1083,9 @@ static void *thread_manage_apps(void *data)
 
                /* Inifinite blocking call, waiting for transmission */
        restart:
+               health_poll_update(&health_thread_app_reg);
                ret = lttng_poll_wait(&events, -1);
+               health_poll_update(&health_thread_app_reg);
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -1216,6 +1101,8 @@ static void *thread_manage_apps(void *data)
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
+                       health_code_update(&health_thread_app_reg);
+
                        /* Thread quit pipe has been closed. Killing thread. */
                        ret = check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
@@ -1235,6 +1122,8 @@ static void *thread_manage_apps(void *data)
                                                goto error;
                                        }
 
+                                       health_code_update(&health_thread_app_reg);
+
                                        /* Register applicaton to the session daemon */
                                        ret = ust_app_register(&ust_cmd.reg_msg,
                                                        ust_cmd.sock);
@@ -1244,6 +1133,8 @@ static void *thread_manage_apps(void *data)
                                                break;
                                        }
 
+                                       health_code_update(&health_thread_app_reg);
+
                                        /*
                                         * Validate UST version compatibility.
                                         */
@@ -1256,6 +1147,8 @@ static void *thread_manage_apps(void *data)
                                                update_ust_app(ust_cmd.sock);
                                        }
 
+                                       health_code_update(&health_thread_app_reg);
+
                                        ret = ust_app_register_done(ust_cmd.sock);
                                        if (ret < 0) {
                                                /*
@@ -1279,6 +1172,8 @@ static void *thread_manage_apps(void *data)
                                                                ust_cmd.sock);
                                        }
 
+                                       health_code_update(&health_thread_app_reg);
+
                                        break;
                                }
                        } else {
@@ -1298,12 +1193,15 @@ static void *thread_manage_apps(void *data)
                                        break;
                                }
                        }
+
+                       health_code_update(&health_thread_app_reg);
                }
        }
 
 error:
        lttng_poll_clean(&events);
 error_poll_create:
+       health_reset(&health_thread_app_reg);
        DBG("Application communication apps thread cleanup complete");
        rcu_thread_offline();
        rcu_unregister_thread();
@@ -1322,7 +1220,7 @@ static void *thread_dispatch_ust_registration(void *data)
 
        DBG("[thread] Dispatch UST command started");
 
-       while (!dispatch_thread_exit) {
+       while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
                /* Atomically prepare the queue futex */
                futex_nto1_prepare(&ust_cmd_queue.futex);
 
@@ -1843,6 +1741,23 @@ error:
        return ret;
 }
 
+/*
+ * Compute health status of each consumer.
+ */
+static int check_consumer_health(void)
+{
+       int ret;
+
+       ret =
+               health_check_state(&kconsumer_data.health) &
+               health_check_state(&ustconsumer32_data.health) &
+               health_check_state(&ustconsumer64_data.health);
+
+       DBG3("Health consumer check %d", ret);
+
+       return ret;
+}
+
 /*
  * Check version of the lttng-modules.
  */
@@ -1921,7 +1836,7 @@ static int init_kernel_tracing(struct ltt_kernel_session *session)
 {
        int ret = 0;
 
-       if (session->consumer_fds_sent == 0) {
+       if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
                /*
                 * Assign default kernel consumer socket if no consumer assigned to the
                 * kernel session. At this point, it's NOT supposed to be -1 but this is
@@ -1931,13 +1846,11 @@ static int init_kernel_tracing(struct ltt_kernel_session *session)
                        session->consumer_fd = kconsumer_data.cmd_sock;
                }
 
-               ret = send_kconsumer_session_streams(&kconsumer_data, session);
+               ret = kernel_consumer_send_session(session->consumer_fd, session);
                if (ret < 0) {
                        ret = LTTCOMM_KERN_CONSUMER_FAIL;
                        goto error;
                }
-
-               session->consumer_fds_sent = 1;
        }
 
 error:
@@ -1945,146 +1858,422 @@ error:
 }
 
 /*
- * Create an UST session and add it to the session ust list.
+ * Create a socket to the relayd using the URI.
+ *
+ * On success, the relayd_sock pointer is set to the created socket.
+ * Else, it is untouched and an lttcomm error code is returned.
  */
-static int create_ust_session(struct ltt_session *session,
-               struct lttng_domain *domain)
+static int create_connect_relayd(struct consumer_output *output,
+               const char *session_name, struct lttng_uri *uri,
+               struct lttcomm_sock **relayd_sock)
 {
-       struct ltt_ust_session *lus = NULL;
        int ret;
+       struct lttcomm_sock *sock;
 
-       switch (domain->type) {
-       case LTTNG_DOMAIN_UST:
-               break;
-       default:
-               ret = LTTCOMM_UNKNOWN_DOMAIN;
+       /* Create socket object from URI */
+       sock = lttcomm_alloc_sock_from_uri(uri);
+       if (sock == NULL) {
+               ret = LTTCOMM_FATAL;
                goto error;
        }
 
-       DBG("Creating UST session");
-
-       lus = trace_ust_create_session(session->path, session->id, domain);
-       if (lus == NULL) {
-               ret = LTTCOMM_UST_SESS_FAIL;
+       ret = lttcomm_create_sock(sock);
+       if (ret < 0) {
+               ret = LTTCOMM_FATAL;
                goto error;
        }
 
-       ret = run_as_mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
-                       session->uid, session->gid);
+       /* Connect to relayd so we can proceed with a session creation. */
+       ret = relayd_connect(sock);
        if (ret < 0) {
-               if (ret != -EEXIST) {
-                       ERR("Trace directory creation error");
-                       ret = LTTCOMM_UST_SESS_FAIL;
-                       goto error;
-               }
+               ERR("Unable to reach lttng-relayd");
+               ret = LTTCOMM_RELAYD_SESSION_FAIL;
+               goto free_sock;
        }
 
-       /* The domain type dictate different actions on session creation */
-       switch (domain->type) {
-       case LTTNG_DOMAIN_UST:
-               /* No ustctl for the global UST domain */
-               break;
-       default:
-               ERR("Unknown UST domain on create session %d", domain->type);
-               goto error;
+       /* Create socket for control stream. */
+       if (uri->stype == LTTNG_STREAM_CONTROL) {
+               DBG3("Creating relayd stream socket from URI");
+
+               /* Check relayd version */
+               ret = relayd_version_check(sock, LTTNG_UST_COMM_MAJOR, 0);
+               if (ret < 0) {
+                       ret = LTTCOMM_RELAYD_VERSION_FAIL;
+                       goto close_sock;
+               }
+       } else if (uri->stype == LTTNG_STREAM_DATA) {
+               DBG3("Creating relayd data socket from URI");
+       } else {
+               /* Command is not valid */
+               ERR("Relayd invalid stream type: %d", uri->stype);
+               ret = LTTCOMM_INVALID;
+               goto close_sock;
        }
-       lus->uid = session->uid;
-       lus->gid = session->gid;
-       session->ust_session = lus;
+
+       *relayd_sock = sock;
 
        return LTTCOMM_OK;
 
+close_sock:
+       if (sock) {
+               (void) relayd_close(sock);
+       }
+free_sock:
+       if (sock) {
+               lttcomm_destroy_sock(sock);
+       }
 error:
-       free(lus);
        return ret;
 }
 
 /*
- * Create a kernel tracer session then create the default channel.
+ * Connect to the relayd using URI and send the socket to the right consumer.
  */
-static int create_kernel_session(struct ltt_session *session)
+static int send_socket_relayd_consumer(int domain, struct ltt_session *session,
+               struct lttng_uri *relayd_uri, struct consumer_output *consumer,
+               int consumer_fd)
 {
        int ret;
+       struct lttcomm_sock *sock = NULL;
 
-       DBG("Creating kernel session");
+       /* Set the network sequence index if not set. */
+       if (consumer->net_seq_index == -1) {
+               /*
+                * Increment net_seq_idx because we are about to transfer the
+                * new relayd socket to the consumer.
+                */
+               uatomic_inc(&relayd_net_seq_idx);
+               /* Assign unique key so the consumer can match streams */
+               consumer->net_seq_index = uatomic_read(&relayd_net_seq_idx);
+       }
 
-       ret = kernel_create_session(session, kernel_tracer_fd);
-       if (ret < 0) {
-               ret = LTTCOMM_KERN_SESS_FAIL;
-               goto error;
+       /* Connect to relayd and make version check if uri is the control. */
+       ret = create_connect_relayd(consumer, session->name, relayd_uri, &sock);
+       if (ret != LTTCOMM_OK) {
+               goto close_sock;
        }
 
-       /* Set kernel consumer socket fd */
-       if (kconsumer_data.cmd_sock >= 0) {
-               session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
+       /* If the control socket is connected, network session is ready */
+       if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
+               session->net_handle = 1;
        }
 
-       ret = run_as_mkdir_recursive(session->kernel_session->trace_path,
-                       S_IRWXU | S_IRWXG, session->uid, session->gid);
+       /* Send relayd socket to consumer. */
+       ret = consumer_send_relayd_socket(consumer_fd, sock,
+                       consumer, relayd_uri->stype);
        if (ret < 0) {
-               if (ret != -EEXIST) {
-                       ERR("Trace directory creation error");
-                       goto error;
-               }
+               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
+               goto close_sock;
+       }
+
+       ret = LTTCOMM_OK;
+
+       /*
+        * Close the socket that was dup'd on the consumer side. The session daemon
+        * does NOT keep track of the relayd socket(s) once transferred to the consumer.
+        */
+
+close_sock:
+       if (sock) {
+               (void) relayd_close(sock);
+               lttcomm_destroy_sock(sock);
        }
-       session->kernel_session->uid = session->uid;
-       session->kernel_session->gid = session->gid;
 
-error:
        return ret;
 }
 
 /*
- * Check if the UID or GID match the session. Root user has access to all
- * sessions.
+ * Send both relayd sockets to a specific consumer and domain.  This is a
+ * helper function to facilitate sending the information to the consumer for a
+ * session.
  */
-static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
+static int send_sockets_relayd_consumer(int domain,
+               struct ltt_session *session, struct consumer_output *consumer, int fd)
 {
-       if (uid != session->uid && gid != session->gid && uid != 0) {
-               return 0;
-       } else {
-               return 1;
+       int ret;
+
+       /* Sending control relayd socket. */
+       ret = send_socket_relayd_consumer(domain, session,
+                       &consumer->dst.net.control, consumer, fd);
+       if (ret != LTTCOMM_OK) {
+               goto error;
+       }
+
+       /* Sending data relayd socket. */
+       ret = send_socket_relayd_consumer(domain, session,
+                       &consumer->dst.net.data, consumer, fd);
+       if (ret != LTTCOMM_OK) {
+               goto error;
        }
+
+error:
+       return ret;
 }
 
-static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
+/*
+ * Set up relayd connections for a tracing session. First creates the sockets
+ * to the relayd, then sends them to the right domain consumer. The consumer
+ * type MUST be network.
+ */
+static int setup_relayd(struct ltt_session *session)
 {
-       unsigned int i = 0;
-       struct ltt_session *session;
+       int ret = LTTCOMM_OK;
+       struct ltt_ust_session *usess;
+       struct ltt_kernel_session *ksess;
 
-       DBG("Counting number of available session for UID %d GID %d",
-               uid, gid);
-       cds_list_for_each_entry(session, &session_list_ptr->head, list) {
-               /*
-                * Only list the sessions the user can control.
-                */
-               if (!session_access_ok(session, uid, gid)) {
-                       continue;
+       assert(session);
+
+       usess = session->ust_session;
+       ksess = session->kernel_session;
+
+       DBG2("Setting relayd for session %s", session->name);
+
+       if (usess && usess->consumer->sock == -1 &&
+                       usess->consumer->type == CONSUMER_DST_NET &&
+                       usess->consumer->enabled) {
+               /* Setup relayd for 64 bits consumer */
+               if (ust_consumerd64_fd >= 0) {
+                       ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
+                                       usess->consumer, ust_consumerd64_fd);
+                       if (ret != LTTCOMM_OK) {
+                               goto error;
+                       }
+               }
+
+               /* Setup relayd for 32 bits consumer */
+               if (ust_consumerd32_fd >= 0) {
+                       ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
+                                       usess->consumer, ust_consumerd32_fd);
+                       if (ret != LTTCOMM_OK) {
+                               goto error;
+                       }
+               }
+       } else if (ksess && ksess->consumer->sock == -1 &&
+                       ksess->consumer->type == CONSUMER_DST_NET &&
+                       ksess->consumer->enabled) {
+               ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_KERNEL, session,
+                               ksess->consumer, ksess->consumer_fd);
+               if (ret != LTTCOMM_OK) {
+                       goto error;
                }
-               i++;
        }
-       return i;
+
+error:
+       return ret;
 }
 
 /*
- * Using the session list, filled a lttng_session array to send back to the
- * client for session listing.
- *
- * The session list lock MUST be acquired before calling this function. Use
- * session_lock_list() and session_unlock_list().
+ * Copy consumer output from the tracing session to the domain session. The
+ * function also applies the right modification on a per domain basis for the
+ * trace files destination directory.
  */
-static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
-               gid_t gid)
+static int copy_session_consumer(int domain, struct ltt_session *session)
 {
-       unsigned int i = 0;
-       struct ltt_session *session;
+       int ret;
+       const char *dir_name;
+       struct consumer_output *consumer;
 
-       DBG("Getting all available session for UID %d GID %d",
-               uid, gid);
-       /*
-        * Iterate over session list and append data after the control struct in
-        * the buffer.
-        */
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               DBG3("Copying tracing session consumer output in kernel session");
+               session->kernel_session->consumer =
+                       consumer_copy_output(session->consumer);
+               /* Ease our life a bit for the next part */
+               consumer = session->kernel_session->consumer;
+               dir_name = DEFAULT_KERNEL_TRACE_DIR;
+               break;
+       case LTTNG_DOMAIN_UST:
+               DBG3("Copying tracing session consumer output in UST session");
+               session->ust_session->consumer =
+                       consumer_copy_output(session->consumer);
+               /* Ease our life a bit for the next part */
+               consumer = session->ust_session->consumer;
+               dir_name = DEFAULT_UST_TRACE_DIR;
+               break;
+       default:
+               ret = LTTCOMM_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
+       /* Append correct directory to subdir */
+       strncat(consumer->subdir, dir_name, sizeof(consumer->subdir));
+       DBG3("Copy session consumer subdir %s", consumer->subdir);
+
+       /* Add default trace directory name */
+       if (consumer->type == CONSUMER_DST_LOCAL) {
+               strncat(consumer->dst.trace_path, dir_name,
+                               sizeof(consumer->dst.trace_path));
+       }
+
+       ret = LTTCOMM_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Create an UST session and add it to the session ust list.
+ */
+static int create_ust_session(struct ltt_session *session,
+               struct lttng_domain *domain)
+{
+       int ret;
+       struct ltt_ust_session *lus = NULL;
+
+       assert(session);
+       assert(session->consumer);
+
+       switch (domain->type) {
+       case LTTNG_DOMAIN_UST:
+               break;
+       default:
+               ERR("Unknown UST domain on create session %d", domain->type);
+               ret = LTTCOMM_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
+       DBG("Creating UST session");
+
+       lus = trace_ust_create_session(session->path, session->id, domain);
+       if (lus == NULL) {
+               ret = LTTCOMM_UST_SESS_FAIL;
+               goto error;
+       }
+
+       if (session->consumer->type == CONSUMER_DST_LOCAL) {
+               ret = run_as_mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
+                               session->uid, session->gid);
+               if (ret < 0) {
+                       if (ret != -EEXIST) {
+                               ERR("Trace directory creation error");
+                               ret = LTTCOMM_UST_SESS_FAIL;
+                               goto error;
+                       }
+               }
+       }
+
+       lus->uid = session->uid;
+       lus->gid = session->gid;
+       session->ust_session = lus;
+
+       /* Copy session output to the newly created UST session */
+       ret = copy_session_consumer(domain->type, session);
+       if (ret != LTTCOMM_OK) {
+               goto error;
+       }
+
+       return LTTCOMM_OK;
+
+error:
+       free(lus);
+       session->ust_session = NULL;
+       return ret;
+}
+
+/*
+ * Create a kernel tracer session then create the default channel.
+ */
+static int create_kernel_session(struct ltt_session *session)
+{
+       int ret;
+
+       DBG("Creating kernel session");
+
+       ret = kernel_create_session(session, kernel_tracer_fd);
+       if (ret < 0) {
+               ret = LTTCOMM_KERN_SESS_FAIL;
+               goto error;
+       }
+
+       /* Set kernel consumer socket fd */
+       if (kconsumer_data.cmd_sock >= 0) {
+               session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
+       }
+
+       /* Copy session output to the newly created Kernel session */
+       ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
+       if (ret != LTTCOMM_OK) {
+               goto error;
+       }
+
+       /* Create directory(ies) on local filesystem. */
+       if (session->consumer->type == CONSUMER_DST_LOCAL) {
+               ret = run_as_mkdir_recursive(
+                               session->kernel_session->consumer->dst.trace_path,
+                               S_IRWXU | S_IRWXG, session->uid, session->gid);
+               if (ret < 0) {
+                       if (ret != -EEXIST) {
+                               ERR("Trace directory creation error");
+                               goto error;
+                       }
+               }
+       }
+
+       session->kernel_session->uid = session->uid;
+       session->kernel_session->gid = session->gid;
+
+       return LTTCOMM_OK;
+
+error:
+       trace_kernel_destroy_session(session->kernel_session);
+       session->kernel_session = NULL;
+       return ret;
+}
+
+/*
+ * Check if the UID or GID match the session. Root user has access to all
+ * sessions.
+ */
+static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
+{
+       if (uid != session->uid && gid != session->gid && uid != 0) {
+               return 0;
+       } else {
+               return 1;
+       }
+}
+
+/*
+ * Count the number of sessions permitted by uid/gid.
+ */
+static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
+{
+       unsigned int i = 0;
+       struct ltt_session *session;
+
+       DBG("Counting number of available session for UID %d GID %d",
+               uid, gid);
+       cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+               /*
+                * Only list the sessions the user can control.
+                */
+               if (!session_access_ok(session, uid, gid)) {
+                       continue;
+               }
+               i++;
+       }
+       return i;
+}
+
+/*
+ * Using the session list, filled a lttng_session array to send back to the
+ * client for session listing.
+ *
+ * The session list lock MUST be acquired before calling this function. Use
+ * session_lock_list() and session_unlock_list().
+ */
+static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
+               gid_t gid)
+{
+       unsigned int i = 0;
+       struct ltt_session *session;
+
+       DBG("Getting all available session for UID %d GID %d",
+               uid, gid);
+       /*
+        * Iterate over session list and append data after the control struct in
+        * the buffer.
+        */
        cds_list_for_each_entry(session, &session_list_ptr->head, list) {
                /*
                 * Only list the sessions the user can control.
@@ -2225,6 +2414,9 @@ static int list_lttng_ust_global_events(char *channel_name,
                        tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
                        break;
                }
+               if (uevent->filter) {
+                       tmp[i].filter = 1;
+               }
                i++;
        }
 
@@ -2607,6 +2799,46 @@ error:
        return ret;
 }
 
+/*
+ * Command LTTNG_SET_FILTER processed by the client thread.
+ */
+static int cmd_set_filter(struct ltt_session *session, int domain,
+               char *channel_name, char *event_name,
+               struct lttng_filter_bytecode *bytecode)
+{
+       int ret;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               ret = LTTCOMM_FATAL;
+               break;
+       case LTTNG_DOMAIN_UST:
+       {
+               struct ltt_ust_session *usess = session->ust_session;
+
+               ret = filter_ust_set(usess, domain, bytecode, event_name, channel_name);
+               if (ret != LTTCOMM_OK) {
+                       goto error;
+               }
+               break;
+       }
+#if 0
+       case LTTNG_DOMAIN_UST_EXEC_NAME:
+       case LTTNG_DOMAIN_UST_PID:
+       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
+#endif
+       default:
+               ret = LTTCOMM_UND;
+               goto error;
+       }
+
+       ret = LTTCOMM_OK;
+
+error:
+       return ret;
+
+}
+
 /*
  * Command LTTNG_ENABLE_EVENT processed by the client thread.
  */
@@ -2928,8 +3160,9 @@ static int cmd_start_trace(struct ltt_session *session)
        int ret;
        struct ltt_kernel_session *ksession;
        struct ltt_ust_session *usess;
+       struct ltt_kernel_channel *kchan;
 
-       /* Short cut */
+       /* Ease our life a bit ;) */
        ksession = session->kernel_session;
        usess = session->ust_session;
 
@@ -2941,13 +3174,18 @@ static int cmd_start_trace(struct ltt_session *session)
 
        session->enabled = 1;
 
+       ret = setup_relayd(session);
+       if (ret != LTTCOMM_OK) {
+               ERR("Error setting up relayd for session %s", session->name);
+               goto error;
+       }
+
        /* Kernel tracing */
        if (ksession != NULL) {
-               struct ltt_kernel_channel *kchan;
-
                /* Open kernel metadata */
                if (ksession->metadata == NULL) {
-                       ret = kernel_open_metadata(ksession, ksession->trace_path);
+                       ret = kernel_open_metadata(ksession,
+                                       ksession->consumer->dst.trace_path);
                        if (ret < 0) {
                                ret = LTTCOMM_KERN_META_FAIL;
                                goto error;
@@ -3037,12 +3275,15 @@ static int cmd_stop_trace(struct ltt_session *session)
        if (ksession != NULL) {
                DBG("Stop kernel tracing");
 
-               /* Flush all buffers before stopping */
-               ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
-               if (ret < 0) {
-                       ERR("Kernel metadata flush failed");
+               /* Flush the metadata buffers if a metadata stream exists */
+               if (ksession->metadata_stream_fd >= 0) {
+                       ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
+                       if (ret < 0) {
+                               ERR("Kernel metadata flush failed");
+                       }
                }
 
+               /* Flush all buffers before stopping */
                cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
                        ret = kernel_flush_buffer(kchan);
                        if (ret < 0) {
@@ -3076,19 +3317,108 @@ error:
 }
 
 /*
- * Command LTTNG_CREATE_SESSION processed by the client thread.
+ * Command LTTNG_CREATE_SESSION_URI processed by the client thread.
  */
-static int cmd_create_session(char *name, char *path, lttng_sock_cred *creds)
+static int cmd_create_session_uri(char *name, struct lttng_uri *ctrl_uri,
+               struct lttng_uri *data_uri, unsigned int enable_consumer,
+               lttng_sock_cred *creds)
 {
        int ret;
+       char *path = NULL;
+       struct ltt_session *session;
+       struct consumer_output *consumer;
+
+       /* Check if the session already exists */
+       session = session_find_by_name(name);
+       if (session != NULL) {
+               ret = LTTCOMM_EXIST_SESS;
+               goto error;
+       }
+
+       /* TODO: validate URIs */
+
+       /* Create default consumer output */
+       consumer = consumer_create_output(CONSUMER_DST_LOCAL);
+       if (consumer == NULL) {
+               ret = LTTCOMM_FATAL;
+               goto error;
+       }
+       strncpy(consumer->subdir, ctrl_uri->subdir, sizeof(consumer->subdir));
+       DBG2("Consumer subdir set to %s", consumer->subdir);
+
+       switch (ctrl_uri->dtype) {
+       case LTTNG_DST_IPV4:
+       case LTTNG_DST_IPV6:
+               /* Set control URI into consumer output object */
+               ret = consumer_set_network_uri(consumer, ctrl_uri);
+               if (ret < 0) {
+                       ret = LTTCOMM_FATAL;
+                       goto error;
+               }
+
+               /* Set data URI into consumer output object */
+               ret = consumer_set_network_uri(consumer, data_uri);
+               if (ret < 0) {
+                       ret = LTTCOMM_FATAL;
+                       goto error;
+               }
+
+               /* Empty path since the session is network */
+               path = "";
+               break;
+       case LTTNG_DST_PATH:
+               /* Very volatile pointer. Only used for the create session. */
+               path = ctrl_uri->dst.path;
+               strncpy(consumer->dst.trace_path, path,
+                               sizeof(consumer->dst.trace_path));
+               break;
+       }
+
+       /* Set if the consumer is enabled or not */
+       consumer->enabled = enable_consumer;
 
        ret = session_create(name, path, LTTNG_SOCK_GET_UID_CRED(creds),
                        LTTNG_SOCK_GET_GID_CRED(creds));
        if (ret != LTTCOMM_OK) {
-               goto error;
+               goto consumer_error;
        }
 
-       ret = LTTCOMM_OK;
+       /* Get the newly created session pointer back */
+       session = session_find_by_name(name);
+       assert(session);
+
+       /* Assign consumer to session */
+       session->consumer = consumer;
+
+       return LTTCOMM_OK;
+
+consumer_error:
+       consumer_destroy_output(consumer);
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_CREATE_SESSION processed by the client thread.
+ */
+static int cmd_create_session(char *name, char *path, lttng_sock_cred *creds)
+{
+       int ret;
+       struct lttng_uri uri;
+
+       /* Zeroed temporary URI */
+       memset(&uri, 0, sizeof(uri));
+
+       uri.dtype = LTTNG_DST_PATH;
+       uri.utype = LTTNG_URI_DST;
+       strncpy(uri.dst.path, path, sizeof(uri.dst.path));
+
+       /* TODO: Strip date-time from path and put it in uri's subdir */
+
+       ret = cmd_create_session_uri(name, &uri, NULL, 1, creds);
+       if (ret != LTTCOMM_OK) {
+               goto error;
+       }
 
 error:
        return ret;
@@ -3283,40 +3613,418 @@ static ssize_t cmd_list_channels(int domain, struct ltt_session *session,
 
        return nb_chan;
 
-error:
-       return ret;
-}
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_LIST_EVENTS processed by the client thread.
+ */
+static ssize_t cmd_list_events(int domain, struct ltt_session *session,
+               char *channel_name, struct lttng_event **events)
+{
+       int ret = 0;
+       ssize_t nb_event = 0;
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               if (session->kernel_session != NULL) {
+                       nb_event = list_lttng_kernel_events(channel_name,
+                                       session->kernel_session, events);
+               }
+               break;
+       case LTTNG_DOMAIN_UST:
+       {
+               if (session->ust_session != NULL) {
+                       nb_event = list_lttng_ust_global_events(channel_name,
+                                       &session->ust_session->domain_global, events);
+               }
+               break;
+       }
+       default:
+               ret = -LTTCOMM_UND;
+               goto error;
+       }
+
+       ret = nb_event;
+
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
+ */
+static int cmd_set_consumer_uri(int domain, struct ltt_session *session,
+               struct lttng_uri *uri)
+{
+       int ret;
+       struct ltt_kernel_session *ksess = session->kernel_session;
+       struct ltt_ust_session *usess = session->ust_session;
+       struct consumer_output *consumer;
+
+       /* Can't enable consumer after session started. */
+       if (session->enabled) {
+               ret = LTTCOMM_TRACE_ALREADY_STARTED;
+               goto error;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               /* Code flow error if we don't have a kernel session here. */
+               assert(ksess);
+
+               /* Create consumer output if none exists */
+               consumer = ksess->tmp_consumer;
+               if (consumer == NULL) {
+                       consumer = consumer_copy_output(ksess->consumer);
+                       if (consumer == NULL) {
+                               ret = LTTCOMM_FATAL;
+                               goto error;
+                       }
+                       /* Reassign new pointer */
+                       ksess->tmp_consumer = consumer;
+               }
+
+               switch (uri->dtype) {
+               case LTTNG_DST_IPV4:
+               case LTTNG_DST_IPV6:
+                       DBG2("Setting network URI for kernel session %s", session->name);
+
+                       /* Set URI into consumer output object */
+                       ret = consumer_set_network_uri(consumer, uri);
+                       if (ret < 0) {
+                               ret = LTTCOMM_FATAL;
+                               goto error;
+                       }
+
+                       /* On a new subdir, reappend the default trace dir. */
+                       if (strlen(uri->subdir) != 0) {
+                               strncat(consumer->subdir, DEFAULT_KERNEL_TRACE_DIR,
+                                               sizeof(consumer->subdir));
+                       }
+
+                       ret = send_socket_relayd_consumer(domain, session, uri, consumer,
+                                       ksess->consumer_fd);
+                       if (ret != LTTCOMM_OK) {
+                               goto error;
+                       }
+                       break;
+               case LTTNG_DST_PATH:
+                       DBG2("Setting trace directory path from URI to %s", uri->dst.path);
+                       memset(consumer->dst.trace_path, 0,
+                                       sizeof(consumer->dst.trace_path));
+                       strncpy(consumer->dst.trace_path, uri->dst.path,
+                                       sizeof(consumer->dst.trace_path));
+                       /* Append default kernel trace dir */
+                       strncat(consumer->dst.trace_path, DEFAULT_KERNEL_TRACE_DIR,
+                                       sizeof(consumer->dst.trace_path));
+                       break;
+               }
+
+               /* All good! */
+               break;
+       case LTTNG_DOMAIN_UST:
+               /* Code flow error if we don't have a UST session here. */
+               assert(usess);
+
+               /* Create consumer output if none exists */
+               consumer = usess->tmp_consumer;
+               if (consumer == NULL) {
+                       consumer = consumer_copy_output(usess->consumer);
+                       if (consumer == NULL) {
+                               ret = LTTCOMM_FATAL;
+                               goto error;
+                       }
+                       /* Reassign new pointer */
+                       usess->tmp_consumer = consumer;
+               }
+
+               switch (uri->dtype) {
+               case LTTNG_DST_IPV4:
+               case LTTNG_DST_IPV6:
+               {
+                       DBG2("Setting network URI for UST session %s", session->name);
+
+                       /* Set URI into consumer object */
+                       ret = consumer_set_network_uri(consumer, uri);
+                       if (ret < 0) {
+                               ret = LTTCOMM_FATAL;
+                               goto error;
+                       }
+
+                       /* On a new subdir, reappend the default trace dir. */
+                       if (strlen(uri->subdir) != 0) {
+                               strncat(consumer->subdir, DEFAULT_UST_TRACE_DIR,
+                                               sizeof(consumer->subdir));
+                       }
+
+                       if (ust_consumerd64_fd >= 0) {
+                               ret = send_socket_relayd_consumer(domain, session, uri,
+                                               consumer, ust_consumerd64_fd);
+                               if (ret != LTTCOMM_OK) {
+                                       goto error;
+                               }
+                       }
+
+                       if (ust_consumerd32_fd >= 0) {
+                               ret = send_socket_relayd_consumer(domain, session, uri,
+                                               consumer, ust_consumerd32_fd);
+                               if (ret != LTTCOMM_OK) {
+                                       goto error;
+                               }
+                       }
+
+                       break;
+               }
+               case LTTNG_DST_PATH:
+                       DBG2("Setting trace directory path from URI to %s", uri->dst.path);
+                       memset(consumer->dst.trace_path, 0,
+                                       sizeof(consumer->dst.trace_path));
+                       strncpy(consumer->dst.trace_path, uri->dst.path,
+                                       sizeof(consumer->dst.trace_path));
+                       /* Append default UST trace dir */
+                       strncat(consumer->dst.trace_path, DEFAULT_UST_TRACE_DIR,
+                                       sizeof(consumer->dst.trace_path));
+                       break;
+               }
+               break;
+       }
+
+       /* All good! */
+       ret = LTTCOMM_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_DISABLE_CONSUMER processed by the client thread.
+ */
+static int cmd_disable_consumer(int domain, struct ltt_session *session)
+{
+       int ret;
+       struct ltt_kernel_session *ksess = session->kernel_session;
+       struct ltt_ust_session *usess = session->ust_session;
+       struct consumer_output *consumer;
+
+       if (session->enabled) {
+               /* Can't disable consumer on an already started session */
+               ret = LTTCOMM_TRACE_ALREADY_STARTED;
+               goto error;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               /* Code flow error if we don't have a kernel session here. */
+               assert(ksess);
+
+               DBG("Disabling kernel consumer");
+               consumer = ksess->consumer;
+
+               break;
+       case LTTNG_DOMAIN_UST:
+               /* Code flow error if we don't have a UST session here. */
+               assert(usess);
+
+               DBG("Disabling UST consumer");
+               consumer = usess->consumer;
+
+               break;
+       default:
+               ret = LTTCOMM_UNKNOWN_DOMAIN;
+               goto error;
+       }
+
+       assert(consumer);
+       consumer->enabled = 0;
+
+       /* Success at this point */
+       ret = LTTCOMM_OK;
+
+error:
+       return ret;
+}
+
+/*
+ * Command LTTNG_ENABLE_CONSUMER processed by the client thread.
+ */
+static int cmd_enable_consumer(int domain, struct ltt_session *session)
+{
+       int ret;
+       struct ltt_kernel_session *ksess = session->kernel_session;
+       struct ltt_ust_session *usess = session->ust_session;
+       struct consumer_output *tmp_out;
+
+       /* Can't enable consumer after session started. */
+       if (session->enabled) {
+               ret = LTTCOMM_TRACE_ALREADY_STARTED;
+               goto error;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               /* Code flow error if we don't have a kernel session here. */
+               assert(ksess);
+
+               /*
+                * Check if we have already sent fds to the consumer. In that case,
+                * the enable-consumer command can't be used because a start trace
+                * had previously occurred.
+                */
+               if (ksess->consumer_fds_sent) {
+                       ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
+                       goto error;
+               }
+
+               tmp_out = ksess->tmp_consumer;
+               if (tmp_out == NULL) {
+                       /* No temp. consumer output exists. Using the current one. */
+                       DBG3("No temporary consumer. Using default");
+                       ret = LTTCOMM_OK;
+                       goto error;
+               }
+
+               switch (tmp_out->type) {
+               case CONSUMER_DST_LOCAL:
+                       DBG2("Consumer output is local. Creating directory(ies)");
+
+                       /* Create directory(ies) */
+                       ret = run_as_mkdir_recursive(tmp_out->dst.trace_path,
+                                       S_IRWXU | S_IRWXG, session->uid, session->gid);
+                       if (ret < 0) {
+                               if (ret != -EEXIST) {
+                                       ERR("Trace directory creation error");
+                                       ret = LTTCOMM_FATAL;
+                                       goto error;
+                               }
+                       }
+                       break;
+               case CONSUMER_DST_NET:
+                       DBG2("Consumer output is network. Validating URIs");
+                       /* Validate if we have both control and data path set. */
+                       if (!tmp_out->dst.net.control_isset) {
+                               ret = LTTCOMM_URI_CTRL_MISS;
+                               goto error;
+                       }
+
+                       if (!tmp_out->dst.net.data_isset) {
+                               ret = LTTCOMM_URI_DATA_MISS;
+                               goto error;
+                       }
+
+                       /* Check established network session state */
+                       if (session->net_handle == 0) {
+                               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
+                               ERR("Session network handle is not set on enable-consumer");
+                               goto error;
+                       }
+
+                       /* Append default kernel trace dir to subdir */
+                       strncat(ksess->consumer->subdir, DEFAULT_KERNEL_TRACE_DIR,
+                                       sizeof(ksess->consumer->subdir) -
+                                       strlen(ksess->consumer->subdir) - 1);
+
+                       break;
+               }
+
+               /*
+                * @session-lock
+                * This is race free for now since the session lock is acquired before
+                * ending up in this function. No other threads can access this kernel
+                * session without this lock, hence freeing the consumer output object
+                * is valid.
+                */
+               consumer_destroy_output(ksess->consumer);
+               ksess->consumer = tmp_out;
+               ksess->tmp_consumer = NULL;
+
+               break;
+       case LTTNG_DOMAIN_UST:
+               /* Code flow error if we don't have a UST session here. */
+               assert(usess);
+
+               /*
+                * Check if we have already sent fds to the consumer. In that case,
+                * the enable-consumer command can't be used because a start trace
+                * had previously occurred.
+                */
+               if (usess->start_trace) {
+                       ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
+                       goto error;
+               }
+
+               tmp_out = usess->tmp_consumer;
+               if (tmp_out == NULL) {
+                       /* No temp. consumer output exists. Using the current one. */
+                       DBG3("No temporary consumer. Using default");
+                       ret = LTTCOMM_OK;
+                       goto error;
+               }
+
+               switch (tmp_out->type) {
+               case CONSUMER_DST_LOCAL:
+                       DBG2("Consumer output is local. Creating directory(ies)");
+
+                       /* Create directory(ies) */
+                       ret = run_as_mkdir_recursive(tmp_out->dst.trace_path,
+                                       S_IRWXU | S_IRWXG, session->uid, session->gid);
+                       if (ret < 0) {
+                               if (ret != -EEXIST) {
+                                       ERR("Trace directory creation error");
+                                       ret = LTTCOMM_FATAL;
+                                       goto error;
+                               }
+                       }
+                       break;
+               case CONSUMER_DST_NET:
+                       DBG2("Consumer output is network. Validating URIs");
+                       /* Validate if we have both control and data path set. */
+                       if (!tmp_out->dst.net.control_isset) {
+                               ret = LTTCOMM_URI_CTRL_MISS;
+                               goto error;
+                       }
+
+                       if (!tmp_out->dst.net.data_isset) {
+                               ret = LTTCOMM_URI_DATA_MISS;
+                               goto error;
+                       }
+
+                       /* Check established network session state */
+                       if (session->net_handle == 0) {
+                               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
+                               DBG2("Session network handle is not set on enable-consumer");
+                               goto error;
+                       }
+
+                       if (tmp_out->net_seq_index == -1) {
+                               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
+                               DBG2("Network index is not set on the consumer");
+                               goto error;
+                       }
 
-/*
- * Command LTTNG_LIST_EVENTS processed by the client thread.
- */
-static ssize_t cmd_list_events(int domain, struct ltt_session *session,
-               char *channel_name, struct lttng_event **events)
-{
-       int ret = 0;
-       ssize_t nb_event = 0;
+                       /* Append default UST trace dir to subdir */
+                       strncat(usess->consumer->subdir, DEFAULT_UST_TRACE_DIR,
+                                       sizeof(usess->consumer->subdir) -
+                                       strlen(usess->consumer->subdir) - 1);
 
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (session->kernel_session != NULL) {
-                       nb_event = list_lttng_kernel_events(channel_name,
-                                       session->kernel_session, events);
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               if (session->ust_session != NULL) {
-                       nb_event = list_lttng_ust_global_events(channel_name,
-                                       &session->ust_session->domain_global, events);
+                       break;
                }
+
+               /*
+                * @session-lock
+                * This is race free for now since the session lock is acquired before
+                * ending up in this function. No other threads can access this UST
+                * session without this lock, hence freeing the consumer output object
+                * is valid.
+                */
+               consumer_destroy_output(usess->consumer);
+               usess->consumer = tmp_out;
+               usess->tmp_consumer = NULL;
+
                break;
        }
-       default:
-               ret = -LTTCOMM_UND;
-               goto error;
-       }
 
-       ret = nb_event;
+       /* Success at this point */
+       ret = LTTCOMM_OK;
 
 error:
        return ret;
@@ -3328,8 +4036,11 @@ error:
  * is set and ready for transmission before returning.
  *
  * Return any error encountered or 0 for success.
+ *
+ * "sock" is only used for commands that carry variable length data, such as
+ * LTTNG_SET_FILTER.
  */
-static int process_client_msg(struct command_ctx *cmd_ctx)
+static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
+               int *sock_error)
 {
        int ret = LTTCOMM_OK;
        int need_tracing_session = 1;
@@ -3337,8 +4048,11 @@ static int process_client_msg(struct command_ctx *cmd_ctx)
 
        DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
 
+       *sock_error = 0;
+
        switch (cmd_ctx->lsm->cmd_type) {
        case LTTNG_CREATE_SESSION:
+       case LTTNG_CREATE_SESSION_URI:
        case LTTNG_DESTROY_SESSION:
        case LTTNG_LIST_SESSIONS:
        case LTTNG_LIST_DOMAINS:
@@ -3385,6 +4099,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx)
        /* Commands that DO NOT need a session. */
        switch (cmd_ctx->lsm->cmd_type) {
        case LTTNG_CREATE_SESSION:
+       case LTTNG_CREATE_SESSION_URI:
        case LTTNG_CALIBRATE:
        case LTTNG_LIST_SESSIONS:
        case LTTNG_LIST_TRACEPOINTS:
@@ -3464,6 +4179,10 @@ static int process_client_msg(struct command_ctx *cmd_ctx)
                                        goto error;
                                }
                                uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
+
+                               /* Set consumer fd of the session */
+                               cmd_ctx->session->kernel_session->consumer_fd =
+                                       kconsumer_data.cmd_sock;
                        } else {
                                pthread_mutex_unlock(&kconsumer_data.pid_mutex);
                        }
@@ -3486,6 +4205,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx)
                                        goto error;
                                }
                        }
+
                        /* Start the UST consumer daemons */
                        /* 64-bit */
                        pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
@@ -3593,12 +4313,22 @@ skip_domain:
                                cmd_ctx->lsm->u.disable.channel_name);
                break;
        }
+       case LTTNG_DISABLE_CONSUMER:
+       {
+               ret = cmd_disable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
+               break;
+       }
        case LTTNG_ENABLE_CHANNEL:
        {
                ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
                                &cmd_ctx->lsm->u.channel.chan);
                break;
        }
+       case LTTNG_ENABLE_CONSUMER:
+       {
+               ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
+               break;
+       }
        case LTTNG_ENABLE_EVENT:
        {
                ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
@@ -3675,7 +4405,12 @@ skip_domain:
                ret = LTTCOMM_OK;
                break;
        }
-
+       case LTTNG_SET_CONSUMER_URI:
+       {
+               ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
+                               &cmd_ctx->lsm->u.uri);
+               break;
+       }
        case LTTNG_START_TRACE:
        {
                ret = cmd_start_trace(cmd_ctx->session);
@@ -3692,6 +4427,14 @@ skip_domain:
                                cmd_ctx->lsm->session.path, &cmd_ctx->creds);
                break;
        }
+       case LTTNG_CREATE_SESSION_URI:
+       {
+               ret = cmd_create_session_uri(cmd_ctx->lsm->session.name,
+                               &cmd_ctx->lsm->u.create_uri.ctrl_uri,
+                               &cmd_ctx->lsm->u.create_uri.data_uri,
+                               cmd_ctx->lsm->u.create_uri.enable_consumer, &cmd_ctx->creds);
+               break;
+       }
        case LTTNG_DESTROY_SESSION:
        {
                ret = cmd_destroy_session(cmd_ctx->session,
@@ -3817,6 +4560,43 @@ skip_domain:
                                cmd_ctx->lsm->u.reg.path);
                break;
        }
+       case LTTNG_SET_FILTER:
+       {
+               struct lttng_filter_bytecode *bytecode;
+
+               if (cmd_ctx->lsm->u.filter.bytecode_len > 65536) {
+                       ret = LTTCOMM_FILTER_INVAL;
+                       goto error;
+               }
+               bytecode = zmalloc(cmd_ctx->lsm->u.filter.bytecode_len);
+               if (!bytecode) {
+                       ret = LTTCOMM_FILTER_NOMEM;
+                       goto error;
+               }
+               /* Receive var. len. data */
+               DBG("Receiving var len data from client ...");
+               ret = lttcomm_recv_unix_sock(sock, bytecode,
+                               cmd_ctx->lsm->u.filter.bytecode_len);
+               if (ret <= 0) {
+                       DBG("Nothing recv() from client var len data... continuing");
+                       free(bytecode);
+                       *sock_error = 1;
+                       ret = LTTCOMM_FILTER_INVAL;
+                       goto error;
+               }
+
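+               /*
+                * Sanity check: the buffer received must be exactly the filter
+                * header plus the bytecode length announced in the command.
+                */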
+               if (bytecode->len + sizeof(*bytecode)
+                               != cmd_ctx->lsm->u.filter.bytecode_len) {
+                       free(bytecode);
+                       ret = LTTCOMM_FILTER_INVAL;
+                       goto error;
+               }
+
+               ret = cmd_set_filter(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+                               cmd_ctx->lsm->u.filter.channel_name,
+                               cmd_ctx->lsm->u.filter.event_name,
+                               bytecode);
+               break;
+       }
        default:
                ret = LTTCOMM_UND;
                break;
@@ -3842,6 +4622,180 @@ init_setup_error:
        return ret;
 }
 
+/*
+ * Thread managing health check socket.
+ */
+static void *thread_manage_health(void *data)
+{
+       int sock = -1, new_sock = -1, ret, i, pollfd;
+       uint32_t revents, nb_fd;
+       struct lttng_poll_event events;
+       struct lttcomm_health_msg msg;
+       struct lttcomm_health_data reply;
+
+       DBG("[thread] Manage health check started");
+
+       rcu_register_thread();
+
+       /* Create unix socket */
+       sock = lttcomm_create_unix_sock(health_unix_sock_path);
+       if (sock < 0) {
+               ERR("Unable to create health check Unix socket");
+               ret = -1;
+               goto error;
+       }
+
+       ret = lttcomm_listen_unix_sock(sock);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /*
+        * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
+        * more will be added to this poll set.
+        */
+       ret = create_thread_poll_set(&events, 2);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /* Add the health check socket to the poll set */
+       ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
+       if (ret < 0) {
+               goto error;
+       }
+
+       while (1) {
+               DBG("Health check ready");
+
+               nb_fd = LTTNG_POLL_GETNB(&events);
+
+               /* Infinite blocking call, waiting for transmission */
+restart:
+               ret = lttng_poll_wait(&events, -1);
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart;
+                       }
+                       goto error;
+               }
+
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
+
+                       /* Thread quit pipe has been closed. Killing thread. */
+                       ret = check_thread_quit_pipe(pollfd, revents);
+                       if (ret) {
+                               goto error;
+                       }
+
+                       /* Event on the health check socket */
+                       if (pollfd == sock) {
+                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Health socket poll error");
+                                       goto error;
+                               }
+                       }
+               }
+
+               new_sock = lttcomm_accept_unix_sock(sock);
+               if (new_sock < 0) {
+                       goto error;
+               }
+
+               DBG("Receiving data from client for health...");
+               ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
+               if (ret <= 0) {
+                       DBG("Nothing recv() from client... continuing");
+                       ret = close(new_sock);
+                       if (ret) {
+                               PERROR("close");
+                       }
+                       new_sock = -1;
+                       continue;
+               }
+
+               rcu_thread_online();
+
+               switch (msg.component) {
+               case LTTNG_HEALTH_CMD:
+                       reply.ret_code = health_check_state(&health_thread_cmd);
+                       break;
+               case LTTNG_HEALTH_APP_REG:
+                       reply.ret_code = health_check_state(&health_thread_app_reg);
+                       break;
+               case LTTNG_HEALTH_KERNEL:
+                       reply.ret_code = health_check_state(&health_thread_kernel);
+                       break;
+               case LTTNG_HEALTH_CONSUMER:
+                       reply.ret_code = check_consumer_health();
+                       break;
+               case LTTNG_HEALTH_ALL:
+                       ret = check_consumer_health();
+
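+                       /*
+                        * health_check_state() returns 1 for a good state and 0
+                        * for a bad one, so AND-ing the individual results flags
+                        * a problem as soon as any single component is unhealthy.
+                        */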
+                       reply.ret_code =
+                               health_check_state(&health_thread_app_reg) &
+                               health_check_state(&health_thread_cmd) &
+                               health_check_state(&health_thread_kernel) &
+                               ret;
+                       break;
+               default:
+                       reply.ret_code = LTTCOMM_UND;
+                       break;
+               }
+
+               /*
+                * Flip the return value: for the client, 0 means a good health state
+                * and 1 a bad one, which is the opposite of the convention used
+                * inside the sessiond.
+                */
+               if (reply.ret_code == 0 || reply.ret_code == 1) {
+                       reply.ret_code = !reply.ret_code;
+               }
+
+               DBG2("Health check return value %d", reply.ret_code);
+
+               ret = lttcomm_send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
+               if (ret < 0) {
+                       ERR("Failed to send health data back to client");
+               }
+
+               /* End of transmission */
+               ret = close(new_sock);
+               if (ret) {
+                       PERROR("close");
+               }
+               new_sock = -1;
+       }
+
+error:
+       DBG("Health check thread dying");
+       unlink(health_unix_sock_path);
+       if (sock >= 0) {
+               ret = close(sock);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+       if (new_sock >= 0) {
+               ret = close(new_sock);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+
+       lttng_poll_clean(&events);
+
+       rcu_unregister_thread();
+       return NULL;
+}
+
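+/*
+ * Minimal sketch of a client-side health query against the socket handled by
+ * the thread above; illustration only. The helper name is hypothetical and it
+ * relies solely on the lttcomm unix socket helpers and message structures used
+ * elsewhere in this file.
+ */
+#if 0
+static int query_sessiond_health(int component)
+{
+       int sock, ret;
+       struct lttcomm_health_msg msg;
+       struct lttcomm_health_data reply;
+
+       memset(&msg, 0, sizeof(msg));
+       msg.component = component;
+
+       sock = lttcomm_connect_unix_sock(health_unix_sock_path);
+       if (sock < 0) {
+               return -1;
+       }
+
+       ret = lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
+       if (ret < 0) {
+               goto end;
+       }
+
+       ret = lttcomm_recv_unix_sock(sock, &reply, sizeof(reply));
+       if (ret <= 0) {
+               ret = -1;
+               goto end;
+       }
+
+       /* 0 means a good health state for the client, see the flip above. */
+       ret = reply.ret_code;
+end:
+       if (close(sock)) {
+               PERROR("close");
+       }
+       return ret;
+}
+#endif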
 /*
  * This thread manages all client requests using the client unix socket for
  * communication.
@@ -3849,6 +4803,7 @@ init_setup_error:
 static void *thread_manage_clients(void *data)
 {
        int sock = -1, ret, i, pollfd;
+       int sock_error;
        uint32_t revents, nb_fd;
        struct command_ctx *cmd_ctx = NULL;
        struct lttng_poll_event events;
@@ -3857,6 +4812,8 @@ static void *thread_manage_clients(void *data)
 
        rcu_register_thread();
 
+       health_code_update(&health_thread_cmd);
+
        ret = lttcomm_listen_unix_sock(client_sock);
        if (ret < 0) {
                goto error;
@@ -3884,6 +4841,8 @@ static void *thread_manage_clients(void *data)
                kill(ppid, SIGUSR1);
        }
 
+       health_code_update(&health_thread_cmd);
+
        while (1) {
                DBG("Accepting client command ...");
 
@@ -3891,7 +4850,9 @@ static void *thread_manage_clients(void *data)
 
                /* Infinite blocking call, waiting for transmission */
        restart:
+               health_poll_update(&health_thread_cmd);
                ret = lttng_poll_wait(&events, -1);
+               health_poll_update(&health_thread_cmd);
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -3907,6 +4868,8 @@ static void *thread_manage_clients(void *data)
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
+                       health_code_update(&health_thread_cmd);
+
                        /* Thread quit pipe has been closed. Killing thread. */
                        ret = check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
@@ -3924,6 +4887,8 @@ static void *thread_manage_clients(void *data)
 
                DBG("Wait for client response");
 
+               health_code_update(&health_thread_cmd);
+
                sock = lttcomm_accept_unix_sock(client_sock);
                if (sock < 0) {
                        goto error;
@@ -3952,6 +4917,8 @@ static void *thread_manage_clients(void *data)
                cmd_ctx->llm = NULL;
                cmd_ctx->session = NULL;
 
+               health_code_update(&health_thread_cmd);
+
                /*
                 * Data is received from the lttng client. The struct
                 * lttcomm_session_msg (lsm) contains the command and data request of
@@ -3971,6 +4938,8 @@ static void *thread_manage_clients(void *data)
                        continue;
                }
 
+               health_code_update(&health_thread_cmd);
+
                // TODO: Validate cmd_ctx including sanity check for
                // security purpose.
 
@@ -3981,18 +4950,29 @@ static void *thread_manage_clients(void *data)
                 * information for the client. The command context struct contains
                 * everything this function may need.
                 */
-               ret = process_client_msg(cmd_ctx);
+               ret = process_client_msg(cmd_ctx, sock, &sock_error);
                rcu_thread_offline();
                if (ret < 0) {
+                       if (sock_error) {
+                               ret = close(sock);
+                               if (ret) {
+                                       PERROR("close");
+                               }
+                               sock = -1;
+                       }
                        /*
                         * TODO: Inform client somehow of the fatal error. At
                         * this point, ret < 0 means that a zmalloc failed
-                        * (ENOMEM). Error detected but still accept command.
+                        * (ENOMEM). Error detected but still accept
+                        * command, unless a socket error has been
+                        * detected.
                         */
                        clean_command_ctx(&cmd_ctx);
                        continue;
                }
 
+               health_code_update(&health_thread_cmd);
+
                DBG("Sending response (size: %d, retcode: %s)",
                                cmd_ctx->lttng_msg_size,
                                lttng_strerror(-cmd_ctx->llm->ret_code));
@@ -4009,9 +4989,13 @@ static void *thread_manage_clients(void *data)
                sock = -1;
 
                clean_command_ctx(&cmd_ctx);
+
+               health_code_update(&health_thread_cmd);
        }
 
 error:
+       health_reset(&health_thread_cmd);
+
        DBG("Client thread dying");
        unlink(client_unix_sock_path);
        if (client_sock >= 0) {
@@ -4317,58 +5301,6 @@ end:
        return ret;
 }
 
-/*
- * Create the pipe used to wake up the kernel thread.
- * Closed in cleanup().
- */
-static int create_kernel_poll_pipe(void)
-{
-       int ret, i;
-
-       ret = pipe(kernel_poll_pipe);
-       if (ret < 0) {
-               PERROR("kernel poll pipe");
-               goto error;
-       }
-
-       for (i = 0; i < 2; i++) {
-               ret = fcntl(kernel_poll_pipe[i], F_SETFD, FD_CLOEXEC);
-               if (ret < 0) {
-                       PERROR("fcntl kernel_poll_pipe");
-                       goto error;
-               }
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Create the application command pipe to wake thread_manage_apps.
- * Closed in cleanup().
- */
-static int create_apps_cmd_pipe(void)
-{
-       int ret, i;
-
-       ret = pipe(apps_cmd_pipe);
-       if (ret < 0) {
-               PERROR("apps cmd pipe");
-               goto error;
-       }
-
-       for (i = 0; i < 2; i++) {
-               ret = fcntl(apps_cmd_pipe[i], F_SETFD, FD_CLOEXEC);
-               if (ret < 0) {
-                       PERROR("fcntl apps_cmd_pipe");
-                       goto error;
-               }
-       }
-
-error:
-       return ret;
-}
-
 /*
  * Create the lttng run directory needed for all global sockets and pipe.
  */
@@ -4612,6 +5544,11 @@ int main(int argc, char **argv)
                                        DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
                }
 
+               if (strlen(health_unix_sock_path) == 0) {
+                       snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
+                                       DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
+               }
+
                /* Setup kernel consumerd path */
                snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
                                DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
@@ -4662,6 +5599,12 @@ int main(int argc, char **argv)
                        snprintf(wait_shm_path, PATH_MAX,
                                        DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
                }
+
+               /* Set health check Unix path */
+               if (strlen(health_unix_sock_path) == 0) {
+                       snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
+                                       DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
+               }
        }
 
        /* Set consumer initial state */
@@ -4767,12 +5710,12 @@ int main(int argc, char **argv)
        }
 
        /* Setup the kernel pipe for waking up the kernel thread */
-       if ((ret = create_kernel_poll_pipe()) < 0) {
+       if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
                goto exit;
        }
 
        /* Setup the thread apps communication pipe. */
-       if ((ret = create_apps_cmd_pipe()) < 0) {
+       if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
                goto exit;
        }
 
@@ -4788,6 +5731,37 @@ int main(int argc, char **argv)
        /* Set up max poll set size */
        lttng_poll_set_max_size();
 
+       /*
+        * Set network sequence index to 1 for streams to match a relayd socket on
+        * the consumer side.
+        */
+       uatomic_set(&relayd_net_seq_idx, 1);
+
+       /* Init all health thread counters. */
+       health_init(&health_thread_cmd);
+       health_init(&health_thread_kernel);
+       health_init(&health_thread_app_reg);
+
+       /*
+        * Init health counters of the consumer threads. We do a quick hack here
+        * so that the consumer health state reports fine even if the threads are
+        * not started. This simply eases our life and has no cost whatsoever.
+        */
+       health_init(&kconsumer_data.health);
+       health_poll_update(&kconsumer_data.health);
+       health_init(&ustconsumer32_data.health);
+       health_poll_update(&ustconsumer32_data.health);
+       health_init(&ustconsumer64_data.health);
+       health_poll_update(&ustconsumer64_data.health);
+
+       /* Create thread to manage the health check socket */
+       ret = pthread_create(&health_thread, NULL,
+                       thread_manage_health, (void *) NULL);
+       if (ret != 0) {
+               PERROR("pthread_create health");
+               goto exit_health;
+       }
+
        /* Create thread to manage the client socket */
        ret = pthread_create(&client_thread, NULL,
                        thread_manage_clients, (void *) NULL);
@@ -4869,6 +5843,7 @@ exit_dispatch:
        }
 
 exit_client:
+exit_health:
 exit:
        /*
         * cleanup() is called when no other thread is running.