Fix: set globally visible flag to kernel stream
diff --git a/src/bin/lttng-sessiond/main.c b/src/bin/lttng-sessiond/main.c
index 859223a11216b1d342b0899ebdd4e66bc657d657..c02f6d91e2ba904819c58f3058d83efef218c6fd 100644
--- a/src/bin/lttng-sessiond/main.c
+++ b/src/bin/lttng-sessiond/main.c
@@ -25,6 +25,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <inttypes.h>
 #include <sys/mman.h>
 #include <sys/mount.h>
 #include <sys/resource.h>
@@ -37,7 +38,6 @@
 #include <config.h>
 
 #include <common/common.h>
-#include <common/compat/poll.h>
 #include <common/compat/socket.h>
 #include <common/defaults.h>
 #include <common/kernel-consumer/kernel-consumer.h>
@@ -46,6 +46,7 @@
 #include <common/utils.h>
 
 #include "lttng-sessiond.h"
+#include "buffer-registry.h"
 #include "channel.h"
 #include "cmd.h"
 #include "consumer.h"
 #include "fd-limit.h"
 #include "health.h"
 #include "testpoint.h"
+#include "ust-thread.h"
 
 #define CONSUMERD_FILE "lttng-consumerd"
 
 /* Const values */
-const char default_home_dir[] = DEFAULT_HOME_DIR;
 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
-const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
-const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
 
 const char *progname;
 const char *opt_tracing_group;
@@ -91,6 +90,7 @@ static struct consumer_data kconsumer_data = {
        .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
        .err_sock = -1,
        .cmd_sock = -1,
+       .metadata_sock.fd = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
@@ -102,6 +102,7 @@ static struct consumer_data ustconsumer64_data = {
        .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
        .err_sock = -1,
        .cmd_sock = -1,
+       .metadata_sock.fd = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
@@ -113,6 +114,7 @@ static struct consumer_data ustconsumer32_data = {
        .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
        .err_sock = -1,
        .cmd_sock = -1,
+       .metadata_sock.fd = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
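
All three consumer_data initializers now mark the new metadata socket as unconnected. Keeping every descriptor at -1 until it is opened lets the cleanup paths added later in this patch safely test fd >= 0 before close(). A minimal sketch of the idiom, with simplified stand-in types (not the real lttng-tools definitions):

    #include <pthread.h>

    struct metadata_sock_sketch {
            int fd;
            pthread_mutex_t *lock;
    };

    struct consumer_data_sketch {
            int err_sock;
            int cmd_sock;
            struct metadata_sock_sketch metadata_sock;
    };

    static struct consumer_data_sketch cdata = {
            .err_sock = -1,
            .cmd_sock = -1,
            /* C99 nested designator: only this member is overridden. */
            .metadata_sock.fd = -1,
    };
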
@@ -149,13 +151,17 @@ static int thread_quit_pipe[2] = { -1, -1 };
  */
 static int apps_cmd_pipe[2] = { -1, -1 };
 
+int apps_cmd_notify_pipe[2] = { -1, -1 };
+
 /* Pthread, Mutexes and Semaphores */
 static pthread_t apps_thread;
+static pthread_t apps_notify_thread;
 static pthread_t reg_apps_thread;
 static pthread_t client_thread;
 static pthread_t kernel_thread;
 static pthread_t dispatch_thread;
 static pthread_t health_thread;
+static pthread_t ht_cleanup_thread;
 
 /*
  * UST registration command queue. This queue is tied with a futex and uses a N
@@ -227,6 +233,9 @@ static enum consumerd_state kernel_consumerd_state;
  */
 static int app_socket_timeout;
 
+/* Set in main() with the current page size. */
+long page_size;
+
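
A sketch of how main() can fill this global via sysconf(3); the fallback value below is an assumption for illustration, not taken from this patch:

    #include <stdio.h>
    #include <unistd.h>

    long page_size;

    static void init_page_size(void)
    {
            page_size = sysconf(_SC_PAGESIZE);
            if (page_size < 0) {
                    perror("sysconf _SC_PAGESIZE");
                    page_size = 4096;       /* assumed fallback */
            }
    }
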
 static
 void setup_consumerd_path(void)
 {
@@ -279,15 +288,11 @@ void setup_consumerd_path(void)
 /*
  * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
  */
-static int create_thread_poll_set(struct lttng_poll_event *events,
-               unsigned int size)
+int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
 {
        int ret;
 
-       if (events == NULL || size == 0) {
-               ret = -1;
-               goto error;
-       }
+       assert(events);
 
        ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
        if (ret < 0) {
@@ -295,7 +300,7 @@ static int create_thread_poll_set(struct lttng_poll_event *events,
        }
 
        /* Add quit pipe */
-       ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
+       ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
        if (ret < 0) {
                goto error;
        }
@@ -311,7 +316,7 @@ error:
  *
  * Return 1 if it was triggered else 0;
  */
-static int check_thread_quit_pipe(int fd, uint32_t events)
+int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
 {
        if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
                return 1;
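
For reference, the per-thread skeleton these two exported helpers enable, condensed from the poll loops in this file (a sketch: health calls and includes elided):

    static void *example_thread(void *data)
    {
            int i, ret, pollfd;
            uint32_t revents;
            struct lttng_poll_event events;

            (void) data;

            /* The quit pipe is added for us; size 2 leaves room for one fd. */
            ret = sessiond_set_thread_pollset(&events, 2);
            if (ret < 0) {
                    goto error;
            }
            /* lttng_poll_add() any thread-specific fd here. */

            while (1) {
    restart:
                    ret = lttng_poll_wait(&events, -1);
                    if (ret < 0) {
                            if (errno == EINTR) {
                                    goto restart;   /* restart interrupted syscall */
                            }
                            goto error;
                    }
                    for (i = 0; i < ret; i++) {
                            revents = LTTNG_POLL_GETEV(&events, i);
                            pollfd = LTTNG_POLL_GETFD(&events, i);
                            if (sessiond_check_thread_quit_pipe(pollfd, revents)) {
                                    goto exit;      /* quit pipe closed */
                            }
                            /* ... handle thread-specific fds ... */
                    }
            }
    exit:
    error:
            lttng_poll_clean(&events);
            return NULL;
    }
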
@@ -439,6 +444,7 @@ static void cleanup(void)
 
        DBG("Closing all UST sockets");
        ust_app_clean_list();
+       buffer_reg_destroy_registries();
 
        if (is_root && !opt_no_kernel) {
                DBG2("Closing kernel fd");
@@ -640,7 +646,8 @@ static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
 
                                                pthread_mutex_lock(socket->lock);
                                                ret = kernel_consumer_send_channel_stream(socket,
-                                                               channel, ksess);
+                                                               channel, ksess,
+                                                               session->output_traces ? 1 : 0);
                                                pthread_mutex_unlock(socket->lock);
                                                if (ret < 0) {
                                                        rcu_read_unlock();
@@ -664,14 +671,13 @@ error:
 }
 
 /*
- * For each tracing session, update newly registered apps.
+ * For each tracing session, update newly registered apps. The session list
+ * lock MUST be acquired before calling this.
  */
 static void update_ust_app(int app_sock)
 {
        struct ltt_session *sess, *stmp;
 
-       session_lock_list();
-
        /* For all tracing session(s) */
        cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
                session_lock(sess);
@@ -680,8 +686,6 @@ static void update_ust_app(int app_sock)
                }
                session_unlock(sess);
        }
-
-       session_unlock_list();
 }
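
Callers are now responsible for the locking, mirroring the new call site in thread_dispatch_ust_registration later in this patch, which roughly does:

    session_lock_list();
    rcu_read_lock();
    /* ... register the application ... */
    update_ust_app(app_sock);
    rcu_read_unlock();
    session_unlock_list();
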
 
 /*
@@ -703,9 +707,9 @@ static void *thread_manage_kernel(void *data)
 
        /*
         * This first step of the while is to clean this structure which could free
-        * non NULL pointers so zero it before the loop.
+        * non-NULL pointers, so initialize it before the loop.
         */
-       memset(&events, 0, sizeof(events));
+       lttng_poll_init(&events);
 
        if (testpoint(thread_manage_kernel)) {
                goto error_testpoint;
@@ -724,7 +728,7 @@ static void *thread_manage_kernel(void *data)
                        /* Clean events object. We are about to populate it again. */
                        lttng_poll_clean(&events);
 
-                       ret = create_thread_poll_set(&events, 2);
+                       ret = sessiond_set_thread_pollset(&events, 2);
                        if (ret < 0) {
                                goto error_poll_create;
                        }
@@ -746,9 +750,9 @@ static void *thread_manage_kernel(void *data)
 
                /* Poll infinite value of time */
        restart:
-               health_poll_update();
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update();
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
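
The ambiguous health_poll_update() toggle becomes an explicit entry/exit pair, which also retires the even/odd counter bookkeeping explained in the comment deleted further below. The bracket around every blocking call is simply:

    health_poll_entry();                    /* blocked in poll: healthy even if long */
    ret = lttng_poll_wait(&events, -1);
    health_poll_exit();                     /* back on a code path */
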
@@ -774,7 +778,7 @@ static void *thread_manage_kernel(void *data)
                        health_code_update();
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -867,30 +871,13 @@ static void *thread_manage_consumer(void *data)
 
        health_register(HEALTH_TYPE_CONSUMER);
 
-       /*
-        * Since the consumer thread can be spawned at any moment in time, we init
-        * the health to a poll status (1, which is a valid health over time).
-        * When the thread starts, we update here the health to a "code" path being
-        * an even value so this thread, when reaching a poll wait, does not
-        * trigger an error with an even value.
-        *
-        * Here is the use case we avoid.
-        *
-        * +1: the first poll update during initialization (main())
-        * +2 * x: multiple code update once in this thread.
-        * +1: poll wait in this thread (being a good health state).
-        * == even number which after the wait period shows as a bad health.
-        *
-        * In a nutshell, the following poll update to the health state brings back
-        * the state to an even value meaning a code path.
-        */
-       health_poll_update();
+       health_code_update();
 
        /*
-        * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
-        * Nothing more will be added to this poll set.
+        * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
+        * metadata_sock. Nothing more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 3);
        if (ret < 0) {
                goto error_poll;
        }
@@ -907,16 +894,16 @@ static void *thread_manage_consumer(void *data)
 
        health_code_update();
 
-       /* Inifinite blocking call, waiting for transmission */
+       /* Infinite blocking call, waiting for transmission */
 restart:
-       health_poll_update();
+       health_poll_entry();
 
        if (testpoint(thread_manage_consumer)) {
                goto error;
        }
 
        ret = lttng_poll_wait(&events, -1);
-       health_poll_update();
+       health_poll_exit();
        if (ret < 0) {
                /*
                 * Restart interrupted system call.
@@ -937,7 +924,7 @@ restart:
                health_code_update();
 
                /* Thread quit pipe has been closed. Killing thread. */
-               ret = check_thread_quit_pipe(pollfd, revents);
+               ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                if (ret) {
                        err = 0;
                        goto exit;
@@ -977,87 +964,126 @@ restart:
        health_code_update();
 
        if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
+               /* Connect both sockets, command and metadata. */
                consumer_data->cmd_sock =
                        lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
-               if (consumer_data->cmd_sock < 0) {
+               consumer_data->metadata_sock.fd =
+                       lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
+               if (consumer_data->cmd_sock < 0 ||
+                               consumer_data->metadata_sock.fd < 0) {
+                       PERROR("consumer connect cmd socket");
                        /* On error, signal condition and quit. */
                        signal_consumer_condition(consumer_data, -1);
-                       PERROR("consumer connect");
                        goto error;
                }
+               /* Create metadata socket lock. */
+               consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
+               if (consumer_data->metadata_sock.lock == NULL) {
+                       PERROR("zmalloc pthread mutex");
+                       ret = -1;
+                       goto error;
+               }
+               pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
+
                signal_consumer_condition(consumer_data, 1);
-               DBG("Consumer command socket ready");
+               DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
+               DBG("Consumer metadata socket ready (fd: %d)",
+                               consumer_data->metadata_sock.fd);
        } else {
                ERR("consumer error when waiting for SOCK_READY : %s",
                                lttcomm_get_readable_code(-code));
                goto error;
        }
 
-       /* Remove the kconsumerd error sock since we've established a connexion */
+       /* Remove the consumerd error sock since we've established a connection */
        ret = lttng_poll_del(&events, consumer_data->err_sock);
        if (ret < 0) {
                goto error;
        }
 
+       /* Add new accepted error socket. */
        ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
        if (ret < 0) {
                goto error;
        }
 
+       /* Add metadata socket that is successfully connected. */
+       ret = lttng_poll_add(&events, consumer_data->metadata_sock.fd,
+                       LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               goto error;
+       }
+
        health_code_update();
 
-       /* Inifinite blocking call, waiting for transmission */
+       /* Infinite blocking call, waiting for transmission */
 restart_poll:
-       health_poll_update();
-       ret = lttng_poll_wait(&events, -1);
-       health_poll_update();
-       if (ret < 0) {
-               /*
-                * Restart interrupted system call.
-                */
-               if (errno == EINTR) {
-                       goto restart_poll;
+       while (1) {
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart_poll;
+                       }
+                       goto error;
                }
-               goto error;
-       }
 
-       nb_fd = ret;
+               nb_fd = ret;
 
-       for (i = 0; i < nb_fd; i++) {
-               /* Fetch once the poll data */
-               revents = LTTNG_POLL_GETEV(&events, i);
-               pollfd = LTTNG_POLL_GETFD(&events, i);
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
 
-               health_code_update();
+                       health_code_update();
 
-               /* Thread quit pipe has been closed. Killing thread. */
-               ret = check_thread_quit_pipe(pollfd, revents);
-               if (ret) {
-                       err = 0;
-                       goto exit;
-               }
+                       /* Thread quit pipe has been closed. Killing thread. */
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
+                       if (ret) {
+                               err = 0;
+                               goto exit;
+                       }
 
-               /* Event on the kconsumerd socket */
-               if (pollfd == sock) {
-                       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                               ERR("consumer err socket second poll error");
+                       if (pollfd == sock) {
+                               /* Event on the consumerd socket */
+                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("consumer err socket second poll error");
+                                       goto error;
+                               }
+                               health_code_update();
+                               /* Wait for any consumerd error */
+                               ret = lttcomm_recv_unix_sock(sock, &code,
+                                               sizeof(enum lttcomm_return_code));
+                               if (ret <= 0) {
+                                       ERR("consumer closed the command socket");
+                                       goto error;
+                               }
+
+                               ERR("consumer return code : %s",
+                                               lttcomm_get_readable_code(-code));
+
+                               goto exit;
+                       } else if (pollfd == consumer_data->metadata_sock.fd) {
+                               /* UST metadata requests */
+                               ret = ust_consumer_metadata_request(
+                                               &consumer_data->metadata_sock);
+                               if (ret < 0) {
+                                       ERR("Handling metadata request");
+                                       goto error;
+                               }
+                               break;
+                       } else {
+                               ERR("Unknown pollfd");
                                goto error;
                        }
                }
+               health_code_update();
        }
 
-       health_code_update();
-
-       /* Wait for any kconsumerd error */
-       ret = lttcomm_recv_unix_sock(sock, &code,
-                       sizeof(enum lttcomm_return_code));
-       if (ret <= 0) {
-               ERR("consumer closed the command socket");
-               goto error;
-       }
-
-       ERR("consumer return code : %s", lttcomm_get_readable_code(-code));
-
 exit:
 error:
        /* Immediately set the consumerd state to stopped */
@@ -1083,6 +1109,16 @@ error:
                        PERROR("close");
                }
        }
+       if (consumer_data->metadata_sock.fd >= 0) {
+               ret = close(consumer_data->metadata_sock.fd);
+               if (ret) {
+                       PERROR("close");
+               }
+       }
+       /* Cleanup metadata socket mutex. */
+       pthread_mutex_destroy(consumer_data->metadata_sock.lock);
+       free(consumer_data->metadata_sock.lock);
+
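
This tears down the metadata socket lock allocated earlier in this thread with zmalloc()/pthread_mutex_init(). A standalone sketch of that heap-allocated mutex lifecycle, with calloc() standing in for lttng's zmalloc():

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t *create_lock(void)
    {
            pthread_mutex_t *lock = calloc(1, sizeof(*lock));   /* zmalloc() equivalent */

            if (lock) {
                    pthread_mutex_init(lock, NULL);
            }
            return lock;
    }

    static void destroy_lock(pthread_mutex_t *lock)
    {
            if (lock) {
                    pthread_mutex_destroy(lock);
                    free(lock);
            }
    }
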
        if (sock >= 0) {
                ret = close(sock);
                if (ret) {
@@ -1113,7 +1149,6 @@ static void *thread_manage_apps(void *data)
 {
        int i, ret, pollfd, err = -1;
        uint32_t revents, nb_fd;
-       struct ust_command ust_cmd;
        struct lttng_poll_event events;
 
        DBG("[thread] Manage application started");
@@ -1129,7 +1164,7 @@ static void *thread_manage_apps(void *data)
 
        health_code_update();
 
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
                goto error_poll_create;
        }
@@ -1150,9 +1185,9 @@ static void *thread_manage_apps(void *data)
 
                /* Inifinite blocking call, waiting for transmission */
        restart:
-               health_poll_update();
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update();
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -1173,7 +1208,7 @@ static void *thread_manage_apps(void *data)
                        health_code_update();
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -1185,70 +1220,37 @@ static void *thread_manage_apps(void *data)
                                        ERR("Apps command pipe error");
                                        goto error;
                                } else if (revents & LPOLLIN) {
+                                       int sock;
+
                                        /* Empty pipe */
                                        do {
-                                               ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
+                                               ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
                                        } while (ret < 0 && errno == EINTR);
-                                       if (ret < 0 || ret < sizeof(ust_cmd)) {
+                                       if (ret < 0 || ret < sizeof(sock)) {
                                                PERROR("read apps cmd pipe");
                                                goto error;
                                        }
 
                                        health_code_update();
 
-                                       /* Register applicaton to the session daemon */
-                                       ret = ust_app_register(&ust_cmd.reg_msg,
-                                                       ust_cmd.sock);
-                                       if (ret == -ENOMEM) {
-                                               goto error;
-                                       } else if (ret < 0) {
-                                               break;
-                                       }
-
-                                       health_code_update();
-
                                        /*
-                                        * Validate UST version compatibility.
+                                        * We only monitor the error events of the socket. This
+                                        * thread does not handle any incoming data from UST
+                                        * (POLLIN).
                                         */
-                                       ret = ust_app_validate_version(ust_cmd.sock);
-                                       if (ret >= 0) {
-                                               /*
-                                                * Add channel(s) and event(s) to newly registered apps
-                                                * from lttng global UST domain.
-                                                */
-                                               update_ust_app(ust_cmd.sock);
-                                       }
-
-                                       health_code_update();
-
-                                       ret = ust_app_register_done(ust_cmd.sock);
+                                       ret = lttng_poll_add(&events, sock,
+                                                       LPOLLERR | LPOLLHUP | LPOLLRDHUP);
                                        if (ret < 0) {
-                                               /*
-                                                * If the registration is not possible, we simply
-                                                * unregister the apps and continue
-                                                */
-                                               ust_app_unregister(ust_cmd.sock);
-                                       } else {
-                                               /*
-                                                * We only monitor the error events of the socket. This
-                                                * thread does not handle any incoming data from UST
-                                                * (POLLIN).
-                                                */
-                                               ret = lttng_poll_add(&events, ust_cmd.sock,
-                                                               LPOLLERR & LPOLLHUP & LPOLLRDHUP);
-                                               if (ret < 0) {
-                                                       goto error;
-                                               }
+                                               goto error;
+                                       }
 
-                                               /* Set socket timeout for both receiving and ending */
-                                               (void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
-                                                               app_socket_timeout);
-                                               (void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
-                                                               app_socket_timeout);
+                                       /* Set socket timeout for both receiving and sending */
+                                       (void) lttcomm_setsockopt_rcv_timeout(sock,
+                                                       app_socket_timeout);
+                                       (void) lttcomm_setsockopt_snd_timeout(sock,
+                                                       app_socket_timeout);
 
-                                               DBG("Apps with sock %d added to poll set",
-                                                               ust_cmd.sock);
-                                       }
+                                       DBG("Apps with sock %d added to poll set", sock);
 
                                        health_code_update();
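
The command pipe payload shrinks from a whole struct ust_command to a bare socket fd: threads share the process file descriptor table, and a write of sizeof(int) is far below PIPE_BUF, hence atomic. A sketch of the handoff idiom, with illustrative names:

    #include <errno.h>
    #include <unistd.h>

    /* Writer side, e.g. the dispatch thread. */
    static int send_fd(int pipe_wr, int fd)
    {
            ssize_t ret;

            do {
                    ret = write(pipe_wr, &fd, sizeof(fd));
            } while (ret < 0 && errno == EINTR);
            return ret == (ssize_t) sizeof(fd) ? 0 : -1;
    }

    /* Reader side, e.g. the manage-apps thread. */
    static int recv_fd(int pipe_rd)
    {
            int fd;
            ssize_t ret;

            do {
                    ret = read(pipe_rd, &fd, sizeof(fd));
            } while (ret < 0 && errno == EINTR);
            return ret == (ssize_t) sizeof(fd) ? fd : -1;
    }
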
 
@@ -1301,23 +1303,163 @@ error_testpoint:
        return NULL;
 }
 
+/*
+ * Send a socket to a thread. This is called from the dispatch UST
+ * registration thread once all sockets are set for the application.
+ *
+ * On success, return 0; on failure, return a negative value corresponding to
+ * the errno of the failed write().
+ */
+static int send_socket_to_thread(int fd, int sock)
+{
+       int ret;
+
+       /* Sockets MUST be set or else this should not have been called. */
+       assert(fd >= 0);
+       assert(sock >= 0);
+
+       do {
+               ret = write(fd, &sock, sizeof(sock));
+       } while (ret < 0 && errno == EINTR);
+       if (ret < 0 || ret != sizeof(sock)) {
+               PERROR("write apps pipe %d", fd);
+               if (ret < 0) {
+                       ret = -errno;
+               }
+               goto error;
+       }
+
+       /* All good. Don't send back the positive ret value from write(). */
+       ret = 0;
+error:
+       return ret;
+}
+
+/*
+ * Sanitize the wait queue of the dispatch registration thread, i.e. remove
+ * invalid nodes from it. This avoids memory leaks when the UST notify socket
+ * is never received.
+ */
+static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
+{
+       int ret, nb_fd = 0, i;
+       unsigned int fd_added = 0;
+       struct lttng_poll_event events;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+
+       assert(wait_queue);
+
+       lttng_poll_init(&events);
+
+       /* Just skip everything for an empty queue. */
+       if (!wait_queue->count) {
+               goto end;
+       }
+
+       ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_create;
+       }
+
+       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                       &wait_queue->head, head) {
+               assert(wait_node->app);
+               ret = lttng_poll_add(&events, wait_node->app->sock,
+                               LPOLLHUP | LPOLLERR);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               fd_added = 1;
+       }
+
+       if (!fd_added) {
+               goto end;
+       }
+
+       /*
+        * Poll but don't block so we can quickly identify the faulty events and
+        * clean them afterwards from the wait queue.
+        */
+       ret = lttng_poll_wait(&events, 0);
+       if (ret < 0) {
+               goto error;
+       }
+       nb_fd = ret;
+
+       for (i = 0; i < nb_fd; i++) {
+               /* Get faulty FD. */
+               uint32_t revents = LTTNG_POLL_GETEV(&events, i);
+               int pollfd = LTTNG_POLL_GETFD(&events, i);
+
+               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                               &wait_queue->head, head) {
+                       if (pollfd == wait_node->app->sock &&
+                                       (revents & (LPOLLHUP | LPOLLERR))) {
+                               cds_list_del(&wait_node->head);
+                               wait_queue->count--;
+                               ust_app_destroy(wait_node->app);
+                               free(wait_node);
+                               break;
+                       }
+               }
+       }
+
+       if (nb_fd > 0) {
+               DBG("Wait queue sanitized, %d node(s) were cleaned up", nb_fd);
+       }
+
+end:
+       lttng_poll_clean(&events);
+       return;
+
+error:
+       lttng_poll_clean(&events);
+error_create:
+       ERR("Unable to sanitize wait queue");
+       return;
+}
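
The key trick above is the zero timeout passed to lttng_poll_wait(): hang-ups are harvested without ever blocking the dispatch loop. The same idea with plain poll(2), as a standalone sketch:

    #include <poll.h>

    /* Returns 1 if the peer already hung up, 0 otherwise. POLLHUP and
     * POLLERR are reported even when not requested in .events. */
    static int peer_hung_up(int sock)
    {
            struct pollfd pfd = { .fd = sock, .events = 0 };
            int ret = poll(&pfd, 1, 0);     /* timeout 0: never blocks */

            return ret > 0 && (pfd.revents & (POLLHUP | POLLERR));
    }
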
+
 /*
  * Dispatch request from the registration threads to the application
  * communication thread.
  */
 static void *thread_dispatch_ust_registration(void *data)
 {
-       int ret;
+       int ret, err = -1;
        struct cds_wfq_node *node;
        struct ust_command *ust_cmd = NULL;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+       struct ust_reg_wait_queue wait_queue = {
+               .count = 0,
+       };
+
+       health_register(HEALTH_TYPE_APP_REG_DISPATCH);
+
+       health_code_update();
+
+       CDS_INIT_LIST_HEAD(&wait_queue.head);
 
        DBG("[thread] Dispatch UST command started");
 
        while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
+               health_code_update();
+
                /* Atomically prepare the queue futex */
                futex_nto1_prepare(&ust_cmd_queue.futex);
 
                do {
+                       struct ust_app *app = NULL;
+                       ust_cmd = NULL;
+
+                       /*
+                        * Make sure we don't have node(s) that hung up before receiving the
+                        * notify socket. This cleans the list in order to avoid memory leaks
+                        * from notify sockets that are never seen.
+                        */
+                       sanitize_wait_queue(&wait_queue);
+
+                       health_code_update();
                        /* Dequeue command for registration */
                        node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
                        if (node == NULL) {
@@ -1334,44 +1476,166 @@ static void *thread_dispatch_ust_registration(void *data)
                                        ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
                                        ust_cmd->sock, ust_cmd->reg_msg.name,
                                        ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
-                       /*
-                        * Inform apps thread of the new application registration. This
-                        * call is blocking so we can be assured that the data will be read
-                        * at some point in time or wait to the end of the world :)
-                        */
-                       if (apps_cmd_pipe[1] >= 0) {
-                               do {
-                                       ret = write(apps_cmd_pipe[1], ust_cmd,
-                                                       sizeof(struct ust_command));
-                               } while (ret < 0 && errno == EINTR);
-                               if (ret < 0 || ret != sizeof(struct ust_command)) {
-                                       PERROR("write apps cmd pipe");
-                                       if (errno == EBADF) {
-                                               /*
-                                                * We can't inform the application thread to process
-                                                * registration. We will exit or else application
-                                                * registration will not occur and tracing will never
-                                                * start.
-                                                */
-                                               goto error;
+
+                       if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
+                               wait_node = zmalloc(sizeof(*wait_node));
+                               if (!wait_node) {
+                                       PERROR("zmalloc wait_node dispatch");
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                                       free(ust_cmd);
+                                       goto error;
+                               }
+                               CDS_INIT_LIST_HEAD(&wait_node->head);
+
+                               /* Create application object if socket is CMD. */
+                               wait_node->app = ust_app_create(&ust_cmd->reg_msg,
+                                               ust_cmd->sock);
+                               if (!wait_node->app) {
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
                                        }
+                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                                       free(wait_node);
+                                       free(ust_cmd);
+                                       continue;
                                }
+                               /*
+                                * Add application to the wait queue so we can set the notify
+                                * socket before putting this object in the global ht.
+                                */
+                               cds_list_add(&wait_node->head, &wait_queue.head);
+                               wait_queue.count++;
+
+                               free(ust_cmd);
+                               /*
+                                * We have to continue here since we don't have the notify
+                                * socket yet; the application MUST be added to the hash
+                                * table only once that socket is received.
+                                */
+                               continue;
                        } else {
-                               /* Application manager thread is not available. */
-                               ret = close(ust_cmd->sock);
+                               /*
+                                * Look for the application in the local wait queue and set the
+                                * notify socket if found.
+                                */
+                               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                                               &wait_queue.head, head) {
+                                       health_code_update();
+                                       if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
+                                               wait_node->app->notify_sock = ust_cmd->sock;
+                                               cds_list_del(&wait_node->head);
+                                               wait_queue.count--;
+                                               app = wait_node->app;
+                                               free(wait_node);
+                                               DBG3("UST app notify socket %d is set", ust_cmd->sock);
+                                               break;
+                                       }
+                               }
+
+                               /*
+                                * With no application found at this stage, the received
+                                * socket is useless, so close it before we free the cmd data
+                                * structure for good.
+                                */
+                               if (!app) {
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                               }
+                               free(ust_cmd);
+                       }
+
+                       if (app) {
+                               /*
+                                * @session_lock_list
+                                *
+                                * Lock the global session list so from the register up to the
+                                * registration done message, no thread can see the application
+                                * and change its state.
+                                */
+                               session_lock_list();
+                               rcu_read_lock();
+
+                               /*
+                                * Add application to the global hash table. This needs to be
+                                * done before the update to the UST registry can locate the
+                                * application.
+                                */
+                               ust_app_add(app);
+
+                               /* Set app version. This call will print an error if needed. */
+                               (void) ust_app_version(app);
+
+                               /* Send notify socket through the notify pipe. */
+                               ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
+                                               app->notify_sock);
                                if (ret < 0) {
-                                       PERROR("close ust_cmd sock");
+                                       rcu_read_unlock();
+                                       session_unlock_list();
+                                       /* No notify thread, stop the UST tracing. */
+                                       goto error;
                                }
+
+                               /*
+                                * Update newly registered application with the tracing
+                                * registry info already enabled information.
+                                */
+                               update_ust_app(app->sock);
+
+                               /*
+                                * Don't care about return value. Let the manage apps threads
+                                * handle app unregistration upon socket close.
+                                */
+                               (void) ust_app_register_done(app->sock);
+
+                               /*
+                                * Even if the application socket has been closed, send the app
+                                * to the thread and unregistration will take place at that
+                                * place.
+                                */
+                               ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
+                               if (ret < 0) {
+                                       rcu_read_unlock();
+                                       session_unlock_list();
+                                       /* No apps thread, stop the UST tracing. */
+                                       goto error;
+                               }
+
+                               rcu_read_unlock();
+                               session_unlock_list();
                        }
-                       free(ust_cmd);
                } while (node != NULL);
 
+               health_poll_entry();
                /* Futex wait on queue. Blocking call on futex() */
                futex_nto1_wait(&ust_cmd_queue.futex);
+               health_poll_exit();
        }
+       /* Normal exit, no error */
+       err = 0;
 
 error:
+       /* Clean up wait queue. */
+       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                       &wait_queue.head, head) {
+               cds_list_del(&wait_node->head);
+               wait_queue.count--;
+               free(wait_node);
+       }
+
        DBG("Dispatch thread dying");
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister();
        return NULL;
 }
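
Stripped of error handling, the new two-socket registration happy path above reduces to the ordered sequence below; the session list lock ensures no other thread observes the application between ust_app_add() and ust_app_register_done():

    session_lock_list();
    rcu_read_lock();
    ust_app_add(app);                       /* publish in the global hash table */
    (void) ust_app_version(app);            /* logs its own errors */
    send_socket_to_thread(apps_cmd_notify_pipe[1], app->notify_sock);
    update_ust_app(app->sock);              /* replay existing session config */
    (void) ust_app_register_done(app->sock);
    send_socket_to_thread(apps_cmd_pipe[1], app->sock);
    rcu_read_unlock();
    session_unlock_list();
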
 
@@ -1406,7 +1670,7 @@ static void *thread_registration_apps(void *data)
         * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
         * more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
                goto error_create_poll;
        }
@@ -1430,9 +1694,9 @@ static void *thread_registration_apps(void *data)
 
                /* Inifinite blocking call, waiting for transmission */
        restart:
-               health_poll_update();
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update();
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -1453,7 +1717,7 @@ static void *thread_registration_apps(void *data)
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -1498,16 +1762,12 @@ static void *thread_registration_apps(void *data)
                                                sock = -1;
                                                continue;
                                        }
+
                                        health_code_update();
-                                       ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
-                                                       sizeof(struct ust_register_msg));
-                                       if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
-                                               if (ret < 0) {
-                                                       PERROR("lttcomm_recv_unix_sock register apps");
-                                               } else {
-                                                       ERR("Wrong size received on apps register");
-                                               }
+                                       ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
+                                       if (ret < 0) {
                                                free(ust_cmd);
+                                               /* Close socket of the application. */
                                                ret = close(sock);
                                                if (ret) {
                                                        PERROR("close");
@@ -1806,6 +2066,7 @@ static pid_t spawn_consumerd(struct consumer_data *consumer_data)
                                ret = putenv(tmpnew);
                                if (ret) {
                                        ret = -errno;
+                                       free(tmpnew);
                                        goto error;
                                }
                        }
@@ -1850,6 +2111,7 @@ static pid_t spawn_consumerd(struct consumer_data *consumer_data)
                                ret = putenv(tmpnew);
                                if (ret) {
                                        ret = -errno;
+                                       free(tmpnew);
                                        goto error;
                                }
                        }
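
Both putenv() hunks plug the same leak, and the asymmetry is deliberate: on success, putenv(3) keeps the caller's string in the environment, so it must be freed only when the call fails. A self-contained sketch (the variable value is assumed for illustration):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static int set_env_entry(void)
    {
            char *tmpnew = strdup("LD_LIBRARY_PATH=/usr/local/lib");    /* assumed value */

            if (!tmpnew) {
                    return -1;
            }
            if (putenv(tmpnew)) {
                    /* putenv() failed: the string is still ours, free it. */
                    free(tmpnew);
                    return -errno;
            }
            /* Success: the environment now references tmpnew; do NOT free it. */
            return 0;
    }
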
@@ -1930,7 +2192,7 @@ end:
        return 0;
 
 error:
-       /* Cleanup already created socket on error. */
+       /* Cleanup already created sockets on error. */
        if (consumer_data->err_sock >= 0) {
                int err;
 
@@ -2025,6 +2287,8 @@ error:
  * Copy consumer output from the tracing session to the domain session. The
  * function also applies the right modification on a per domain basis for the
  * trace files destination directory.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
  */
 static int copy_session_consumer(int domain, struct ltt_session *session)
 {
@@ -2082,6 +2346,8 @@ error:
 
 /*
  * Create an UST session and add it to the session ust list.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
  */
 static int create_ust_session(struct ltt_session *session,
                struct lttng_domain *domain)
@@ -2104,7 +2370,7 @@ static int create_ust_session(struct ltt_session *session,
 
        DBG("Creating UST session");
 
-       lus = trace_ust_create_session(session->path, session->id);
+       lus = trace_ust_create_session(session->id);
        if (lus == NULL) {
                ret = LTTNG_ERR_UST_SESS_FAIL;
                goto error;
@@ -2112,6 +2378,7 @@ static int create_ust_session(struct ltt_session *session,
 
        lus->uid = session->uid;
        lus->gid = session->gid;
+       lus->output_traces = session->output_traces;
        session->ust_session = lus;
 
        /* Copy session output to the newly created UST session */
@@ -2168,6 +2435,7 @@ static int create_kernel_session(struct ltt_session *session)
 
        session->kernel_session->uid = session->uid;
        session->kernel_session->gid = session->gid;
+       session->kernel_session->output_traces = session->output_traces;
 
        return LTTNG_OK;
 
@@ -2207,6 +2475,8 @@ static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
  * Return any error encountered or 0 for success.
  *
  * "sock" is only used for special-case var. len data.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
  */
 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                int *sock_error)
@@ -2227,6 +2497,10 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        case LTTNG_START_TRACE:
        case LTTNG_STOP_TRACE:
        case LTTNG_DATA_PENDING:
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       case LTTNG_SNAPSHOT_RECORD:
                need_domain = 0;
                break;
        default:
@@ -2295,12 +2569,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                session_lock_list();
                cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
                if (cmd_ctx->session == NULL) {
-                       if (cmd_ctx->lsm->session.name != NULL) {
-                               ret = LTTNG_ERR_SESS_NOT_FOUND;
-                       } else {
-                               /* If no session name specified */
-                               ret = LTTNG_ERR_SELECT_SESS;
-                       }
+                       ret = LTTNG_ERR_SESS_NOT_FOUND;
                        goto error;
                } else {
                        /* Acquire lock for the session */
@@ -2351,8 +2620,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        /* Start the kernel consumer daemon */
                        pthread_mutex_lock(&kconsumer_data.pid_mutex);
                        if (kconsumer_data.pid == 0 &&
-                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
-                                       cmd_ctx->session->start_consumer) {
+                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                                pthread_mutex_unlock(&kconsumer_data.pid_mutex);
                                ret = start_consumerd(&kconsumer_data);
                                if (ret < 0) {
@@ -2399,8 +2667,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
                        if (consumerd64_bin[0] != '\0' &&
                                        ustconsumer64_data.pid == 0 &&
-                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
-                                       cmd_ctx->session->start_consumer) {
+                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                                pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
                                ret = start_consumerd(&ustconsumer64_data);
                                if (ret < 0) {
@@ -2428,8 +2695,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        /* 32-bit */
                        if (consumerd32_bin[0] != '\0' &&
                                        ustconsumer32_data.pid == 0 &&
-                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
-                                       cmd_ctx->session->start_consumer) {
+                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                                pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
                                ret = start_consumerd(&ustconsumer32_data);
                                if (ret < 0) {
@@ -2493,6 +2759,21 @@ skip_domain:
                }
        }
 
+       /*
+        * Send relayd information to consumer as soon as we have a domain and a
+        * session defined.
+        */
+       if (cmd_ctx->session && need_domain) {
+               /*
+                * Setup relayd if not done yet. If the relayd information was already
+                * sent to the consumer, this call will gracefully return.
+                */
+               ret = cmd_setup_relayd(cmd_ctx->session);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+       }
+
        /* Process by command type */
        switch (cmd_ctx->lsm->cmd_type) {
        case LTTNG_ADD_CONTEXT:
@@ -2523,44 +2804,15 @@ skip_domain:
                                cmd_ctx->lsm->u.disable.channel_name);
                break;
        }
-       case LTTNG_DISABLE_CONSUMER:
-       {
-               ret = cmd_disable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
-               break;
-       }
        case LTTNG_ENABLE_CHANNEL:
        {
-               ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+               ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
                                &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
                break;
        }
-       case LTTNG_ENABLE_CONSUMER:
-       {
-               /*
-                * XXX: 0 means that this URI should be applied on the session. Should
-                * be a DOMAIN enuam.
-                */
-               ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
-               if (ret != LTTNG_OK) {
-                       goto error;
-               }
-
-               if (cmd_ctx->lsm->domain.type == 0) {
-                       /* Add the URI for the UST session if a consumer is present. */
-                       if (cmd_ctx->session->ust_session &&
-                                       cmd_ctx->session->ust_session->consumer) {
-                               ret = cmd_enable_consumer(LTTNG_DOMAIN_UST, cmd_ctx->session);
-                       } else if (cmd_ctx->session->kernel_session &&
-                                       cmd_ctx->session->kernel_session->consumer) {
-                               ret = cmd_enable_consumer(LTTNG_DOMAIN_KERNEL,
-                                               cmd_ctx->session);
-                       }
-               }
-               break;
-       }
        case LTTNG_ENABLE_EVENT:
        {
-               ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+               ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
                                cmd_ctx->lsm->u.enable.channel_name,
                                &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
                break;
@@ -2569,7 +2821,7 @@ skip_domain:
        {
                DBG("Enabling all events");
 
-               ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+               ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
                                cmd_ctx->lsm->u.enable.channel_name,
                                cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
                break;
@@ -2769,6 +3021,7 @@ skip_domain:
 
                ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
                if (ret < 0) {
+                       free(domains);
                        goto setup_error;
                }
 
@@ -2796,6 +3049,7 @@ skip_domain:
 
                ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
                if (ret < 0) {
+                       free(channels);
                        goto setup_error;
                }
 
@@ -2823,6 +3077,7 @@ skip_domain:
 
                ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
                if (ret < 0) {
+                       free(events);
                        goto setup_error;
                }
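
The three free() fixes in the LIST command handlers share one shape: the temporary array must be released when setup_lttng_msg() fails, not only after the payload copy. Sketched as a hypothetical helper over the names used in this file:

    static int reply_with_list(struct command_ctx *cmd_ctx, void *items,
                    size_t len)
    {
            int ret = setup_lttng_msg(cmd_ctx, len);

            if (ret < 0) {
                    free(items);            /* previously leaked on this path */
                    return ret;
            }
            memcpy(cmd_ctx->llm->payload, items, len);
            free(items);
            return LTTNG_OK;
    }
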
 
@@ -2918,7 +3173,7 @@ skip_domain:
                        goto error;
                }
 
-               ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+               ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
                                cmd_ctx->lsm->u.enable.channel_name,
                                &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
                break;
@@ -2928,6 +3183,67 @@ skip_domain:
                ret = cmd_data_pending(cmd_ctx->session);
                break;
        }
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       {
+               struct lttcomm_lttng_output_id reply;
+
+               ret = cmd_snapshot_add_output(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               /* Copy the new output's id into the message payload */
+               memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       {
+               ret = cmd_snapshot_del_output(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_output.output);
+               break;
+       }
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       {
+               ssize_t nb_output;
+               struct lttng_snapshot_output *outputs = NULL;
+
+               nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
+               if (nb_output < 0) {
+                       ret = -nb_output;
+                       goto error;
+               }
+
+               ret = setup_lttng_msg(cmd_ctx,
+                               nb_output * sizeof(struct lttng_snapshot_output));
+               if (ret < 0) {
+                       free(outputs);
+                       goto setup_error;
+               }
+
+               if (outputs) {
+                       /* Copy output list into message payload */
+                       memcpy(cmd_ctx->llm->payload, outputs,
+                                       nb_output * sizeof(struct lttng_snapshot_output));
+                       free(outputs);
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_RECORD:
+       {
+               ret = cmd_snapshot_record(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_record.output,
+                               cmd_ctx->lsm->u.snapshot_record.wait);
+               break;
+       }
        default:
                ret = LTTNG_ERR_UND;
                break;
@@ -2968,6 +3284,9 @@ static void *thread_manage_health(void *data)
 
        rcu_register_thread();
 
+       /* Init the poll set now: an error path can reach lttng_poll_clean()
+        * before the set is created below. */
+       lttng_poll_init(&events);
+
        /* Create unix socket */
        sock = lttcomm_create_unix_sock(health_unix_sock_path);
        if (sock < 0) {
@@ -2991,7 +3310,7 @@ static void *thread_manage_health(void *data)
         * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
         * more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
                goto error;
        }
@@ -3026,7 +3345,7 @@ restart:
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -3082,13 +3401,25 @@ restart:
                case LTTNG_HEALTH_CONSUMER:
                        reply.ret_code = check_consumer_health();
                        break;
+               case LTTNG_HEALTH_HT_CLEANUP:
+                       reply.ret_code = health_check_state(HEALTH_TYPE_HT_CLEANUP);
+                       break;
+               case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
+                       reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY);
+                       break;
+               case LTTNG_HEALTH_APP_REG_DISPATCH:
+                       reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
+                       break;
                case LTTNG_HEALTH_ALL:
                        reply.ret_code =
                                health_check_state(HEALTH_TYPE_APP_MANAGE) &&
                                health_check_state(HEALTH_TYPE_APP_REG) &&
                                health_check_state(HEALTH_TYPE_CMD) &&
                                health_check_state(HEALTH_TYPE_KERNEL) &&
-                               check_consumer_health();
+                               check_consumer_health() &&
+                               health_check_state(HEALTH_TYPE_HT_CLEANUP) &&
+                               health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
+                               health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
                        break;
                default:
                        reply.ret_code = LTTNG_ERR_UND;
@@ -3133,12 +3464,6 @@ error:
                        PERROR("close");
                }
        }
-       if (new_sock >= 0) {
-               ret = close(new_sock);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
 
        lttng_poll_clean(&events);
 
@@ -3179,7 +3504,7 @@ static void *thread_manage_clients(void *data)
         * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
         * more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
                goto error_create_poll;
        }
@@ -3208,9 +3533,9 @@ static void *thread_manage_clients(void *data)
 
                /* Infinite blocking call, waiting for transmission */
        restart:
-               health_poll_update();
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update();
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -3231,7 +3556,7 @@ static void *thread_manage_clients(void *data)
                        health_code_update();
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -3320,13 +3645,11 @@ static void *thread_manage_clients(void *data)
                ret = process_client_msg(cmd_ctx, sock, &sock_error);
                rcu_thread_offline();
                if (ret < 0) {
-                       if (sock_error) {
-                               ret = close(sock);
-                               if (ret) {
-                                       PERROR("close");
-                               }
-                               sock = -1;
+                       ret = close(sock);
+                       if (ret) {
+                               PERROR("close");
                        }
+                       sock = -1;
                        /*
                         * TODO: Inform client somehow of the fatal error. At
                         * this point, ret < 0 means that a zmalloc failed
@@ -3915,6 +4238,13 @@ int main(int argc, char **argv)
 
        setup_consumerd_path();
 
+       page_size = sysconf(_SC_PAGESIZE);
+       if (page_size < 0) {
+               PERROR("sysconf _SC_PAGESIZE");
+               page_size = LONG_MAX;
+               WARN("Falling back to page size %ld", page_size);
+       }
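+       /*
+        * Rationale (assumption): LONG_MAX keeps later page-size based
+        * validations conservative instead of silently under-sizing buffers.
+        */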
+
        /* Parse arguments */
        progname = argv[0];
        if ((ret = parse_args(argc, argv)) < 0) {
@@ -3994,7 +4324,7 @@ int main(int argc, char **argv)
                DBG2("Kernel consumer cmd path: %s",
                                kconsumer_data.cmd_unix_sock_path);
        } else {
-               home_path = get_home_dir();
+               home_path = utils_get_home_dir();
                if (home_path == NULL) {
                        /* TODO: Add --socket PATH option */
                        ERR("Can't get HOME directory for sockets creation.");
@@ -4031,7 +4361,7 @@ int main(int argc, char **argv)
                /* Set global SHM for ust */
                if (strlen(wait_shm_path) == 0) {
                        snprintf(wait_shm_path, PATH_MAX,
-                                       DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
+                                       DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
                }
 
                /* Set health check Unix path */
@@ -4047,6 +4377,7 @@ int main(int argc, char **argv)
 
        DBG("Client socket path %s", client_unix_sock_path);
        DBG("Application socket path %s", apps_unix_sock_path);
+       DBG("Application wait path %s", wait_shm_path);
        DBG("LTTng run directory path: %s", rundir);
 
        /* 32 bits consumerd path setup */
@@ -4150,11 +4481,25 @@ int main(int argc, char **argv)
                }
        }
 
+       /* Setup the thread ht_cleanup communication pipe. */
+       if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
+               goto exit;
+       }
+
        /* Setup the thread apps communication pipe. */
        if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
                goto exit;
        }
 
+       /* Setup the thread apps notify communication pipe. */
+       if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
+               goto exit;
+       }
+
+       /* Initialize global buffer per UID and PID registry. */
+       buffer_reg_init_uid_registry();
+       buffer_reg_init_pid_registry();
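+       /* Done before any thread is created so both registries exist when
+        * the first application registers. */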
+
        /* Init UST command queue. */
        cds_wfq_init(&ust_cmd_queue.queue);
 
@@ -4179,6 +4524,14 @@ int main(int argc, char **argv)
 
        write_pidfile();
 
+       /* Create the hash table cleanup thread */
+       ret = pthread_create(&ht_cleanup_thread, NULL,
+                       thread_ht_cleanup, (void *) NULL);
+       if (ret != 0) {
+               PERROR("pthread_create ht_cleanup");
+               goto exit_ht_cleanup;
+       }
+
        /* Create thread to manage the health check socket */
        ret = pthread_create(&health_thread, NULL,
                        thread_manage_health, (void *) NULL);
@@ -4219,6 +4572,14 @@ int main(int argc, char **argv)
                goto exit_apps;
        }
 
+       /* Create thread to manage application notify socket */
+       ret = pthread_create(&apps_notify_thread, NULL,
+                       ust_thread_manage_notify, (void *) NULL);
+       if (ret != 0) {
+               PERROR("pthread_create apps_notify");
+               goto exit_apps;
+       }
+
        /* Don't start this thread if kernel tracing is not requested nor root */
        if (is_root && !opt_no_kernel) {
                /* Create kernel thread to manage kernel event */
@@ -4290,6 +4651,12 @@ exit_client:
        }
 
 exit_health:
+       ret = pthread_join(ht_cleanup_thread, &status);
+       if (ret != 0) {
+               PERROR("pthread_join ht cleanup thread");
+               goto error;     /* join error, exit without cleanup */
+       }
+exit_ht_cleanup:
 exit:
        /*
         * cleanup() is called when no other thread is running.