+static pid_t ppid; /* Parent PID for --sig-parent option */
+static pid_t kconsumerd_pid; /* PID of the kernel consumer daemon process */
+static struct pollfd *kernel_pollfd; /* Poll set of the kernel thread; rebuilt by update_kernel_pollfd() */
+
+static char apps_unix_sock_path[PATH_MAX]; /* Global application Unix socket path */
+static char client_unix_sock_path[PATH_MAX]; /* Global client Unix socket path */
+static char kconsumerd_err_unix_sock_path[PATH_MAX]; /* kconsumerd error Unix socket path */
+static char kconsumerd_cmd_unix_sock_path[PATH_MAX]; /* kconsumerd command Unix socket path */
+
+/* Sockets and FDs */
+static int client_sock;
+static int apps_sock;
+static int kconsumerd_err_sock;
+static int kconsumerd_cmd_sock;
+static int kernel_tracer_fd;
+static int kernel_poll_pipe[2]; /* Written to wake the kernel thread so it rebuilds kernel_pollfd */
+
+/*
+ * Quit pipe for all threads. This permits a single cancellation point
+ * for all threads when receiving an event on the pipe.
+ */
+static int thread_quit_pipe[2];
+
+/* Pthread, Mutexes and Semaphores */
+static pthread_t kconsumerd_thread;
+static pthread_t apps_thread;
+static pthread_t client_thread;
+static pthread_t kernel_thread;
+static sem_t kconsumerd_sem; /* NOTE(review): presumably signaled once kconsumerd is ready — confirm against the consumer thread */
+
+static pthread_mutex_t kconsumerd_pid_mutex; /* Mutex to control kconsumerd pid assignation */
+
+/*
+ * Pointer initialized before thread creation.
+ *
+ * This points to the tracing session list containing the session count and a
+ * mutex lock. The lock MUST be taken if you iterate over the list. The lock
+ * MUST NOT be taken if you call a public function in session.c.
+ *
+ * The lock is nested inside the structure: session_list_ptr->lock.
+ */
+static struct ltt_session_list *session_list_ptr;
+
+/*
+ * Create the quit pipe shared by every thread of the daemon.
+ *
+ * Both ends are opened close-on-exec so spawned children (e.g. the
+ * consumer) do not inherit them.
+ *
+ * Return -1 on error or 0 if all pipes are created.
+ */
+static int init_thread_quit_pipe(void)
+{
+	int ret = pipe2(thread_quit_pipe, O_CLOEXEC);
+
+	if (ret < 0) {
+		perror("thread quit pipe");
+	}
+
+	return ret;
+}
+
+/*
+ * teardown_kernel_session
+ *
+ * Complete teardown of the kernel side of a session: free every data
+ * structure attached to it, then clear the pointer so a later teardown
+ * cannot free it twice.
+ */
+static void teardown_kernel_session(struct ltt_session *session)
+{
+	if (session->kernel_session == NULL) {
+		/* Nothing kernel-side to tear down */
+		return;
+	}
+
+	DBG("Tearing down kernel session");
+	trace_destroy_kernel_session(session->kernel_session);
+	/* Extra precaution */
+	session->kernel_session = NULL;
+}
+
+/*
+ * Cleanup the daemon on exit.
+ *
+ * Closes the thread quit pipe (waking every thread polling on it),
+ * removes the lttng run directory, tears down the kernel side of every
+ * known session and releases the remaining global resources.
+ */
+static void cleanup(void)
+{
+	int ret;
+	char *cmd = NULL;
+	struct ltt_session *sess;
+
+	DBG("Cleaning up");
+
+	/* <fun> */
+	MSG("\n%c[%d;%dm*** assert failed *** ==> %c[%dm%c[%d;%dm"
+		"Matthew, BEET driven development works!%c[%dm",
+		27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
+	/* </fun> */
+
+	/* Stopping all threads */
+	DBG("Terminating all threads");
+	close(thread_quit_pipe[0]);
+	close(thread_quit_pipe[1]);
+
+	DBG("Removing %s directory", LTTNG_RUNDIR);
+	ret = asprintf(&cmd, "rm -rf " LTTNG_RUNDIR);
+	if (ret < 0) {
+		/*
+		 * On asprintf failure cmd is indeterminate: it MUST NOT be
+		 * passed to system() (that was undefined behavior before).
+		 */
+		ERR("asprintf failed. Something is really wrong!");
+	} else {
+		/* Remove lttng run directory */
+		ret = system(cmd);
+		if (ret < 0) {
+			ERR("Unable to clean " LTTNG_RUNDIR);
+		}
+		/* asprintf allocated the buffer; don't leak it */
+		free(cmd);
+	}
+
+	DBG("Cleaning up all session");
+
+	/* Destroy session list mutex */
+	if (session_list_ptr != NULL) {
+		pthread_mutex_destroy(&session_list_ptr->lock);
+
+		/* Cleanup ALL session */
+		cds_list_for_each_entry(sess, &session_list_ptr->head, list) {
+			teardown_kernel_session(sess);
+			// TODO complete session cleanup (including UST)
+		}
+	}
+
+	pthread_mutex_destroy(&kconsumerd_pid_mutex);
+
+	DBG("Closing kernel fd");
+	close(kernel_tracer_fd);
+}
+
+/*
+ * send_unix_sock
+ *
+ * Send data on a unix socket using the liblttsessiondcomm API.
+ *
+ * Return -1 on empty payload, otherwise the lttcomm error code.
+ */
+static int send_unix_sock(int sock, void *buf, size_t len)
+{
+	/*
+	 * len is unsigned, so the former "len <= 0" test could only ever
+	 * mean "len == 0"; say so explicitly to avoid the tautological
+	 * comparison.
+	 */
+	if (len == 0) {
+		return -1;
+	}
+
+	return lttcomm_send_unix_sock(sock, buf, len);
+}
+
+/*
+ * clean_command_ctx
+ *
+ * Free all memory owned by a command context structure and reset the
+ * caller's pointer to NULL, guarding against double free and
+ * use-after-free.
+ */
+static void clean_command_ctx(struct command_ctx **cmd_ctx)
+{
+	DBG("Clean command context structure");
+	if (*cmd_ctx) {
+		/* free(NULL) is a no-op; per-member guards are redundant */
+		free((*cmd_ctx)->llm);
+		free((*cmd_ctx)->lsm);
+		free(*cmd_ctx);
+		*cmd_ctx = NULL;
+	}
+}
+
+/*
+ * send_kconsumerd_channel_fds
+ *
+ * Send all stream fds of kernel channel to the consumer: one ADD_STREAM
+ * header announcing the payload size, then one message (carrying the fd
+ * out-of-band) per open stream.
+ *
+ * Return 0 on success, the negative lttcomm error code otherwise.
+ */
+static int send_kconsumerd_channel_fds(int sock, struct ltt_kernel_channel *channel)
+{
+	int ret;
+	size_t nb_fd;
+	struct ltt_kernel_stream *stream;
+	struct lttcomm_kconsumerd_header lkh;
+	struct lttcomm_kconsumerd_msg lkm;
+
+	DBG("Sending fds of channel %s to kernel consumer", channel->channel->name);
+
+	nb_fd = channel->stream_count;
+
+	/* Setup header */
+	lkh.payload_size = nb_fd * sizeof(struct lttcomm_kconsumerd_msg);
+	lkh.cmd_type = ADD_STREAM;
+
+	DBG("Sending kconsumerd header");
+
+	ret = lttcomm_send_unix_sock(sock, &lkh, sizeof(struct lttcomm_kconsumerd_header));
+	if (ret < 0) {
+		perror("send kconsumerd header");
+		goto error;
+	}
+
+	cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
+		if (stream->fd != 0) {
+			lkm.fd = stream->fd;
+			lkm.state = stream->state;
+			lkm.max_sb_size = channel->channel->attr.subbuf_size;
+			strncpy(lkm.path_name, stream->pathname, PATH_MAX);
+			/*
+			 * strncpy does not NUL-terminate when the source is
+			 * PATH_MAX bytes or longer; terminate explicitly
+			 * (assumes path_name is PATH_MAX bytes — confirm
+			 * against the lttcomm header).
+			 */
+			lkm.path_name[PATH_MAX - 1] = '\0';
+
+			DBG("Sending fd %d to kconsumerd", lkm.fd);
+
+			ret = lttcomm_send_fds_unix_sock(sock, &lkm, &lkm.fd, 1, sizeof(lkm));
+			if (ret < 0) {
+				perror("send kconsumerd fd");
+				goto error;
+			}
+		}
+	}
+
+	DBG("Kconsumerd channel fds sent");
+
+	return 0;
+
+error:
+	return ret;
+}
+
+/*
+ * send_kconsumerd_fds
+ *
+ * Send all stream fds of the kernel session to the consumer: first the
+ * metadata stream, then every stream of every channel.
+ *
+ * Return 0 on success, the negative lttcomm error code otherwise.
+ */
+static int send_kconsumerd_fds(int sock, struct ltt_kernel_session *session)
+{
+	int ret;
+	struct ltt_kernel_channel *chan;
+	struct lttcomm_kconsumerd_header lkh;
+	struct lttcomm_kconsumerd_msg lkm;
+
+	/* Setup header */
+	lkh.payload_size = sizeof(struct lttcomm_kconsumerd_msg);
+	lkh.cmd_type = ADD_STREAM;
+
+	DBG("Sending kconsumerd header for metadata");
+
+	ret = lttcomm_send_unix_sock(sock, &lkh, sizeof(struct lttcomm_kconsumerd_header));
+	if (ret < 0) {
+		perror("send kconsumerd header");
+		goto error;
+	}
+
+	DBG("Sending metadata stream fd");
+
+	if (session->metadata_stream_fd != 0) {
+		/* Send metadata stream fd first */
+		lkm.fd = session->metadata_stream_fd;
+		lkm.state = ACTIVE_FD;
+		lkm.max_sb_size = session->metadata->conf->attr.subbuf_size;
+		strncpy(lkm.path_name, session->metadata->pathname, PATH_MAX);
+		/*
+		 * strncpy does not NUL-terminate when the source is PATH_MAX
+		 * bytes or longer; terminate explicitly (assumes path_name is
+		 * PATH_MAX bytes — confirm against the lttcomm header).
+		 */
+		lkm.path_name[PATH_MAX - 1] = '\0';
+
+		ret = lttcomm_send_fds_unix_sock(sock, &lkm, &lkm.fd, 1, sizeof(lkm));
+		if (ret < 0) {
+			perror("send kconsumerd fd");
+			goto error;
+		}
+	}
+
+	cds_list_for_each_entry(chan, &session->channel_list.head, list) {
+		ret = send_kconsumerd_channel_fds(sock, chan);
+		if (ret < 0) {
+			goto error;
+		}
+	}
+
+	DBG("Kconsumerd fds (metadata and channel streams) sent");
+
+	return 0;
+
+error:
+	return ret;
+}
+
+#ifdef DISABLED
+/*
+ * ust_connect_app
+ *
+ * Return a socket connected to the libust communication socket
+ * of the application identified by the pid.
+ *
+ * If the pid is not found in the traceable list,
+ * return -1 to indicate error.
+ *
+ * NOTE: compiled out (DISABLED is not defined); kept for the pending
+ * UST support.
+ */
+static int ust_connect_app(pid_t pid)
+{
+	int sock;
+	struct ltt_traceable_app *lta;
+
+	DBG("Connect to application pid %d", pid);
+
+	lta = find_app_by_pid(pid);
+	if (lta == NULL) {
+		/* App not found */
+		DBG("Application pid %d not found", pid);
+		return -1;
+	}
+
+	/* On failure, ustctl_connect_pid returns the negative error code */
+	sock = ustctl_connect_pid(lta->pid);
+	if (sock < 0) {
+		ERR("Fail connecting to the PID %d", pid);
+	}
+
+	return sock;
+}
+#endif /* DISABLED */
+
+/*
+ * notify_apps
+ *
+ * Notify apps by writing 42 to a named pipe using name.
+ * Every applications waiting for a ltt-sessiond will be notified
+ * and re-register automatically to the session daemon.
+ *
+ * Return the open or write error value (negative on error), otherwise
+ * the number of bytes written.
+ */
+static int notify_apps(const char *name)
+{
+	int fd;
+	int ret = -1;
+
+	DBG("Notify the global application pipe");
+
+	/* Try opening the global pipe */
+	fd = open(name, O_WRONLY);
+	if (fd < 0) {
+		goto error;
+	}
+
+	/* Notify by writing on the pipe */
+	ret = write(fd, "42", 2);
+	if (ret < 0) {
+		perror("write");
+	}
+
+	/* The fd was previously leaked on every call; always close it */
+	close(fd);
+
+error:
+	return ret;
+}
+
+/*
+ * setup_lttng_msg
+ *
+ * Setup the outgoing data buffer for the response (llm): allocate a
+ * lttcomm_lttng_msg header followed by `size` bytes of payload, then
+ * copy the common fields over from the received lsm message.
+ *
+ * Return total size of the buffer pointed by buf, or -ENOMEM on
+ * allocation failure.
+ */
+static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
+{
+	int payload_size = size;
+
+	cmd_ctx->llm = malloc(sizeof(struct lttcomm_lttng_msg) + payload_size);
+	if (cmd_ctx->llm == NULL) {
+		perror("malloc");
+		return -ENOMEM;
+	}
+
+	/* Copy common data */
+	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
+	cmd_ctx->llm->pid = cmd_ctx->lsm->pid;
+
+	cmd_ctx->llm->data_size = size;
+	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + payload_size;
+
+	return payload_size;
+}
+
+/*
+ * update_kernel_pollfd
+ *
+ * Update the kernel pollfd set with every channel fd available over
+ * all tracing sessions, then append the wakeup pipe and the quit pipe
+ * at the end of the set.
+ *
+ * Return the number of pollfd entries, or -1 on error.
+ */
+static int update_kernel_pollfd(void)
+{
+	int i = 0;
+	/*
+	 * The wakup pipe and the quit pipe are needed so the number of fds starts
+	 * at 2 for those pipes.
+	 */
+	unsigned int nb_fd = 2;
+	struct ltt_session *session;
+	struct ltt_kernel_channel *channel;
+	struct pollfd *new_pollfd;
+
+	DBG("Updating kernel_pollfd");
+
+	/* Get the number of channel of all kernel session */
+	lock_session_list();
+	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+		lock_session(session);
+		if (session->kernel_session == NULL) {
+			unlock_session(session);
+			continue;
+		}
+		nb_fd += session->kernel_session->channel_count;
+		unlock_session(session);
+	}
+
+	DBG("Resizing kernel_pollfd to size %d", nb_fd);
+
+	/*
+	 * Never assign realloc() straight back to the only pointer: on
+	 * failure it returns NULL but leaves the old block allocated, which
+	 * would both leak it and lose the fds it still holds.
+	 */
+	new_pollfd = realloc(kernel_pollfd, nb_fd * sizeof(struct pollfd));
+	if (new_pollfd == NULL) {
+		perror("realloc kernel_pollfd");
+		goto error;
+	}
+	kernel_pollfd = new_pollfd;
+
+	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+		lock_session(session);
+		if (session->kernel_session == NULL) {
+			unlock_session(session);
+			continue;
+		}
+		/*
+		 * The last two slots are reserved for the pipes; the old
+		 * "i >= nb_fd" guard let channels overwrite them.
+		 */
+		if (i >= nb_fd - 2) {
+			ERR("Too many channels for kernel_pollfd size");
+			unlock_session(session);
+			break;
+		}
+		cds_list_for_each_entry(channel, &session->kernel_session->channel_list.head, list) {
+			kernel_pollfd[i].fd = channel->fd;
+			kernel_pollfd[i].events = POLLIN | POLLRDNORM;
+			i++;
+		}
+		unlock_session(session);
+	}
+	unlock_session_list();
+
+	/* Adding wake up pipe */
+	kernel_pollfd[nb_fd - 2].fd = kernel_poll_pipe[0];
+	kernel_pollfd[nb_fd - 2].events = POLLIN;
+
+	/*
+	 * Adding the quit pipe. events was previously left as uninitialized
+	 * heap memory; POLLNVAL/POLLHUP/POLLERR are reported by poll(2)
+	 * regardless of the requested events, so 0 is sufficient here.
+	 */
+	kernel_pollfd[nb_fd - 1].fd = thread_quit_pipe[0];
+	kernel_pollfd[nb_fd - 1].events = 0;
+
+	return nb_fd;
+
+error:
+	unlock_session_list();
+	return -1;
+}
+
+/*
+ * update_kernel_stream
+ *
+ * Find the channel fd from 'fd' over all tracing session. When found, check
+ * for new channel stream and send those stream fds to the kernel consumer.
+ *
+ * Useful for CPU hotplug feature.
+ */
+static int update_kernel_stream(int fd)
+{
+	int ret = 0;
+	struct ltt_session *session;
+	struct ltt_kernel_channel *channel;
+
+	DBG("Updating kernel streams for channel fd %d", fd);
+
+	lock_session_list();
+	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+		lock_session(session);
+		if (session->kernel_session == NULL) {
+			unlock_session(session);
+			continue;
+		}
+		cds_list_for_each_entry(channel, &session->kernel_session->channel_list.head, list) {
+			if (channel->fd == fd) {
+				DBG("Channel found, updating kernel streams");
+				ret = kernel_open_channel_stream(channel);
+				/*
+				 * Have we already sent fds to the consumer? If yes, it
+				 * means that tracing is started so it is safe to send
+				 * our updated stream fds.
+				 */
+				if (ret >= 0 &&
+						session->kernel_session->kconsumer_fds_sent == 1) {
+					ret = send_kconsumerd_channel_fds(
+							kconsumerd_cmd_sock, channel);
+				}
+				/*
+				 * Unlock the session BEFORE leaving the loop. The old
+				 * code unlocked after the loop via "if (session)", but
+				 * when the loop runs to completion the iterator leaves
+				 * `session` pointing at the list head's container — a
+				 * non-NULL invalid pointer — so that unlock was a bug.
+				 */
+				unlock_session(session);
+				goto end;
+			}
+		}
+		unlock_session(session);
+	}
+
+end:
+	unlock_session_list();
+	return ret;
+}
+
+/*
+ * thread_manage_kernel
+ *
+ * This thread manage event coming from the kernel.
+ *
+ * Features supported in this thread:
+ * -) CPU Hotplug
+ */
+static void *thread_manage_kernel(void *data)
+{
+ int ret, i, nb_fd = 0;
+ char tmp;
+ int update_poll_flag = 1;
+
+ DBG("Thread manage kernel started");
+
+ while (1) {
+ if (update_poll_flag == 1) {
+ nb_fd = update_kernel_pollfd();
+ if (nb_fd < 0) {
+ goto error;
+ }
+ update_poll_flag = 0;
+ }
+
+ DBG("Polling on %d fds", nb_fd);
+
+ /* Poll infinite value of time */
+ ret = poll(kernel_pollfd, nb_fd, -1);
+ if (ret < 0) {
+ perror("poll kernel thread");
+ goto error;
+ } else if (ret == 0) {
+ /* Should not happen since timeout is infinite */
+ continue;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (kernel_pollfd[nb_fd - 1].revents == POLLNVAL) {
+ goto error;
+ }
+
+ DBG("Kernel poll event triggered");
+
+ /*
+ * Check if the wake up pipe was triggered. If so, the kernel_pollfd
+ * must be updated.
+ */
+ switch (kernel_pollfd[nb_fd - 2].revents) {
+ case POLLIN:
+ ret = read(kernel_poll_pipe[0], &tmp, 1);
+ update_poll_flag = 1;
+ continue;
+ case POLLERR:
+ goto error;
+ default:
+ break;
+ }
+
+ for (i = 0; i < nb_fd; i++) {
+ switch (kernel_pollfd[i].revents) {
+ /*
+ * New CPU detected by the kernel. Adding kernel stream to kernel
+ * session and updating the kernel consumer
+ */
+ case POLLIN | POLLRDNORM:
+ ret = update_kernel_stream(kernel_pollfd[i].fd);
+ if (ret < 0) {
+ continue;
+ }
+ break;
+ }
+ }
+ }