+ * Return the total size of the buffer pointed to by buf.
+ */
+static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
+{
+ int ret, buf_size;
+
+ buf_size = size;
+
+ cmd_ctx->llm = malloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
+ if (cmd_ctx->llm == NULL) {
+ perror("malloc");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Copy common data */
+ cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
+ cmd_ctx->llm->pid = cmd_ctx->lsm->pid;
+
+ cmd_ctx->llm->data_size = size;
+ cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
+
+ return buf_size;
+
+error:
+ return ret;
+}
+
+/*
+ * update_kernel_pollfd
+ *
+ * Update the kernel pollfd set with every channel fd available over
+ * all tracing sessions. Add the wakeup pipe and the quit pipe at the
+ * end of the set.
+ */
+static int update_kernel_pollfd(void)
+{
+ int i = 0;
+ /*
+ * The wakup pipe and the quit pipe are needed so the number of fds starts
+ * at 2 for those pipes.
+ */
+ unsigned int nb_fd = 2;
+ struct ltt_session *session;
+ struct ltt_kernel_channel *channel;
+
+ DBG("Updating kernel_pollfd");
+
+ /* Get the number of channel of all kernel session */
+ lock_session_list();
+ cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+ lock_session(session);
+ if (session->kernel_session == NULL) {
+ unlock_session(session);
+ continue;
+ }
+ nb_fd += session->kernel_session->channel_count;
+ unlock_session(session);
+ }
+
+ DBG("Resizing kernel_pollfd to size %d", nb_fd);
+
+ kernel_pollfd = realloc(kernel_pollfd, nb_fd * sizeof(struct pollfd));
+ if (kernel_pollfd == NULL) {
+ perror("malloc kernel_pollfd");
+ goto error;
+ }
+
+ cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+ lock_session(session);
+ if (session->kernel_session == NULL) {
+ unlock_session(session);
+ continue;
+ }
+ if (i >= nb_fd) {
+ ERR("To much channel for kernel_pollfd size");
+ unlock_session(session);
+ break;
+ }
+ cds_list_for_each_entry(channel, &session->kernel_session->channel_list.head, list) {
+ kernel_pollfd[i].fd = channel->fd;
+ kernel_pollfd[i].events = POLLIN | POLLRDNORM;
+ i++;
+ }
+ unlock_session(session);
+ }
+ unlock_session_list();
+
+ /* Adding wake up pipe */
+ kernel_pollfd[nb_fd - 2].fd = kernel_poll_pipe[0];
+ kernel_pollfd[nb_fd - 2].events = POLLIN;
+
+ /* Adding the quit pipe */
+ kernel_pollfd[nb_fd - 1].fd = thread_quit_pipe[0];
+
+ return nb_fd;
+
+error:
+ unlock_session_list();
+ return -1;
+}
+
+/*
+ * update_kernel_stream
+ *
+ * Find the channel fd from 'fd' over all tracing sessions. When found, check
+ * for new channel streams and send those stream fds to the kernel consumer.
+ *
+ * Useful for the CPU hotplug feature.
+ */
+static int update_kernel_stream(int fd)
+{
+ int ret = 0;
+ struct ltt_session *session;
+ struct ltt_kernel_channel *channel;
+
+ DBG("Updating kernel streams for channel fd %d", fd);
+
+ lock_session_list();
+ cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+ lock_session(session);
+ if (session->kernel_session == NULL) {
+ unlock_session(session);
+ continue;
+ }
+ cds_list_for_each_entry(channel, &session->kernel_session->channel_list.head, list) {
+ if (channel->fd == fd) {
+ DBG("Channel found, updating kernel streams");
+ ret = kernel_open_channel_stream(channel);
+ if (ret < 0) {
+ goto end;
+ }
+ /*
+ * Have we already sent fds to the consumer? If yes, it means that
+ * tracing is started so it is safe to send our updated stream fds.
+ */
+ if (session->kernel_session->kconsumer_fds_sent == 1) {
+ ret = send_kconsumerd_channel_fds(kconsumerd_cmd_sock, channel);
+ if (ret < 0) {
+ goto end;
+ }
+ }
+ goto end;
+ }
+ }
+ unlock_session(session);
+ }
+
+end:
+ unlock_session_list();
+ if (session) {
+ unlock_session(session);
+ }
+ return ret;
+}
+
+/*
+ * thread_manage_kernel
+ *
+ * This thread manages events coming from the kernel.
+ *
+ * Features supported in this thread:
+ * -) CPU Hotplug
+ */
+static void *thread_manage_kernel(void *data)
+{
+ int ret, i, nb_fd = 0;
+ char tmp;
+ int update_poll_flag = 1;
+
+ DBG("Thread manage kernel started");
+
+ while (1) {
+ if (update_poll_flag == 1) {
+ nb_fd = update_kernel_pollfd();
+ if (nb_fd < 0) {
+ goto error;
+ }
+ update_poll_flag = 0;
+ }
+
+ DBG("Polling on %d fds", nb_fd);
+
+ /* Poll infinite value of time */
+ ret = poll(kernel_pollfd, nb_fd, -1);
+ if (ret < 0) {
+ perror("poll kernel thread");
+ goto error;
+ } else if (ret == 0) {
+ /* Should not happen since timeout is infinite */
+ continue;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (kernel_pollfd[nb_fd - 1].revents == POLLNVAL) {
+ goto error;
+ }
+
+ DBG("Kernel poll event triggered");
+
+ /*
+ * Check if the wake up pipe was triggered. If so, the kernel_pollfd
+ * must be updated.
+ */
+ switch (kernel_pollfd[nb_fd - 2].revents) {
+ case POLLIN:
+ ret = read(kernel_poll_pipe[0], &tmp, 1);
+ update_poll_flag = 1;
+ continue;
+ case POLLERR:
+ goto error;
+ default:
+ break;
+ }
+
+ for (i = 0; i < nb_fd; i++) {
+ switch (kernel_pollfd[i].revents) {
+ /*
+ * New CPU detected by the kernel. Adding kernel stream to kernel
+ * session and updating the kernel consumer
+ */
+ case POLLIN | POLLRDNORM:
+ ret = update_kernel_stream(kernel_pollfd[i].fd);
+ if (ret < 0) {
+ continue;
+ }
+ break;
+ }
+ }
+ }
+
+error:
+ DBG("Kernel thread dying");
+ if (kernel_pollfd) {
+ free(kernel_pollfd);
+ }
+
+ close(kernel_poll_pipe[0]);
+ close(kernel_poll_pipe[1]);
+ return NULL;
+}
+
+/*
+ * thread_manage_kconsumerd
+ *
+ * This thread manages the kconsumerd error codes sent
+ * back to the session daemon.
+ */
+static void *thread_manage_kconsumerd(void *data)
+{
+ int sock = 0, ret;
+ enum lttcomm_return_code code;
+ struct pollfd pollfd[2];
+
+ DBG("[thread] Manage kconsumerd started");
+
+ ret = lttcomm_listen_unix_sock(kconsumerd_err_sock);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* First fd is always the quit pipe */
+ pollfd[0].fd = thread_quit_pipe[0];
+
+ /* Apps socket */
+ pollfd[1].fd = kconsumerd_err_sock;
+ pollfd[1].events = POLLIN;
+
+ /* Inifinite blocking call, waiting for transmission */
+ ret = poll(pollfd, 2, -1);
+ if (ret < 0) {
+ perror("poll kconsumerd thread");
+ goto error;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (pollfd[0].revents == POLLNVAL) {
+ goto error;
+ } else if (pollfd[1].revents == POLLERR) {
+ ERR("Kconsumerd err socket poll error");
+ goto error;
+ }
+
+ sock = lttcomm_accept_unix_sock(kconsumerd_err_sock);
+ if (sock < 0) {
+ goto error;
+ }
+
+ /* Getting status code from kconsumerd */
+ ret = lttcomm_recv_unix_sock(sock, &code, sizeof(enum lttcomm_return_code));
+ if (ret <= 0) {
+ goto error;
+ }
+
+ if (code == KCONSUMERD_COMMAND_SOCK_READY) {
+ kconsumerd_cmd_sock = lttcomm_connect_unix_sock(kconsumerd_cmd_unix_sock_path);
+ if (kconsumerd_cmd_sock < 0) {
+ sem_post(&kconsumerd_sem);
+ perror("kconsumerd connect");
+ goto error;
+ }
+ /* Signal condition to tell that the kconsumerd is ready */
+ sem_post(&kconsumerd_sem);
+ DBG("Kconsumerd command socket ready");
+ } else {
+ DBG("Kconsumerd error when waiting for SOCK_READY : %s",
+ lttcomm_get_readable_code(-code));
+ goto error;
+ }
+
+ /* Wait for any kconsumerd error */
+ ret = lttcomm_recv_unix_sock(sock, &code, sizeof(enum lttcomm_return_code));
+ if (ret <= 0) {
+ ERR("Kconsumerd closed the command socket");
+ goto error;
+ }
+
+ ERR("Kconsumerd return code : %s", lttcomm_get_readable_code(-code));
+
+error:
+ DBG("Kconsumerd thread dying");
+ if (kconsumerd_err_sock) {
+ close(kconsumerd_err_sock);
+ }
+ if (kconsumerd_cmd_sock) {
+ close(kconsumerd_cmd_sock);
+ }
+ if (sock) {
+ close(sock);
+ }
+
+ unlink(kconsumerd_err_unix_sock_path);
+ unlink(kconsumerd_cmd_unix_sock_path);
+
+ kconsumerd_pid = 0;
+ return NULL;
+}
+
+/*
+ * thread_manage_apps
+ *
+ * This thread manage the application socket communication