/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <urcu/uatomic.h>
#include <unistd.h>
#include <config.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "channel.h"
#include "cmd.h"
#include "consumer.h"
#include "context.h"
#include "event.h"
#include "kernel.h"
#include "kernel-consumer.h"
#include "modprobe.h"
#include "shm.h"
#include "ust-ctl.h"
#include "ust-consumer.h"
#include "utils.h"
#include "fd-limit.h"
#include "health.h"
#include "testpoint.h"
#include "ust-thread.h"

#define CONSUMERD_FILE	"lttng-consumerd"

/* Const values */
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;

const char *progname;
const char *opt_tracing_group;
static const char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;	/* Set to 1 if the daemon is running as root */
static pid_t ppid;	/* Parent PID for --sig-parent option */
static char *rundir;

/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };

/* Pthreads, mutexes and semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;

/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration threads
 * interact with this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
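
#if 0
/*
 * Illustrative sketch (deliberately kept out of the build): the N-wakers /
 * 1-waiter protocol around this queue, mirroring what
 * thread_registration_apps() (producer side) and
 * thread_dispatch_ust_registration() (consumer side) do later in this file.
 */

/* Producer, N wakers: enqueue, then wake the single waiter. */
cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
futex_nto1_wake(&ust_cmd_queue.futex);

/* Consumer, 1 waiter: prepare, drain the queue, then block until woken. */
futex_nto1_prepare(&ust_cmd_queue.futex);
while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue))) {
	/* ... process the dequeued ust_command ... */
}
futex_nto1_wait(&ust_cmd_queue.futex);
#endif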

/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";

/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing an "lttng start" when the consumer state is set to ERROR will return
 * an error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd arrives
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we cannot guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
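
#if 0
/*
 * Illustrative sketch (not compiled): the state check a client command
 * performs before talking to a consumer, as described above; update_ust_app()
 * below does the same for the UST side. The exact error code returned to the
 * client is hypothetical here.
 */
if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
	/* Inform the client instead of trying to reach a dead consumerd. */
	return LTTNG_ERR_NO_KERNCONSUMERD;	/* hypothetical error code */
}
#endif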

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

/* Set in main() with the current page size. */
long page_size;

static
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * Runtime environment variables override the build defaults.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
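
#if 0
/*
 * Condensed sketch (not compiled) of the lookup order implemented above:
 * runtime environment variable first, build-time default second.
 * env_or_default() is a hypothetical helper, not part of this file.
 */
static const char *env_or_default(const char *name, const char *def)
{
	const char *val = getenv(name);

	return val ? val : def;
}

/* e.g.: consumerd64_bin = env_or_default("LTTNG_CONSUMERD64_BIN", consumerd64_bin); */
#endif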

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	int ret;

	assert(events);

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered, else 0.
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
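
#if 0
/*
 * Illustrative sketch (not compiled): how the worker threads below combine
 * the two helpers above. Each thread builds its poll set with the quit pipe
 * included, then checks every returned event against the quit pipe first.
 */
struct lttng_poll_event events;
int i, ret;

ret = sessiond_set_thread_pollset(&events, 2);
if (ret < 0) {
	goto error;
}

while (1) {
	ret = lttng_poll_wait(&events, -1);
	if (ret < 0) {
		goto error;
	}
	for (i = 0; i < ret; i++) {
		if (sessiond_check_thread_quit_pipe(LTTNG_POLL_GETFD(&events, i),
				LTTNG_POLL_GETEV(&events, i))) {
			goto exit;	/* Quit pipe triggered: tear down the thread. */
		}
		/* ... handle the thread's own fds here ... */
	}
}
#endif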

/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	struct group *grp;

	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
	if (!grp) {
		return -1;
	} else {
		return grp->gr_gid;
	}
}

/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}
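
#if 0
/*
 * Note: on Linux with glibc >= 2.9, the pipe() + fcntl(FD_CLOEXEC) sequence
 * above can be collapsed into a single race-free call. Sketch only, assuming
 * pipe2() is available on the target platform.
 */
ret = pipe2(thread_quit_pipe, O_CLOEXEC);
#endif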

/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}

/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}

/*
 * Clean up the daemon.
 */
static void cleanup(void)
{
	int ret;
	char *cmd = NULL;
	struct ltt_session *sess, *stmp;

	DBG("Cleaning up");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}
	free(cmd);
	free(rundir);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Clean up ALL sessions */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	/* <fun> */
	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
	/* </fun> */
}

/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}

/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call, which involves mmap, shm and futex. */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}

/*
 * Set up the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return the payload size on success or a negative value on error.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
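
#if 0
/*
 * Sketch (not compiled): how a caller fills the payload that follows the
 * lttcomm_lttng_msg header. The pointer arithmetic mirrors the size
 * computation in setup_lttng_msg(); 'data' and 'size' are hypothetical.
 */
char *payload = (char *) cmd_ctx->llm + sizeof(struct lttcomm_lttng_msg);

memcpy(payload, data, size);
#endif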

/*
 * Update the kernel poll set with all the channel fds available over all
 * tracing sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd matching 'fd' across all tracing sessions. When found,
 * check for new channel streams and send those stream fds to the kernel
 * consumer.
 *
 * Useful for the CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}
				/* Update the stream global counter */
				ksess->stream_count_global += ret;

				/*
				 * Have we already sent fds to the consumer? If yes, it
				 * means that tracing is started so it is safe to send
				 * our updated stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					rcu_read_lock();
					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess,
								session->output_traces ? 1 : 0);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							rcu_read_unlock();
							goto error;
						}
					}
					rcu_read_unlock();
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}

/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(HEALTH_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which could
	 * free non-NULL pointers, so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll with an infinite timeout */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				do {
					ret = read(kernel_poll_pipe[0], &tmp, 1);
				} while (ret < 0 && errno == EINTR);
				/*
				 * The return value is not checked here; if this pipe
				 * sees any activity, an update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
					/*
					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
					 * and unregister kernel stream at this point.
					 */
				}
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister();
	DBG("Kernel thread dying");
	return NULL;
}

/*
 * Signal the consumer data's pthread condition so that the thread waiting on
 * it can proceed.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value; it's the
	 * waiter's job to correctly interpret this condition variable
	 * associated with the consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
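
#if 0
/*
 * Illustrative sketch (not compiled): the matching waiter side. The
 * predicate loop guards against spurious wakeups; spawn_consumer_thread()
 * below implements a timed variant of this wait.
 */
pthread_mutex_lock(&data->cond_mutex);
while (data->consumer_thread_is_ready == 0) {
	pthread_cond_wait(&data->cond, &data->cond_mutex);
}
pthread_mutex_unlock(&data->cond_mutex);
#endif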

/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(HEALTH_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		/* Connect both sockets: command and metadata. */
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0
				|| consumer_data->metadata_fd < 0) {
			PERROR("consumer connect cmd socket");
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			goto error;
		}
		consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
		/* Create metadata socket lock. */
		consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
		if (consumer_data->metadata_sock.lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = -1;
			goto error;
		}
		pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
		DBG("Consumer metadata socket ready (fd: %d)",
				consumer_data->metadata_fd);
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the consumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	while (1) {
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
				break;
			} else {
				ERR("Unknown pollfd");
				goto error;
			}
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them, so get exclusive access, which will abort
	 * all other consumer commands by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to error */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}

	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	pthread_mutex_destroy(consumer_data->metadata_sock.lock);
	free(consumer_data->metadata_sock.lock);

	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
	DBG("consumer thread cleanup completed");

	return NULL;
}

/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(HEALTH_TYPE_APP_MANAGE);

	if (testpoint(thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					int sock;

					/* Empty pipe */
					do {
						ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 || ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * We only monitor the error events of the socket. This
					 * thread does not handle any incoming data from UST
					 * (POLLIN).
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					(void) lttcomm_setsockopt_rcv_timeout(sock,
							app_socket_timeout * 1000);
					(void) lttcomm_setsockopt_snd_timeout(sock,
							app_socket_timeout * 1000);

					DBG("Apps with sock %d added to poll set", sock);

					health_code_update();

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0; else return a negative value corresponding to the
 * errno of the failed write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	int ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function when the thread is in a shutdown state.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	do {
		ret = write(fd, &sock, sizeof(sock));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return ret;
}

/*
 * Sanitize the wait queue of the dispatch registration thread, i.e. remove
 * invalid nodes from it. This avoids memory leaks in the case where the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				break;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}

/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	health_register(HEALTH_TYPE_APP_REG_DISPATCH);

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before
			 * receiving the notify socket. This cleans the list in order to
			 * avoid memory leaks from notify sockets that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set
				 * the notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage, the received socket is
				 * basically useless, so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to
				 * the registration done message, no thread can see the
				 * application and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this
					 * is not an internal error of this thread, thus we set
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app->sock);

				/*
				 * Even if the application socket has been closed, send the
				 * app to the thread and unregistration will take place at
				 * that place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this
					 * is not an internal error of this thread, thus we set
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
	return NULL;
}

/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(HEALTH_TYPE_APP_REG);

	if (testpoint(thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problems for already\n"
			"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock-free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}

	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	health_unregister();

	return NULL;
}

/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for the pthread_cond_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid
	 * incrementing the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why
	 * we set ret to 0 in order to make sure at least one round of the loop
	 * is done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can
	 * NOT be returned but the pthread_cond(3), from the glibc-doc, says that
	 * it is possible. This loop does not take any chances and works with
	 * both of them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	if (ret != 0) {
		errno = ret;
		if (ret == ETIMEDOUT) {
			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready."
					" Killing it");
			ret = pthread_cancel(consumer_data->thread);
			if (ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}

/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		int ret;
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
		return pthread_join(consumer_data->thread, &status);
	} else {
		return 0;
	}
}
2072
2073 /*
2074 * Fork and exec a consumer daemon (consumerd).
2075 *
2076 * Return pid if successful else -1.
2077 */
2078 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2079 {
2080 int ret;
2081 pid_t pid;
2082 const char *consumer_to_use;
2083 const char *verbosity;
2084 struct stat st;
2085
2086 DBG("Spawning consumerd");
2087
2088 pid = fork();
2089 if (pid == 0) {
2090 /*
2091 * Exec consumerd.
2092 */
2093 if (opt_verbose_consumer) {
2094 verbosity = "--verbose";
2095 } else {
2096 verbosity = "--quiet";
2097 }
2098 switch (consumer_data->type) {
2099 case LTTNG_CONSUMER_KERNEL:
/*
 * Find out which consumerd to execute. We will first try the
 * 64-bit path, then the sessiond's installation directory, and
 * finally fall back on the 32-bit one.
 */
2105 DBG3("Looking for a kernel consumer at these locations:");
2106 DBG3(" 1) %s", consumerd64_bin);
2107 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2108 DBG3(" 3) %s", consumerd32_bin);
2109 if (stat(consumerd64_bin, &st) == 0) {
2110 DBG3("Found location #1");
2111 consumer_to_use = consumerd64_bin;
2112 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2113 DBG3("Found location #2");
2114 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2115 } else if (stat(consumerd32_bin, &st) == 0) {
2116 DBG3("Found location #3");
2117 consumer_to_use = consumerd32_bin;
2118 } else {
2119 DBG("Could not find any valid consumerd executable");
2120 break;
2121 }
2122 DBG("Using kernel consumer at: %s", consumer_to_use);
2123 execl(consumer_to_use,
2124 "lttng-consumerd", verbosity, "-k",
2125 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2126 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2127 NULL);
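/*
 * execl(3) only returns on failure, with -1 and errno set; on success the
 * child process image is replaced by consumerd and nothing below this point
 * runs.
 */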
2128 break;
2129 case LTTNG_CONSUMER64_UST:
2130 {
2131 char *tmpnew = NULL;
2132
2133 if (consumerd64_libdir[0] != '\0') {
2134 char *tmp;
2135 size_t tmplen;
2136
2137 tmp = getenv("LD_LIBRARY_PATH");
2138 if (!tmp) {
2139 tmp = "";
2140 }
2141 tmplen = strlen("LD_LIBRARY_PATH=")
2142 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2143 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2144 if (!tmpnew) {
2145 ret = -ENOMEM;
2146 goto error;
2147 }
2148 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2149 strcat(tmpnew, consumerd64_libdir);
2150 if (tmp[0] != '\0') {
2151 strcat(tmpnew, ":");
2152 strcat(tmpnew, tmp);
2153 }
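/*
 * putenv(3) does not copy its argument: on success, tmpnew becomes part of
 * the environment (e.g. a string such as "LD_LIBRARY_PATH=/usr/lib64:<old
 * value>", path given here only as an illustration), so it must only be
 * freed once the environment can no longer be used, i.e. after execl() has
 * failed below.
 */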
2154 ret = putenv(tmpnew);
2155 if (ret) {
2156 ret = -errno;
2157 free(tmpnew);
2158 goto error;
2159 }
2160 }
2161 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2162 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2163 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2164 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2165 NULL);
2166 if (consumerd64_libdir[0] != '\0') {
2167 free(tmpnew);
2168 }
2169 if (ret) {
2170 goto error;
2171 }
2172 break;
2173 }
2174 case LTTNG_CONSUMER32_UST:
2175 {
2176 char *tmpnew = NULL;
2177
2178 if (consumerd32_libdir[0] != '\0') {
2179 char *tmp;
2180 size_t tmplen;
2181
2182 tmp = getenv("LD_LIBRARY_PATH");
2183 if (!tmp) {
2184 tmp = "";
2185 }
2186 tmplen = strlen("LD_LIBRARY_PATH=")
2187 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2188 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2189 if (!tmpnew) {
2190 ret = -ENOMEM;
2191 goto error;
2192 }
2193 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2194 strcat(tmpnew, consumerd32_libdir);
2195 if (tmp[0] != '\0') {
2196 strcat(tmpnew, ":");
2197 strcat(tmpnew, tmp);
2198 }
2199 ret = putenv(tmpnew);
2200 if (ret) {
2201 ret = -errno;
2202 free(tmpnew);
2203 goto error;
2204 }
2205 }
2206 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2207 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2208 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2209 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2210 NULL);
2211 if (consumerd32_libdir[0] != '\0') {
2212 free(tmpnew);
2213 }
2214 if (ret) {
2215 goto error;
2216 }
2217 break;
2218 }
2219 default:
ERR("unknown consumer type");
2221 exit(EXIT_FAILURE);
2222 }
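/*
 * Reaching this point means no kernel consumerd binary was found or its
 * execl() call failed; either way the child must not keep running.
 */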
2223 if (errno != 0) {
2224 PERROR("kernel start consumer exec");
2225 }
2226 exit(EXIT_FAILURE);
2227 } else if (pid > 0) {
2228 ret = pid;
2229 } else {
2230 PERROR("start consumer fork");
2231 ret = -errno;
2232 }
2233 error:
2234 return ret;
2235 }
2236
/*
 * Spawn the consumerd daemon process and the session daemon thread that
 * manages it.
 */
2240 static int start_consumerd(struct consumer_data *consumer_data)
2241 {
2242 int ret;
2243
/*
 * Set the listen() state on the socket since there is a possible race
 * between the exec() of the consumer daemon and this call if placed in the
 * consumer thread. See bug #366 for more details.
 */
2249 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2250 if (ret < 0) {
2251 goto error;
2252 }
2253
2254 pthread_mutex_lock(&consumer_data->pid_mutex);
2255 if (consumer_data->pid != 0) {
2256 pthread_mutex_unlock(&consumer_data->pid_mutex);
2257 goto end;
2258 }
2259
2260 ret = spawn_consumerd(consumer_data);
2261 if (ret < 0) {
2262 ERR("Spawning consumerd failed");
2263 pthread_mutex_unlock(&consumer_data->pid_mutex);
2264 goto error;
2265 }
2266
2267 /* Setting up the consumer_data pid */
2268 consumer_data->pid = ret;
2269 DBG2("Consumer pid %d", consumer_data->pid);
2270 pthread_mutex_unlock(&consumer_data->pid_mutex);
2271
2272 DBG2("Spawning consumer control thread");
2273 ret = spawn_consumer_thread(consumer_data);
2274 if (ret < 0) {
2275 ERR("Fatal error spawning consumer control thread");
2276 goto error;
2277 }
2278
2279 end:
2280 return 0;
2281
2282 error:
2283 /* Cleanup already created sockets on error. */
2284 if (consumer_data->err_sock >= 0) {
2285 int err;
2286
2287 err = close(consumer_data->err_sock);
2288 if (err < 0) {
2289 PERROR("close consumer data error socket");
2290 }
2291 }
2292 return ret;
2293 }
2294
2295 /*
* Compute health status of the consumers. If one of them is in a bad
* state, 0 is returned.
2298 */
2299 static int check_consumer_health(void)
2300 {
2301 int ret;
2302
2303 ret = health_check_state(HEALTH_TYPE_CONSUMER);
2304
2305 DBG3("Health consumer check %d", ret);
2306
2307 return ret;
2308 }
2309
2310 /*
2311 * Setup necessary data for kernel tracer action.
2312 */
2313 static int init_kernel_tracer(void)
2314 {
2315 int ret;
2316
2317 /* Modprobe lttng kernel modules */
2318 ret = modprobe_lttng_control();
2319 if (ret < 0) {
2320 goto error;
2321 }
2322
/* Open the lttng proc file */
2324 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2325 if (kernel_tracer_fd < 0) {
2326 DBG("Failed to open %s", module_proc_lttng);
2327 ret = -1;
2328 goto error_open;
2329 }
2330
2331 /* Validate kernel version */
2332 ret = kernel_validate_version(kernel_tracer_fd);
2333 if (ret < 0) {
2334 goto error_version;
2335 }
2336
2337 ret = modprobe_lttng_data();
2338 if (ret < 0) {
2339 goto error_modules;
2340 }
2341
2342 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2343 return 0;
2344
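/*
 * Each error label below undoes only what had succeeded before the failure
 * point; note that error_modules falls through to error_open so the control
 * modules are also removed in that case.
 */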
2345 error_version:
2346 modprobe_remove_lttng_control();
2347 ret = close(kernel_tracer_fd);
2348 if (ret) {
2349 PERROR("close");
2350 }
2351 kernel_tracer_fd = -1;
2352 return LTTNG_ERR_KERN_VERSION;
2353
2354 error_modules:
2355 ret = close(kernel_tracer_fd);
2356 if (ret) {
2357 PERROR("close");
2358 }
2359
2360 error_open:
2361 modprobe_remove_lttng_control();
2362
2363 error:
2364 WARN("No kernel tracer available");
2365 kernel_tracer_fd = -1;
2366 if (!is_root) {
2367 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2368 } else {
2369 return LTTNG_ERR_KERN_NA;
2370 }
2371 }
2372
2373
2374 /*
2375 * Copy consumer output from the tracing session to the domain session. The
2376 * function also applies the right modification on a per domain basis for the
2377 * trace files destination directory.
2378 *
2379 * Should *NOT* be called with RCU read-side lock held.
2380 */
2381 static int copy_session_consumer(int domain, struct ltt_session *session)
2382 {
2383 int ret;
2384 const char *dir_name;
2385 struct consumer_output *consumer;
2386
2387 assert(session);
2388 assert(session->consumer);
2389
2390 switch (domain) {
2391 case LTTNG_DOMAIN_KERNEL:
2392 DBG3("Copying tracing session consumer output in kernel session");
/*
 * XXX: We should audit the session creation and what this function
 * does "extra" in order to avoid a destroy, since this function is
 * only used in the domain session creation paths (kernel and UST).
 */
2399 if (session->kernel_session->consumer) {
2400 consumer_destroy_output(session->kernel_session->consumer);
2401 }
2402 session->kernel_session->consumer =
2403 consumer_copy_output(session->consumer);
2404 /* Ease our life a bit for the next part */
2405 consumer = session->kernel_session->consumer;
2406 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2407 break;
2408 case LTTNG_DOMAIN_UST:
2409 DBG3("Copying tracing session consumer output in UST session");
2410 if (session->ust_session->consumer) {
2411 consumer_destroy_output(session->ust_session->consumer);
2412 }
2413 session->ust_session->consumer =
2414 consumer_copy_output(session->consumer);
2415 /* Ease our life a bit for the next part */
2416 consumer = session->ust_session->consumer;
2417 dir_name = DEFAULT_UST_TRACE_DIR;
2418 break;
2419 default:
2420 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2421 goto error;
2422 }
2423
2424 /* Append correct directory to subdir */
2425 strncat(consumer->subdir, dir_name,
2426 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
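/*
 * The size argument above is the usual safe-append bound: strncat() copies
 * at most that many bytes and always NUL-terminates, so the subdir buffer
 * cannot overflow even if dir_name is too long to fit entirely.
 */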
2427 DBG3("Copy session consumer subdir %s", consumer->subdir);
2428
2429 ret = LTTNG_OK;
2430
2431 error:
2432 return ret;
2433 }
2434
2435 /*
* Create a UST session and add it to the session ust list.
2437 *
2438 * Should *NOT* be called with RCU read-side lock held.
2439 */
2440 static int create_ust_session(struct ltt_session *session,
2441 struct lttng_domain *domain)
2442 {
2443 int ret;
2444 struct ltt_ust_session *lus = NULL;
2445
2446 assert(session);
2447 assert(domain);
2448 assert(session->consumer);
2449
2450 switch (domain->type) {
2451 case LTTNG_DOMAIN_UST:
2452 break;
2453 default:
2454 ERR("Unknown UST domain on create session %d", domain->type);
2455 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2456 goto error;
2457 }
2458
2459 DBG("Creating UST session");
2460
2461 lus = trace_ust_create_session(session->id);
2462 if (lus == NULL) {
2463 ret = LTTNG_ERR_UST_SESS_FAIL;
2464 goto error;
2465 }
2466
2467 lus->uid = session->uid;
2468 lus->gid = session->gid;
2469 lus->output_traces = session->output_traces;
2470 lus->snapshot_mode = session->snapshot_mode;
2471 session->ust_session = lus;
2472
2473 /* Copy session output to the newly created UST session */
2474 ret = copy_session_consumer(domain->type, session);
2475 if (ret != LTTNG_OK) {
2476 goto error;
2477 }
2478
2479 return LTTNG_OK;
2480
2481 error:
2482 free(lus);
2483 session->ust_session = NULL;
2484 return ret;
2485 }
2486
2487 /*
2488 * Create a kernel tracer session then create the default channel.
2489 */
2490 static int create_kernel_session(struct ltt_session *session)
2491 {
2492 int ret;
2493
2494 DBG("Creating kernel session");
2495
2496 ret = kernel_create_session(session, kernel_tracer_fd);
2497 if (ret < 0) {
2498 ret = LTTNG_ERR_KERN_SESS_FAIL;
2499 goto error;
2500 }
2501
2502 /* Code flow safety */
2503 assert(session->kernel_session);
2504
2505 /* Copy session output to the newly created Kernel session */
2506 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2507 if (ret != LTTNG_OK) {
2508 goto error;
2509 }
2510
2511 /* Create directory(ies) on local filesystem. */
2512 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2513 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2514 ret = run_as_mkdir_recursive(
2515 session->kernel_session->consumer->dst.trace_path,
2516 S_IRWXU | S_IRWXG, session->uid, session->gid);
2517 if (ret < 0) {
2518 if (ret != -EEXIST) {
2519 ERR("Trace directory creation error");
2520 goto error;
2521 }
2522 }
2523 }
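/*
 * Note that an already existing trace directory (-EEXIST) is tolerated above
 * and that the directory is created with the session's uid/gid so its
 * ownership matches the tracing user.
 */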
2524
2525 session->kernel_session->uid = session->uid;
2526 session->kernel_session->gid = session->gid;
2527 session->kernel_session->output_traces = session->output_traces;
2528 session->kernel_session->snapshot_mode = session->snapshot_mode;
2529
2530 return LTTNG_OK;
2531
2532 error:
2533 trace_kernel_destroy_session(session->kernel_session);
2534 session->kernel_session = NULL;
2535 return ret;
2536 }
2537
2538 /*
* Count the number of sessions accessible by the given uid/gid.
2540 */
2541 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2542 {
2543 unsigned int i = 0;
2544 struct ltt_session *session;
2545
DBG("Counting number of available sessions for UID %d GID %d",
2547 uid, gid);
2548 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2549 /*
2550 * Only list the sessions the user can control.
2551 */
2552 if (!session_access_ok(session, uid, gid)) {
2553 continue;
2554 }
2555 i++;
2556 }
2557 return i;
2558 }
2559
2560 /*
2561 * Process the command requested by the lttng client within the command
* context structure. This function makes sure that the return structure (llm)
2563 * is set and ready for transmission before returning.
2564 *
2565 * Return any error encountered or 0 for success.
2566 *
2567 * "sock" is only used for special-case var. len data.
2568 *
2569 * Should *NOT* be called with RCU read-side lock held.
2570 */
2571 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2572 int *sock_error)
2573 {
2574 int ret = LTTNG_OK;
2575 int need_tracing_session = 1;
2576 int need_domain;
2577
2578 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2579
2580 *sock_error = 0;
2581
2582 switch (cmd_ctx->lsm->cmd_type) {
2583 case LTTNG_CREATE_SESSION:
2584 case LTTNG_CREATE_SESSION_SNAPSHOT:
2585 case LTTNG_DESTROY_SESSION:
2586 case LTTNG_LIST_SESSIONS:
2587 case LTTNG_LIST_DOMAINS:
2588 case LTTNG_START_TRACE:
2589 case LTTNG_STOP_TRACE:
2590 case LTTNG_DATA_PENDING:
2591 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2592 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2593 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2594 case LTTNG_SNAPSHOT_RECORD:
2595 need_domain = 0;
2596 break;
2597 default:
2598 need_domain = 1;
2599 }
2600
2601 if (opt_no_kernel && need_domain
2602 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2603 if (!is_root) {
2604 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2605 } else {
2606 ret = LTTNG_ERR_KERN_NA;
2607 }
2608 goto error;
2609 }
2610
2611 /* Deny register consumer if we already have a spawned consumer. */
2612 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2613 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2614 if (kconsumer_data.pid > 0) {
2615 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2616 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2617 goto error;
2618 }
2619 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2620 }
2621
2622 /*
* Check for commands that don't need to allocate a returned payload. We do
2624 * this here so we don't have to make the call for no payload at each
2625 * command.
2626 */
switch (cmd_ctx->lsm->cmd_type) {
2628 case LTTNG_LIST_SESSIONS:
2629 case LTTNG_LIST_TRACEPOINTS:
2630 case LTTNG_LIST_TRACEPOINT_FIELDS:
2631 case LTTNG_LIST_DOMAINS:
2632 case LTTNG_LIST_CHANNELS:
2633 case LTTNG_LIST_EVENTS:
2634 break;
2635 default:
2636 /* Setup lttng message with no payload */
2637 ret = setup_lttng_msg(cmd_ctx, 0);
2638 if (ret < 0) {
2639 /* This label does not try to unlock the session */
2640 goto init_setup_error;
2641 }
2642 }
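/*
 * For the list commands above, the reply payload size depends on the data
 * gathered, so setup_lttng_msg() is deferred until that size is known; every
 * other command gets an empty payload allocated right away.
 */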
2643
2644 /* Commands that DO NOT need a session. */
2645 switch (cmd_ctx->lsm->cmd_type) {
2646 case LTTNG_CREATE_SESSION:
2647 case LTTNG_CREATE_SESSION_SNAPSHOT:
2648 case LTTNG_CALIBRATE:
2649 case LTTNG_LIST_SESSIONS:
2650 case LTTNG_LIST_TRACEPOINTS:
2651 case LTTNG_LIST_TRACEPOINT_FIELDS:
2652 need_tracing_session = 0;
2653 break;
2654 default:
2655 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2656 /*
2657 * We keep the session list lock across _all_ commands
2658 * for now, because the per-session lock does not
2659 * handle teardown properly.
2660 */
2661 session_lock_list();
2662 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2663 if (cmd_ctx->session == NULL) {
2664 ret = LTTNG_ERR_SESS_NOT_FOUND;
2665 goto error;
2666 } else {
2667 /* Acquire lock for the session */
2668 session_lock(cmd_ctx->session);
2669 }
2670 break;
2671 }
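/*
 * From this point, for commands needing a tracing session, both the session
 * list lock and the per-session lock are held; they are released at the
 * error/setup_error labels at the end of this function.
 */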
2672
2673 if (!need_domain) {
2674 goto skip_domain;
2675 }
2676
2677 /*
2678 * Check domain type for specific "pre-action".
2679 */
2680 switch (cmd_ctx->lsm->domain.type) {
2681 case LTTNG_DOMAIN_KERNEL:
2682 if (!is_root) {
2683 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2684 goto error;
2685 }
2686
2687 /* Kernel tracer check */
2688 if (kernel_tracer_fd == -1) {
2689 /* Basically, load kernel tracer modules */
2690 ret = init_kernel_tracer();
2691 if (ret != 0) {
2692 goto error;
2693 }
2694 }
2695
2696 /* Consumer is in an ERROR state. Report back to client */
2697 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2698 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2699 goto error;
2700 }
2701
2702 /* Need a session for kernel command */
2703 if (need_tracing_session) {
2704 if (cmd_ctx->session->kernel_session == NULL) {
2705 ret = create_kernel_session(cmd_ctx->session);
2706 if (ret < 0) {
2707 ret = LTTNG_ERR_KERN_SESS_FAIL;
2708 goto error;
2709 }
2710 }
2711
2712 /* Start the kernel consumer daemon */
2713 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2714 if (kconsumer_data.pid == 0 &&
2715 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2716 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2717 ret = start_consumerd(&kconsumer_data);
2718 if (ret < 0) {
2719 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2720 goto error;
2721 }
2722 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2723 } else {
2724 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2725 }
2726
2727 /*
* The consumer was just spawned, so we need to add the socket to
* the consumer output of the session if one exists.
2730 */
2731 ret = consumer_create_socket(&kconsumer_data,
2732 cmd_ctx->session->kernel_session->consumer);
2733 if (ret < 0) {
2734 goto error;
2735 }
2736 }
2737
2738 break;
2739 case LTTNG_DOMAIN_UST:
2740 {
2741 if (!ust_app_supported()) {
2742 ret = LTTNG_ERR_NO_UST;
2743 goto error;
2744 }
2745 /* Consumer is in an ERROR state. Report back to client */
2746 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2747 ret = LTTNG_ERR_NO_USTCONSUMERD;
2748 goto error;
2749 }
2750
2751 if (need_tracing_session) {
2752 /* Create UST session if none exist. */
2753 if (cmd_ctx->session->ust_session == NULL) {
2754 ret = create_ust_session(cmd_ctx->session,
2755 &cmd_ctx->lsm->domain);
2756 if (ret != LTTNG_OK) {
2757 goto error;
2758 }
2759 }
2760
2761 /* Start the UST consumer daemons */
2762 /* 64-bit */
2763 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2764 if (consumerd64_bin[0] != '\0' &&
2765 ustconsumer64_data.pid == 0 &&
2766 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2767 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2768 ret = start_consumerd(&ustconsumer64_data);
2769 if (ret < 0) {
2770 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2771 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2772 goto error;
2773 }
2774
2775 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2776 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2777 } else {
2778 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2779 }
2780
2781 /*
2782 * Setup socket for consumer 64 bit. No need for atomic access
2783 * since it was set above and can ONLY be set in this thread.
2784 */
2785 ret = consumer_create_socket(&ustconsumer64_data,
2786 cmd_ctx->session->ust_session->consumer);
2787 if (ret < 0) {
2788 goto error;
2789 }
2790
2791 /* 32-bit */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
if (consumerd32_bin[0] != '\0' &&
2793 ustconsumer32_data.pid == 0 &&
2794 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2795 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2796 ret = start_consumerd(&ustconsumer32_data);
2797 if (ret < 0) {
2798 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2799 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2800 goto error;
2801 }
2802
2803 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2804 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2805 } else {
2806 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2807 }
2808
2809 /*
* Setup socket for consumer 32 bit. No need for atomic access
2811 * since it was set above and can ONLY be set in this thread.
2812 */
2813 ret = consumer_create_socket(&ustconsumer32_data,
2814 cmd_ctx->session->ust_session->consumer);
2815 if (ret < 0) {
2816 goto error;
2817 }
2818 }
2819 break;
2820 }
2821 default:
2822 break;
2823 }
2824 skip_domain:
2825
2826 /* Validate consumer daemon state when start/stop trace command */
2827 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2828 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2829 switch (cmd_ctx->lsm->domain.type) {
2830 case LTTNG_DOMAIN_UST:
2831 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2832 ret = LTTNG_ERR_NO_USTCONSUMERD;
2833 goto error;
2834 }
2835 break;
2836 case LTTNG_DOMAIN_KERNEL:
2837 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2838 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2839 goto error;
2840 }
2841 break;
2842 }
2843 }
2844
2845 /*
2846 * Check that the UID or GID match that of the tracing session.
2847 * The root user can interact with all sessions.
2848 */
2849 if (need_tracing_session) {
2850 if (!session_access_ok(cmd_ctx->session,
2851 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2852 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2853 ret = LTTNG_ERR_EPERM;
2854 goto error;
2855 }
2856 }
2857
2858 /*
2859 * Send relayd information to consumer as soon as we have a domain and a
2860 * session defined.
2861 */
2862 if (cmd_ctx->session && need_domain) {
2863 /*
2864 * Setup relayd if not done yet. If the relayd information was already
2865 * sent to the consumer, this call will gracefully return.
2866 */
2867 ret = cmd_setup_relayd(cmd_ctx->session);
2868 if (ret != LTTNG_OK) {
2869 goto error;
2870 }
2871 }
2872
2873 /* Process by command type */
2874 switch (cmd_ctx->lsm->cmd_type) {
2875 case LTTNG_ADD_CONTEXT:
2876 {
2877 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2878 cmd_ctx->lsm->u.context.channel_name,
2879 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
2880 break;
2881 }
2882 case LTTNG_DISABLE_CHANNEL:
2883 {
2884 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2885 cmd_ctx->lsm->u.disable.channel_name);
2886 break;
2887 }
2888 case LTTNG_DISABLE_EVENT:
2889 {
2890 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2891 cmd_ctx->lsm->u.disable.channel_name,
2892 cmd_ctx->lsm->u.disable.name);
2893 break;
2894 }
2895 case LTTNG_DISABLE_ALL_EVENT:
2896 {
2897 DBG("Disabling all events");
2898
2899 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2900 cmd_ctx->lsm->u.disable.channel_name);
2901 break;
2902 }
2903 case LTTNG_ENABLE_CHANNEL:
2904 {
2905 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
2906 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
2907 break;
2908 }
2909 case LTTNG_ENABLE_EVENT:
2910 {
2911 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
2912 cmd_ctx->lsm->u.enable.channel_name,
2913 &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
2914 break;
2915 }
2916 case LTTNG_ENABLE_ALL_EVENT:
2917 {
2918 DBG("Enabling all events");
2919
2920 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
2921 cmd_ctx->lsm->u.enable.channel_name,
2922 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
2923 break;
2924 }
2925 case LTTNG_LIST_TRACEPOINTS:
2926 {
2927 struct lttng_event *events;
2928 ssize_t nb_events;
2929
2930 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
2931 if (nb_events < 0) {
2932 /* Return value is a negative lttng_error_code. */
2933 ret = -nb_events;
2934 goto error;
2935 }
2936
2937 /*
2938 * Setup lttng message with payload size set to the event list size in
2939 * bytes and then copy list into the llm payload.
2940 */
2941 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
2942 if (ret < 0) {
2943 free(events);
2944 goto setup_error;
2945 }
2946
2947 /* Copy event list into message payload */
2948 memcpy(cmd_ctx->llm->payload, events,
2949 sizeof(struct lttng_event) * nb_events);
2950
2951 free(events);
2952
2953 ret = LTTNG_OK;
2954 break;
2955 }
2956 case LTTNG_LIST_TRACEPOINT_FIELDS:
2957 {
2958 struct lttng_event_field *fields;
2959 ssize_t nb_fields;
2960
2961 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
2962 &fields);
2963 if (nb_fields < 0) {
2964 /* Return value is a negative lttng_error_code. */
2965 ret = -nb_fields;
2966 goto error;
2967 }
2968
2969 /*
* Setup lttng message with payload size set to the field list size in
2971 * bytes and then copy list into the llm payload.
2972 */
2973 ret = setup_lttng_msg(cmd_ctx,
2974 sizeof(struct lttng_event_field) * nb_fields);
2975 if (ret < 0) {
2976 free(fields);
2977 goto setup_error;
2978 }
2979
/* Copy field list into message payload */
2981 memcpy(cmd_ctx->llm->payload, fields,
2982 sizeof(struct lttng_event_field) * nb_fields);
2983
2984 free(fields);
2985
2986 ret = LTTNG_OK;
2987 break;
2988 }
2989 case LTTNG_SET_CONSUMER_URI:
2990 {
2991 size_t nb_uri, len;
2992 struct lttng_uri *uris;
2993
2994 nb_uri = cmd_ctx->lsm->u.uri.size;
2995 len = nb_uri * sizeof(struct lttng_uri);
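/*
 * Variable-length payload: the client first sent the session message
 * announcing u.uri.size URIs; the URI array itself is received below on the
 * same socket.
 */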
2996
2997 if (nb_uri == 0) {
2998 ret = LTTNG_ERR_INVALID;
2999 goto error;
3000 }
3001
3002 uris = zmalloc(len);
3003 if (uris == NULL) {
3004 ret = LTTNG_ERR_FATAL;
3005 goto error;
3006 }
3007
3008 /* Receive variable len data */
3009 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3010 ret = lttcomm_recv_unix_sock(sock, uris, len);
3011 if (ret <= 0) {
3012 DBG("No URIs received from client... continuing");
3013 *sock_error = 1;
3014 ret = LTTNG_ERR_SESSION_FAIL;
3015 free(uris);
3016 goto error;
3017 }
3018
3019 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3020 nb_uri, uris);
3021 if (ret != LTTNG_OK) {
3022 free(uris);
3023 goto error;
3024 }
3025
3026 /*
3027 * XXX: 0 means that this URI should be applied on the session. Should
* be a DOMAIN enum.
3029 */
3030 if (cmd_ctx->lsm->domain.type == 0) {
3031 /* Add the URI for the UST session if a consumer is present. */
3032 if (cmd_ctx->session->ust_session &&
3033 cmd_ctx->session->ust_session->consumer) {
3034 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3035 nb_uri, uris);
3036 } else if (cmd_ctx->session->kernel_session &&
3037 cmd_ctx->session->kernel_session->consumer) {
3038 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3039 cmd_ctx->session, nb_uri, uris);
3040 }
3041 }
3042
3043 free(uris);
3044
3045 break;
3046 }
3047 case LTTNG_START_TRACE:
3048 {
3049 ret = cmd_start_trace(cmd_ctx->session);
3050 break;
3051 }
3052 case LTTNG_STOP_TRACE:
3053 {
3054 ret = cmd_stop_trace(cmd_ctx->session);
3055 break;
3056 }
3057 case LTTNG_CREATE_SESSION:
3058 {
3059 size_t nb_uri, len;
3060 struct lttng_uri *uris = NULL;
3061
3062 nb_uri = cmd_ctx->lsm->u.uri.size;
3063 len = nb_uri * sizeof(struct lttng_uri);
3064
3065 if (nb_uri > 0) {
3066 uris = zmalloc(len);
3067 if (uris == NULL) {
3068 ret = LTTNG_ERR_FATAL;
3069 goto error;
3070 }
3071
3072 /* Receive variable len data */
3073 DBG("Waiting for %zu URIs from client ...", nb_uri);
3074 ret = lttcomm_recv_unix_sock(sock, uris, len);
3075 if (ret <= 0) {
3076 DBG("No URIs received from client... continuing");
3077 *sock_error = 1;
3078 ret = LTTNG_ERR_SESSION_FAIL;
3079 free(uris);
3080 goto error;
3081 }
3082
3083 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3084 DBG("Creating session with ONE network URI is a bad call");
3085 ret = LTTNG_ERR_SESSION_FAIL;
3086 free(uris);
3087 goto error;
3088 }
3089 }
3090
3091 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3092 &cmd_ctx->creds);
3093
3094 free(uris);
3095
3096 break;
3097 }
3098 case LTTNG_DESTROY_SESSION:
3099 {
3100 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3101
3102 /* Set session to NULL so we do not unlock it after free. */
3103 cmd_ctx->session = NULL;
3104 break;
3105 }
3106 case LTTNG_LIST_DOMAINS:
3107 {
3108 ssize_t nb_dom;
3109 struct lttng_domain *domains;
3110
3111 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3112 if (nb_dom < 0) {
3113 /* Return value is a negative lttng_error_code. */
3114 ret = -nb_dom;
3115 goto error;
3116 }
3117
3118 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3119 if (ret < 0) {
3120 free(domains);
3121 goto setup_error;
3122 }
3123
/* Copy domain list into message payload */
3125 memcpy(cmd_ctx->llm->payload, domains,
3126 nb_dom * sizeof(struct lttng_domain));
3127
3128 free(domains);
3129
3130 ret = LTTNG_OK;
3131 break;
3132 }
3133 case LTTNG_LIST_CHANNELS:
3134 {
3135 int nb_chan;
3136 struct lttng_channel *channels;
3137
3138 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3139 cmd_ctx->session, &channels);
3140 if (nb_chan < 0) {
3141 /* Return value is a negative lttng_error_code. */
3142 ret = -nb_chan;
3143 goto error;
3144 }
3145
3146 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3147 if (ret < 0) {
3148 free(channels);
3149 goto setup_error;
3150 }
3151
/* Copy channel list into message payload */
3153 memcpy(cmd_ctx->llm->payload, channels,
3154 nb_chan * sizeof(struct lttng_channel));
3155
3156 free(channels);
3157
3158 ret = LTTNG_OK;
3159 break;
3160 }
3161 case LTTNG_LIST_EVENTS:
3162 {
3163 ssize_t nb_event;
3164 struct lttng_event *events = NULL;
3165
3166 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3167 cmd_ctx->lsm->u.list.channel_name, &events);
3168 if (nb_event < 0) {
3169 /* Return value is a negative lttng_error_code. */
3170 ret = -nb_event;
3171 goto error;
3172 }
3173
3174 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3175 if (ret < 0) {
3176 free(events);
3177 goto setup_error;
3178 }
3179
3180 /* Copy event list into message payload */
3181 memcpy(cmd_ctx->llm->payload, events,
3182 nb_event * sizeof(struct lttng_event));
3183
3184 free(events);
3185
3186 ret = LTTNG_OK;
3187 break;
3188 }
3189 case LTTNG_LIST_SESSIONS:
3190 {
3191 unsigned int nr_sessions;
3192
3193 session_lock_list();
3194 nr_sessions = lttng_sessions_count(
3195 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3196 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3197
3198 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3199 if (ret < 0) {
3200 session_unlock_list();
3201 goto setup_error;
3202 }
3203
/* Fill the session array */
3205 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3206 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3207 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3208
3209 session_unlock_list();
3210
3211 ret = LTTNG_OK;
3212 break;
3213 }
3214 case LTTNG_CALIBRATE:
3215 {
3216 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3217 &cmd_ctx->lsm->u.calibrate);
3218 break;
3219 }
3220 case LTTNG_REGISTER_CONSUMER:
3221 {
3222 struct consumer_data *cdata;
3223
3224 switch (cmd_ctx->lsm->domain.type) {
3225 case LTTNG_DOMAIN_KERNEL:
3226 cdata = &kconsumer_data;
3227 break;
3228 default:
3229 ret = LTTNG_ERR_UND;
3230 goto error;
3231 }
3232
3233 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3234 cmd_ctx->lsm->u.reg.path, cdata);
3235 break;
3236 }
3237 case LTTNG_ENABLE_EVENT_WITH_FILTER:
3238 {
3239 struct lttng_filter_bytecode *bytecode;
3240
3241 if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
3242 ret = LTTNG_ERR_FILTER_INVAL;
3243 goto error;
3244 }
3245 if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
3246 ret = LTTNG_ERR_FILTER_INVAL;
3247 goto error;
3248 }
3249 bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
3250 if (!bytecode) {
3251 ret = LTTNG_ERR_FILTER_NOMEM;
3252 goto error;
3253 }
3254 /* Receive var. len. data */
3255 DBG("Receiving var len data from client ...");
3256 ret = lttcomm_recv_unix_sock(sock, bytecode,
3257 cmd_ctx->lsm->u.enable.bytecode_len);
3258 if (ret <= 0) {
3259 DBG("Nothing recv() from client var len data... continuing");
3260 *sock_error = 1;
3261 ret = LTTNG_ERR_FILTER_INVAL;
3262 goto error;
3263 }
3264
3265 if (bytecode->len + sizeof(*bytecode)
3266 != cmd_ctx->lsm->u.enable.bytecode_len) {
3267 free(bytecode);
3268 ret = LTTNG_ERR_FILTER_INVAL;
3269 goto error;
3270 }
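/*
 * The check above guards against a malformed message: the announced
 * bytecode_len must be exactly the size of the bytecode header plus its
 * advertised payload length (bytecode->len).
 */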
3271
3272 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3273 cmd_ctx->lsm->u.enable.channel_name,
3274 &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
3275 break;
3276 }
3277 case LTTNG_DATA_PENDING:
3278 {
3279 ret = cmd_data_pending(cmd_ctx->session);
3280 break;
3281 }
3282 case LTTNG_SNAPSHOT_ADD_OUTPUT:
3283 {
3284 struct lttcomm_lttng_output_id reply;
3285
3286 ret = cmd_snapshot_add_output(cmd_ctx->session,
3287 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
3288 if (ret != LTTNG_OK) {
3289 goto error;
3290 }
3291
3292 ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
3293 if (ret < 0) {
3294 goto setup_error;
3295 }
3296
/* Copy the new output id into the message payload */
3298 memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
3299 ret = LTTNG_OK;
3300 break;
3301 }
3302 case LTTNG_SNAPSHOT_DEL_OUTPUT:
3303 {
3304 ret = cmd_snapshot_del_output(cmd_ctx->session,
3305 &cmd_ctx->lsm->u.snapshot_output.output);
3306 break;
3307 }
3308 case LTTNG_SNAPSHOT_LIST_OUTPUT:
3309 {
3310 ssize_t nb_output;
3311 struct lttng_snapshot_output *outputs = NULL;
3312
3313 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
3314 if (nb_output < 0) {
3315 ret = -nb_output;
3316 goto error;
3317 }
3318
3319 ret = setup_lttng_msg(cmd_ctx,
3320 nb_output * sizeof(struct lttng_snapshot_output));
3321 if (ret < 0) {
3322 free(outputs);
3323 goto setup_error;
3324 }
3325
3326 if (outputs) {
3327 /* Copy output list into message payload */
3328 memcpy(cmd_ctx->llm->payload, outputs,
3329 nb_output * sizeof(struct lttng_snapshot_output));
3330 free(outputs);
3331 }
3332
3333 ret = LTTNG_OK;
3334 break;
3335 }
3336 case LTTNG_SNAPSHOT_RECORD:
3337 {
3338 ret = cmd_snapshot_record(cmd_ctx->session,
3339 &cmd_ctx->lsm->u.snapshot_record.output,
3340 cmd_ctx->lsm->u.snapshot_record.wait);
3341 break;
3342 }
3343 case LTTNG_CREATE_SESSION_SNAPSHOT:
3344 {
3345 size_t nb_uri, len;
3346 struct lttng_uri *uris = NULL;
3347
3348 nb_uri = cmd_ctx->lsm->u.uri.size;
3349 len = nb_uri * sizeof(struct lttng_uri);
3350
3351 if (nb_uri > 0) {
3352 uris = zmalloc(len);
3353 if (uris == NULL) {
3354 ret = LTTNG_ERR_FATAL;
3355 goto error;
3356 }
3357
3358 /* Receive variable len data */
3359 DBG("Waiting for %zu URIs from client ...", nb_uri);
3360 ret = lttcomm_recv_unix_sock(sock, uris, len);
3361 if (ret <= 0) {
3362 DBG("No URIs received from client... continuing");
3363 *sock_error = 1;
3364 ret = LTTNG_ERR_SESSION_FAIL;
3365 free(uris);
3366 goto error;
3367 }
3368
3369 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3370 DBG("Creating session with ONE network URI is a bad call");
3371 ret = LTTNG_ERR_SESSION_FAIL;
3372 free(uris);
3373 goto error;
3374 }
3375 }
3376
3377 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
3378 nb_uri, &cmd_ctx->creds);
3379 free(uris);
3380 break;
3381 }
3382 default:
3383 ret = LTTNG_ERR_UND;
3384 break;
3385 }
3386
3387 error:
3388 if (cmd_ctx->llm == NULL) {
3389 DBG("Missing llm structure. Allocating one.");
3390 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3391 goto setup_error;
3392 }
3393 }
3394 /* Set return code */
3395 cmd_ctx->llm->ret_code = ret;
3396 setup_error:
3397 if (cmd_ctx->session) {
3398 session_unlock(cmd_ctx->session);
3399 }
3400 if (need_tracing_session) {
3401 session_unlock_list();
3402 }
3403 init_setup_error:
3404 return ret;
3405 }
3406
3407 /*
3408 * Thread managing health check socket.
3409 */
3410 static void *thread_manage_health(void *data)
3411 {
3412 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3413 uint32_t revents, nb_fd;
3414 struct lttng_poll_event events;
3415 struct lttcomm_health_msg msg;
3416 struct lttcomm_health_data reply;
3417
3418 DBG("[thread] Manage health check started");
3419
3420 rcu_register_thread();
3421
3422 /* We might hit an error path before this is created. */
3423 lttng_poll_init(&events);
3424
3425 /* Create unix socket */
3426 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3427 if (sock < 0) {
3428 ERR("Unable to create health check Unix socket");
3429 ret = -1;
3430 goto error;
3431 }
3432
3433 /*
3434 * Set the CLOEXEC flag. Return code is useless because either way, the
3435 * show must go on.
3436 */
3437 (void) utils_set_fd_cloexec(sock);
3438
3439 ret = lttcomm_listen_unix_sock(sock);
3440 if (ret < 0) {
3441 goto error;
3442 }
3443
3444 /*
* Pass 2 as size here for the thread quit pipe and the health socket. Nothing
3446 * more will be added to this poll set.
3447 */
3448 ret = sessiond_set_thread_pollset(&events, 2);
3449 if (ret < 0) {
3450 goto error;
3451 }
3452
/* Add the health check socket to the poll set */
3454 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3455 if (ret < 0) {
3456 goto error;
3457 }
3458
3459 while (1) {
3460 DBG("Health check ready");
3461
/* Infinite blocking call, waiting for transmission */
3463 restart:
3464 ret = lttng_poll_wait(&events, -1);
3465 if (ret < 0) {
3466 /*
3467 * Restart interrupted system call.
3468 */
3469 if (errno == EINTR) {
3470 goto restart;
3471 }
3472 goto error;
3473 }
3474
3475 nb_fd = ret;
3476
3477 for (i = 0; i < nb_fd; i++) {
3478 /* Fetch once the poll data */
3479 revents = LTTNG_POLL_GETEV(&events, i);
3480 pollfd = LTTNG_POLL_GETFD(&events, i);
3481
3482 /* Thread quit pipe has been closed. Killing thread. */
3483 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3484 if (ret) {
3485 err = 0;
3486 goto exit;
3487 }
3488
3489 /* Event on the registration socket */
3490 if (pollfd == sock) {
3491 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3492 ERR("Health socket poll error");
3493 goto error;
3494 }
3495 }
3496 }
3497
3498 new_sock = lttcomm_accept_unix_sock(sock);
3499 if (new_sock < 0) {
3500 goto error;
3501 }
3502
3503 /*
3504 * Set the CLOEXEC flag. Return code is useless because either way, the
3505 * show must go on.
3506 */
3507 (void) utils_set_fd_cloexec(new_sock);
3508
3509 DBG("Receiving data from client for health...");
3510 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3511 if (ret <= 0) {
3512 DBG("Nothing recv() from client... continuing");
3513 ret = close(new_sock);
3514 if (ret) {
3515 PERROR("close");
3516 }
3517 new_sock = -1;
3518 continue;
3519 }
3520
3521 rcu_thread_online();
3522
3523 switch (msg.component) {
3524 case LTTNG_HEALTH_CMD:
3525 reply.ret_code = health_check_state(HEALTH_TYPE_CMD);
3526 break;
3527 case LTTNG_HEALTH_APP_MANAGE:
3528 reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE);
3529 break;
3530 case LTTNG_HEALTH_APP_REG:
3531 reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG);
3532 break;
3533 case LTTNG_HEALTH_KERNEL:
3534 reply.ret_code = health_check_state(HEALTH_TYPE_KERNEL);
3535 break;
3536 case LTTNG_HEALTH_CONSUMER:
3537 reply.ret_code = check_consumer_health();
3538 break;
3539 case LTTNG_HEALTH_HT_CLEANUP:
3540 reply.ret_code = health_check_state(HEALTH_TYPE_HT_CLEANUP);
3541 break;
3542 case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
3543 reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY);
3544 break;
3545 case LTTNG_HEALTH_APP_REG_DISPATCH:
3546 reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
3547 break;
3548 case LTTNG_HEALTH_ALL:
3549 reply.ret_code =
3550 health_check_state(HEALTH_TYPE_APP_MANAGE) &&
3551 health_check_state(HEALTH_TYPE_APP_REG) &&
3552 health_check_state(HEALTH_TYPE_CMD) &&
3553 health_check_state(HEALTH_TYPE_KERNEL) &&
3554 check_consumer_health() &&
3555 health_check_state(HEALTH_TYPE_HT_CLEANUP) &&
3556 health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
3557 health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
3558 break;
3559 default:
3560 reply.ret_code = LTTNG_ERR_UND;
3561 break;
3562 }
3563
/*
 * Flip the ret value since, for the client, 0 is a success and 1 indicates
 * bad health, while inside the sessiond the convention is the opposite.
 * Again, this simply keeps things easier on our side.
 */
3570 if (reply.ret_code == 0 || reply.ret_code == 1) {
3571 reply.ret_code = !reply.ret_code;
3572 }
3573
3574 DBG2("Health check return value %d", reply.ret_code);
3575
3576 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3577 if (ret < 0) {
3578 ERR("Failed to send health data back to client");
3579 }
3580
3581 /* End of transmission */
3582 ret = close(new_sock);
3583 if (ret) {
3584 PERROR("close");
3585 }
3586 new_sock = -1;
3587 }
3588
3589 exit:
3590 error:
3591 if (err) {
3592 ERR("Health error occurred in %s", __func__);
3593 }
3594 DBG("Health check thread dying");
3595 unlink(health_unix_sock_path);
3596 if (sock >= 0) {
3597 ret = close(sock);
3598 if (ret) {
3599 PERROR("close");
3600 }
3601 }
3602
3603 lttng_poll_clean(&events);
3604
3605 rcu_unregister_thread();
3606 return NULL;
3607 }
3608
3609 /*
* This thread manages all client requests using the client unix socket for
* communication.
3612 */
3613 static void *thread_manage_clients(void *data)
3614 {
3615 int sock = -1, ret, i, pollfd, err = -1;
3616 int sock_error;
3617 uint32_t revents, nb_fd;
3618 struct command_ctx *cmd_ctx = NULL;
3619 struct lttng_poll_event events;
3620
3621 DBG("[thread] Manage client started");
3622
3623 rcu_register_thread();
3624
3625 health_register(HEALTH_TYPE_CMD);
3626
3627 if (testpoint(thread_manage_clients)) {
3628 goto error_testpoint;
3629 }
3630
3631 health_code_update();
3632
3633 ret = lttcomm_listen_unix_sock(client_sock);
3634 if (ret < 0) {
3635 goto error_listen;
3636 }
3637
3638 /*
3639 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3640 * more will be added to this poll set.
3641 */
3642 ret = sessiond_set_thread_pollset(&events, 2);
3643 if (ret < 0) {
3644 goto error_create_poll;
3645 }
3646
/* Add the client socket to the poll set */
3648 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3649 if (ret < 0) {
3650 goto error;
3651 }
3652
3653 /*
* Notify parent pid that we are ready to accept commands for the client side.
3655 */
3656 if (opt_sig_parent) {
3657 kill(ppid, SIGUSR1);
3658 }
3659
3660 if (testpoint(thread_manage_clients_before_loop)) {
3661 goto error;
3662 }
3663
3664 health_code_update();
3665
3666 while (1) {
3667 DBG("Accepting client command ...");
3668
/* Infinite blocking call, waiting for transmission */
3670 restart:
3671 health_poll_entry();
3672 ret = lttng_poll_wait(&events, -1);
3673 health_poll_exit();
3674 if (ret < 0) {
3675 /*
3676 * Restart interrupted system call.
3677 */
3678 if (errno == EINTR) {
3679 goto restart;
3680 }
3681 goto error;
3682 }
3683
3684 nb_fd = ret;
3685
3686 for (i = 0; i < nb_fd; i++) {
3687 /* Fetch once the poll data */
3688 revents = LTTNG_POLL_GETEV(&events, i);
3689 pollfd = LTTNG_POLL_GETFD(&events, i);
3690
3691 health_code_update();
3692
3693 /* Thread quit pipe has been closed. Killing thread. */
3694 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3695 if (ret) {
3696 err = 0;
3697 goto exit;
3698 }
3699
3700 /* Event on the registration socket */
3701 if (pollfd == client_sock) {
3702 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3703 ERR("Client socket poll error");
3704 goto error;
3705 }
3706 }
3707 }
3708
3709 DBG("Wait for client response");
3710
3711 health_code_update();
3712
3713 sock = lttcomm_accept_unix_sock(client_sock);
3714 if (sock < 0) {
3715 goto error;
3716 }
3717
3718 /*
3719 * Set the CLOEXEC flag. Return code is useless because either way, the
3720 * show must go on.
3721 */
3722 (void) utils_set_fd_cloexec(sock);
3723
3724 /* Set socket option for credentials retrieval */
3725 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3726 if (ret < 0) {
3727 goto error;
3728 }
3729
3730 /* Allocate context command to process the client request */
3731 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3732 if (cmd_ctx == NULL) {
3733 PERROR("zmalloc cmd_ctx");
3734 goto error;
3735 }
3736
3737 /* Allocate data buffer for reception */
3738 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3739 if (cmd_ctx->lsm == NULL) {
3740 PERROR("zmalloc cmd_ctx->lsm");
3741 goto error;
3742 }
3743
3744 cmd_ctx->llm = NULL;
3745 cmd_ctx->session = NULL;
3746
3747 health_code_update();
3748
3749 /*
3750