Fix: sessiond: wait for health check readiness
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <paths.h>
24 #include <pthread.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <inttypes.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <urcu/uatomic.h>
38 #include <unistd.h>
39 #include <config.h>
40
41 #include <common/common.h>
42 #include <common/compat/socket.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/futex.h>
46 #include <common/relayd/relayd.h>
47 #include <common/utils.h>
48
49 #include "lttng-sessiond.h"
50 #include "buffer-registry.h"
51 #include "channel.h"
52 #include "cmd.h"
53 #include "consumer.h"
54 #include "context.h"
55 #include "event.h"
56 #include "kernel.h"
57 #include "kernel-consumer.h"
58 #include "modprobe.h"
59 #include "shm.h"
60 #include "ust-ctl.h"
61 #include "ust-consumer.h"
62 #include "utils.h"
63 #include "fd-limit.h"
64 #include "health-sessiond.h"
65 #include "testpoint.h"
66 #include "ust-thread.h"
67 #include "jul-thread.h"
68
69 #define CONSUMERD_FILE "lttng-consumerd"
70
71 const char *progname;
72 static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
73 static const char *opt_pidfile;
74 static int opt_sig_parent;
75 static int opt_verbose_consumer;
76 static int opt_daemon;
77 static int opt_no_kernel;
78 static int is_root; /* Set to 1 if the daemon is running as root */
79 static pid_t ppid; /* Parent PID for --sig-parent option */
80 static pid_t child_ppid; /* Internal parent PID use with daemonize. */
81 static char *rundir;
82
83 /* Set to 1 when a SIGUSR1 signal is received. */
84 static int recv_child_signal;
85
86 /*
87 * Consumer daemon specific control data. Every value not initialized here is
88 * set to 0 by the static definition.
89 */
90 static struct consumer_data kconsumer_data = {
91 .type = LTTNG_CONSUMER_KERNEL,
92 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
93 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
94 .err_sock = -1,
95 .cmd_sock = -1,
96 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
97 .lock = PTHREAD_MUTEX_INITIALIZER,
98 .cond = PTHREAD_COND_INITIALIZER,
99 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
100 };
101 static struct consumer_data ustconsumer64_data = {
102 .type = LTTNG_CONSUMER64_UST,
103 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
104 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
105 .err_sock = -1,
106 .cmd_sock = -1,
107 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
108 .lock = PTHREAD_MUTEX_INITIALIZER,
109 .cond = PTHREAD_COND_INITIALIZER,
110 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
111 };
112 static struct consumer_data ustconsumer32_data = {
113 .type = LTTNG_CONSUMER32_UST,
114 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
115 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
116 .err_sock = -1,
117 .cmd_sock = -1,
118 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
119 .lock = PTHREAD_MUTEX_INITIALIZER,
120 .cond = PTHREAD_COND_INITIALIZER,
121 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
122 };
123
124 /* Shared between threads */
125 static int dispatch_thread_exit;
126
127 /* Global application Unix socket path */
128 static char apps_unix_sock_path[PATH_MAX];
129 /* Global client Unix socket path */
130 static char client_unix_sock_path[PATH_MAX];
132 /* Global wait shm path for UST */
132 static char wait_shm_path[PATH_MAX];
133 /* Global health check unix path */
134 static char health_unix_sock_path[PATH_MAX];
135
136 /* Sockets and FDs */
137 static int client_sock = -1;
138 static int apps_sock = -1;
139 int kernel_tracer_fd = -1;
140 static int kernel_poll_pipe[2] = { -1, -1 };
141
142 /*
143 * Quit pipe for all threads. This permits a single cancellation point
144 * for all threads when receiving an event on the pipe.
145 */
146 static int thread_quit_pipe[2] = { -1, -1 };
147
148 /*
149 * This pipe is used to inform the thread managing application communication
150 * that a command is queued and ready to be processed.
151 */
152 static int apps_cmd_pipe[2] = { -1, -1 };
153
154 int apps_cmd_notify_pipe[2] = { -1, -1 };
155
156 /* Pthread, Mutexes and Semaphores */
157 static pthread_t apps_thread;
158 static pthread_t apps_notify_thread;
159 static pthread_t reg_apps_thread;
160 static pthread_t client_thread;
161 static pthread_t kernel_thread;
162 static pthread_t dispatch_thread;
163 static pthread_t health_thread;
164 static pthread_t ht_cleanup_thread;
165 static pthread_t jul_reg_thread;
166
167 /*
168 * UST registration command queue. This queue is tied to a futex and uses an
169 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
170 *
171 * The thread_manage_apps and thread_dispatch_ust_registration interact with
172 * this queue and the wait/wake scheme.
173 */
174 static struct ust_cmd_queue ust_cmd_queue;
175
176 /*
177 * Pointer initialized before thread creation.
178 *
179 * This points to the tracing session list containing the session count and a
180 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
181 * MUST NOT be taken if you call a public function in session.c.
182 *
183 * The lock is nested inside the structure: session_list_ptr->lock. Please use
184 * session_lock_list and session_unlock_list for lock acquisition.
185 */
186 static struct ltt_session_list *session_list_ptr;
187
188 int ust_consumerd64_fd = -1;
189 int ust_consumerd32_fd = -1;
190
191 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
192 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
193 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
194 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
195
196 static const char *module_proc_lttng = "/proc/lttng";
197
198 /*
199 * Consumer daemon state which is changed when spawning it, killing it or in
200 * case of a fatal error.
201 */
202 enum consumerd_state {
203 CONSUMER_STARTED = 1,
204 CONSUMER_STOPPED = 2,
205 CONSUMER_ERROR = 3,
206 };
207
208 /*
209 * This consumer daemon state is used to validate if a client command will be
210 * able to reach the consumer. If not, the client is informed. For instance,
211 * doing a "lttng start" when the consumer state is set to ERROR will return an
212 * error to the client.
213 *
214 * The following example shows a possible race condition of this scheme:
215 *
216 * consumer thread error happens
217 * client cmd arrives
218 * client cmd checks state -> still OK
219 * consumer thread exit, sets error
220 * client cmd try to talk to consumer
221 * ...
222 *
223 * However, since the consumer is a different daemon, we have no way of making
224 * sure the command will reach it safely even with this state flag. This is why
225 * we consider that up to the state validation during command processing, the
226 * command is safe. After that, we can not guarantee the correctness of the
227 * client request vis-a-vis the consumer.
228 */
229 static enum consumerd_state ust_consumerd_state;
230 static enum consumerd_state kernel_consumerd_state;
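/*
 * A minimal sketch of the state validation described above, assuming a client
 * command that needs the kernel consumer (the actual checks live in the client
 * command processing code, which is not part of this excerpt; the error code
 * name below is illustrative only):
 *
 *	if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
 *		ret = LTTNG_ERR_NO_KERNCONSUMERD;
 *		goto error;
 *	}
 */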
231
232 /*
233 * Socket timeout for receiving and sending in seconds.
234 */
235 static int app_socket_timeout;
236
237 /* Set in main() with the current page size. */
238 long page_size;
239
240 /* Application health monitoring */
241 struct health_app *health_sessiond;
242
243 /* JUL TCP port for registration. Used by the JUL thread. */
244 unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;
245
246 /*
247 * Whether sessiond is ready for commands/health check requests.
248 * NR_LTTNG_SESSIOND_READY must match the number of calls to
249 * lttng_sessiond_notify_ready().
250 */
251 #define NR_LTTNG_SESSIOND_READY 2
252 int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
253
254 /* Notify parents that we are ready for cmd and health check */
255 static
256 void lttng_sessiond_notify_ready(void)
257 {
258 if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
259 /*
260 * Notify the parent pid that we are ready to accept commands
261 * on the client side. This ppid is the one from the
262 * external process that spawned us.
263 */
264 if (opt_sig_parent) {
265 kill(ppid, SIGUSR1);
266 }
267
268 /*
269 * Notify the parent of the fork() process that we are
270 * ready.
271 */
272 if (opt_daemon) {
273 kill(child_ppid, SIGUSR1);
274 }
275 }
276 }
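/*
 * A minimal sketch of the waiting side of the readiness counter, assuming a
 * simple polling loop (the wait logic actually used by the client and health
 * threads lives further down in this file and may differ):
 *
 *	while (uatomic_read(&lttng_sessiond_ready) != 0) {
 *		(void) poll(NULL, 0, 100);
 *	}
 */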
277
278 static
279 void setup_consumerd_path(void)
280 {
281 const char *bin, *libdir;
282
283 /*
284 * Allow INSTALL_BIN_PATH to be used as a target path for the
285 * native architecture size consumer if CONFIG_CONSUMER*_PATH
286 * has not been defined.
287 */
288 #if (CAA_BITS_PER_LONG == 32)
289 if (!consumerd32_bin[0]) {
290 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
291 }
292 if (!consumerd32_libdir[0]) {
293 consumerd32_libdir = INSTALL_LIB_PATH;
294 }
295 #elif (CAA_BITS_PER_LONG == 64)
296 if (!consumerd64_bin[0]) {
297 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
298 }
299 if (!consumerd64_libdir[0]) {
300 consumerd64_libdir = INSTALL_LIB_PATH;
301 }
302 #else
303 #error "Unknown bitness"
304 #endif
305
306 /*
307 * Runtime environment variables override the build defaults.
308 */
309 bin = getenv("LTTNG_CONSUMERD32_BIN");
310 if (bin) {
311 consumerd32_bin = bin;
312 }
313 bin = getenv("LTTNG_CONSUMERD64_BIN");
314 if (bin) {
315 consumerd64_bin = bin;
316 }
317 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
318 if (libdir) {
319 consumerd32_libdir = libdir;
320 }
321 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
322 if (libdir) {
323 consumerd64_libdir = libdir;
324 }
325 }
326
327 /*
328 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
329 */
330 int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
331 {
332 int ret;
333
334 assert(events);
335
336 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
337 if (ret < 0) {
338 goto error;
339 }
340
341 /* Add quit pipe */
342 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
343 if (ret < 0) {
344 goto error;
345 }
346
347 return 0;
348
349 error:
350 return ret;
351 }
352
353 /*
354 * Check if the thread quit pipe was triggered.
355 *
356 * Return 1 if it was triggered, else 0.
357 */
358 int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
359 {
360 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
361 return 1;
362 }
363
364 return 0;
365 }
366
367 /*
368 * Init thread quit pipe.
369 *
370 * Return -1 on error or 0 if all pipes are created.
371 */
372 static int init_thread_quit_pipe(void)
373 {
374 int ret, i;
375
376 ret = pipe(thread_quit_pipe);
377 if (ret < 0) {
378 PERROR("thread quit pipe");
379 goto error;
380 }
381
382 for (i = 0; i < 2; i++) {
383 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
384 if (ret < 0) {
385 PERROR("fcntl");
386 goto error;
387 }
388 }
389
390 error:
391 return ret;
392 }
393
394 /*
395 * Stop all threads by closing the thread quit pipe.
396 */
397 static void stop_threads(void)
398 {
399 int ret;
400
401 /* Stopping all threads */
402 DBG("Terminating all threads");
403 ret = notify_thread_pipe(thread_quit_pipe[1]);
404 if (ret < 0) {
405 ERR("write error on thread quit pipe");
406 }
407
408 /* Dispatch thread */
409 CMM_STORE_SHARED(dispatch_thread_exit, 1);
410 futex_nto1_wake(&ust_cmd_queue.futex);
411 }
412
413 /*
414 * Close all consumer sockets.
415 */
416 static void close_consumer_sockets(void)
417 {
418 int ret;
419
420 if (kconsumer_data.err_sock >= 0) {
421 ret = close(kconsumer_data.err_sock);
422 if (ret < 0) {
423 PERROR("kernel consumer err_sock close");
424 }
425 }
426 if (ustconsumer32_data.err_sock >= 0) {
427 ret = close(ustconsumer32_data.err_sock);
428 if (ret < 0) {
429 PERROR("UST consumerd32 err_sock close");
430 }
431 }
432 if (ustconsumer64_data.err_sock >= 0) {
433 ret = close(ustconsumer64_data.err_sock);
434 if (ret < 0) {
435 PERROR("UST consumerd64 err_sock close");
436 }
437 }
438 if (kconsumer_data.cmd_sock >= 0) {
439 ret = close(kconsumer_data.cmd_sock);
440 if (ret < 0) {
441 PERROR("kernel consumer cmd_sock close");
442 }
443 }
444 if (ustconsumer32_data.cmd_sock >= 0) {
445 ret = close(ustconsumer32_data.cmd_sock);
446 if (ret < 0) {
447 PERROR("UST consumerd32 cmd_sock close");
448 }
449 }
450 if (ustconsumer64_data.cmd_sock >= 0) {
451 ret = close(ustconsumer64_data.cmd_sock);
452 if (ret < 0) {
453 PERROR("UST consumerd64 cmd_sock close");
454 }
455 }
456 }
457
458 /*
459 * Clean up the daemon.
460 */
461 static void cleanup(void)
462 {
463 int ret;
464 struct ltt_session *sess, *stmp;
465 char path[PATH_MAX];
466
467 DBG("Cleaning up");
468
469 /*
470 * Close the thread quit pipe. It has already done its job,
471 * since we are now called.
472 */
473 utils_close_pipe(thread_quit_pipe);
474
475 /*
476 * If opt_pidfile is undefined, the default file will be wiped when
477 * removing the rundir.
478 */
479 if (opt_pidfile) {
480 ret = remove(opt_pidfile);
481 if (ret < 0) {
482 PERROR("remove pidfile %s", opt_pidfile);
483 }
484 }
485
486 DBG("Removing sessiond and consumerd content of directory %s", rundir);
487
488 /* sessiond */
489 snprintf(path, PATH_MAX,
490 "%s/%s",
491 rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
492 DBG("Removing %s", path);
493 (void) unlink(path);
494
495 snprintf(path, PATH_MAX, "%s/%s", rundir,
496 DEFAULT_LTTNG_SESSIOND_JULPORT_FILE);
497 DBG("Removing %s", path);
498 (void) unlink(path);
499
500 /* kconsumerd */
501 snprintf(path, PATH_MAX,
502 DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
503 rundir);
504 DBG("Removing %s", path);
505 (void) unlink(path);
506
507 snprintf(path, PATH_MAX,
508 DEFAULT_KCONSUMERD_PATH,
509 rundir);
510 DBG("Removing directory %s", path);
511 (void) rmdir(path);
512
513 /* ust consumerd 32 */
514 snprintf(path, PATH_MAX,
515 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
516 rundir);
517 DBG("Removing %s", path);
518 (void) unlink(path);
519
520 snprintf(path, PATH_MAX,
521 DEFAULT_USTCONSUMERD32_PATH,
522 rundir);
523 DBG("Removing directory %s", path);
524 (void) rmdir(path);
525
526 /* ust consumerd 64 */
527 snprintf(path, PATH_MAX,
528 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
529 rundir);
530 DBG("Removing %s", path);
531 (void) unlink(path);
532
533 snprintf(path, PATH_MAX,
534 DEFAULT_USTCONSUMERD64_PATH,
535 rundir);
536 DBG("Removing directory %s", path);
537 (void) rmdir(path);
538
539 /*
540 * We do NOT rmdir rundir because there are other processes
541 * using it, for instance lttng-relayd, which can start in
542 * parallel with this teardown.
543 */
544
545 free(rundir);
546
547 DBG("Cleaning up all sessions");
548
549 /* Destroy session list mutex */
550 if (session_list_ptr != NULL) {
551 pthread_mutex_destroy(&session_list_ptr->lock);
552
553 /* Cleanup ALL session */
554 cds_list_for_each_entry_safe(sess, stmp,
555 &session_list_ptr->head, list) {
556 cmd_destroy_session(sess, kernel_poll_pipe[1]);
557 }
558 }
559
560 DBG("Closing all UST sockets");
561 ust_app_clean_list();
562 buffer_reg_destroy_registries();
563
564 if (is_root && !opt_no_kernel) {
565 DBG2("Closing kernel fd");
566 if (kernel_tracer_fd >= 0) {
567 ret = close(kernel_tracer_fd);
568 if (ret) {
569 PERROR("close");
570 }
571 }
572 DBG("Unloading kernel modules");
573 modprobe_remove_lttng_all();
574 }
575
576 close_consumer_sockets();
577
578 /* <fun> */
579 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
580 "Matthew, BEET driven development works!%c[%dm",
581 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
582 /* </fun> */
583 }
584
585 /*
586 * Send data on a unix socket using the liblttsessiondcomm API.
587 *
588 * Return lttcomm error code.
589 */
590 static int send_unix_sock(int sock, void *buf, size_t len)
591 {
592 /* Check valid length */
593 if (len == 0) {
594 return -1;
595 }
596
597 return lttcomm_send_unix_sock(sock, buf, len);
598 }
599
600 /*
601 * Free memory of a command context structure.
602 */
603 static void clean_command_ctx(struct command_ctx **cmd_ctx)
604 {
605 DBG("Clean command context structure");
606 if (*cmd_ctx) {
607 if ((*cmd_ctx)->llm) {
608 free((*cmd_ctx)->llm);
609 }
610 if ((*cmd_ctx)->lsm) {
611 free((*cmd_ctx)->lsm);
612 }
613 free(*cmd_ctx);
614 *cmd_ctx = NULL;
615 }
616 }
617
618 /*
619 * Notify UST applications using the shm mmap futex.
620 */
621 static int notify_ust_apps(int active)
622 {
623 char *wait_shm_mmap;
624
625 DBG("Notifying applications of session daemon state: %d", active);
626
627 /* See shm.c for this call implying mmap, shm and futex calls */
628 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
629 if (wait_shm_mmap == NULL) {
630 goto error;
631 }
632
633 /* Wake waiting process */
634 futex_wait_update((int32_t *) wait_shm_mmap, active);
635
636 /* Apps notified successfully */
637 return 0;
638
639 error:
640 return -1;
641 }
642
643 /*
644 * Set up the outgoing data buffer for the response (llm) by allocating the
645 * right amount of memory and copying the original information from the lsm
646 * structure.
647 *
648 * Return the size of the payload buffer.
649 */
650 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
651 {
652 int ret, buf_size;
653
654 buf_size = size;
655
656 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
657 if (cmd_ctx->llm == NULL) {
658 PERROR("zmalloc");
659 ret = -ENOMEM;
660 goto error;
661 }
662
663 /* Copy common data */
664 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
665 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
666
667 cmd_ctx->llm->data_size = size;
668 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
669
670 return buf_size;
671
672 error:
673 return ret;
674 }
675
676 /*
677 * Update the kernel poll set with all channel fds available across all
678 * tracing sessions. Add the wakeup pipe at the end of the set.
679 */
680 static int update_kernel_poll(struct lttng_poll_event *events)
681 {
682 int ret;
683 struct ltt_session *session;
684 struct ltt_kernel_channel *channel;
685
686 DBG("Updating kernel poll set");
687
688 session_lock_list();
689 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
690 session_lock(session);
691 if (session->kernel_session == NULL) {
692 session_unlock(session);
693 continue;
694 }
695
696 cds_list_for_each_entry(channel,
697 &session->kernel_session->channel_list.head, list) {
698 /* Add channel fd to the kernel poll set */
699 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
700 if (ret < 0) {
701 session_unlock(session);
702 goto error;
703 }
704 DBG("Channel fd %d added to kernel set", channel->fd);
705 }
706 session_unlock(session);
707 }
708 session_unlock_list();
709
710 return 0;
711
712 error:
713 session_unlock_list();
714 return -1;
715 }
716
717 /*
718 * Find the channel fd matching 'fd' across all tracing sessions. When found,
719 * check for new channel streams and send those stream fds to the kernel consumer.
720 *
721 * Useful for the CPU hotplug feature.
722 */
723 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
724 {
725 int ret = 0;
726 struct ltt_session *session;
727 struct ltt_kernel_session *ksess;
728 struct ltt_kernel_channel *channel;
729
730 DBG("Updating kernel streams for channel fd %d", fd);
731
732 session_lock_list();
733 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
734 session_lock(session);
735 if (session->kernel_session == NULL) {
736 session_unlock(session);
737 continue;
738 }
739 ksess = session->kernel_session;
740
741 cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
742 if (channel->fd == fd) {
743 DBG("Channel found, updating kernel streams");
744 ret = kernel_open_channel_stream(channel);
745 if (ret < 0) {
746 goto error;
747 }
748 /* Update the stream global counter */
749 ksess->stream_count_global += ret;
750
751 /*
752 * Have we already sent fds to the consumer? If yes, it means
753 * that tracing is started so it is safe to send our updated
754 * stream fds.
755 */
756 if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
757 struct lttng_ht_iter iter;
758 struct consumer_socket *socket;
759
760 rcu_read_lock();
761 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
762 &iter.iter, socket, node.node) {
763 pthread_mutex_lock(socket->lock);
764 ret = kernel_consumer_send_channel_stream(socket,
765 channel, ksess,
766 session->output_traces ? 1 : 0);
767 pthread_mutex_unlock(socket->lock);
768 if (ret < 0) {
769 rcu_read_unlock();
770 goto error;
771 }
772 }
773 rcu_read_unlock();
774 }
775 goto error;
776 }
777 }
778 session_unlock(session);
779 }
780 session_unlock_list();
781 return ret;
782
783 error:
784 session_unlock(session);
785 session_unlock_list();
786 return ret;
787 }
788
789 /*
790 * For each tracing session, update newly registered apps. The session list
791 * lock MUST be acquired before calling this.
792 */
793 static void update_ust_app(int app_sock)
794 {
795 struct ltt_session *sess, *stmp;
796
797 /* Consumer is in an ERROR state. Stop any application update. */
798 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
799 /* Stop the update process since the consumer is dead. */
800 return;
801 }
802
803 /* For all tracing session(s) */
804 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
805 session_lock(sess);
806 if (sess->ust_session) {
807 ust_app_global_update(sess->ust_session, app_sock);
808 }
809 session_unlock(sess);
810 }
811 }
812
813 /*
814 * This thread manages events coming from the kernel.
815 *
816 * Features supported in this thread:
817 * - CPU hotplug
818 */
819 static void *thread_manage_kernel(void *data)
820 {
821 int ret, i, pollfd, update_poll_flag = 1, err = -1;
822 uint32_t revents, nb_fd;
823 char tmp;
824 struct lttng_poll_event events;
825
826 DBG("[thread] Thread manage kernel started");
827
828 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
829
830 /*
831 * The first step of the while loop cleans this structure, which could free
832 * non-NULL pointers, so initialize it before the loop.
833 */
834 lttng_poll_init(&events);
835
836 if (testpoint(sessiond_thread_manage_kernel)) {
837 goto error_testpoint;
838 }
839
840 health_code_update();
841
842 if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
843 goto error_testpoint;
844 }
845
846 while (1) {
847 health_code_update();
848
849 if (update_poll_flag == 1) {
850 /* Clean events object. We are about to populate it again. */
851 lttng_poll_clean(&events);
852
853 ret = sessiond_set_thread_pollset(&events, 2);
854 if (ret < 0) {
855 goto error_poll_create;
856 }
857
858 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
859 if (ret < 0) {
860 goto error;
861 }
862
863 /* This will add the available kernel channel if any. */
864 ret = update_kernel_poll(&events);
865 if (ret < 0) {
866 goto error;
867 }
868 update_poll_flag = 0;
869 }
870
871 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
872
873 /* Poll with an infinite timeout */
874 restart:
875 health_poll_entry();
876 ret = lttng_poll_wait(&events, -1);
877 health_poll_exit();
878 if (ret < 0) {
879 /*
880 * Restart interrupted system call.
881 */
882 if (errno == EINTR) {
883 goto restart;
884 }
885 goto error;
886 } else if (ret == 0) {
887 /* Should not happen since timeout is infinite */
888 ERR("Return value of poll is 0 with an infinite timeout.\n"
889 "This should not have happened! Continuing...");
890 continue;
891 }
892
893 nb_fd = ret;
894
895 for (i = 0; i < nb_fd; i++) {
896 /* Fetch once the poll data */
897 revents = LTTNG_POLL_GETEV(&events, i);
898 pollfd = LTTNG_POLL_GETFD(&events, i);
899
900 health_code_update();
901
902 /* Thread quit pipe has been triggered. Killing thread. */
903 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
904 if (ret) {
905 err = 0;
906 goto exit;
907 }
908
909 /* Check for data on kernel pipe */
910 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
911 (void) lttng_read(kernel_poll_pipe[0],
912 &tmp, 1);
913 /*
914 * The return value is useless here; if this pipe gets any action,
915 * an update is required anyway.
916 */
917 update_poll_flag = 1;
918 continue;
919 } else {
920 /*
921 * New CPU detected by the kernel. Adding the kernel stream to the
922 * kernel session and updating the kernel consumer.
923 */
924 if (revents & LPOLLIN) {
925 ret = update_kernel_stream(&kconsumer_data, pollfd);
926 if (ret < 0) {
927 continue;
928 }
929 break;
930 /*
931 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
932 * and unregister kernel stream at this point.
933 */
934 }
935 }
936 }
937 }
938
939 exit:
940 error:
941 lttng_poll_clean(&events);
942 error_poll_create:
943 error_testpoint:
944 utils_close_pipe(kernel_poll_pipe);
945 kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
946 if (err) {
947 health_error();
948 ERR("Health error occurred in %s", __func__);
949 WARN("Kernel thread died unexpectedly. "
950 "Kernel tracing can continue but CPU hotplug is disabled.");
951 }
952 health_unregister(health_sessiond);
953 DBG("Kernel thread dying");
954 return NULL;
955 }
956
957 /*
958 * Signal the consumer data's pthread condition to report the given thread state.
959 */
960 static void signal_consumer_condition(struct consumer_data *data, int state)
961 {
962 pthread_mutex_lock(&data->cond_mutex);
963
964 /*
965 * The state is set before signaling. It can be any value, it's the waiter
966 * job to correctly interpret this condition variable associated to the
967 * consumer pthread_cond.
968 *
969 * A value of 0 means that the corresponding thread of the consumer data
970 * was not started. 1 indicates that the thread has started and is ready
971 * for action. A negative value means that there was an error during the
972 * thread bootstrap.
973 */
974 data->consumer_thread_is_ready = state;
975 (void) pthread_cond_signal(&data->cond);
976
977 pthread_mutex_unlock(&data->cond_mutex);
978 }
979
980 /*
981 * This thread manages consumer errors sent back to the session daemon.
982 */
983 static void *thread_manage_consumer(void *data)
984 {
985 int sock = -1, i, ret, pollfd, err = -1;
986 uint32_t revents, nb_fd;
987 enum lttcomm_return_code code;
988 struct lttng_poll_event events;
989 struct consumer_data *consumer_data = data;
990
991 DBG("[thread] Manage consumer started");
992
993 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
994
995 health_code_update();
996
997 /*
998 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
999 * metadata_sock. Nothing more will be added to this poll set.
1000 */
1001 ret = sessiond_set_thread_pollset(&events, 3);
1002 if (ret < 0) {
1003 goto error_poll;
1004 }
1005
1006 /*
1007 * The error socket here is already in a listening state; this was done
1008 * just before spawning this thread to avoid a race between the consumer
1009 * daemon exec trying to connect and the listen() call.
1010 */
1011 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
1012 if (ret < 0) {
1013 goto error;
1014 }
1015
1016 health_code_update();
1017
1018 /* Infinite blocking call, waiting for transmission */
1019 restart:
1020 health_poll_entry();
1021
1022 if (testpoint(sessiond_thread_manage_consumer)) {
1023 goto error;
1024 }
1025
1026 ret = lttng_poll_wait(&events, -1);
1027 health_poll_exit();
1028 if (ret < 0) {
1029 /*
1030 * Restart interrupted system call.
1031 */
1032 if (errno == EINTR) {
1033 goto restart;
1034 }
1035 goto error;
1036 }
1037
1038 nb_fd = ret;
1039
1040 for (i = 0; i < nb_fd; i++) {
1041 /* Fetch once the poll data */
1042 revents = LTTNG_POLL_GETEV(&events, i);
1043 pollfd = LTTNG_POLL_GETFD(&events, i);
1044
1045 health_code_update();
1046
1047 /* Thread quit pipe has been triggered. Killing thread. */
1048 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1049 if (ret) {
1050 err = 0;
1051 goto exit;
1052 }
1053
1054 /* Event on the registration socket */
1055 if (pollfd == consumer_data->err_sock) {
1056 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1057 ERR("consumer err socket poll error");
1058 goto error;
1059 }
1060 }
1061 }
1062
1063 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
1064 if (sock < 0) {
1065 goto error;
1066 }
1067
1068 /*
1069 * Set the CLOEXEC flag. Return code is useless because either way, the
1070 * show must go on.
1071 */
1072 (void) utils_set_fd_cloexec(sock);
1073
1074 health_code_update();
1075
1076 DBG2("Receiving code from consumer err_sock");
1077
1078 /* Getting status code from kconsumerd */
1079 ret = lttcomm_recv_unix_sock(sock, &code,
1080 sizeof(enum lttcomm_return_code));
1081 if (ret <= 0) {
1082 goto error;
1083 }
1084
1085 health_code_update();
1086
1087 if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
1088 /* Connect both socket, command and metadata. */
1089 consumer_data->cmd_sock =
1090 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1091 consumer_data->metadata_fd =
1092 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1093 if (consumer_data->cmd_sock < 0
1094 || consumer_data->metadata_fd < 0) {
1095 PERROR("consumer connect cmd socket");
1096 /* On error, signal condition and quit. */
1097 signal_consumer_condition(consumer_data, -1);
1098 goto error;
1099 }
1100 consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
1101 /* Create metadata socket lock. */
1102 consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
1103 if (consumer_data->metadata_sock.lock == NULL) {
1104 PERROR("zmalloc pthread mutex");
1105 ret = -1;
1106 goto error;
1107 }
1108 pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
1109
1110 signal_consumer_condition(consumer_data, 1);
1111 DBG("Consumer command socket ready (fd: %d", consumer_data->cmd_sock);
1112 DBG("Consumer metadata socket ready (fd: %d)",
1113 consumer_data->metadata_fd);
1114 } else {
1115 ERR("consumer error when waiting for SOCK_READY : %s",
1116 lttcomm_get_readable_code(-code));
1117 goto error;
1118 }
1119
1120 /* Remove the consumerd error sock since we've established a connection */
1121 ret = lttng_poll_del(&events, consumer_data->err_sock);
1122 if (ret < 0) {
1123 goto error;
1124 }
1125
1126 /* Add new accepted error socket. */
1127 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
1128 if (ret < 0) {
1129 goto error;
1130 }
1131
1132 /* Add metadata socket that is successfully connected. */
1133 ret = lttng_poll_add(&events, consumer_data->metadata_fd,
1134 LPOLLIN | LPOLLRDHUP);
1135 if (ret < 0) {
1136 goto error;
1137 }
1138
1139 health_code_update();
1140
1141 /* Infinite blocking call, waiting for transmission */
1142 restart_poll:
1143 while (1) {
1144 health_poll_entry();
1145 ret = lttng_poll_wait(&events, -1);
1146 health_poll_exit();
1147 if (ret < 0) {
1148 /*
1149 * Restart interrupted system call.
1150 */
1151 if (errno == EINTR) {
1152 goto restart_poll;
1153 }
1154 goto error;
1155 }
1156
1157 nb_fd = ret;
1158
1159 for (i = 0; i < nb_fd; i++) {
1160 /* Fetch once the poll data */
1161 revents = LTTNG_POLL_GETEV(&events, i);
1162 pollfd = LTTNG_POLL_GETFD(&events, i);
1163
1164 health_code_update();
1165
1166 /* Thread quit pipe has been triggered. Killing thread. */
1167 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1168 if (ret) {
1169 err = 0;
1170 goto exit;
1171 }
1172
1173 if (pollfd == sock) {
1174 /* Event on the consumerd socket */
1175 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1176 ERR("consumer err socket second poll error");
1177 goto error;
1178 }
1179 health_code_update();
1180 /* Wait for any kconsumerd error */
1181 ret = lttcomm_recv_unix_sock(sock, &code,
1182 sizeof(enum lttcomm_return_code));
1183 if (ret <= 0) {
1184 ERR("consumer closed the command socket");
1185 goto error;
1186 }
1187
1188 ERR("consumer return code : %s",
1189 lttcomm_get_readable_code(-code));
1190
1191 goto exit;
1192 } else if (pollfd == consumer_data->metadata_fd) {
1193 /* UST metadata requests */
1194 ret = ust_consumer_metadata_request(
1195 &consumer_data->metadata_sock);
1196 if (ret < 0) {
1197 ERR("Handling metadata request");
1198 goto error;
1199 }
1200 break;
1201 } else {
1202 ERR("Unknown pollfd");
1203 goto error;
1204 }
1205 }
1206 health_code_update();
1207 }
1208
1209 exit:
1210 error:
1211 /*
1212 * We lock here because we are about to close the sockets and some other
1213 * thread might be using them, so we take exclusive access, which will abort
1214 * all other consumer commands issued by other threads.
1215 */
1216 pthread_mutex_lock(&consumer_data->lock);
1217
1218 /* Immediately set the consumerd state to stopped */
1219 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1220 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1221 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1222 consumer_data->type == LTTNG_CONSUMER32_UST) {
1223 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1224 } else {
1225 /* Code flow error... */
1226 assert(0);
1227 }
1228
1229 if (consumer_data->err_sock >= 0) {
1230 ret = close(consumer_data->err_sock);
1231 if (ret) {
1232 PERROR("close");
1233 }
1234 consumer_data->err_sock = -1;
1235 }
1236 if (consumer_data->cmd_sock >= 0) {
1237 ret = close(consumer_data->cmd_sock);
1238 if (ret) {
1239 PERROR("close");
1240 }
1241 consumer_data->cmd_sock = -1;
1242 }
1243 if (*consumer_data->metadata_sock.fd_ptr >= 0) {
1244 ret = close(*consumer_data->metadata_sock.fd_ptr);
1245 if (ret) {
1246 PERROR("close");
1247 }
1248 }
1249
1250 if (sock >= 0) {
1251 ret = close(sock);
1252 if (ret) {
1253 PERROR("close");
1254 }
1255 }
1256
1257 unlink(consumer_data->err_unix_sock_path);
1258 unlink(consumer_data->cmd_unix_sock_path);
1259 consumer_data->pid = 0;
1260 pthread_mutex_unlock(&consumer_data->lock);
1261
1262 /* Cleanup metadata socket mutex. */
1263 pthread_mutex_destroy(consumer_data->metadata_sock.lock);
1264 free(consumer_data->metadata_sock.lock);
1265
1266 lttng_poll_clean(&events);
1267 error_poll:
1268 if (err) {
1269 health_error();
1270 ERR("Health error occurred in %s", __func__);
1271 }
1272 health_unregister(health_sessiond);
1273 DBG("consumer thread cleanup completed");
1274
1275 return NULL;
1276 }
1277
1278 /*
1279 * This thread manages application communication.
1280 */
1281 static void *thread_manage_apps(void *data)
1282 {
1283 int i, ret, pollfd, err = -1;
1284 ssize_t size_ret;
1285 uint32_t revents, nb_fd;
1286 struct lttng_poll_event events;
1287
1288 DBG("[thread] Manage application started");
1289
1290 rcu_register_thread();
1291 rcu_thread_online();
1292
1293 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);
1294
1295 if (testpoint(sessiond_thread_manage_apps)) {
1296 goto error_testpoint;
1297 }
1298
1299 health_code_update();
1300
1301 ret = sessiond_set_thread_pollset(&events, 2);
1302 if (ret < 0) {
1303 goto error_poll_create;
1304 }
1305
1306 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1307 if (ret < 0) {
1308 goto error;
1309 }
1310
1311 if (testpoint(sessiond_thread_manage_apps_before_loop)) {
1312 goto error;
1313 }
1314
1315 health_code_update();
1316
1317 while (1) {
1318 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
1319
1320 /* Infinite blocking call, waiting for transmission */
1321 restart:
1322 health_poll_entry();
1323 ret = lttng_poll_wait(&events, -1);
1324 health_poll_exit();
1325 if (ret < 0) {
1326 /*
1327 * Restart interrupted system call.
1328 */
1329 if (errno == EINTR) {
1330 goto restart;
1331 }
1332 goto error;
1333 }
1334
1335 nb_fd = ret;
1336
1337 for (i = 0; i < nb_fd; i++) {
1338 /* Fetch once the poll data */
1339 revents = LTTNG_POLL_GETEV(&events, i);
1340 pollfd = LTTNG_POLL_GETFD(&events, i);
1341
1342 health_code_update();
1343
1344 /* Thread quit pipe has been triggered. Killing thread. */
1345 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1346 if (ret) {
1347 err = 0;
1348 goto exit;
1349 }
1350
1351 /* Inspect the apps cmd pipe */
1352 if (pollfd == apps_cmd_pipe[0]) {
1353 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1354 ERR("Apps command pipe error");
1355 goto error;
1356 } else if (revents & LPOLLIN) {
1357 int sock;
1358
1359 /* Empty pipe */
1360 size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
1361 if (size_ret < sizeof(sock)) {
1362 PERROR("read apps cmd pipe");
1363 goto error;
1364 }
1365
1366 health_code_update();
1367
1368 /*
1369 * We only monitor the error events of the socket. This
1370 * thread does not handle any incoming data from UST
1371 * (POLLIN).
1372 */
1373 ret = lttng_poll_add(&events, sock,
1374 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
1375 if (ret < 0) {
1376 goto error;
1377 }
1378
1379 DBG("Apps with sock %d added to poll set", sock);
1380 }
1381 } else {
1382 /*
1383 * At this point, we know that a registered application caused
1384 * the event at poll_wait.
1385 */
1386 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1387 /* Removing from the poll set */
1388 ret = lttng_poll_del(&events, pollfd);
1389 if (ret < 0) {
1390 goto error;
1391 }
1392
1393 /* Socket closed on remote end. */
1394 ust_app_unregister(pollfd);
1395 }
1396 }
1397
1398 health_code_update();
1399 }
1400 }
1401
1402 exit:
1403 error:
1404 lttng_poll_clean(&events);
1405 error_poll_create:
1406 error_testpoint:
1407 utils_close_pipe(apps_cmd_pipe);
1408 apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
1409
1410 /*
1411 * We don't clean the UST app hash table here since already registered
1412 * applications can still be controlled so let them be until the session
1413 * daemon dies or the applications stop.
1414 */
1415
1416 if (err) {
1417 health_error();
1418 ERR("Health error occurred in %s", __func__);
1419 }
1420 health_unregister(health_sessiond);
1421 DBG("Application communication apps thread cleanup complete");
1422 rcu_thread_offline();
1423 rcu_unregister_thread();
1424 return NULL;
1425 }
1426
1427 /*
1428 * Send a socket to a thread. This is called from the dispatch UST registration
1429 * thread once all sockets are set for the application.
1430 *
1431 * The sock value can be invalid; we don't really care, the thread will handle
1432 * it and do the necessary cleanup if so.
1433 *
1434 * On success return 0, else a negative value corresponding to the errno of the
1435 * write().
1436 */
1437 static int send_socket_to_thread(int fd, int sock)
1438 {
1439 ssize_t ret;
1440
1441 /*
1442 * It's possible that the FD was concurrently set to -1 (invalid) just
1443 * before this call, when the thread is in a shutdown state.
1444 */
1445 if (fd < 0) {
1446 ret = -EBADF;
1447 goto error;
1448 }
1449
1450 ret = lttng_write(fd, &sock, sizeof(sock));
1451 if (ret < sizeof(sock)) {
1452 PERROR("write apps pipe %d", fd);
1453 if (ret < 0) {
1454 ret = -errno;
1455 }
1456 goto error;
1457 }
1458
1459 /* All good. Don't send back the write positive ret value. */
1460 ret = 0;
1461 error:
1462 return (int) ret;
1463 }
1464
1465 /*
1466 * Sanitize the wait queue of the dispatch registration thread, i.e. remove
1467 * invalid nodes from it. This avoids memory leaks in case the UST
1468 * notify socket is never received.
1469 */
1470 static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
1471 {
1472 int ret, nb_fd = 0, i;
1473 unsigned int fd_added = 0;
1474 struct lttng_poll_event events;
1475 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1476
1477 assert(wait_queue);
1478
1479 lttng_poll_init(&events);
1480
1481 /* Just skip everything for an empty queue. */
1482 if (!wait_queue->count) {
1483 goto end;
1484 }
1485
1486 ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
1487 if (ret < 0) {
1488 goto error_create;
1489 }
1490
1491 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1492 &wait_queue->head, head) {
1493 assert(wait_node->app);
1494 ret = lttng_poll_add(&events, wait_node->app->sock,
1495 LPOLLHUP | LPOLLERR);
1496 if (ret < 0) {
1497 goto error;
1498 }
1499
1500 fd_added = 1;
1501 }
1502
1503 if (!fd_added) {
1504 goto end;
1505 }
1506
1507 /*
1508 * Poll but don't block so we can quickly identify the faulty events and
1509 * clean them afterwards from the wait queue.
1510 */
1511 ret = lttng_poll_wait(&events, 0);
1512 if (ret < 0) {
1513 goto error;
1514 }
1515 nb_fd = ret;
1516
1517 for (i = 0; i < nb_fd; i++) {
1518 /* Get faulty FD. */
1519 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
1520 int pollfd = LTTNG_POLL_GETFD(&events, i);
1521
1522 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1523 &wait_queue->head, head) {
1524 if (pollfd == wait_node->app->sock &&
1525 (revents & (LPOLLHUP | LPOLLERR))) {
1526 cds_list_del(&wait_node->head);
1527 wait_queue->count--;
1528 ust_app_destroy(wait_node->app);
1529 free(wait_node);
1530 break;
1531 }
1532 }
1533 }
1534
1535 if (nb_fd > 0) {
1536 DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
1537 }
1538
1539 end:
1540 lttng_poll_clean(&events);
1541 return;
1542
1543 error:
1544 lttng_poll_clean(&events);
1545 error_create:
1546 ERR("Unable to sanitize wait queue");
1547 return;
1548 }
1549
1550 /*
1551 * Dispatch request from the registration threads to the application
1552 * communication thread.
1553 */
1554 static void *thread_dispatch_ust_registration(void *data)
1555 {
1556 int ret, err = -1;
1557 struct cds_wfq_node *node;
1558 struct ust_command *ust_cmd = NULL;
1559 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1560 struct ust_reg_wait_queue wait_queue = {
1561 .count = 0,
1562 };
1563
1564 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
1565
1566 health_code_update();
1567
1568 CDS_INIT_LIST_HEAD(&wait_queue.head);
1569
1570 DBG("[thread] Dispatch UST command started");
1571
1572 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
1573 health_code_update();
1574
1575 /* Atomically prepare the queue futex */
1576 futex_nto1_prepare(&ust_cmd_queue.futex);
1577
1578 do {
1579 struct ust_app *app = NULL;
1580 ust_cmd = NULL;
1581
1582 /*
1583 * Make sure we don't have node(s) that have hung up before receiving
1584 * the notify socket. This cleans the list in order to avoid
1585 * memory leaks from notify sockets that are never received.
1586 */
1587 sanitize_wait_queue(&wait_queue);
1588
1589 health_code_update();
1590 /* Dequeue command for registration */
1591 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1592 if (node == NULL) {
1593 DBG("Woken up but nothing in the UST command queue");
1594 /* Continue thread execution */
1595 break;
1596 }
1597
1598 ust_cmd = caa_container_of(node, struct ust_command, node);
1599
1600 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1601 " gid:%d sock:%d name:%s (version %d.%d)",
1602 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1603 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1604 ust_cmd->sock, ust_cmd->reg_msg.name,
1605 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1606
1607 if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
1608 wait_node = zmalloc(sizeof(*wait_node));
1609 if (!wait_node) {
1610 PERROR("zmalloc wait_node dispatch");
1611 ret = close(ust_cmd->sock);
1612 if (ret < 0) {
1613 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1614 }
1615 lttng_fd_put(LTTNG_FD_APPS, 1);
1616 free(ust_cmd);
1617 goto error;
1618 }
1619 CDS_INIT_LIST_HEAD(&wait_node->head);
1620
1621 /* Create application object if socket is CMD. */
1622 wait_node->app = ust_app_create(&ust_cmd->reg_msg,
1623 ust_cmd->sock);
1624 if (!wait_node->app) {
1625 ret = close(ust_cmd->sock);
1626 if (ret < 0) {
1627 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1628 }
1629 lttng_fd_put(LTTNG_FD_APPS, 1);
1630 free(wait_node);
1631 free(ust_cmd);
1632 continue;
1633 }
1634 /*
1635 * Add application to the wait queue so we can set the notify
1636 * socket before putting this object in the global ht.
1637 */
1638 cds_list_add(&wait_node->head, &wait_queue.head);
1639 wait_queue.count++;
1640
1641 free(ust_cmd);
1642 /*
1643 * We have to continue here since we don't have the notify
1644 * socket and the application MUST be added to the hash table
1645 * only at that moment.
1646 */
1647 continue;
1648 } else {
1649 /*
1650 * Look for the application in the local wait queue and set the
1651 * notify socket if found.
1652 */
1653 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1654 &wait_queue.head, head) {
1655 health_code_update();
1656 if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
1657 wait_node->app->notify_sock = ust_cmd->sock;
1658 cds_list_del(&wait_node->head);
1659 wait_queue.count--;
1660 app = wait_node->app;
1661 free(wait_node);
1662 DBG3("UST app notify socket %d is set", ust_cmd->sock);
1663 break;
1664 }
1665 }
1666
1667 /*
1668 * With no application at this stage the received socket is
1669 * basically useless so close it before we free the cmd data
1670 * structure for good.
1671 */
1672 if (!app) {
1673 ret = close(ust_cmd->sock);
1674 if (ret < 0) {
1675 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1676 }
1677 lttng_fd_put(LTTNG_FD_APPS, 1);
1678 }
1679 free(ust_cmd);
1680 }
1681
1682 if (app) {
1683 /*
1684 * @session_lock_list
1685 *
1686 * Lock the global session list so from the register up to the
1687 * registration done message, no thread can see the application
1688 * and change its state.
1689 */
1690 session_lock_list();
1691 rcu_read_lock();
1692
1693 /*
1694 * Add application to the global hash table. This needs to be
1695 * done before the update to the UST registry can locate the
1696 * application.
1697 */
1698 ust_app_add(app);
1699
1700 /* Set app version. This call will print an error if needed. */
1701 (void) ust_app_version(app);
1702
1703 /* Send notify socket through the notify pipe. */
1704 ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
1705 app->notify_sock);
1706 if (ret < 0) {
1707 rcu_read_unlock();
1708 session_unlock_list();
1709 /*
1710 * No notify thread, stop the UST tracing. However, this is
1711 * not an internal error of this thread, thus we set
1712 * the health error code to a normal exit.
1713 */
1714 err = 0;
1715 goto error;
1716 }
1717
1718 /*
1719 * Update newly registered application with the tracing
1720 * registry info already enabled information.
1721 */
1722 update_ust_app(app->sock);
1723
1724 /*
1725 * Don't care about return value. Let the manage apps threads
1726 * handle app unregistration upon socket close.
1727 */
1728 (void) ust_app_register_done(app->sock);
1729
1730 /*
1731 * Even if the application socket has been closed, send the app
1732 * to the thread and unregistration will take place at that
1733 * place.
1734 */
1735 ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
1736 if (ret < 0) {
1737 rcu_read_unlock();
1738 session_unlock_list();
1739 /*
1740 * No apps thread, stop the UST tracing. However, this is
1741 * not an internal error of this thread, thus we set
1742 * the health error code to a normal exit.
1743 */
1744 err = 0;
1745 goto error;
1746 }
1747
1748 rcu_read_unlock();
1749 session_unlock_list();
1750 }
1751 } while (node != NULL);
1752
1753 health_poll_entry();
1754 /* Futex wait on queue. Blocking call on futex() */
1755 futex_nto1_wait(&ust_cmd_queue.futex);
1756 health_poll_exit();
1757 }
1758 /* Normal exit, no error */
1759 err = 0;
1760
1761 error:
1762 /* Clean up wait queue. */
1763 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1764 &wait_queue.head, head) {
1765 cds_list_del(&wait_node->head);
1766 wait_queue.count--;
1767 free(wait_node);
1768 }
1769
1770 DBG("Dispatch thread dying");
1771 if (err) {
1772 health_error();
1773 ERR("Health error occurred in %s", __func__);
1774 }
1775 health_unregister(health_sessiond);
1776 return NULL;
1777 }
1778
1779 /*
1780 * This thread manages application registration.
1781 */
1782 static void *thread_registration_apps(void *data)
1783 {
1784 int sock = -1, i, ret, pollfd, err = -1;
1785 uint32_t revents, nb_fd;
1786 struct lttng_poll_event events;
1787 /*
1788 * Allocated in this thread, enqueued to a global queue, dequeued and
1789 * freed in the manage apps thread.
1790 */
1791 struct ust_command *ust_cmd = NULL;
1792
1793 DBG("[thread] Manage application registration started");
1794
1795 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
1796
1797 if (testpoint(sessiond_thread_registration_apps)) {
1798 goto error_testpoint;
1799 }
1800
1801 ret = lttcomm_listen_unix_sock(apps_sock);
1802 if (ret < 0) {
1803 goto error_listen;
1804 }
1805
1806 /*
1807 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1808 * more will be added to this poll set.
1809 */
1810 ret = sessiond_set_thread_pollset(&events, 2);
1811 if (ret < 0) {
1812 goto error_create_poll;
1813 }
1814
1815 /* Add the application registration socket */
1816 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1817 if (ret < 0) {
1818 goto error_poll_add;
1819 }
1820
1821 /* Notify all applications to register */
1822 ret = notify_ust_apps(1);
1823 if (ret < 0) {
1824 ERR("Failed to notify applications or create the wait shared memory.\n"
1825 "Execution continues but there might be problem for already\n"
1826 "running applications that wishes to register.");
1827 }
1828
1829 while (1) {
1830 DBG("Accepting application registration");
1831
1832 /* Infinite blocking call, waiting for transmission */
1833 restart:
1834 health_poll_entry();
1835 ret = lttng_poll_wait(&events, -1);
1836 health_poll_exit();
1837 if (ret < 0) {
1838 /*
1839 * Restart interrupted system call.
1840 */
1841 if (errno == EINTR) {
1842 goto restart;
1843 }
1844 goto error;
1845 }
1846
1847 nb_fd = ret;
1848
1849 for (i = 0; i < nb_fd; i++) {
1850 health_code_update();
1851
1852 /* Fetch once the poll data */
1853 revents = LTTNG_POLL_GETEV(&events, i);
1854 pollfd = LTTNG_POLL_GETFD(&events, i);
1855
1856 /* Thread quit pipe has been triggered. Killing thread. */
1857 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1858 if (ret) {
1859 err = 0;
1860 goto exit;
1861 }
1862
1863 /* Event on the registration socket */
1864 if (pollfd == apps_sock) {
1865 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1866 ERR("Register apps socket poll error");
1867 goto error;
1868 } else if (revents & LPOLLIN) {
1869 sock = lttcomm_accept_unix_sock(apps_sock);
1870 if (sock < 0) {
1871 goto error;
1872 }
1873
1874 /*
1875 * Set socket timeout for both receiving and sending.
1876 * app_socket_timeout is in seconds, whereas
1877 * lttcomm_setsockopt_rcv_timeout and
1878 * lttcomm_setsockopt_snd_timeout expect msec as
1879 * parameter.
1880 */
1881 (void) lttcomm_setsockopt_rcv_timeout(sock,
1882 app_socket_timeout * 1000);
1883 (void) lttcomm_setsockopt_snd_timeout(sock,
1884 app_socket_timeout * 1000);
1885
1886 /*
1887 * Set the CLOEXEC flag. Return code is useless because
1888 * either way, the show must go on.
1889 */
1890 (void) utils_set_fd_cloexec(sock);
1891
1892 /* Create UST registration command for enqueuing */
1893 ust_cmd = zmalloc(sizeof(struct ust_command));
1894 if (ust_cmd == NULL) {
1895 PERROR("ust command zmalloc");
1896 goto error;
1897 }
1898
1899 /*
1900 * Using message-based transmissions to ensure we don't
1901 * have to deal with partially received messages.
1902 */
1903 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1904 if (ret < 0) {
1905 ERR("Exhausted file descriptors allowed for applications.");
1906 free(ust_cmd);
1907 ret = close(sock);
1908 if (ret) {
1909 PERROR("close");
1910 }
1911 sock = -1;
1912 continue;
1913 }
1914
1915 health_code_update();
1916 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
1917 if (ret < 0) {
1918 free(ust_cmd);
1919 /* Close socket of the application. */
1920 ret = close(sock);
1921 if (ret) {
1922 PERROR("close");
1923 }
1924 lttng_fd_put(LTTNG_FD_APPS, 1);
1925 sock = -1;
1926 continue;
1927 }
1928 health_code_update();
1929
1930 ust_cmd->sock = sock;
1931 sock = -1;
1932
1933 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1934 " gid:%d sock:%d name:%s (version %d.%d)",
1935 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1936 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1937 ust_cmd->sock, ust_cmd->reg_msg.name,
1938 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1939
1940 /*
1941 * Lock-free enqueue of the registration request. The red pill
1942 * has been taken! This app will be part of the *system*.
1943 */
1944 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1945
1946 /*
1947 * Wake the registration queue futex. Implicit memory
1948 * barrier with the exchange in cds_wfq_enqueue.
1949 */
1950 futex_nto1_wake(&ust_cmd_queue.futex);
1951 }
1952 }
1953 }
1954 }
1955
1956 exit:
1957 error:
1958 if (err) {
1959 health_error();
1960 ERR("Health error occurred in %s", __func__);
1961 }
1962
1963 /* Notify that the registration thread is gone */
1964 notify_ust_apps(0);
1965
1966 if (apps_sock >= 0) {
1967 ret = close(apps_sock);
1968 if (ret) {
1969 PERROR("close");
1970 }
1971 }
1972 if (sock >= 0) {
1973 ret = close(sock);
1974 if (ret) {
1975 PERROR("close");
1976 }
1977 lttng_fd_put(LTTNG_FD_APPS, 1);
1978 }
1979 unlink(apps_unix_sock_path);
1980
1981 error_poll_add:
1982 lttng_poll_clean(&events);
1983 error_listen:
1984 error_create_poll:
1985 error_testpoint:
1986 DBG("UST Registration thread cleanup complete");
1987 health_unregister(health_sessiond);
1988
1989 return NULL;
1990 }
1991
1992 /*
1993 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1994 * exec or it will fail.
1995 */
1996 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1997 {
1998 int ret, clock_ret;
1999 struct timespec timeout;
2000
2001 /* Make sure we set the readiness flag to 0 because we are NOT ready */
2002 consumer_data->consumer_thread_is_ready = 0;
2003
2004 /* Setup pthread condition */
2005 ret = pthread_condattr_init(&consumer_data->condattr);
2006 if (ret != 0) {
2007 errno = ret;
2008 PERROR("pthread_condattr_init consumer data");
2009 goto error;
2010 }
2011
2012 /*
2013 * Set the monotonic clock in order to make sure we DO NOT jump in time
2014 * between the clock_gettime() call and the timedwait call. See bug #324
2015 * for more details and how we noticed it.
2016 */
2017 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
2018 if (ret != 0) {
2019 errno = ret;
2020 PERROR("pthread_condattr_setclock consumer data");
2021 goto error;
2022 }
2023
2024 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
2025 if (ret != 0) {
2026 errno = ret;
2027 PERROR("pthread_cond_init consumer data");
2028 goto error;
2029 }
2030
2031 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
2032 consumer_data);
2033 if (ret != 0) {
2034 PERROR("pthread_create consumer");
2035 ret = -1;
2036 goto error;
2037 }
2038
2039 /* We are about to wait on a pthread condition */
2040 pthread_mutex_lock(&consumer_data->cond_mutex);
2041
2042 /* Get time for the pthread_cond_timedwait absolute timeout */
2043 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
2044 /*
2045 * Set the timeout for the condition timed wait even if the clock gettime
2046 * call fails since we might loop on that call and we want to avoid
2047 * incrementing the timeout too many times.
2048 */
2049 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
2050
2051 /*
2052 * The following loop COULD be skipped in some conditions, which is why we
2053 * set ret to 0 in order to make sure at least one round of the loop is
2054 * done.
2055 */
2056 ret = 0;
2057
2058 /*
2059 * Loop until the condition is signaled or a timeout is reached. Note
2060 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2061 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2062 * possible. This loop does not take any chances and works with both of
2063 * them.
2064 */
2065 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2066 if (clock_ret < 0) {
2067 PERROR("clock_gettime spawn consumer");
2068 /* Infinite wait for the consumerd thread to be ready */
2069 ret = pthread_cond_wait(&consumer_data->cond,
2070 &consumer_data->cond_mutex);
2071 } else {
2072 ret = pthread_cond_timedwait(&consumer_data->cond,
2073 &consumer_data->cond_mutex, &timeout);
2074 }
2075 }
2076
2077 /* Release the pthread condition */
2078 pthread_mutex_unlock(&consumer_data->cond_mutex);
2079
2080 if (ret != 0) {
2081 errno = ret;
2082 if (ret == ETIMEDOUT) {
2083 /*
2084 * Call has timed out so we kill the consumer management thread and
2085 * return an error.
2086 */
2087 ERR("Condition timed out. The consumer thread was never ready."
2088 " Killing it");
2089 ret = pthread_cancel(consumer_data->thread);
2090 if (ret != 0) {
2091 ERR("pthread_cancel consumer thread failed with ret %d", ret);
2092 }
2093 } else {
2094 PERROR("pthread_cond_wait failed consumer thread");
2095 }
2096 goto error;
2097 }
2098
2099 pthread_mutex_lock(&consumer_data->pid_mutex);
2100 if (consumer_data->pid == 0) {
2101 ERR("Consumerd did not start");
2102 pthread_mutex_unlock(&consumer_data->pid_mutex);
2103 goto error;
2104 }
2105 pthread_mutex_unlock(&consumer_data->pid_mutex);
2106
2107 return 0;
2108
2109 error:
2110 return ret;
2111 }
2112
2113 /*
2114 * Join consumer thread
2115 */
2116 static int join_consumer_thread(struct consumer_data *consumer_data)
2117 {
2118 void *status;
2119
2120 /* Consumer pid must be a real one. */
2121 if (consumer_data->pid > 0) {
2122 int ret;
2123 ret = kill(consumer_data->pid, SIGTERM);
2124 if (ret) {
2125 ERR("Error killing consumer daemon");
2126 return ret;
2127 }
2128 return pthread_join(consumer_data->thread, &status);
2129 } else {
2130 return 0;
2131 }
2132 }
2133
2134 /*
2135 * Fork and exec a consumer daemon (consumerd).
2136 *
2137 * Return the child pid on success, a negative value on error.
2138 */
2139 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2140 {
2141 int ret;
2142 pid_t pid;
2143 const char *consumer_to_use;
2144 const char *verbosity;
2145 struct stat st;
2146
2147 DBG("Spawning consumerd");
2148
2149 pid = fork();
2150 if (pid == 0) {
2151 /*
2152 * Exec consumerd.
2153 */
2154 if (opt_verbose_consumer) {
2155 verbosity = "--verbose";
2156 } else {
2157 verbosity = "--quiet";
2158 }
2159 switch (consumer_data->type) {
2160 case LTTNG_CONSUMER_KERNEL:
2161 /*
2162 * Find out which consumerd to execute. We will first try the
2163 * 64-bit path, then the sessiond's installation directory, and
2164 * finally fall back on the 32-bit one.
2165 */
2166 DBG3("Looking for a kernel consumer at these locations:");
2167 DBG3(" 1) %s", consumerd64_bin);
2168 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2169 DBG3(" 3) %s", consumerd32_bin);
2170 if (stat(consumerd64_bin, &st) == 0) {
2171 DBG3("Found location #1");
2172 consumer_to_use = consumerd64_bin;
2173 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2174 DBG3("Found location #2");
2175 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2176 } else if (stat(consumerd32_bin, &st) == 0) {
2177 DBG3("Found location #3");
2178 consumer_to_use = consumerd32_bin;
2179 } else {
2180 DBG("Could not find any valid consumerd executable");
2181 break;
2182 }
2183 DBG("Using kernel consumer at: %s", consumer_to_use);
2184 execl(consumer_to_use,
2185 "lttng-consumerd", verbosity, "-k",
2186 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2187 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2188 "--group", tracing_group_name,
2189 NULL);
2190 break;
2191 case LTTNG_CONSUMER64_UST:
2192 {
2193 char *tmpnew = NULL;
2194
2195 if (consumerd64_libdir[0] != '\0') {
2196 char *tmp;
2197 size_t tmplen;
2198
2199 tmp = getenv("LD_LIBRARY_PATH");
2200 if (!tmp) {
2201 tmp = "";
2202 }
2203 tmplen = strlen("LD_LIBRARY_PATH=")
2204 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2205 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2206 if (!tmpnew) {
2207 ret = -ENOMEM;
2208 goto error;
2209 }
2210 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2211 strcat(tmpnew, consumerd64_libdir);
2212 if (tmp[0] != '\0') {
2213 strcat(tmpnew, ":");
2214 strcat(tmpnew, tmp);
2215 }
2216 ret = putenv(tmpnew);
2217 if (ret) {
2218 ret = -errno;
2219 free(tmpnew);
2220 goto error;
2221 }
2222 }
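/*
 * Illustrative result: the 64-bit consumerd is exec'd below with
 * LD_LIBRARY_PATH=<consumerd64_libdir>:<previous value> (or just the
 * libdir when LD_LIBRARY_PATH was not set), so it resolves its matching
 * 64-bit libraries first.
 */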
2223 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2224 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2225 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2226 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2227 "--group", tracing_group_name,
2228 NULL);
2229 if (consumerd64_libdir[0] != '\0') {
2230 free(tmpnew);
2231 }
2232 if (ret) {
2233 goto error;
2234 }
2235 break;
2236 }
2237 case LTTNG_CONSUMER32_UST:
2238 {
2239 char *tmpnew = NULL;
2240
2241 if (consumerd32_libdir[0] != '\0') {
2242 char *tmp;
2243 size_t tmplen;
2244
2245 tmp = getenv("LD_LIBRARY_PATH");
2246 if (!tmp) {
2247 tmp = "";
2248 }
2249 tmplen = strlen("LD_LIBRARY_PATH=")
2250 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2251 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2252 if (!tmpnew) {
2253 ret = -ENOMEM;
2254 goto error;
2255 }
2256 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2257 strcat(tmpnew, consumerd32_libdir);
2258 if (tmp[0] != '\0') {
2259 strcat(tmpnew, ":");
2260 strcat(tmpnew, tmp);
2261 }
2262 ret = putenv(tmpnew);
2263 if (ret) {
2264 ret = -errno;
2265 free(tmpnew);
2266 goto error;
2267 }
2268 }
2269 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2270 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2271 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2272 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2273 "--group", tracing_group_name,
2274 NULL);
2275 if (consumerd32_libdir[0] != '\0') {
2276 free(tmpnew);
2277 }
2278 if (ret) {
2279 goto error;
2280 }
2281 break;
2282 }
2283 default:
2284 PERROR("unknown consumer type");
2285 exit(EXIT_FAILURE);
2286 }
2287 if (errno != 0) {
2288 PERROR("kernel start consumer exec");
2289 }
2290 exit(EXIT_FAILURE);
2291 } else if (pid > 0) {
2292 ret = pid;
2293 } else {
2294 PERROR("start consumer fork");
2295 ret = -errno;
2296 }
2297 error:
2298 return ret;
2299 }
2300
2301 /*
2302 * Spawn the consumerd daemon and the session daemon thread that manages it.
2303 */
2304 static int start_consumerd(struct consumer_data *consumer_data)
2305 {
2306 int ret;
2307
2308 /*
2309 * Set the listen() state on the socket since there is a possible race
2310 * between the exec() of the consumer daemon and this call if placed in the
2311 * consumer thread. See bug #366 for more details.
2312 */
2313 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2314 if (ret < 0) {
2315 goto error;
2316 }
2317
2318 pthread_mutex_lock(&consumer_data->pid_mutex);
2319 if (consumer_data->pid != 0) {
2320 pthread_mutex_unlock(&consumer_data->pid_mutex);
2321 goto end;
2322 }
2323
2324 ret = spawn_consumerd(consumer_data);
2325 if (ret < 0) {
2326 ERR("Spawning consumerd failed");
2327 pthread_mutex_unlock(&consumer_data->pid_mutex);
2328 goto error;
2329 }
2330
2331 /* Setting up the consumer_data pid */
2332 consumer_data->pid = ret;
2333 DBG2("Consumer pid %d", consumer_data->pid);
2334 pthread_mutex_unlock(&consumer_data->pid_mutex);
2335
2336 DBG2("Spawning consumer control thread");
2337 ret = spawn_consumer_thread(consumer_data);
2338 if (ret < 0) {
2339 ERR("Fatal error spawning consumer control thread");
2340 goto error;
2341 }
2342
2343 end:
2344 return 0;
2345
2346 error:
2347 /* Cleanup already created sockets on error. */
2348 if (consumer_data->err_sock >= 0) {
2349 int err;
2350
2351 err = close(consumer_data->err_sock);
2352 if (err < 0) {
2353 PERROR("close consumer data error socket");
2354 }
2355 }
2356 return ret;
2357 }
2358
2359 /*
2360 * Setup necessary data for kernel tracer action.
2361 */
2362 static int init_kernel_tracer(void)
2363 {
2364 int ret;
2365
2366 /* Modprobe lttng kernel modules */
2367 ret = modprobe_lttng_control();
2368 if (ret < 0) {
2369 goto error;
2370 }
2371
2372 /* Open debugfs lttng */
2373 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2374 if (kernel_tracer_fd < 0) {
2375 DBG("Failed to open %s", module_proc_lttng);
2376 ret = -1;
2377 goto error_open;
2378 }
2379
2380 /* Validate kernel version */
2381 ret = kernel_validate_version(kernel_tracer_fd);
2382 if (ret < 0) {
2383 goto error_version;
2384 }
2385
2386 ret = modprobe_lttng_data();
2387 if (ret < 0) {
2388 goto error_modules;
2389 }
2390
2391 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2392 return 0;
2393
2394 error_version:
2395 modprobe_remove_lttng_control();
2396 ret = close(kernel_tracer_fd);
2397 if (ret) {
2398 PERROR("close");
2399 }
2400 kernel_tracer_fd = -1;
2401 return LTTNG_ERR_KERN_VERSION;
2402
2403 error_modules:
2404 ret = close(kernel_tracer_fd);
2405 if (ret) {
2406 PERROR("close");
2407 }
2408
2409 error_open:
2410 modprobe_remove_lttng_control();
2411
2412 error:
2413 WARN("No kernel tracer available");
2414 kernel_tracer_fd = -1;
2415 if (!is_root) {
2416 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2417 } else {
2418 return LTTNG_ERR_KERN_NA;
2419 }
2420 }
2421
2422
2423 /*
2424 * Copy consumer output from the tracing session to the domain session. The
2425 * function also applies the right modification on a per domain basis for the
2426 * trace files destination directory.
2427 *
2428 * Should *NOT* be called with RCU read-side lock held.
2429 */
2430 static int copy_session_consumer(int domain, struct ltt_session *session)
2431 {
2432 int ret;
2433 const char *dir_name;
2434 struct consumer_output *consumer;
2435
2436 assert(session);
2437 assert(session->consumer);
2438
2439 switch (domain) {
2440 case LTTNG_DOMAIN_KERNEL:
2441 DBG3("Copying tracing session consumer output in kernel session");
2442 /*
2443 * XXX: We should audit the session creation and the "extra" work this
2444 * function does in order to avoid the destroy call, since this function
2445 * is only used in domain session creation (kernel and UST). Same for the
2446 * UST domain below.
2447 */
2448 if (session->kernel_session->consumer) {
2449 consumer_destroy_output(session->kernel_session->consumer);
2450 }
2451 session->kernel_session->consumer =
2452 consumer_copy_output(session->consumer);
2453 /* Ease our life a bit for the next part */
2454 consumer = session->kernel_session->consumer;
2455 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2456 break;
2457 case LTTNG_DOMAIN_JUL:
2458 case LTTNG_DOMAIN_UST:
2459 DBG3("Copying tracing session consumer output in UST session");
2460 if (session->ust_session->consumer) {
2461 consumer_destroy_output(session->ust_session->consumer);
2462 }
2463 session->ust_session->consumer =
2464 consumer_copy_output(session->consumer);
2465 /* Ease our life a bit for the next part */
2466 consumer = session->ust_session->consumer;
2467 dir_name = DEFAULT_UST_TRACE_DIR;
2468 break;
2469 default:
2470 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2471 goto error;
2472 }
2473
2474 /* Append correct directory to subdir */
2475 strncat(consumer->subdir, dir_name,
2476 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2477 DBG3("Copy session consumer subdir %s", consumer->subdir);
2478
2479 ret = LTTNG_OK;
2480
2481 error:
2482 return ret;
2483 }
2484
2485 /*
2486 * Create a UST session and add it to the session ust list.
2487 *
2488 * Should *NOT* be called with RCU read-side lock held.
2489 */
2490 static int create_ust_session(struct ltt_session *session,
2491 struct lttng_domain *domain)
2492 {
2493 int ret;
2494 struct ltt_ust_session *lus = NULL;
2495
2496 assert(session);
2497 assert(domain);
2498 assert(session->consumer);
2499
2500 switch (domain->type) {
2501 case LTTNG_DOMAIN_JUL:
2502 case LTTNG_DOMAIN_UST:
2503 break;
2504 default:
2505 ERR("Unknown UST domain on create session %d", domain->type);
2506 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2507 goto error;
2508 }
2509
2510 DBG("Creating UST session");
2511
2512 lus = trace_ust_create_session(session->id);
2513 if (lus == NULL) {
2514 ret = LTTNG_ERR_UST_SESS_FAIL;
2515 goto error;
2516 }
2517
2518 lus->uid = session->uid;
2519 lus->gid = session->gid;
2520 lus->output_traces = session->output_traces;
2521 lus->snapshot_mode = session->snapshot_mode;
2522 lus->live_timer_interval = session->live_timer;
2523 session->ust_session = lus;
2524
2525 /* Copy session output to the newly created UST session */
2526 ret = copy_session_consumer(domain->type, session);
2527 if (ret != LTTNG_OK) {
2528 goto error;
2529 }
2530
2531 return LTTNG_OK;
2532
2533 error:
2534 free(lus);
2535 session->ust_session = NULL;
2536 return ret;
2537 }
2538
2539 /*
2540 * Create a kernel tracer session then create the default channel.
2541 */
2542 static int create_kernel_session(struct ltt_session *session)
2543 {
2544 int ret;
2545
2546 DBG("Creating kernel session");
2547
2548 ret = kernel_create_session(session, kernel_tracer_fd);
2549 if (ret < 0) {
2550 ret = LTTNG_ERR_KERN_SESS_FAIL;
2551 goto error;
2552 }
2553
2554 /* Code flow safety */
2555 assert(session->kernel_session);
2556
2557 /* Copy session output to the newly created Kernel session */
2558 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2559 if (ret != LTTNG_OK) {
2560 goto error;
2561 }
2562
2563 /* Create directory(ies) on local filesystem. */
2564 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2565 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2566 ret = run_as_mkdir_recursive(
2567 session->kernel_session->consumer->dst.trace_path,
2568 S_IRWXU | S_IRWXG, session->uid, session->gid);
2569 if (ret < 0) {
2570 if (ret != -EEXIST) {
2571 ERR("Trace directory creation error");
2572 goto error;
2573 }
2574 }
2575 }
2576
2577 session->kernel_session->uid = session->uid;
2578 session->kernel_session->gid = session->gid;
2579 session->kernel_session->output_traces = session->output_traces;
2580 session->kernel_session->snapshot_mode = session->snapshot_mode;
2581
2582 return LTTNG_OK;
2583
2584 error:
2585 trace_kernel_destroy_session(session->kernel_session);
2586 session->kernel_session = NULL;
2587 return ret;
2588 }
2589
2590 /*
2591 * Count the number of sessions permitted for the given uid/gid.
2592 */
2593 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2594 {
2595 unsigned int i = 0;
2596 struct ltt_session *session;
2597
2598 DBG("Counting number of available session for UID %d GID %d",
2599 uid, gid);
2600 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2601 /*
2602 * Only list the sessions the user can control.
2603 */
2604 if (!session_access_ok(session, uid, gid)) {
2605 continue;
2606 }
2607 i++;
2608 }
2609 return i;
2610 }
2611
2612 /*
2613 * Process the command requested by the lttng client within the command
2614 * context structure. This function makes sure that the return structure (llm)
2615 * is set and ready for transmission before returning.
2616 *
2617 * Return any error encountered or 0 for success.
2618 *
2619 * "sock" is only used for special-case var. len data.
2620 *
2621 * Should *NOT* be called with RCU read-side lock held.
2622 */
2623 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2624 int *sock_error)
2625 {
2626 int ret = LTTNG_OK;
2627 int need_tracing_session = 1;
2628 int need_domain;
2629
2630 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2631
2632 *sock_error = 0;
2633
2634 switch (cmd_ctx->lsm->cmd_type) {
2635 case LTTNG_CREATE_SESSION:
2636 case LTTNG_CREATE_SESSION_SNAPSHOT:
2637 case LTTNG_CREATE_SESSION_LIVE:
2638 case LTTNG_DESTROY_SESSION:
2639 case LTTNG_LIST_SESSIONS:
2640 case LTTNG_LIST_DOMAINS:
2641 case LTTNG_START_TRACE:
2642 case LTTNG_STOP_TRACE:
2643 case LTTNG_DATA_PENDING:
2644 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2645 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2646 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2647 case LTTNG_SNAPSHOT_RECORD:
2648 need_domain = 0;
2649 break;
2650 default:
2651 need_domain = 1;
2652 }
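/*
 * The commands listed above operate on a whole session or on the session
 * list and carry no meaningful domain in the lsm, so the per-domain
 * pre-checks further down are skipped for them.
 */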
2653
2654 if (opt_no_kernel && need_domain
2655 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2656 if (!is_root) {
2657 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2658 } else {
2659 ret = LTTNG_ERR_KERN_NA;
2660 }
2661 goto error;
2662 }
2663
2664 /* Deny register consumer if we already have a spawned consumer. */
2665 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2666 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2667 if (kconsumer_data.pid > 0) {
2668 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2669 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2670 goto error;
2671 }
2672 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2673 }
2674
2675 /*
2676 * Check for commands that don't need to allocate a return payload. We do
2677 * this here so we don't have to make the no-payload call for each
2678 * command.
2679 */
2680 switch (cmd_ctx->lsm->cmd_type) {
2681 case LTTNG_LIST_SESSIONS:
2682 case LTTNG_LIST_TRACEPOINTS:
2683 case LTTNG_LIST_TRACEPOINT_FIELDS:
2684 case LTTNG_LIST_DOMAINS:
2685 case LTTNG_LIST_CHANNELS:
2686 case LTTNG_LIST_EVENTS:
2687 break;
2688 default:
2689 /* Setup lttng message with no payload */
2690 ret = setup_lttng_msg(cmd_ctx, 0);
2691 if (ret < 0) {
2692 /* This label does not try to unlock the session */
2693 goto init_setup_error;
2694 }
2695 }
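/*
 * The listing commands skipped above allocate their reply later, once the
 * payload size (number of sessions, events, channels, ...) is known;
 * every other command gets an empty reply buffer right away.
 */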
2696
2697 /* Commands that DO NOT need a session. */
2698 switch (cmd_ctx->lsm->cmd_type) {
2699 case LTTNG_CREATE_SESSION:
2700 case LTTNG_CREATE_SESSION_SNAPSHOT:
2701 case LTTNG_CREATE_SESSION_LIVE:
2702 case LTTNG_CALIBRATE:
2703 case LTTNG_LIST_SESSIONS:
2704 case LTTNG_LIST_TRACEPOINTS:
2705 case LTTNG_LIST_TRACEPOINT_FIELDS:
2706 need_tracing_session = 0;
2707 break;
2708 default:
2709 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2710 /*
2711 * We keep the session list lock across _all_ commands
2712 * for now, because the per-session lock does not
2713 * handle teardown properly.
2714 */
2715 session_lock_list();
2716 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2717 if (cmd_ctx->session == NULL) {
2718 ret = LTTNG_ERR_SESS_NOT_FOUND;
2719 goto error;
2720 } else {
2721 /* Acquire lock for the session */
2722 session_lock(cmd_ctx->session);
2723 }
2724 break;
2725 }
2726
2727 if (!need_domain) {
2728 goto skip_domain;
2729 }
2730
2731 /*
2732 * Check domain type for specific "pre-action".
2733 */
2734 switch (cmd_ctx->lsm->domain.type) {
2735 case LTTNG_DOMAIN_KERNEL:
2736 if (!is_root) {
2737 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2738 goto error;
2739 }
2740
2741 /* Kernel tracer check */
2742 if (kernel_tracer_fd == -1) {
2743 /* Basically, load kernel tracer modules */
2744 ret = init_kernel_tracer();
2745 if (ret != 0) {
2746 goto error;
2747 }
2748 }
2749
2750 /* Consumer is in an ERROR state. Report back to client */
2751 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2752 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2753 goto error;
2754 }
2755
2756 /* Need a session for kernel command */
2757 if (need_tracing_session) {
2758 if (cmd_ctx->session->kernel_session == NULL) {
2759 ret = create_kernel_session(cmd_ctx->session);
2760 if (ret < 0) {
2761 ret = LTTNG_ERR_KERN_SESS_FAIL;
2762 goto error;
2763 }
2764 }
2765
2766 /* Start the kernel consumer daemon */
2767 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2768 if (kconsumer_data.pid == 0 &&
2769 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2770 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2771 ret = start_consumerd(&kconsumer_data);
2772 if (ret < 0) {
2773 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2774 goto error;
2775 }
2776 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2777 } else {
2778 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2779 }
2780
2781 /*
2782 * The consumer was just spawned so we need to add the socket to
2783 * the consumer output of the session, if it exists.
2784 */
2785 ret = consumer_create_socket(&kconsumer_data,
2786 cmd_ctx->session->kernel_session->consumer);
2787 if (ret < 0) {
2788 goto error;
2789 }
2790 }
2791
2792 break;
2793 case LTTNG_DOMAIN_JUL:
2794 case LTTNG_DOMAIN_UST:
2795 {
2796 if (!ust_app_supported()) {
2797 ret = LTTNG_ERR_NO_UST;
2798 goto error;
2799 }
2800 /* Consumer is in an ERROR state. Report back to client */
2801 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2802 ret = LTTNG_ERR_NO_USTCONSUMERD;
2803 goto error;
2804 }
2805
2806 if (need_tracing_session) {
2807 /* Create UST session if none exist. */
2808 if (cmd_ctx->session->ust_session == NULL) {
2809 ret = create_ust_session(cmd_ctx->session,
2810 &cmd_ctx->lsm->domain);
2811 if (ret != LTTNG_OK) {
2812 goto error;
2813 }
2814 }
2815
2816 /* Start the UST consumer daemons */
2817 /* 64-bit */
2818 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2819 if (consumerd64_bin[0] != '\0' &&
2820 ustconsumer64_data.pid == 0 &&
2821 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2822 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2823 ret = start_consumerd(&ustconsumer64_data);
2824 if (ret < 0) {
2825 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2826 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2827 goto error;
2828 }
2829
2830 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2831 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2832 } else {
2833 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2834 }
2835
2836 /*
2837 * Setup socket for consumer 64 bit. No need for atomic access
2838 * since it was set above and can ONLY be set in this thread.
2839 */
2840 ret = consumer_create_socket(&ustconsumer64_data,
2841 cmd_ctx->session->ust_session->consumer);
2842 if (ret < 0) {
2843 goto error;
2844 }
2845
2846 /* 32-bit */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
2847 if (consumerd32_bin[0] != '\0' &&
2848 ustconsumer32_data.pid == 0 &&
2849 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2850 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2851 ret = start_consumerd(&ustconsumer32_data);
2852 if (ret < 0) {
2853 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2854 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2855 goto error;
2856 }
2857
2858 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2859 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2860 } else {
2861 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2862 }
2863
2864 /*
2865 * Setup socket for consumer 32 bit. No need for atomic access
2866 * since it was set above and can ONLY be set in this thread.
2867 */
2868 ret = consumer_create_socket(&ustconsumer32_data,
2869 cmd_ctx->session->ust_session->consumer);
2870 if (ret < 0) {
2871 goto error;
2872 }
2873 }
2874 break;
2875 }
2876 default:
2877 break;
2878 }
2879 skip_domain:
2880
2881 /* Validate consumer daemon state when start/stop trace command */
2882 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2883 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2884 switch (cmd_ctx->lsm->domain.type) {
2885 case LTTNG_DOMAIN_JUL:
2886 case LTTNG_DOMAIN_UST:
2887 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2888 ret = LTTNG_ERR_NO_USTCONSUMERD;
2889 goto error;
2890 }
2891 break;
2892 case LTTNG_DOMAIN_KERNEL:
2893 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2894 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2895 goto error;
2896 }
2897 break;
2898 }
2899 }
2900
2901 /*
2902 * Check that the UID or GID match that of the tracing session.
2903 * The root user can interact with all sessions.
2904 */
2905 if (need_tracing_session) {
2906 if (!session_access_ok(cmd_ctx->session,
2907 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2908 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2909 ret = LTTNG_ERR_EPERM;
2910 goto error;
2911 }
2912 }
2913
2914 /*
2915 * Send relayd information to consumer as soon as we have a domain and a
2916 * session defined.
2917 */
2918 if (cmd_ctx->session && need_domain) {
2919 /*
2920 * Setup relayd if not done yet. If the relayd information was already
2921 * sent to the consumer, this call will gracefully return.
2922 */
2923 ret = cmd_setup_relayd(cmd_ctx->session);
2924 if (ret != LTTNG_OK) {
2925 goto error;
2926 }
2927 }
2928
2929 /* Process by command type */
2930 switch (cmd_ctx->lsm->cmd_type) {
2931 case LTTNG_ADD_CONTEXT:
2932 {
2933 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2934 cmd_ctx->lsm->u.context.channel_name,
2935 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
2936 break;
2937 }
2938 case LTTNG_DISABLE_CHANNEL:
2939 {
2940 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2941 cmd_ctx->lsm->u.disable.channel_name);
2942 break;
2943 }
2944 case LTTNG_DISABLE_EVENT:
2945 {
2946 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2947 cmd_ctx->lsm->u.disable.channel_name,
2948 cmd_ctx->lsm->u.disable.name);
2949 break;
2950 }
2951 case LTTNG_DISABLE_ALL_EVENT:
2952 {
2953 DBG("Disabling all events");
2954
2955 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2956 cmd_ctx->lsm->u.disable.channel_name);
2957 break;
2958 }
2959 case LTTNG_ENABLE_CHANNEL:
2960 {
2961 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
2962 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
2963 break;
2964 }
2965 case LTTNG_ENABLE_EVENT:
2966 {
2967 struct lttng_event_exclusion *exclusion = NULL;
2968 struct lttng_filter_bytecode *bytecode = NULL;
2969
2970 /* Handle exclusion events and receive it from the client. */
2971 if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
2972 size_t count = cmd_ctx->lsm->u.enable.exclusion_count;
2973
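/*
 * The exclusion structure ends with a flexible array of names, hence the
 * allocation of the fixed header plus count * LTTNG_SYMBOL_NAME_LEN
 * bytes below.
 */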
2974 exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
2975 (count * LTTNG_SYMBOL_NAME_LEN));
2976 if (!exclusion) {
2977 ret = LTTNG_ERR_EXCLUSION_NOMEM;
2978 goto error;
2979 }
2980
2981 DBG("Receiving var len exclusion event list from client ...");
2982 exclusion->count = count;
2983 ret = lttcomm_recv_unix_sock(sock, exclusion->names,
2984 count * LTTNG_SYMBOL_NAME_LEN);
2985 if (ret <= 0) {
2986 DBG("Nothing recv() from client var len data... continuing");
2987 *sock_error = 1;
2988 free(exclusion);
2989 ret = LTTNG_ERR_EXCLUSION_INVAL;
2990 goto error;
2991 }
2992 }
2993
2994 /* Handle filter and get bytecode from client. */
2995 if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
2996 size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
2997
2998 if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
2999 ret = LTTNG_ERR_FILTER_INVAL;
3000 free(exclusion);
3001 goto error;
3002 }
3003
3004 bytecode = zmalloc(bytecode_len);
3005 if (!bytecode) {
3006 free(exclusion);
3007 ret = LTTNG_ERR_FILTER_NOMEM;
3008 goto error;
3009 }
3010
3011 /* Receive var. len. data */
3012 DBG("Receiving var len filter's bytecode from client ...");
3013 ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
3014 if (ret <= 0) {
3015 DBG("Nothing recv() from client car len data... continuing");
3016 *sock_error = 1;
3017 free(bytecode);
3018 free(exclusion);
3019 ret = LTTNG_ERR_FILTER_INVAL;
3020 goto error;
3021 }
3022
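/*
 * Sanity check: the length announced in the bytecode header plus the
 * header size itself must match the total length received from the
 * client, otherwise the filter is rejected as invalid.
 */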
3023 if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
3024 free(bytecode);
3025 free(exclusion);
3026 ret = LTTNG_ERR_FILTER_INVAL;
3027 goto error;
3028 }
3029 }
3030
3031 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3032 cmd_ctx->lsm->u.enable.channel_name,
3033 &cmd_ctx->lsm->u.enable.event, bytecode, exclusion,
3034 kernel_poll_pipe[1]);
3035 break;
3036 }
3037 case LTTNG_ENABLE_ALL_EVENT:
3038 {
3039 DBG("Enabling all events");
3040
3041 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
3042 cmd_ctx->lsm->u.enable.channel_name,
3043 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
3044 break;
3045 }
3046 case LTTNG_LIST_TRACEPOINTS:
3047 {
3048 struct lttng_event *events;
3049 ssize_t nb_events;
3050
3051 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3052 if (nb_events < 0) {
3053 /* Return value is a negative lttng_error_code. */
3054 ret = -nb_events;
3055 goto error;
3056 }
3057
3058 /*
3059 * Setup lttng message with payload size set to the event list size in
3060 * bytes and then copy list into the llm payload.
3061 */
3062 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3063 if (ret < 0) {
3064 free(events);
3065 goto setup_error;
3066 }
3067
3068 /* Copy event list into message payload */
3069 memcpy(cmd_ctx->llm->payload, events,
3070 sizeof(struct lttng_event) * nb_events);
3071
3072 free(events);
3073
3074 ret = LTTNG_OK;
3075 break;
3076 }
3077 case LTTNG_LIST_TRACEPOINT_FIELDS:
3078 {
3079 struct lttng_event_field *fields;
3080 ssize_t nb_fields;
3081
3082 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
3083 &fields);
3084 if (nb_fields < 0) {
3085 /* Return value is a negative lttng_error_code. */
3086 ret = -nb_fields;
3087 goto error;
3088 }
3089
3090 /*
3091 * Setup lttng message with payload size set to the field list size in
3092 * bytes and then copy list into the llm payload.
3093 */
3094 ret = setup_lttng_msg(cmd_ctx,
3095 sizeof(struct lttng_event_field) * nb_fields);
3096 if (ret < 0) {
3097 free(fields);
3098 goto setup_error;
3099 }
3100
3101 /* Copy field list into message payload */
3102 memcpy(cmd_ctx->llm->payload, fields,
3103 sizeof(struct lttng_event_field) * nb_fields);
3104
3105 free(fields);
3106
3107 ret = LTTNG_OK;
3108 break;
3109 }
3110 case LTTNG_SET_CONSUMER_URI:
3111 {
3112 size_t nb_uri, len;
3113 struct lttng_uri *uris;
3114
3115 nb_uri = cmd_ctx->lsm->u.uri.size;
3116 len = nb_uri * sizeof(struct lttng_uri);
3117
3118 if (nb_uri == 0) {
3119 ret = LTTNG_ERR_INVALID;
3120 goto error;
3121 }
3122
3123 uris = zmalloc(len);
3124 if (uris == NULL) {
3125 ret = LTTNG_ERR_FATAL;
3126 goto error;
3127 }
3128
3129 /* Receive variable len data */
3130 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3131 ret = lttcomm_recv_unix_sock(sock, uris, len);
3132 if (ret <= 0) {
3133 DBG("No URIs received from client... continuing");
3134 *sock_error = 1;
3135 ret = LTTNG_ERR_SESSION_FAIL;
3136 free(uris);
3137 goto error;
3138 }
3139
3140 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3141 nb_uri, uris);
3142 if (ret != LTTNG_OK) {
3143 free(uris);
3144 goto error;
3145 }
3146
3147 /*
3148 * XXX: 0 means that this URI should be applied on the session. Should
3149 * be a DOMAIN enum.
3150 */
3151 if (cmd_ctx->lsm->domain.type == 0) {
3152 /* Add the URI for the UST session if a consumer is present. */
3153 if (cmd_ctx->session->ust_session &&
3154 cmd_ctx->session->ust_session->consumer) {
3155 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3156 nb_uri, uris);
3157 } else if (cmd_ctx->session->kernel_session &&
3158 cmd_ctx->session->kernel_session->consumer) {
3159 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3160 cmd_ctx->session, nb_uri, uris);
3161 }
3162 }
3163
3164 free(uris);
3165
3166 break;
3167 }
3168 case LTTNG_START_TRACE:
3169 {
3170 ret = cmd_start_trace(cmd_ctx->session);
3171 break;
3172 }
3173 case LTTNG_STOP_TRACE:
3174 {
3175 ret = cmd_stop_trace(cmd_ctx->session);
3176 break;
3177 }
3178 case LTTNG_CREATE_SESSION:
3179 {
3180 size_t nb_uri, len;
3181 struct lttng_uri *uris = NULL;
3182
3183 nb_uri = cmd_ctx->lsm->u.uri.size;
3184 len = nb_uri * sizeof(struct lttng_uri);
3185
3186 if (nb_uri > 0) {
3187 uris = zmalloc(len);
3188 if (uris == NULL) {
3189 ret = LTTNG_ERR_FATAL;
3190 goto error;
3191 }
3192
3193 /* Receive variable len data */
3194 DBG("Waiting for %zu URIs from client ...", nb_uri);
3195 ret = lttcomm_recv_unix_sock(sock, uris, len);
3196 if (ret <= 0) {
3197 DBG("No URIs received from client... continuing");
3198 *sock_error = 1;
3199 ret = LTTNG_ERR_SESSION_FAIL;
3200 free(uris);
3201 goto error;
3202 }
3203
3204 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3205 DBG("Creating session with ONE network URI is a bad call");
3206 ret = LTTNG_ERR_SESSION_FAIL;
3207 free(uris);
3208 goto error;
3209 }
3210 }
3211
3212 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3213 &cmd_ctx->creds, 0);
3214
3215 free(uris);
3216
3217 break;
3218 }
3219 case LTTNG_DESTROY_SESSION:
3220 {
3221 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3222
3223 /* Set session to NULL so we do not unlock it after free. */
3224 cmd_ctx->session = NULL;
3225 break;
3226 }
3227 case LTTNG_LIST_DOMAINS:
3228 {
3229 ssize_t nb_dom;
3230 struct lttng_domain *domains;
3231
3232 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3233 if (nb_dom < 0) {
3234 /* Return value is a negative lttng_error_code. */
3235 ret = -nb_dom;
3236 goto error;
3237 }
3238
3239 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3240 if (ret < 0) {
3241 free(domains);
3242 goto setup_error;
3243 }
3244
3245 /* Copy domain list into message payload */
3246 memcpy(cmd_ctx->llm->payload, domains,
3247 nb_dom * sizeof(struct lttng_domain));
3248
3249 free(domains);
3250
3251 ret = LTTNG_OK;
3252 break;
3253 }
3254 case LTTNG_LIST_CHANNELS:
3255 {
3256 int nb_chan;
3257 struct lttng_channel *channels;
3258
3259 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3260 cmd_ctx->session, &channels);
3261 if (nb_chan < 0) {
3262 /* Return value is a negative lttng_error_code. */
3263 ret = -nb_chan;
3264 goto error;
3265 }
3266
3267 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3268 if (ret < 0) {
3269 free(channels);
3270 goto setup_error;
3271 }
3272
3273 /* Copy channel list into message payload */
3274 memcpy(cmd_ctx->llm->payload, channels,
3275 nb_chan * sizeof(struct lttng_channel));
3276
3277 free(channels);
3278
3279 ret = LTTNG_OK;
3280 break;
3281 }
3282 case LTTNG_LIST_EVENTS:
3283 {
3284 ssize_t nb_event;
3285 struct lttng_event *events = NULL;
3286
3287 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3288 cmd_ctx->lsm->u.list.channel_name, &events);
3289 if (nb_event < 0) {
3290 /* Return value is a negative lttng_error_code. */
3291 ret = -nb_event;
3292 goto error;
3293 }
3294
3295 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3296 if (ret < 0) {
3297 free(events);
3298 goto setup_error;
3299 }
3300
3301 /* Copy event list into message payload */
3302 memcpy(cmd_ctx->llm->payload, events,
3303 nb_event * sizeof(struct lttng_event));
3304
3305 free(events);
3306
3307 ret = LTTNG_OK;
3308 break;
3309 }
3310 case LTTNG_LIST_SESSIONS:
3311 {
3312 unsigned int nr_sessions;
3313
3314 session_lock_list();
3315 nr_sessions = lttng_sessions_count(
3316 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3317 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3318
3319 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3320 if (ret < 0) {
3321 session_unlock_list();
3322 goto setup_error;
3323 }
3324
3325 /* Fill the session array */
3326 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3327 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3328 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3329
3330 session_unlock_list();
3331
3332 ret = LTTNG_OK;
3333 break;
3334 }
3335 case LTTNG_CALIBRATE:
3336 {
3337 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3338 &cmd_ctx->lsm->u.calibrate);
3339 break;
3340 }
3341 case LTTNG_REGISTER_CONSUMER:
3342 {
3343 struct consumer_data *cdata;
3344
3345 switch (cmd_ctx->lsm->domain.type) {
3346 case LTTNG_DOMAIN_KERNEL:
3347 cdata = &kconsumer_data;
3348 break;
3349 default:
3350 ret = LTTNG_ERR_UND;
3351 goto error;
3352 }
3353
3354 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3355 cmd_ctx->lsm->u.reg.path, cdata);
3356 break;
3357 }
3358 case LTTNG_DATA_PENDING:
3359 {
3360 ret = cmd_data_pending(cmd_ctx->session);
3361 break;
3362 }
3363 case LTTNG_SNAPSHOT_ADD_OUTPUT:
3364 {
3365 struct lttcomm_lttng_output_id reply;
3366
3367 ret = cmd_snapshot_add_output(cmd_ctx->session,
3368 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
3369 if (ret != LTTNG_OK) {
3370 goto error;
3371 }
3372
3373 ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
3374 if (ret < 0) {
3375 goto setup_error;
3376 }
3377
3378 /* Copy the new output id reply into message payload */
3379 memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
3380 ret = LTTNG_OK;
3381 break;
3382 }
3383 case LTTNG_SNAPSHOT_DEL_OUTPUT:
3384 {
3385 ret = cmd_snapshot_del_output(cmd_ctx->session,
3386 &cmd_ctx->lsm->u.snapshot_output.output);
3387 break;
3388 }
3389 case LTTNG_SNAPSHOT_LIST_OUTPUT:
3390 {
3391 ssize_t nb_output;
3392 struct lttng_snapshot_output *outputs = NULL;
3393
3394 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
3395 if (nb_output < 0) {
3396 ret = -nb_output;
3397 goto error;
3398 }
3399
3400 ret = setup_lttng_msg(cmd_ctx,
3401 nb_output * sizeof(struct lttng_snapshot_output));
3402 if (ret < 0) {
3403 free(outputs);
3404 goto setup_error;
3405 }
3406
3407 if (outputs) {
3408 /* Copy output list into message payload */
3409 memcpy(cmd_ctx->llm->payload, outputs,
3410 nb_output * sizeof(struct lttng_snapshot_output));
3411 free(outputs);
3412 }
3413
3414 ret = LTTNG_OK;
3415 break;
3416 }
3417 case LTTNG_SNAPSHOT_RECORD:
3418 {
3419 ret = cmd_snapshot_record(cmd_ctx->session,
3420 &cmd_ctx->lsm->u.snapshot_record.output,
3421 cmd_ctx->lsm->u.snapshot_record.wait);
3422 break;
3423 }
3424 case LTTNG_CREATE_SESSION_SNAPSHOT:
3425 {
3426 size_t nb_uri, len;
3427 struct lttng_uri *uris = NULL;
3428
3429 nb_uri = cmd_ctx->lsm->u.uri.size;
3430 len = nb_uri * sizeof(struct lttng_uri);
3431
3432 if (nb_uri > 0) {
3433 uris = zmalloc(len);
3434 if (uris == NULL) {
3435 ret = LTTNG_ERR_FATAL;
3436 goto error;
3437 }
3438
3439 /* Receive variable len data */
3440 DBG("Waiting for %zu URIs from client ...", nb_uri);
3441 ret = lttcomm_recv_unix_sock(sock, uris, len);
3442 if (ret <= 0) {
3443 DBG("No URIs received from client... continuing");
3444 *sock_error = 1;
3445 ret = LTTNG_ERR_SESSION_FAIL;
3446 free(uris);
3447 goto error;
3448 }
3449
3450 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3451 DBG("Creating session with ONE network URI is a bad call");
3452 ret = LTTNG_ERR_SESSION_FAIL;
3453 free(uris);
3454 goto error;
3455 }
3456 }
3457
3458 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
3459 nb_uri, &cmd_ctx->creds);
3460 free(uris);
3461 break;
3462 }
3463 case LTTNG_CREATE_SESSION_LIVE:
3464 {
3465 size_t nb_uri, len;
3466 struct lttng_uri *uris = NULL;
3467
3468 nb_uri = cmd_ctx->lsm->u.uri.size;
3469 len = nb_uri * sizeof(struct lttng_uri);
3470
3471 if (nb_uri > 0) {
3472 uris = zmalloc(len);
3473 if (uris == NULL) {
3474 ret = LTTNG_ERR_FATAL;
3475 goto error;
3476 }
3477
3478 /* Receive variable len data */
3479 DBG("Waiting for %zu URIs from client ...", nb_uri);
3480 ret = lttcomm_recv_unix_sock(sock, uris, len);
3481 if (ret <= 0) {
3482 DBG("No URIs received from client... continuing");
3483 *sock_error = 1;
3484 ret = LTTNG_ERR_SESSION_FAIL;
3485 free(uris);
3486 goto error;
3487 }
3488
3489 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3490 DBG("Creating session with ONE network URI is a bad call");
3491 ret = LTTNG_ERR_SESSION_FAIL;
3492 free(uris);
3493 goto error;
3494 }
3495 }
3496
3497 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
3498 nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
3499 free(uris);
3500 break;
3501 }
3502 default:
3503 ret = LTTNG_ERR_UND;
3504 break;
3505 }
3506
3507 error:
3508 if (cmd_ctx->llm == NULL) {
3509 DBG("Missing llm structure. Allocating one.");
3510 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3511 goto setup_error;
3512 }
3513 }
3514 /* Set return code */
3515 cmd_ctx->llm->ret_code = ret;
3516 setup_error:
3517 if (cmd_ctx->session) {
3518 session_unlock(cmd_ctx->session);
3519 }
3520 if (need_tracing_session) {
3521 session_unlock_list();
3522 }
3523 init_setup_error:
3524 return ret;
3525 }
3526
3527 /*
3528 * Thread managing health check socket.
3529 */
3530 static void *thread_manage_health(void *data)
3531 {
3532 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3533 uint32_t revents, nb_fd;
3534 struct lttng_poll_event events;
3535 struct health_comm_msg msg;
3536 struct health_comm_reply reply;
3537
3538 DBG("[thread] Manage health check started");
3539
3540 rcu_register_thread();
3541
3542 /* We might hit an error path before this is created. */
3543 lttng_poll_init(&events);
3544
3545 /* Create unix socket */
3546 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3547 if (sock < 0) {
3548 ERR("Unable to create health check Unix socket");
3549 ret = -1;
3550 goto error;
3551 }
3552
3553 if (is_root) {
3554 /* lttng health client socket path permissions */
3555 ret = chown(health_unix_sock_path, 0,
3556 utils_get_group_id(tracing_group_name));
3557 if (ret < 0) {
3558 ERR("Unable to set group on %s", health_unix_sock_path);
3559 PERROR("chown");
3560 ret = -1;
3561 goto error;
3562 }
3563
3564 ret = chmod(health_unix_sock_path,
3565 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3566 if (ret < 0) {
3567 ERR("Unable to set permissions on %s", health_unix_sock_path);
3568 PERROR("chmod");
3569 ret = -1;
3570 goto error;
3571 }
3572 }
3573
3574 /*
3575 * Set the CLOEXEC flag. Return code is useless because either way, the
3576 * show must go on.
3577 */
3578 (void) utils_set_fd_cloexec(sock);
3579
3580 ret = lttcomm_listen_unix_sock(sock);
3581 if (ret < 0) {
3582 goto error;
3583 }
3584
3585 /*
3586 * Pass 2 as size here for the thread quit pipe and health socket. Nothing
3587 * more will be added to this poll set.
3588 */
3589 ret = sessiond_set_thread_pollset(&events, 2);
3590 if (ret < 0) {
3591 goto error;
3592 }
3593
3594 /* Add the health check socket */
3595 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3596 if (ret < 0) {
3597 goto error;
3598 }
3599
3600 lttng_sessiond_notify_ready();
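/*
 * Notify readiness only now that the health socket is listening and part
 * of the poll set; whatever waits on this notification (presumably the
 * daemonize/--sig-parent startup path) can then safely query the health
 * check socket.
 */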
3601
3602 while (1) {
3603 DBG("Health check ready");
3604
3605 /* Infinite blocking call, waiting for transmission */
3606 restart:
3607 ret = lttng_poll_wait(&events, -1);
3608 if (ret < 0) {
3609 /*
3610 * Restart interrupted system call.
3611 */
3612 if (errno == EINTR) {
3613 goto restart;
3614 }
3615 goto error;
3616 }
3617
3618 nb_fd = ret;
3619
3620 for (i = 0; i < nb_fd; i++) {
3621 /* Fetch once the poll data */
3622 revents = LTTNG_POLL_GETEV(&events, i);
3623 pollfd = LTTNG_POLL_GETFD(&events, i);
3624
3625 /* Thread quit pipe has been closed. Killing thread. */
3626 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3627 if (ret) {
3628 err = 0;
3629 goto exit;
3630 }
3631
3632 /* Event on the health check socket */
3633 if (pollfd == sock) {
3634 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3635 ERR("Health socket poll error");
3636 goto error;
3637 }
3638 }
3639 }
3640
3641 new_sock = lttcomm_accept_unix_sock(sock);
3642 if (new_sock < 0) {
3643 goto error;
3644 }
3645
3646 /*
3647 * Set the CLOEXEC flag. Return code is useless because either way, the
3648 * show must go on.
3649 */
3650 (void) utils_set_fd_cloexec(new_sock);
3651
3652 DBG("Receiving data from client for health...");
3653 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3654 if (ret <= 0) {
3655 DBG("Nothing recv() from client... continuing");
3656 ret = close(new_sock);
3657 if (ret) {
3658 PERROR("close");
3659 }
3660 new_sock = -1;
3661 continue;
3662 }
3663
3664 rcu_thread_online();
3665
3666 reply.ret_code = 0;
3667 for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
3668 /*
3669 * health_check_state returns 0 if health is
3670 * bad.
3671 */
3672 if (!health_check_state(health_sessiond, i)) {
3673 reply.ret_code |= 1ULL << i;
3674 }
3675 }
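/*
 * reply.ret_code is a bitmask: bit i is set when the thread registered
 * under health type i failed its check, so a value of 0 means every
 * monitored thread reported healthy.
 */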
3676
3677 DBG2("Health check return value %" PRIx64, reply.ret_code);
3678
3679 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3680 if (ret < 0) {
3681 ERR("Failed to send health data back to client");
3682 }
3683
3684 /* End of transmission */
3685 ret = close(new_sock);
3686 if (ret) {
3687 PERROR("close");
3688 }
3689 new_sock = -1;
3690 }
3691
3692 exit:
3693 error:
3694 if (err) {
3695 ERR("Health error occurred in %s", __func__);
3696 }
3697 DBG("Health check thread dying");
3698 unlink(health_unix_sock_path);
3699 if (sock >= 0) {
3700 ret = close(sock);
3701 if (ret) {
3702 PERROR("close");
3703 }
3704 }
3705
3706 lttng_poll_clean(&events);
3707
3708 rcu_unregister_thread();
3709 return NULL;
3710 }
3711
3712 /*
3713 * This thread manages all client requests using the unix client socket for
3714 * communication.
3715 */
3716 static void *thread_manage_clients(void *data)
3717 {
3718 int sock = -1, ret, i, pollfd, err = -1;
3719 int sock_error;
3720 uint32_t revents, nb_fd;
3721 struct command_ctx *cmd_ctx = NULL;
3722 struct lttng_poll_event events;
3723
3724 DBG("[thread] Manage client started");
3725
3726 rcu_register_thread();
3727
3728 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
3729
3730 health_code_update();
3731
3732 ret = lttcomm_listen_unix_sock(client_sock);
3733 if (ret < 0) {
3734 goto error_listen;
3735 }
3736
3737 /*
3738 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3739 * more will be added to this poll set.
3740 */
3741 ret = sessiond_set_thread_pollset(&events, 2);
3742 if (ret < 0) {
3743 goto error_create_poll;
3744 }
3745
3746 /* Add the client socket */
3747 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3748 if (ret < 0) {
3749 goto error;
3750 }
3751
3752 lttng_sessiond_notify_ready();
3753
3754 /* This testpoint is after we signal readiness to the parent. */
3755 if (testpoint(sessiond_thread_manage_clients)) {
3756 goto error;
3757 }
3758
3759 if (testpoint(sessiond_thread_manage_clients_before_loop)) {
3760 goto error;
3761 }
3762
3763 health_code_update();
3764
3765 while (1) {
3766 DBG("Accepting client command ...");
3767
3768 /* Infinite blocking call, waiting for transmission */
3769 restart:
3770 health_poll_entry();
3771 ret = lttng_poll_wait(&events, -1);
3772 health_poll_exit();
3773 if (ret < 0) {
3774 /*
3775 * Restart interrupted system call.
3776 */
3777 if (errno == EINTR) {
3778 goto restart;
3779 }
3780 goto error;
3781 }
3782
3783 nb_fd = ret;
3784
3785 for (i = 0; i < nb_fd; i++) {
3786 /* Fetch once the poll data */
3787 revents = LTTNG_POLL_GETEV(&events, i);
3788 pollfd = LTTNG_POLL_GETFD(&events, i);
3789
3790 health_code_update();
3791
3792 /* Thread quit pipe has been closed. Killing thread. */
3793 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3794 if (ret) {
3795 err = 0;
3796 goto exit;
3797 }
3798
3799 /* Event on the client socket */
3800 if (pollfd == client_sock) {
3801 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3802 ERR("Client socket poll error");
3803 goto error;
3804 }
3805 }
3806 }
3807
3808 DBG("Wait for client response");
3809
3810 health_code_update();
3811
3812 sock = lttcomm_accept_unix_sock(client_sock);
3813 if (sock < 0) {
3814 goto error;
3815 }
3816
3817 /*
3818 * Set the CLOEXEC flag. Return code is useless because either way, the
3819 * show must go on.
3820 */
3821 (void) utils_set_fd_cloexec(sock);
3822
3823 /* Set socket option for credentials retrieval */
3824 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3825 if (ret < 0) {
3826 goto error;
3827 }
3828
3829 /* Allocate context command to process the client request */
3830 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3831 if (cmd_ctx == NULL) {
3832 PERROR("zmalloc cmd_ctx");
3833 goto error;
3834 }
3835
3836 /* Allocate data buffer for reception */
3837 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3838 if (cmd_ctx->lsm == NULL) {
3839 PERROR("zmalloc cmd_ctx->lsm");
3840 goto error;
3841 }
3842
3843 cmd_ctx->llm = NULL;
3844 cmd_ctx->session = NULL;
3845
3846 health_code_update();
3847
3848 /*
3849 * Data is received from the lttng client. The struct
3850 * lttcomm_session_msg (lsm) contains the command and data request of
3851 * the client.
3852 */
3853 DBG("Receiving data from client ...");
3854 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3855 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3856 if (ret <= 0) {
3857 DBG("Nothing recv() from client... continuing");
3858 ret = close(sock);
3859 if (ret) {
3860 PERROR("close");
3861 }
3862 sock = -1;
3863 clean_command_ctx(&cmd_ctx);
3864 continue;
3865 }
3866
3867 health_code_update();
3868
3869 // TODO: Validate cmd_ctx including sanity check for
3870 // security purpose.
3871
3872 rcu_thread_online();
3873 /*
3874 * This function dispatches the work to the kernel or userspace tracer
3875 * libs and fills the lttcomm_lttng_msg data structure with all the
3876 * information needed by the client. The command context struct contains
3877 * everything this function may need.
3878 */
3879 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3880 rcu_thread_offline();
3881 if (ret < 0) {
3882 ret = close(sock);
3883 if (ret) {
3884 PERROR("close");
3885 }
3886 sock = -1;
3887 /*
3888 * TODO: Inform client somehow of the fatal error. At
3889 * this point, ret < 0 means that a zmalloc failed
3890 * (ENOMEM). The error is detected but we keep
3891 * accepting commands, unless a socket error has
3892 * been detected.
3893 */
3894 clean_command_ctx(&cmd_ctx);
3895 continue;
3896 }
3897
3898 health_code_update();
3899
3900 DBG("Sending response (size: %d, retcode: %s)",
3901 cmd_ctx->lttng_msg_size,
3902 lttng_strerror(-cmd_ctx->llm->ret_code));
3903 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3904 if (ret < 0) {
3905 ERR("Failed to send data back to client");
3906 }
3907
3908 /* End of transmission */
3909 ret = close(sock);
3910 if (ret) {
3911 PERROR("close");
3912 }
3913 sock = -1;
3914
3915 clean_command_ctx(&cmd_ctx);
3916
3917 health_code_update();
3918 }
3919
3920 exit:
3921 error:
3922 if (sock >= 0) {
3923 ret = close(sock);
3924 if (ret) {
3925 PERROR("close");
3926 }
3927 }
3928
3929 lttng_poll_clean(&events);
3930 clean_command_ctx(&cmd_ctx);
3931
3932 error_listen:
3933 error_create_poll:
3934 unlink(client_unix_sock_path);
3935 if (client_sock >= 0) {
3936 ret = close(client_sock);
3937 if (ret) {
3938 PERROR("close");
3939 }
3940 }
3941
3942 if (err) {
3943 health_error();
3944 ERR("Health error occurred in %s", __func__);
3945 }
3946
3947 health_unregister(health_sessiond);
3948
3949 DBG("Client thread dying");
3950
3951 rcu_unregister_thread();
3952 return NULL;
3953 }
3954
3955
3956 /*
3957 * usage function on stderr
3958 */
3959 static void usage(void)
3960 {
3961 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3962 fprintf(stderr, " -h, --help Display this usage.\n");
3963 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3964 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3965 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3966 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3967 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
3968 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
3969 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
3970 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
3971 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
3972 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
3973 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
3974 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
3975 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3976 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3977 fprintf(stderr, " -V, --version Show version number.\n");
3978 fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
3979 fprintf(stderr, " -q, --quiet No output at all.\n");
3980 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3981 fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
3982 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3983 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
3984 fprintf(stderr, " --jul-tcp-port JUL application registration TCP port\n");
3985 }
3986
3987 /*
3988 * daemon argument parsing
3989 */
3990 static int parse_args(int argc, char **argv)
3991 {
3992 int c;
3993
3994 static struct option long_options[] = {
3995 { "client-sock", 1, 0, 'c' },
3996 { "apps-sock", 1, 0, 'a' },
3997 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3998 { "kconsumerd-err-sock", 1, 0, 'E' },
3999 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
4000 { "ustconsumerd32-err-sock", 1, 0, 'H' },
4001 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
4002 { "ustconsumerd64-err-sock", 1, 0, 'F' },
4003 { "consumerd32-path", 1, 0, 'u' },
4004 { "consumerd32-libdir", 1, 0, 'U' },
4005 { "consumerd64-path", 1, 0, 't' },
4006 { "consumerd64-libdir", 1, 0, 'T' },
4007 { "daemonize", 0, 0, 'd' },
4008 { "sig-parent", 0, 0, 'S' },
4009 { "help", 0, 0, 'h' },
4010 { "group", 1, 0, 'g' },
4011 { "version", 0, 0, 'V' },
4012 { "quiet", 0, 0, 'q' },
4013 { "verbose", 0, 0, 'v' },
4014 { "verbose-consumer", 0, 0, 'Z' },
4015 { "no-kernel", 0, 0, 'N' },
4016 { "pidfile", 1, 0, 'p' },
4017 { "jul-tcp-port", 1, 0, 'J' },
4018 { NULL, 0, 0, 0 }
4019 };
4020
4021 while (1) {
4022 int option_index = 0;
4023 c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:J:",
4024 long_options, &option_index);
4025 if (c == -1) {
4026 break;
4027 }
4028
4029 switch (c) {
4030 case 0:
4031 fprintf(stderr, "option %s", long_options[option_index].name);
4032 if (optarg) {
4033 fprintf(stderr, " with arg %s\n", optarg);
4034 }
4035 break;
4036 case 'c':
4037 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
4038 break;
4039 case 'a':
4040 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
4041 break;
4042 case 'd':
4043 opt_daemon = 1;
4044 break;
4045 case 'g':
4046 tracing_group_name = optarg;
4047 break;
4048 case 'h':
4049 usage();
4050 exit(EXIT_FAILURE);
4051 case 'V':
4052 fprintf(stdout, "%s\n", VERSION);
4053 exit(EXIT_SUCCESS);
4054 case 'S':
4055 opt_sig_parent = 1;
4056 break;
4057 case 'E':
4058 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4059 break;
4060 case 'C':
4061 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4062 break;
4063 case 'F':
4064 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4065 break;
4066 case 'D':
4067 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4068 break;
4069 case 'H':
4070 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4071 break;
4072 case 'G':
4073 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4074 break;
4075 case 'N':
4076 opt_no_kernel = 1;
4077 break;
4078 case 'q':
4079 lttng_opt_quiet = 1;
4080 break;
4081 case 'v':
4082 /* Verbose level can increase using multiple -v */
4083 lttng_opt_verbose += 1;
4084 break;
4085 case 'Z':
4086 opt_verbose_consumer += 1;
4087 break;
4088 case 'u':
4089 consumerd32_bin = optarg;
4090 break;
4091 case 'U':
4092 consumerd32_libdir = optarg;
4093 break;
4094 case 't':
4095 consumerd64_bin = optarg;
4096 break;
4097 case 'T':
4098 consumerd64_libdir = optarg;
4099 break;
4100 case 'p':
4101 opt_pidfile = optarg;
4102 break;
4103 case 'J': /* JUL TCP port. */
4104 {
4105 unsigned long v;
4106
4107 errno = 0;
4108 v = strtoul(optarg, NULL, 0);
4109 if (errno != 0 || !isdigit(optarg[0])) {
4110 ERR("Wrong value in --jul-tcp-port parameter: %s", optarg);
4111 return -1;
4112 }
4113 if (v == 0 || v >= 65535) {
4114 ERR("Port overflow in --jul-tcp-port parameter: %s", optarg);
4115 return -1;
4116 }
4117 jul_tcp_port = (uint32_t) v;
4118 DBG3("JUL TCP port set to non default: %u", jul_tcp_port);
4119 break;
4120 }
4121 default:
4122 /* Unknown option or other error.
4123 * Error is printed by getopt, just return */
4124 return -1;
4125 }
4126 }
4127
4128 return 0;
4129 }
4130
4131 /*
4132 * Creates the two sockets needed by the daemon.
4133 * apps_sock - The communication socket for all UST apps.
4134 * client_sock - The communication socket for the cli tool (lttng).
4135 */
4136 static int init_daemon_socket(void)
4137 {
4138 int ret = 0;
4139 mode_t old_umask;
4140
4141 old_umask = umask(0);
4142
4143 /* Create client tool unix socket */
4144 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
4145 if (client_sock < 0) {
4146 ERR("Create unix sock failed: %s", client_unix_sock_path);
4147 ret = -1;
4148 goto end;
4149 }
4150
4151 /* Set the cloexec flag */
4152 ret = utils_set_fd_cloexec(client_sock);
4153 if (ret < 0) {
4154 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
4155 "Continuing but note that the consumer daemon will have a "
4156 "reference to this socket on exec()", client_sock);
4157 }
4158
4159 /* File permission MUST be 660 */
4160 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4161 if (ret < 0) {
4162 ERR("Set file permissions failed: %s", client_unix_sock_path);
4163 PERROR("chmod");
4164 goto end;
4165 }
4166
4167 /* Create the application unix socket */
4168 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
4169 if (apps_sock < 0) {
4170 ERR("Create unix sock failed: %s", apps_unix_sock_path);
4171 ret = -1;
4172 goto end;
4173 }
4174
4175 /* Set the cloexec flag */
4176 ret = utils_set_fd_cloexec(apps_sock);
4177 if (ret < 0) {
4178 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
4179 "Continuing but note that the consumer daemon will have a "
4180 "reference to this socket on exec()", apps_sock);
4181 }
4182
4183 /* File permission MUST be 666 */
4184 ret = chmod(apps_unix_sock_path,
4185 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
4186 if (ret < 0) {
4187 ERR("Set file permissions failed: %s", apps_unix_sock_path);
4188 PERROR("chmod");
4189 goto end;
4190 }
4191
4192 DBG3("Session daemon client socket %d and application socket %d created",
4193 client_sock, apps_sock);
4194
4195 end:
4196 umask(old_umask);
4197 return ret;
4198 }
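
/*
 * Rationale for the modes above: the client socket is 660 so that only root
 * and the tracing group (applied later by set_permissions()) may send
 * commands, while the application socket is 666 because any instrumented
 * process, whatever its user, must be able to register with the daemon.
 */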
4199
4200 /*
4201 * Check if the global socket is available and if a daemon is answering on the
4202 * other side. If so, an error is returned.
4203 */
4204 static int check_existing_daemon(void)
4205 {
4206 /* Is there anybody out there ? */
4207 if (lttng_session_daemon_alive()) {
4208 return -EEXIST;
4209 }
4210
4211 return 0;
4212 }
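
/*
 * lttng_session_daemon_alive() comes from liblttng-ctl; presumably it simply
 * tries to reach an already running session daemon through its client
 * socket, and any answer is reported back here as -EEXIST.
 */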
4213
4214 /*
4215 * Set the tracing group gid onto the client socket.
4216 *
4217 * Race window between mkdir and chown is OK because we are going from more
4218 * permissive (root.root) to less permissive (root.tracing).
4219 */
4220 static int set_permissions(char *rundir)
4221 {
4222 int ret;
4223 gid_t gid;
4224
4225 gid = utils_get_group_id(tracing_group_name);
4226
4227 /* Set lttng run dir */
4228 ret = chown(rundir, 0, gid);
4229 if (ret < 0) {
4230 ERR("Unable to set group on %s", rundir);
4231 PERROR("chown");
4232 }
4233
4234 /*
4235 * Ensure all applications and tracing group can search the run
4236 * dir. Allow everyone to read the directory, since it does not
4237 * buy us anything to hide its content.
4238 */
4239 ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
4240 if (ret < 0) {
4241 ERR("Unable to set permissions on %s", rundir);
4242 PERROR("chmod");
4243 }
4244
4245 /* lttng client socket path */
4246 ret = chown(client_unix_sock_path, 0, gid);
4247 if (ret < 0) {
4248 ERR("Unable to set group on %s", client_unix_sock_path);
4249 PERROR("chown");
4250 }
4251
4252 /* kconsumer error socket path */
4253 ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
4254 if (ret < 0) {
4255 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
4256 PERROR("chown");
4257 }
4258
4259 /* 64-bit ustconsumer error socket path */
4260 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
4261 if (ret < 0) {
4262 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
4263 PERROR("chown");
4264 }
4265
4266 /* 32-bit ustconsumer compat32 error socket path */
4267 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
4268 if (ret < 0) {
4269 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
4270 PERROR("chown");
4271 }
4272
4273 DBG("All permissions are set");
4274
4275 return ret;
4276 }
4277
4278 /*
4279 * Create the lttng run directory needed for all global sockets and pipe.
4280 */
4281 static int create_lttng_rundir(const char *rundir)
4282 {
4283 int ret;
4284
4285 DBG3("Creating LTTng run directory: %s", rundir);
4286
4287 ret = mkdir(rundir, S_IRWXU);
4288 if (ret < 0) {
4289 if (errno != EEXIST) {
4290 ERR("Unable to create %s", rundir);
4291 goto error;
4292 } else {
4293 ret = 0;
4294 }
4295 }
4296
4297 error:
4298 return ret;
4299 }
4300
4301 /*
4302 * Set up the sockets and directory needed for the given consumer daemon to
4303 * communicate with the session daemon.
4304 */
4305 static int set_consumer_sockets(struct consumer_data *consumer_data,
4306 const char *rundir)
4307 {
4308 int ret;
4309 char path[PATH_MAX];
4310
4311 switch (consumer_data->type) {
4312 case LTTNG_CONSUMER_KERNEL:
4313 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
4314 break;
4315 case LTTNG_CONSUMER64_UST:
4316 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
4317 break;
4318 case LTTNG_CONSUMER32_UST:
4319 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
4320 break;
4321 default:
4322 ERR("Consumer type unknown");
4323 ret = -EINVAL;
4324 goto error;
4325 }
4326
4327 DBG2("Creating consumer directory: %s", path);
4328
4329 ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
4330 if (ret < 0) {
4331 if (errno != EEXIST) {
4332 PERROR("mkdir");
4333 ERR("Failed to create %s", path);
4334 goto error;
4335 }
4336 ret = -1;
4337 }
4338 if (is_root) {
4339 ret = chown(path, 0, utils_get_group_id(tracing_group_name));
4340 if (ret < 0) {
4341 ERR("Unable to set group on %s", path);
4342 PERROR("chown");
4343 goto error;
4344 }
4345 }
4346
4347 /* Create the consumerd error unix socket */
4348 consumer_data->err_sock =
4349 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
4350 if (consumer_data->err_sock < 0) {
4351 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
4352 ret = -1;
4353 goto error;
4354 }
4355
4356 /*
4357 * Set the CLOEXEC flag. Return code is useless because either way, the
4358 * show must go on.
4359 */
4360 ret = utils_set_fd_cloexec(consumer_data->err_sock);
4361 if (ret < 0) {
4362 PERROR("utils_set_fd_cloexec");
4363 /* continue anyway */
4364 }
4365
4366 /* File permission MUST be 660 */
4367 ret = chmod(consumer_data->err_unix_sock_path,
4368 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4369 if (ret < 0) {
4370 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
4371 PERROR("chmod");
4372 goto error;
4373 }
4374
4375 error:
4376 return ret;
4377 }
4378
4379 /*
4380 * Signal handler for the daemon
4381 *
4382 * Simply stop all worker threads, leaving main() return gracefully after
4383 * joining all threads and calling cleanup().
4384 */
4385 static void sighandler(int sig)
4386 {
4387 switch (sig) {
4388 case SIGPIPE:
4389 DBG("SIGPIPE caught");
4390 return;
4391 case SIGINT:
4392 DBG("SIGINT caught");
4393 stop_threads();
4394 break;
4395 case SIGTERM:
4396 DBG("SIGTERM caught");
4397 stop_threads();
4398 break;
4399 case SIGUSR1:
4400 CMM_STORE_SHARED(recv_child_signal, 1);
4401 break;
4402 default:
4403 break;
4404 }
4405 }
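
/*
 * Note: recv_child_signal is updated with CMM_STORE_SHARED() so that the
 * parent process in daemonize(), which polls it with CMM_LOAD_SHARED(),
 * observes the update without any locking from signal context.
 */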
4406
4407 /*
4408 * Setup signal handlers for:
4409 * SIGINT, SIGTERM, SIGPIPE and SIGUSR1
4410 */
4411 static int set_signal_handler(void)
4412 {
4413 int ret = 0;
4414 struct sigaction sa;
4415 sigset_t sigset;
4416
4417 if ((ret = sigemptyset(&sigset)) < 0) {
4418 PERROR("sigemptyset");
4419 return ret;
4420 }
4421
4422 sa.sa_handler = sighandler;
4423 sa.sa_mask = sigset;
4424 sa.sa_flags = 0;
4425 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
4426 PERROR("sigaction");
4427 return ret;
4428 }
4429
4430 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
4431 PERROR("sigaction");
4432 return ret;
4433 }
4434
4435 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
4436 PERROR("sigaction");
4437 return ret;
4438 }
4439
4440 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
4441 PERROR("sigaction");
4442 return ret;
4443 }
4444
4445 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
4446
4447 return ret;
4448 }
4449
4450 /*
4451 * Raise the open files limit. This daemon can open a large number of file
4452 * descriptors in order to consume multiple kernel traces.
4453 */
4454 static void set_ulimit(void)
4455 {
4456 int ret;
4457 struct rlimit lim;
4458
4459 /* The kernel does not allow an infinite limit for open files */
4460 lim.rlim_cur = 65535;
4461 lim.rlim_max = 65535;
4462
4463 ret = setrlimit(RLIMIT_NOFILE, &lim);
4464 if (ret < 0) {
4465 PERROR("failed to set open files limit");
4466 }
4467 }
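
/*
 * Note: raising the open files limit above the current hard limit requires
 * privileges (CAP_SYS_RESOURCE on Linux), which is why main() only calls
 * set_ulimit() when the daemon runs as root.
 */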
4468
4469 /*
4470 * Write pidfile using the rundir and opt_pidfile.
4471 */
4472 static void write_pidfile(void)
4473 {
4474 int ret;
4475 char pidfile_path[PATH_MAX];
4476
4477 assert(rundir);
4478
4479 if (opt_pidfile) {
4480 strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
4481 } else {
4482 /* Build the pidfile path from rundir and the default pidfile name. */
4483 ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
4484 DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
4485 if (ret < 0) {
4486 PERROR("snprintf pidfile path");
4487 goto error;
4488 }
4489 }
4490
4491 /*
4492 * Create pid file in rundir. Return value is of no importance. The
4493 * execution will continue even though we are not able to write the file.
4494 */
4495 (void) utils_create_pid_file(getpid(), pidfile_path);
4496
4497 error:
4498 return;
4499 }
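
/*
 * Assuming DEFAULT_LTTNG_SESSIOND_PIDFILE expands to something like
 * "lttng-sessiond.pid", the default pidfile ends up as
 * <rundir>/lttng-sessiond.pid and contains the daemon's PID in decimal.
 */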
4500
4501 /*
4502 * Write JUL TCP port using the rundir.
4503 */
4504 static void write_julport(void)
4505 {
4506 int ret;
4507 char path[PATH_MAX];
4508
4509 assert(rundir);
4510
4511 ret = snprintf(path, sizeof(path), "%s/"
4512 DEFAULT_LTTNG_SESSIOND_JULPORT_FILE, rundir);
4513 if (ret < 0) {
4514 PERROR("snprintf julport path");
4515 goto error;
4516 }
4517
4518 /*
4519 * Create TCP JUL port file in rundir. Return value is of no importance.
4520 * The execution will continue even though we are not able to write the
4521 * file.
4522 */
4523 (void) utils_create_pid_file(jul_tcp_port, path);
4524
4525 error:
4526 return;
4527 }
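
/*
 * utils_create_pid_file() is reused here to write a number that is not a
 * PID: the resulting file simply contains the TCP port on which JUL agents
 * must register.
 */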
4528
4529 /*
4530 * Daemonize this process by forking and making the parent wait for the child
4531 * to signal that it is ready. Once that signal is received, the parent quits
4532 * successfully.
4533 *
4534 * The child process performs the same actions as daemon(3): setsid, chdir to
4535 * "/", and dup of /dev/null onto file descriptors 0, 1 and 2.
4536 *
4537 * Return 0 on success else -1 on error.
4538 */
4539 static int daemonize(void)
4540 {
4541 int ret;
4542 pid_t pid;
4543
4544 /* Get parent pid of this process. */
4545 child_ppid = getppid();
4546
4547 pid = fork();
4548 if (pid < 0) {
4549 PERROR("fork");
4550 goto error;
4551 } else if (pid == 0) {
4552 int fd;
4553 pid_t sid;
4554
4555 /* Child */
4556
4557 /*
4558 * Get the newly created parent pid so we can signal that process when
4559 * we are ready to operate.
4560 */
4561 child_ppid = getppid();
4562
4563 sid = setsid();
4564 if (sid < 0) {
4565 PERROR("setsid");
4566 goto error;
4567 }
4568
4569 /* Try to change directory to /. If we can't, at least notify. */
4570 ret = chdir("/");
4571 if (ret < 0) {
4572 PERROR("chdir");
4573 }
4574
4575 fd = open(_PATH_DEVNULL, O_RDWR, 0);
4576 if (fd < 0) {
4577 PERROR("open %s", _PATH_DEVNULL);
4578 /* Let 0, 1 and 2 open since we can't bind them to /dev/null. */
4579 } else {
4580 (void) dup2(fd, STDIN_FILENO);
4581 (void) dup2(fd, STDOUT_FILENO);
4582 (void) dup2(fd, STDERR_FILENO);
4583 if (fd > 2) {
4584 ret = close(fd);
4585 if (ret < 0) {
4586 PERROR("close");
4587 }
4588 }
4589 }
4590 goto end;
4591 } else {
4592 /* Parent */
4593
4594 /*
4595 * Wait for the child to notify this parent that it can exit. Note that
4596 * sleep() is interrupted before the 1 second delay as soon as the
4597 * signal is received, so it will not cause visible delay for the
4598 * user.
4599 */
4600 while (!CMM_LOAD_SHARED(recv_child_signal)) {
4601 int status;
4602 pid_t ret;
4603
4604 /*
4605 * Check, without blocking, whether the child has exited. If so, we have
4606 * to stop this parent process and return an error.
4607 */
4608 ret = waitpid(pid, &status, WNOHANG);
4609 if (ret < 0 || (ret != 0 && WIFEXITED(status))) {
4610 /* The child exited somehow or was not valid. */
4611 goto error;
4612 }
4613 sleep(1);
4614 }
4615
4616 /*
4617 * From this point on, the parent can exit and the child is now an
4618 * operational session daemon ready to serve clients and applications.
4619 */
4620 exit(EXIT_SUCCESS);
4621 }
4622
4623 end:
4624 return 0;
4625
4626 error:
4627 return -1;
4628 }
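
/*
 * Readiness handshake: once the child has finished initializing (presumably
 * by sending SIGUSR1 to child_ppid from elsewhere in the daemon), the
 * parent's SIGUSR1 handler sets recv_child_signal, the wait loop above ends
 * and the parent exits with EXIT_SUCCESS.
 */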
4629
4630 /*
4631 * main
4632 */
4633 int main(int argc, char **argv)
4634 {
4635 int ret = 0;
4636 void *status;
4637 const char *home_path, *env_app_timeout;
4638
4639 init_kernel_workarounds();
4640
4641 rcu_register_thread();
4642
4643 if ((ret = set_signal_handler()) < 0) {
4644 goto error;
4645 }
4646
4647 setup_consumerd_path();
4648
4649 page_size = sysconf(_SC_PAGESIZE);
4650 if (page_size < 0) {
4651 PERROR("sysconf _SC_PAGESIZE");
4652 page_size = LONG_MAX;
4653 WARN("Fallback page size to %ld", page_size);
4654 }
4655
4656 /* Parse arguments */
4657 progname = argv[0];
4658 if ((ret = parse_args(argc, argv)) < 0) {
4659 goto error;
4660 }
4661
4662 /* Daemonize */
4663 if (opt_daemon) {
4664 int i;
4665
4666 ret = daemonize();
4667 if (ret < 0) {
4668 goto error;
4669 }
4670
4671 /*
4672 * We are in the child. Make sure all other file descriptors are
4673 * closed, in case we are called with more opened file descriptors than
4674 * the standard ones.
4675 */
4676 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4677 (void) close(i);
4678 }
4679 }
4680
4681 /* Create thread quit pipe */
4682 if ((ret = init_thread_quit_pipe()) < 0) {
4683 goto error;
4684 }
4685
4686 /* Check if daemon is UID = 0 */
4687 is_root = !getuid();
4688
4689 if (is_root) {
4690 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
4691
4692 /* Create global run dir with root access */
4693 ret = create_lttng_rundir(rundir);
4694 if (ret < 0) {
4695 goto error;
4696 }
4697
4698 if (strlen(apps_unix_sock_path) == 0) {
4699 snprintf(apps_unix_sock_path, PATH_MAX,
4700 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
4701 }
4702
4703 if (strlen(client_unix_sock_path) == 0) {
4704 snprintf(client_unix_sock_path, PATH_MAX,
4705 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
4706 }
4707
4708 /* Set global SHM for ust */
4709 if (strlen(wait_shm_path) == 0) {
4710 snprintf(wait_shm_path, PATH_MAX,
4711 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
4712 }
4713
4714 if (strlen(health_unix_sock_path) == 0) {
4715 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4716 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
4717 }
4718
4719 /* Setup kernel consumerd path */
4720 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
4721 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
4722 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
4723 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
4724
4725 DBG2("Kernel consumer err path: %s",
4726 kconsumer_data.err_unix_sock_path);
4727 DBG2("Kernel consumer cmd path: %s",
4728 kconsumer_data.cmd_unix_sock_path);
4729 } else {
4730 home_path = utils_get_home_dir();
4731 if (home_path == NULL) {
4732 /* TODO: Add --socket PATH option */
4733 ERR("Can't get HOME directory for sockets creation.");
4734 ret = -EPERM;
4735 goto error;
4736 }
4737
4738 /*
4739 * Create rundir from home path. This will create something like
4740 * $HOME/.lttng
4741 */
4742 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
4743 if (ret < 0) {
4744 ret = -ENOMEM;
4745 goto error;
4746 }
4747
4748 ret = create_lttng_rundir(rundir);
4749 if (ret < 0) {
4750 goto error;
4751 }
4752
4753 if (strlen(apps_unix_sock_path) == 0) {
4754 snprintf(apps_unix_sock_path, PATH_MAX,
4755 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
4756 }
4757
4758 /* Set the cli tool unix socket path */
4759 if (strlen(client_unix_sock_path) == 0) {
4760 snprintf(client_unix_sock_path, PATH_MAX,
4761 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
4762 }
4763
4764 /* Set global SHM for ust */
4765 if (strlen(wait_shm_path) == 0) {
4766 snprintf(wait_shm_path, PATH_MAX,
4767 DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
4768 }
4769
4770 /* Set health check Unix path */
4771 if (strlen(health_unix_sock_path) == 0) {
4772 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4773 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
4774 }
4775 }
4776
4777 /* Set consumer initial state */
4778 kernel_consumerd_state = CONSUMER_STOPPED;
4779 ust_consumerd_state = CONSUMER_STOPPED;
4780
4781 DBG("Client socket path %s", client_unix_sock_path);
4782 DBG("Application socket path %s", apps_unix_sock_path);
4783 DBG("Application wait path %s", wait_shm_path);
4784 DBG("LTTng run directory path: %s", rundir);
4785
4786 /* 32 bits consumerd path setup */
4787 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
4788 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4789 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4790 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4791
4792 DBG2("UST consumer 32 bits err path: %s",
4793 ustconsumer32_data.err_unix_sock_path);
4794 DBG2("UST consumer 32 bits cmd path: %s",
4795 ustconsumer32_data.cmd_unix_sock_path);
4796
4797 /* 64 bits consumerd path setup */
4798 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4799 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4800 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4801 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4802
4803 DBG2("UST consumer 64 bits err path: %s",
4804 ustconsumer64_data.err_unix_sock_path);
4805 DBG2("UST consumer 64 bits cmd path: %s",
4806 ustconsumer64_data.cmd_unix_sock_path);
4807
4808 /*
4809 * See if a daemon already exists.
4810 */
4811 if ((ret = check_existing_daemon()) < 0) {
4812 ERR("Already running daemon.\n");
4813 /*
4814 * We do not goto exit because we must not cleanup()
4815 * because a daemon is already running.
4816 */
4817 goto error;
4818 }
4819
4820 /*
4821 * Init UST app hash table. Alloc hash table before this point since
4822 * cleanup() can get called after that point.
4823 */
4824 ust_app_ht_alloc();
4825
4826 /* Initialize JUL domain subsystem. */
4827 if ((ret = jul_init()) < 0) {
4828 /* ENOMEM at this point. */
4829 goto error;
4830 }
4831
4832 /* After this point, we can safely call cleanup() with "goto exit" */
4833
4834 /*
4835 * These actions must be executed as root. We do that *after* setting up
4836 * the sockets path because we MUST make the check for another daemon using
4837 * those paths *before* trying to set the kernel consumer sockets and init
4838 * kernel tracer.
4839 */
4840 if (is_root) {
4841 ret = set_consumer_sockets(&kconsumer_data, rundir);
4842 if (ret < 0) {
4843 goto exit;
4844 }
4845
4846 /* Setup kernel tracer */
4847 if (!opt_no_kernel) {
4848 init_kernel_tracer();
4849 }
4850
4851 /* Set ulimit for open files */
4852 set_ulimit();
4853 }
4854 /* init lttng_fd tracking must be done after set_ulimit. */
4855 lttng_fd_init();
4856
4857 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4858 if (ret < 0) {
4859 goto exit;
4860 }
4861
4862 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4863 if (ret < 0) {
4864 goto exit;
4865 }
4866
4867 /* Setup the needed unix socket */
4868 if ((ret = init_daemon_socket()) < 0) {
4869 goto exit;
4870 }
4871
4872 /* Set credentials to socket */
4873 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4874 goto exit;
4875 }
4876
4877 /* Get parent pid if -S, --sig-parent is specified. */
4878 if (opt_sig_parent) {
4879 ppid = getppid();
4880 }
4881
4882 /* Setup the kernel pipe for waking up the kernel thread */
4883 if (is_root && !opt_no_kernel) {
4884 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
4885 goto exit;
4886 }
4887 }
4888
4889 /* Setup the thread ht_cleanup communication pipe. */
4890 if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
4891 goto exit;
4892 }
4893
4894 /* Setup the thread apps communication pipe. */
4895 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
4896 goto exit;
4897 }
4898
4899 /* Setup the thread apps notify communication pipe. */
4900 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
4901 goto exit;
4902 }
4903
4904 /* Initialize global buffer per UID and PID registry. */
4905 buffer_reg_init_uid_registry();
4906 buffer_reg_init_pid_registry();
4907
4908 /* Init UST command queue. */
4909 cds_wfq_init(&ust_cmd_queue.queue);
4910
4911 /*
4912 * Get the session list pointer. This pointer MUST NOT be passed to free();
4913 * the list is statically declared in session.c.
4914 */
4915 session_list_ptr = session_get_list();
4916
4917 /* Set up max poll set size */
4918 lttng_poll_set_max_size();
4919
4920 cmd_init();
4921
4922 /* Check for the application socket timeout env variable. */
4923 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
4924 if (env_app_timeout) {
4925 app_socket_timeout = atoi(env_app_timeout);
4926 } else {
4927 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
4928 }
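
/*
 * Note that atoi() returns 0 on non-numeric input, so a malformed value in
 * the environment variable silently results in a timeout of 0 rather than
 * falling back to DEFAULT_APP_SOCKET_RW_TIMEOUT.
 */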
4929
4930 write_pidfile();
4931 write_julport();
4932
4933 /* Initialize communication library */
4934 lttcomm_init();
4935 /* This is to get the TCP timeout value. */
4936 lttcomm_inet_init();
4937
4938 /*
4939 * Initialize the health check subsystem. This call should set the
4940 * appropriate time values.
4941 */
4942 health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
4943 if (!health_sessiond) {
4944 PERROR("health_app_create error");
4945 goto exit_health_sessiond_cleanup;
4946 }
4947
4948 /* Create thread to clean up RCU hash tables */
4949 ret = pthread_create(&ht_cleanup_thread, NULL,
4950 thread_ht_cleanup, (void *) NULL);
4951 if (ret != 0) {
4952 PERROR("pthread_create ht_cleanup");
4953 goto exit_ht_cleanup;
4954 }
4955
4956 /* Create health-check thread */
4957 ret = pthread_create(&health_thread, NULL,
4958 thread_manage_health, (void *) NULL);
4959 if (ret != 0) {
4960 PERROR("pthread_create health");
4961 goto exit_health;
4962 }
4963
4964 /* Create thread to manage the client socket */
4965 ret = pthread_create(&client_thread, NULL,
4966 thread_manage_clients, (void *) NULL);
4967 if (ret != 0) {
4968 PERROR("pthread_create clients");
4969 goto exit_client;
4970 }
4971
4972 /* Create thread to dispatch registration */
4973 ret = pthread_create(&dispatch_thread, NULL,
4974 thread_dispatch_ust_registration, (void *) NULL);
4975 if (ret != 0) {
4976 PERROR("pthread_create dispatch");
4977 goto exit_dispatch;
4978 }
4979
4980 /* Create thread to manage application registration. */
4981 ret = pthread_create(&reg_apps_thread, NULL,
4982 thread_registration_apps, (void *) NULL);
4983 if (ret != 0) {
4984 PERROR("pthread_create registration");
4985 goto exit_reg_apps;
4986 }
4987
4988 /* Create thread to manage application socket */
4989 ret = pthread_create(&apps_thread, NULL,
4990 thread_manage_apps, (void *) NULL);
4991 if (ret != 0) {
4992 PERROR("pthread_create apps");
4993 goto exit_apps;
4994 }
4995
4996 /* Create thread to manage application notify socket */
4997 ret = pthread_create(&apps_notify_thread, NULL,
4998 ust_thread_manage_notify, (void *) NULL);
4999 if (ret != 0) {
5000 PERROR("pthread_create apps");
5001 goto exit_apps_notify;
5002 }
5003
5004 /* Create JUL registration thread. */
5005 ret = pthread_create(&jul_reg_thread, NULL,
5006 jul_thread_manage_registration, (void *) NULL);
5007 if (ret != 0) {
5008 PERROR("pthread_create apps");
5009 goto exit_jul_reg;
5010 }
5011
5012 /* Don't start this thread if kernel tracing is not requested nor root */
5013 if (is_root && !opt_no_kernel) {
5014 /* Create kernel thread to manage kernel event */
5015 ret = pthread_create(&kernel_thread, NULL,
5016 thread_manage_kernel, (void *) NULL);
5017 if (ret != 0) {
5018 PERROR("pthread_create kernel");
5019 goto exit_kernel;
5020 }
5021
5022 ret = pthread_join(kernel_thread, &status);
5023 if (ret != 0) {
5024 PERROR("pthread_join");
5025 goto error; /* join error, exit without cleanup */
5026 }
5027 }
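
/*
 * The labels below serve both as error-unwind targets for a failed
 * pthread_create() above and as the normal shutdown path: once
 * stop_threads() has been triggered (see sighandler()), execution falls
 * through each label, joining the threads in roughly the reverse order of
 * their creation before reaching cleanup().
 */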
5028
5029 exit_kernel:
5030 ret = pthread_join(jul_reg_thread, &status);
5031 if (ret != 0) {
5032 PERROR("pthread_join JUL");
5033 goto error; /* join error, exit without cleanup */
5034 }
5035
5036 exit_jul_reg:
5037 ret = pthread_join(apps_notify_thread, &status);
5038 if (ret != 0) {
5039 PERROR("pthread_join apps notify");
5040 goto error; /* join error, exit without cleanup */
5041 }
5042
5043 exit_apps_notify:
5044 ret = pthread_join(apps_thread, &status);
5045 if (ret != 0) {
5046 PERROR("pthread_join apps");
5047 goto error; /* join error, exit without cleanup */
5048 }
5049
5050
5051 exit_apps:
5052 ret = pthread_join(reg_apps_thread, &status);
5053 if (ret != 0) {
5054 PERROR("pthread_join");
5055 goto error; /* join error, exit without cleanup */
5056 }
5057
5058 exit_reg_apps:
5059 ret = pthread_join(dispatch_thread, &status);
5060 if (ret != 0) {
5061 PERROR("pthread_join");
5062 goto error; /* join error, exit without cleanup */
5063 }
5064
5065 exit_dispatch:
5066 ret = pthread_join(client_thread, &status);
5067 if (ret != 0) {
5068 PERROR("pthread_join");
5069 goto error; /* join error, exit without cleanup */
5070 }
5071
5072 ret = join_consumer_thread(&kconsumer_data);
5073 if (ret != 0) {
5074 PERROR("join_consumer");
5075 goto error; /* join error, exit without cleanup */
5076 }
5077
5078 ret = join_consumer_thread(&ustconsumer32_data);
5079 if (ret != 0) {
5080 PERROR("join_consumer ust32");
5081 goto error; /* join error, exit without cleanup */
5082 }
5083
5084 ret = join_consumer_thread(&ustconsumer64_data);
5085 if (ret != 0) {
5086 PERROR("join_consumer ust64");
5087 goto error; /* join error, exit without cleanup */
5088 }
5089
5090 exit_client:
5091 ret = pthread_join(health_thread, &status);
5092 if (ret != 0) {
5093 PERROR("pthread_join health thread");
5094 goto error; /* join error, exit without cleanup */
5095 }
5096
5097 exit_health:
5098 ret = pthread_join(ht_cleanup_thread, &status);
5099 if (ret != 0) {
5100 PERROR("pthread_join ht cleanup thread");
5101 goto error; /* join error, exit without cleanup */
5102 }
5103 exit_ht_cleanup:
5104 health_app_destroy(health_sessiond);
5105 exit_health_sessiond_cleanup:
5106 exit:
5107 /*
5108 * cleanup() is called when no other thread is running.
5109 */
5110 rcu_thread_online();
5111 cleanup();
5112 rcu_thread_offline();
5113 rcu_unregister_thread();
5114 if (!ret) {
5115 exit(EXIT_SUCCESS);
5116 }
5117 error:
5118 exit(EXIT_FAILURE);
5119 }