/* src/bin/lttng-sessiond/main.c */
/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <paths.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <urcu/uatomic.h>
#include <unistd.h>
#include <config.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/compat/getenv.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/config.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "channel.h"
#include "cmd.h"
#include "consumer.h"
#include "context.h"
#include "event.h"
#include "kernel.h"
#include "kernel-consumer.h"
#include "modprobe.h"
#include "shm.h"
#include "ust-ctl.h"
#include "ust-consumer.h"
#include "utils.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "agent-thread.h"
#include "save.h"
#include "load-session-thread.h"
#include "syscall.h"
#include "agent.h"

#define CONSUMERD_FILE	"lttng-consumerd"

const char *progname;
static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static int tracing_group_name_override;
static char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon, opt_background;
static int opt_no_kernel;
static char *opt_load_session_path;
static pid_t ppid;		/* Parent PID for --sig-parent option */
static pid_t child_ppid;	/* Internal parent PID used with daemonize. */
static char *rundir;
static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;

/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", required_argument, 0, 'c' },
	{ "apps-sock", required_argument, 0, 'a' },
	{ "kconsumerd-cmd-sock", required_argument, 0, '\0' },
	{ "kconsumerd-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-err-sock", required_argument, 0, '\0' },
	{ "consumerd32-path", required_argument, 0, '\0' },
	{ "consumerd32-libdir", required_argument, 0, '\0' },
	{ "consumerd64-path", required_argument, 0, '\0' },
	{ "consumerd64-libdir", required_argument, 0, '\0' },
	{ "daemonize", no_argument, 0, 'd' },
	{ "background", no_argument, 0, 'b' },
	{ "sig-parent", no_argument, 0, 'S' },
	{ "help", no_argument, 0, 'h' },
	{ "group", required_argument, 0, 'g' },
	{ "version", no_argument, 0, 'V' },
	{ "quiet", no_argument, 0, 'q' },
	{ "verbose", no_argument, 0, 'v' },
	{ "verbose-consumer", no_argument, 0, '\0' },
	{ "no-kernel", no_argument, 0, '\0' },
	{ "pidfile", required_argument, 0, 'p' },
	{ "agent-tcp-port", required_argument, 0, '\0' },
	{ "config", required_argument, 0, 'f' },
	{ "load", required_argument, 0, 'l' },
	{ "kmod-probes", required_argument, 0, '\0' },
	{ "extra-kmod-probes", required_argument, 0, '\0' },
	{ NULL, 0, 0, 0 }
};

/* Command-line options that are ignored when parsing the configuration file. */
static const char *config_ignore_options[] = { "help", "version", "config" };

/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };
static int ht_cleanup_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t agent_reg_thread;
static pthread_t load_session_thread;

/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h.
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. Down the line, the
 * thread_manage_apps thread receives the new application socket and monitors
 * it for any I/O error or clean close that triggers an unregistration of the
 * application.
 */
static struct ust_cmd_queue ust_cmd_queue;

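/*
 * A condensed sketch of the N-wakers / 1-waiter pattern as it is used with
 * this queue further down in this file (no new API is introduced here; see
 * futex.c/.h for the actual implementation):
 *
 *	Waker (e.g. the registration thread):
 *		cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail,
 *				&ust_cmd->node);
 *		futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *	Waiter (thread_dispatch_ust_registration):
 *		futex_nto1_prepare(&ust_cmd_queue.futex);
 *		... dequeue and process every node ...
 *		futex_nto1_wait(&ust_cmd_queue.futex);
 */
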
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

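/*
 * Minimal usage sketch, matching how update_kernel_poll() below walks the
 * list (nothing here is new API):
 *
 *	session_lock_list();
 *	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
 *		session_lock(session);
 *		... use session ...
 *		session_unlock(session);
 *	}
 *	session_unlock_list();
 */
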
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
static int consumerd32_bin_override;
static int consumerd64_bin_override;
static int consumerd32_libdir_override;
static int consumerd64_libdir_override;

static const char *module_proc_lttng = "/proc/lttng";

/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 * client cmd arrives
 * client cmd checks state -> still OK
 * consumer thread exits, sets error
 * client cmd tries to talk to consumer
 * ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we cannot guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;

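/*
 * Sketch of the validation described above, as a command handler would
 * perform it (the exact error code returned to the client is an assumption
 * in this sketch; the uatomic_read() usage matches update_ust_app() below):
 *
 *	if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
 *		return -LTTNG_ERR_NO_KERNCONSUMERD;
 *	}
 *	... past this point, the consumer may still die underneath us ...
 */
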
/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;

/* Agent TCP port for registration. Used by the agent thread. */
unsigned int agent_tcp_port = DEFAULT_AGENT_TCP_PORT;

/* Am I root or not. */
int is_root;	/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/* Load session thread information to operate. */
struct load_session_thread_data *load_info;

/* Global hash tables */
struct lttng_ht *agent_apps_ht_by_sock = NULL;

/*
 * Whether sessiond is ready for commands/health check requests.
 * NR_LTTNG_SESSIOND_READY must match the number of calls to
 * sessiond_notify_ready().
 */
#define NR_LTTNG_SESSIOND_READY		3
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;

/* Notify parents that we are ready for cmd and health check */
LTTNG_HIDDEN
void sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
		/*
		 * Notify the parent PID that we are ready to accept client
		 * commands. This ppid is the one from the external process
		 * that spawned us.
		 */
		if (opt_sig_parent) {
			kill(ppid, SIGUSR1);
		}

		/*
		 * Notify the parent of the fork() process that we are
		 * ready.
		 */
		if (opt_daemon || opt_background) {
			kill(child_ppid, SIGUSR1);
		}
	}
}

static
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = lttng_secure_getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = lttng_secure_getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = lttng_secure_getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = lttng_secure_getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}

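/*
 * Example of overriding the 64-bit consumer binary at runtime (the path
 * below is purely illustrative):
 *
 *	$ LTTNG_CONSUMERD64_BIN=/opt/lttng/libexec/lttng-consumerd \
 *		lttng-sessiond --daemonize
 *
 * Note that lttng_secure_getenv() refuses to honor these variables for
 * privileged (setuid/setgid-style) executions, in the spirit of
 * secure_getenv(3), so the overrides only apply to a normally executed
 * daemon.
 */
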
static
int __sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size,
		int *a_pipe)
{
	int ret;

	assert(events);

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, a_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	return __sessiond_set_thread_pollset(events, size, thread_quit_pipe);
}

/*
 * Create a poll set with O_CLOEXEC and add the ht_cleanup quit pipe to the
 * set.
 */
int sessiond_set_ht_cleanup_thread_pollset(struct lttng_poll_event *events,
		size_t size)
{
	return __sessiond_set_thread_pollset(events, size,
			ht_cleanup_quit_pipe);
}

static
int __sessiond_check_thread_quit_pipe(int fd, uint32_t events, int a_pipe)
{
	if (fd == a_pipe && (events & LPOLLIN)) {
		return 1;
	}
	return 0;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered, else 0.
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	return __sessiond_check_thread_quit_pipe(fd, events,
			thread_quit_pipe[0]);
}

/*
 * Check if the ht_cleanup thread quit pipe was triggered.
 *
 * Return 1 if it was triggered, else 0.
 */
int sessiond_check_ht_cleanup_quit(int fd, uint32_t events)
{
	return __sessiond_check_thread_quit_pipe(fd, events,
			ht_cleanup_quit_pipe[0]);
}

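/*
 * Together, the helpers above give every thread in this file the same
 * shutdown pattern; a condensed sketch of that shape (not new code, just the
 * loop skeleton the threads below all follow):
 *
 *	ret = sessiond_set_thread_pollset(&events, nb_expected_fds);
 *	while (1) {
 *		ret = lttng_poll_wait(&events, -1);
 *		for (i = 0; i < ret; i++) {
 *			if (sessiond_check_thread_quit_pipe(
 *					LTTNG_POLL_GETFD(&events, i),
 *					LTTNG_POLL_GETEV(&events, i))) {
 *				goto exit;
 *			}
 *			... handle the other FDs ...
 *		}
 *	}
 */
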
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int __init_thread_quit_pipe(int *a_pipe)
{
	int ret, i;

	ret = pipe(a_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(a_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}
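
/*
 * Note (an alternative, not a behavior change): on Linux 2.6.27+ the pipe
 * could be created with pipe2(a_pipe, O_CLOEXEC), which sets the
 * close-on-exec flag atomically. The pipe()/fcntl() sequence above is more
 * portable, at the cost of a tiny window where the FDs could leak into a
 * concurrently forked child.
 */
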
static int init_thread_quit_pipe(void)
{
	return __init_thread_quit_pipe(thread_quit_pipe);
}

static int init_ht_cleanup_quit_pipe(void)
{
	return __init_thread_quit_pipe(ht_cleanup_quit_pipe);
}

/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}

/*
 * Close all consumer sockets.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}

/*
 * Generate the full lock file path using the rundir.
 *
 * Return the snprintf() return value, so a negative value indicates an error.
 */
static int generate_lock_file_path(char *path, size_t len)
{
	int ret;

	assert(path);
	assert(rundir);

	/* Build lockfile path from rundir. */
	ret = snprintf(path, len, "%s/" DEFAULT_LTTNG_SESSIOND_LOCKFILE, rundir);
	if (ret < 0) {
		PERROR("snprintf lockfile path");
	}

	return ret;
}

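/*
 * Note that snprintf() returns the length that would have been written, so a
 * return value >= len means the path was silently truncated rather than
 * reported as an error here; callers should keep PATH_MAX-sized buffers, as
 * sessiond_cleanup() below does.
 */
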
/*
 * Cleanup the session daemon's data structures.
 */
static void sessiond_cleanup(void)
{
	int ret;
	struct ltt_session *sess, *stmp;
	char path[PATH_MAX];

	DBG("Cleanup sessiond");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s", rundir);

	/* sessiond */
	snprintf(path, PATH_MAX,
			"%s/%s",
			rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, "%s/%s", rundir,
			DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	/* kconsumerd */
	snprintf(path, PATH_MAX,
			DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_KCONSUMERD_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 32 */
	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 64 */
	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Clean up ALL sessions */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Cleaning up all agent apps");
	agent_app_ht_clean();

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
		free(syscall_table);
	}

	close_consumer_sockets();

	if (load_info) {
		load_session_destroy_data(load_info);
		free(load_info);
	}

	/*
	 * Clean up the lock file by deleting it and finally closing it, which
	 * releases the file system lock.
	 */
	if (lockfile_fd >= 0) {
		char lockfile_path[PATH_MAX];

		ret = generate_lock_file_path(lockfile_path,
				sizeof(lockfile_path));
		if (ret > 0) {
			ret = remove(lockfile_path);
			if (ret < 0) {
				PERROR("remove lock file");
			}
			ret = close(lockfile_fd);
			if (ret < 0) {
				PERROR("close lock file");
			}
		}
	}

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */

	free(rundir);
}

/*
 * Cleanup the daemon's option data structures.
 */
static void sessiond_cleanup_options(void)
{
	DBG("Cleaning up options");

	/*
	 * If an override option is set, the pointer actually refers to
	 * non-const memory, so it is safe to free it even though the variable
	 * is declared const.
	 */
	if (tracing_group_name_override) {
		free((void *) tracing_group_name);
	}
	if (consumerd32_bin_override) {
		free((void *) consumerd32_bin);
	}
	if (consumerd64_bin_override) {
		free((void *) consumerd64_bin);
	}
	if (consumerd32_libdir_override) {
		free((void *) consumerd32_libdir);
	}
	if (consumerd64_libdir_override) {
		free((void *) consumerd64_libdir);
	}

	free(opt_pidfile);
	free(opt_load_session_path);
	free(kmod_probes_list);
	free(kmod_extra_probes_list);

	/* <fun> */
	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
	/* </fun> */
}

/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}

/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call, which involves mmap, shm and futex calls. */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}

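/*
 * For context: the futex word lives in the wait shm page shared with
 * liblttng-ust, whose applications block on it until the word becomes
 * non-zero. Notifying with active == 1 therefore releases any application
 * waiting for a session daemon to appear. (See futex_wait_update() and shm.c
 * for the exact wake semantics; this summary is an interpretation, not new
 * behavior.)
 */
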
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return the payload size on success (the llm header is allocated in front
 * of it), or a negative value on error.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}

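/*
 * Resulting buffer layout (header immediately followed by the payload):
 *
 *	cmd_ctx->llm ---> +--------------------------+
 *	                  | struct lttcomm_lttng_msg |  cmd_type, pid,
 *	                  +--------------------------+  data_size, ...
 *	                  | payload ("size" bytes)   |
 *	                  +--------------------------+
 *
 * cmd_ctx->lttng_msg_size holds the total number of bytes sent back to the
 * client.
 */
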
/*
 * Update the kernel poll set with all channel FDs available across all
 * tracing sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd from 'fd' across all tracing sessions. When found,
 * check for new channel streams and send those stream FDs to the kernel
 * consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_stream(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}

/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		struct ust_app *app;

		session_lock(sess);
		if (!sess->ust_session) {
			goto unlock_session;
		}

		rcu_read_lock();
		assert(app_sock >= 0);
		app = ust_app_find_by_sock(app_sock);
		if (app == NULL) {
			/*
			 * The application may have unregistered already, so
			 * simply stop the update.
			 */
			DBG3("UST app update failed to find app sock %d",
					app_sock);
			goto unlock_rcu;
		}
		ust_app_global_update(sess->ust_session, app);
	unlock_rcu:
		rcu_read_unlock();
	unlock_session:
		session_unlock(sess);
	}
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which could
	 * free non-NULL pointers, so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll with an infinite timeout. */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				(void) lttng_read(kernel_poll_pipe[0],
						&tmp, 1);
				/*
				 * The return value is useless here; if this pipe sees any
				 * activity, an update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer.
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
					/*
					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
					 * and unregister kernel stream at this point.
					 */
				}
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}

/*
 * Signal the consumer data's pthread condition, publishing the state of its
 * consumer thread to the waiter.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}

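/*
 * Waiter side of this condition, as a sketch (the real waiter, which also
 * applies a timeout, lives in the consumer spawning code outside this
 * excerpt):
 *
 *	pthread_mutex_lock(&consumer_data->cond_mutex);
 *	while (consumer_data->consumer_thread_is_ready == 0) {
 *		pthread_cond_wait(&consumer_data->cond,
 *				&consumer_data->cond_mutex);
 *	}
 *	pthread_mutex_unlock(&consumer_data->cond_mutex);
 */
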
/*
 * This thread manages consumer errors sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		/* Connect both sockets: command and metadata. */
		consumer_data->cmd_sock =
				lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		consumer_data->metadata_fd =
				lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0
				|| consumer_data->metadata_fd < 0) {
			PERROR("consumer connect cmd socket");
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			goto error;
		}
		consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
		/* Create metadata socket lock. */
		consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
		if (consumer_data->metadata_sock.lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = -1;
			goto error;
		}
		pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
		DBG("Consumer metadata socket ready (fd: %d)",
				consumer_data->metadata_fd);
	} else {
		ERR("consumer error when waiting for SOCK_READY: %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the consumerd error sock since we've established a connection. */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	while (1) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/*
			 * Thread quit pipe has been triggered; flag that we should stop
			 * but continue the current loop to handle potential data from
			 * the consumer.
			 */
			should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code: %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch; all FDs are tested prior. */
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them, so take exclusive access, which will abort
	 * all other consumer commands from other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
			*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	return NULL;
}

/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Apps thread return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					int sock;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * We only monitor the error events of the socket. This
					 * thread does not handle any incoming data from UST
					 * (POLLIN).
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				}
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled, so leave them be until the
	 * session daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid; we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0; otherwise return a negative value corresponding to
 * the errno of the failed write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function when the thread is in a shutdown state.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return (int) ret;
}

/*
 * Sanitize the wait queue of the dispatch registration thread by removing
 * invalid nodes from it. This avoids memory leaks in case the UST notify
 * socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block, so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				break;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}

/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have nodes that hung up before receiving
			 * the notify socket; this cleans the list and avoids memory
			 * leaks from notify sockets that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage, the received socket is
				 * basically useless, so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this
					 * is not an internal error of this thread, so set the
					 * health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update the newly registered application with the tracing
				 * registry information that is already enabled.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app->sock);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this
					 * is not an internal error of this thread, so set the
					 * health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}
2037
2038 /*
2039 * This thread manage application registration.
2040 */
2041 static void *thread_registration_apps(void *data)
2042 {
2043 int sock = -1, i, ret, pollfd, err = -1;
2044 uint32_t revents, nb_fd;
2045 struct lttng_poll_event events;
2046 /*
2047	 * Gets allocated in this thread, enqueued to a global queue, dequeued
2048	 * and freed in the manage-apps thread.
2049 */
2050 struct ust_command *ust_cmd = NULL;
2051
2052 DBG("[thread] Manage application registration started");
2053
2054 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
2055
2056 if (testpoint(sessiond_thread_registration_apps)) {
2057 goto error_testpoint;
2058 }
2059
2060 ret = lttcomm_listen_unix_sock(apps_sock);
2061 if (ret < 0) {
2062 goto error_listen;
2063 }
2064
2065 /*
2066 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
2067 * more will be added to this poll set.
2068 */
2069 ret = sessiond_set_thread_pollset(&events, 2);
2070 if (ret < 0) {
2071 goto error_create_poll;
2072 }
2073
2074 /* Add the application registration socket */
2075 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
2076 if (ret < 0) {
2077 goto error_poll_add;
2078 }
2079
2080 /* Notify all applications to register */
2081 ret = notify_ust_apps(1);
2082 if (ret < 0) {
2083 ERR("Failed to notify applications or create the wait shared memory.\n"
2084 "Execution continues but there might be problem for already\n"
2085 "running applications that wishes to register.");
2086 }
2087
2088 while (1) {
2089 DBG("Accepting application registration");
2090
2091	/* Infinite blocking call, waiting for transmission */
2092 restart:
2093 health_poll_entry();
2094 ret = lttng_poll_wait(&events, -1);
2095 health_poll_exit();
2096 if (ret < 0) {
2097 /*
2098 * Restart interrupted system call.
2099 */
2100 if (errno == EINTR) {
2101 goto restart;
2102 }
2103 goto error;
2104 }
2105
2106 nb_fd = ret;
2107
2108 for (i = 0; i < nb_fd; i++) {
2109 health_code_update();
2110
2111 /* Fetch once the poll data */
2112 revents = LTTNG_POLL_GETEV(&events, i);
2113 pollfd = LTTNG_POLL_GETFD(&events, i);
2114
2115 if (!revents) {
2116 /* No activity for this FD (poll implementation). */
2117 continue;
2118 }
2119
2120 /* Thread quit pipe has been closed. Killing thread. */
2121 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
2122 if (ret) {
2123 err = 0;
2124 goto exit;
2125 }
2126
2127 /* Event on the registration socket */
2128 if (pollfd == apps_sock) {
2129 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2130 ERR("Register apps socket poll error");
2131 goto error;
2132 } else if (revents & LPOLLIN) {
2133 sock = lttcomm_accept_unix_sock(apps_sock);
2134 if (sock < 0) {
2135 goto error;
2136 }
2137
2138 /*
2139				 * Set socket timeout for both receiving and sending.
2140 * app_socket_timeout is in seconds, whereas
2141 * lttcomm_setsockopt_rcv_timeout and
2142 * lttcomm_setsockopt_snd_timeout expect msec as
2143 * parameter.
2144 */
2145 (void) lttcomm_setsockopt_rcv_timeout(sock,
2146 app_socket_timeout * 1000);
2147 (void) lttcomm_setsockopt_snd_timeout(sock,
2148 app_socket_timeout * 1000);
2149
2150 /*
2151 * Set the CLOEXEC flag. Return code is useless because
2152 * either way, the show must go on.
2153 */
2154 (void) utils_set_fd_cloexec(sock);
2155
2156 /* Create UST registration command for enqueuing */
2157 ust_cmd = zmalloc(sizeof(struct ust_command));
2158 if (ust_cmd == NULL) {
2159 PERROR("ust command zmalloc");
2160 goto error;
2161 }
2162
2163 /*
2164 * Using message-based transmissions to ensure we don't
2165 * have to deal with partially received messages.
2166 */
2167 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2168 if (ret < 0) {
2169 ERR("Exhausted file descriptors allowed for applications.");
2170 free(ust_cmd);
2171 ret = close(sock);
2172 if (ret) {
2173 PERROR("close");
2174 }
2175 sock = -1;
2176 continue;
2177 }
2178
2179 health_code_update();
2180 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
2181 if (ret < 0) {
2182 free(ust_cmd);
2183 /* Close socket of the application. */
2184 ret = close(sock);
2185 if (ret) {
2186 PERROR("close");
2187 }
2188 lttng_fd_put(LTTNG_FD_APPS, 1);
2189 sock = -1;
2190 continue;
2191 }
2192 health_code_update();
2193
2194 ust_cmd->sock = sock;
2195 sock = -1;
2196
2197 DBG("UST registration received with pid:%d ppid:%d uid:%d"
2198 " gid:%d sock:%d name:%s (version %d.%d)",
2199 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
2200 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
2201 ust_cmd->sock, ust_cmd->reg_msg.name,
2202 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
2203
2204 /*
2205				 * Lock-free enqueue of the registration request. The red pill
2206				 * has been taken! This app will be part of the *system*.
2207 */
2208 cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail, &ust_cmd->node);
2209
2210 /*
2211 * Wake the registration queue futex. Implicit memory
2212 * barrier with the exchange in cds_wfcq_enqueue.
2213 */
2214 futex_nto1_wake(&ust_cmd_queue.futex);
2215 }
2216 }
2217 }
2218 }
2219
2220 exit:
2221 error:
2222 /* Notify that the registration thread is gone */
2223 notify_ust_apps(0);
2224
2225 if (apps_sock >= 0) {
2226 ret = close(apps_sock);
2227 if (ret) {
2228 PERROR("close");
2229 }
2230 }
2231 if (sock >= 0) {
2232 ret = close(sock);
2233 if (ret) {
2234 PERROR("close");
2235 }
2236 lttng_fd_put(LTTNG_FD_APPS, 1);
2237 }
2238 unlink(apps_unix_sock_path);
2239
2240 error_poll_add:
2241 lttng_poll_clean(&events);
2242 error_listen:
2243 error_create_poll:
2244 error_testpoint:
2245 DBG("UST Registration thread cleanup complete");
2246 if (err) {
2247 health_error();
2248 ERR("Health error occurred in %s", __func__);
2249 }
2250 health_unregister(health_sessiond);
2251
2252 return NULL;
2253 }
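
/*
 * Illustrative sketch (assumption, not original code): the
 * lttcomm_setsockopt_rcv_timeout()/lttcomm_setsockopt_snd_timeout()
 * calls above presumably reduce to SO_RCVTIMEO/SO_SNDTIMEO, which is
 * why the registration thread converts app_socket_timeout from
 * seconds to milliseconds first. A helper of that shape could look
 * like this (struct timeval is pulled in via <sys/resource.h>):
 */
static int set_sock_timeouts_msec(int sock, unsigned int msec)
{
	struct timeval tv = {
		.tv_sec = msec / 1000,
		.tv_usec = (msec % 1000) * 1000,
	};
	int ret;

	ret = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
	if (ret < 0) {
		return ret;
	}
	return setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
}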
2254
2255 /*
2256  * Start the thread_manage_consumer. This must be done after an lttng-consumerd
2257  * exec or it will fail.
2258 */
2259 static int spawn_consumer_thread(struct consumer_data *consumer_data)
2260 {
2261 int ret, clock_ret;
2262 struct timespec timeout;
2263
2264 /* Make sure we set the readiness flag to 0 because we are NOT ready */
2265 consumer_data->consumer_thread_is_ready = 0;
2266
2267 /* Setup pthread condition */
2268 ret = pthread_condattr_init(&consumer_data->condattr);
2269 if (ret) {
2270 errno = ret;
2271 PERROR("pthread_condattr_init consumer data");
2272 goto error;
2273 }
2274
2275 /*
2276 * Set the monotonic clock in order to make sure we DO NOT jump in time
2277 * between the clock_gettime() call and the timedwait call. See bug #324
2278	 * for more details and how we noticed it.
2279 */
2280 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
2281 if (ret) {
2282 errno = ret;
2283 PERROR("pthread_condattr_setclock consumer data");
2284 goto error;
2285 }
2286
2287 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
2288 if (ret) {
2289 errno = ret;
2290 PERROR("pthread_cond_init consumer data");
2291 goto error;
2292 }
2293
2294 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
2295 consumer_data);
2296 if (ret) {
2297 errno = ret;
2298 PERROR("pthread_create consumer");
2299 ret = -1;
2300 goto error;
2301 }
2302
2303 /* We are about to wait on a pthread condition */
2304 pthread_mutex_lock(&consumer_data->cond_mutex);
2305
2306 /* Get time for sem_timedwait absolute timeout */
2307 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
2308 /*
2309 * Set the timeout for the condition timed wait even if the clock gettime
2310	 * call fails, since we might loop on that call and we want to avoid
2311	 * incrementing the timeout too many times.
2312 */
2313 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
2314
2315 /*
2316	 * The following loop COULD be skipped in some conditions, which is why we
2317	 * set ret to 0 to make sure at least one round of the loop is
2318 * done.
2319 */
2320 ret = 0;
2321
2322 /*
2323	 * Loop until the condition is reached or a timeout occurs. Note
2324 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2325 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2326 * possible. This loop does not take any chances and works with both of
2327 * them.
2328 */
2329 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2330 if (clock_ret < 0) {
2331 PERROR("clock_gettime spawn consumer");
2332 /* Infinite wait for the consumerd thread to be ready */
2333 ret = pthread_cond_wait(&consumer_data->cond,
2334 &consumer_data->cond_mutex);
2335 } else {
2336 ret = pthread_cond_timedwait(&consumer_data->cond,
2337 &consumer_data->cond_mutex, &timeout);
2338 }
2339 }
2340
2341 /* Release the pthread condition */
2342 pthread_mutex_unlock(&consumer_data->cond_mutex);
2343
2344 if (ret != 0) {
2345 errno = ret;
2346 if (ret == ETIMEDOUT) {
2347 int pth_ret;
2348
2349 /*
2350 * Call has timed out so we kill the kconsumerd_thread and return
2351 * an error.
2352 */
2353 ERR("Condition timed out. The consumer thread was never ready."
2354 " Killing it");
2355 pth_ret = pthread_cancel(consumer_data->thread);
2356 if (pth_ret < 0) {
2357 PERROR("pthread_cancel consumer thread");
2358 }
2359 } else {
2360 PERROR("pthread_cond_wait failed consumer thread");
2361 }
2362 /* Caller is expecting a negative value on failure. */
2363 ret = -1;
2364 goto error;
2365 }
2366
2367 pthread_mutex_lock(&consumer_data->pid_mutex);
2368 if (consumer_data->pid == 0) {
2369 ERR("Consumerd did not start");
2370 pthread_mutex_unlock(&consumer_data->pid_mutex);
2371 goto error;
2372 }
2373 pthread_mutex_unlock(&consumer_data->pid_mutex);
2374
2375 return 0;
2376
2377 error:
2378 return ret;
2379 }
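
/*
 * Illustrative sketch (not part of the original source): the minimal
 * shape of the CLOCK_MONOTONIC + pthread_cond_timedwait() handshake
 * used by spawn_consumer_thread() above. The condattr clock must
 * match the clock used to compute the absolute deadline, otherwise a
 * wall-clock jump can shorten or extend the wait arbitrarily (the
 * bug #324 scenario). Assumes cond was initialized with a condattr
 * set to CLOCK_MONOTONIC and that ETIMEDOUT is available through the
 * common headers.
 */
static int wait_flag_timeout(pthread_mutex_t *lock, pthread_cond_t *cond,
		const int *flag, time_t timeout_sec)
{
	struct timespec deadline;
	int ret = 0;

	/* Absolute deadline, on the same clock as the condvar. */
	if (clock_gettime(CLOCK_MONOTONIC, &deadline) < 0) {
		return -1;
	}
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(lock);
	/* Loop to absorb spurious wakeups, as the original loop does. */
	while (!*flag && ret != ETIMEDOUT) {
		ret = pthread_cond_timedwait(cond, lock, &deadline);
	}
	pthread_mutex_unlock(lock);
	return ret == ETIMEDOUT ? -1 : 0;
}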
2380
2381 /*
2382 * Join consumer thread
2383 */
2384 static int join_consumer_thread(struct consumer_data *consumer_data)
2385 {
2386 void *status;
2387
2388 /* Consumer pid must be a real one. */
2389 if (consumer_data->pid > 0) {
2390 int ret;
2391 ret = kill(consumer_data->pid, SIGTERM);
2392 if (ret) {
2393 PERROR("Error killing consumer daemon");
2394 return ret;
2395 }
2396 return pthread_join(consumer_data->thread, &status);
2397 } else {
2398 return 0;
2399 }
2400 }
2401
2402 /*
2403 * Fork and exec a consumer daemon (consumerd).
2404 *
2405  * Return the child pid on success, or a negative value on error.
2406 */
2407 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2408 {
2409 int ret;
2410 pid_t pid;
2411 const char *consumer_to_use;
2412 const char *verbosity;
2413 struct stat st;
2414
2415 DBG("Spawning consumerd");
2416
2417 pid = fork();
2418 if (pid == 0) {
2419 /*
2420 * Exec consumerd.
2421 */
2422 if (opt_verbose_consumer) {
2423 verbosity = "--verbose";
2424 } else if (lttng_opt_quiet) {
2425 verbosity = "--quiet";
2426 } else {
2427 verbosity = "";
2428 }
2429
2430 switch (consumer_data->type) {
2431 case LTTNG_CONSUMER_KERNEL:
2432 /*
2433 * Find out which consumerd to execute. We will first try the
2434 * 64-bit path, then the sessiond's installation directory, and
2435		 * fall back to the 32-bit one.
2436 */
2437 DBG3("Looking for a kernel consumer at these locations:");
2438 DBG3(" 1) %s", consumerd64_bin);
2439 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2440 DBG3(" 3) %s", consumerd32_bin);
2441 if (stat(consumerd64_bin, &st) == 0) {
2442 DBG3("Found location #1");
2443 consumer_to_use = consumerd64_bin;
2444 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2445 DBG3("Found location #2");
2446 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2447 } else if (stat(consumerd32_bin, &st) == 0) {
2448 DBG3("Found location #3");
2449 consumer_to_use = consumerd32_bin;
2450 } else {
2451 DBG("Could not find any valid consumerd executable");
2452 ret = -EINVAL;
2453 break;
2454 }
2455 DBG("Using kernel consumer at: %s", consumer_to_use);
2456 ret = execl(consumer_to_use,
2457 "lttng-consumerd", verbosity, "-k",
2458 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2459 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2460 "--group", tracing_group_name,
2461 NULL);
2462 break;
2463 case LTTNG_CONSUMER64_UST:
2464 {
2465 char *tmpnew = NULL;
2466
2467 if (consumerd64_libdir[0] != '\0') {
2468 char *tmp;
2469 size_t tmplen;
2470
2471 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
2472 if (!tmp) {
2473 tmp = "";
2474 }
2475 tmplen = strlen("LD_LIBRARY_PATH=")
2476 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2477 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2478 if (!tmpnew) {
2479 ret = -ENOMEM;
2480 goto error;
2481 }
2482 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2483 strcat(tmpnew, consumerd64_libdir);
2484 if (tmp[0] != '\0') {
2485 strcat(tmpnew, ":");
2486 strcat(tmpnew, tmp);
2487 }
2488 ret = putenv(tmpnew);
2489 if (ret) {
2490 ret = -errno;
2491 free(tmpnew);
2492 goto error;
2493 }
2494 }
2495 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2496 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2497 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2498 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2499 "--group", tracing_group_name,
2500 NULL);
2501 if (consumerd64_libdir[0] != '\0') {
2502 free(tmpnew);
2503 }
2504 break;
2505 }
2506 case LTTNG_CONSUMER32_UST:
2507 {
2508 char *tmpnew = NULL;
2509
2510 if (consumerd32_libdir[0] != '\0') {
2511 char *tmp;
2512 size_t tmplen;
2513
2514 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
2515 if (!tmp) {
2516 tmp = "";
2517 }
2518 tmplen = strlen("LD_LIBRARY_PATH=")
2519 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2520 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2521 if (!tmpnew) {
2522 ret = -ENOMEM;
2523 goto error;
2524 }
2525 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2526 strcat(tmpnew, consumerd32_libdir);
2527 if (tmp[0] != '\0') {
2528 strcat(tmpnew, ":");
2529 strcat(tmpnew, tmp);
2530 }
2531 ret = putenv(tmpnew);
2532 if (ret) {
2533 ret = -errno;
2534 free(tmpnew);
2535 goto error;
2536 }
2537 }
2538 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2539 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2540 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2541 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2542 "--group", tracing_group_name,
2543 NULL);
2544 if (consumerd32_libdir[0] != '\0') {
2545 free(tmpnew);
2546 }
2547 break;
2548 }
2549 default:
2550 PERROR("unknown consumer type");
2551 exit(EXIT_FAILURE);
2552 }
2553 if (errno != 0) {
2554 PERROR("Consumer execl()");
2555 }
2556 /* Reaching this point, we got a failure on our execl(). */
2557 exit(EXIT_FAILURE);
2558 } else if (pid > 0) {
2559 ret = pid;
2560 } else {
2561 PERROR("start consumer fork");
2562 ret = -errno;
2563 }
2564 error:
2565 return ret;
2566 }
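
/*
 * Illustrative sketch (assumption, not original code): the
 * LD_LIBRARY_PATH construction above, re-expressed with a single
 * snprintf(). POSIX putenv() does not copy its argument, so the
 * returned string must stay allocated for as long as the environment
 * entry lives; the original only frees it once execl() has already
 * failed and the child is about to exit.
 */
static char *make_ld_library_path_env(const char *libdir)
{
	const char *current = getenv("LD_LIBRARY_PATH");
	size_t len;
	char *env;

	if (!current) {
		current = "";
	}
	len = strlen("LD_LIBRARY_PATH=") + strlen(libdir)
			+ 1 /* : */ + strlen(current) + 1 /* \0 */;
	env = zmalloc(len);
	if (!env) {
		return NULL;
	}
	/* Prepend libdir; keep the previous search path after it. */
	snprintf(env, len, "LD_LIBRARY_PATH=%s%s%s", libdir,
			current[0] != '\0' ? ":" : "", current);
	return env;
}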
2567
2568 /*
2569 * Spawn the consumerd daemon and session daemon thread.
2570 */
2571 static int start_consumerd(struct consumer_data *consumer_data)
2572 {
2573 int ret;
2574
2575 /*
2576 * Set the listen() state on the socket since there is a possible race
2577	 * between the exec() of the consumer daemon and this call if placed in the
2578 * consumer thread. See bug #366 for more details.
2579 */
2580 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2581 if (ret < 0) {
2582 goto error;
2583 }
2584
2585 pthread_mutex_lock(&consumer_data->pid_mutex);
2586 if (consumer_data->pid != 0) {
2587 pthread_mutex_unlock(&consumer_data->pid_mutex);
2588 goto end;
2589 }
2590
2591 ret = spawn_consumerd(consumer_data);
2592 if (ret < 0) {
2593 ERR("Spawning consumerd failed");
2594 pthread_mutex_unlock(&consumer_data->pid_mutex);
2595 goto error;
2596 }
2597
2598 /* Setting up the consumer_data pid */
2599 consumer_data->pid = ret;
2600 DBG2("Consumer pid %d", consumer_data->pid);
2601 pthread_mutex_unlock(&consumer_data->pid_mutex);
2602
2603 DBG2("Spawning consumer control thread");
2604 ret = spawn_consumer_thread(consumer_data);
2605 if (ret < 0) {
2606 ERR("Fatal error spawning consumer control thread");
2607 goto error;
2608 }
2609
2610 end:
2611 return 0;
2612
2613 error:
2614 /* Cleanup already created sockets on error. */
2615 if (consumer_data->err_sock >= 0) {
2616 int err;
2617
2618 err = close(consumer_data->err_sock);
2619 if (err < 0) {
2620 PERROR("close consumer data error socket");
2621 }
2622 }
2623 return ret;
2624 }
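
/*
 * Illustrative sketch (not part of the original source): why
 * start_consumerd() calls listen() before spawning consumerd. Once
 * listen() has returned, the kernel queues incoming connect()
 * attempts even though accept() has not been called yet, so the
 * child's early connect() on the error socket cannot be lost to a
 * race with the parent.
 */
static int listen_then_spawn(int err_sock)
{
	pid_t pid;

	/* 1) Arm the accept queue first... */
	if (listen(err_sock, 10) < 0) {
		return -1;
	}
	/* 2) ...only then fork/exec the child that will connect(). */
	pid = fork();
	if (pid < 0) {
		return -1;
	}
	if (pid == 0) {
		/* Child: exec the consumer daemon here. */
	}
	return 0;
}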
2625
2626 /*
2627 * Setup necessary data for kernel tracer action.
2628 */
2629 static int init_kernel_tracer(void)
2630 {
2631 int ret;
2632
2633 /* Modprobe lttng kernel modules */
2634 ret = modprobe_lttng_control();
2635 if (ret < 0) {
2636 goto error;
2637 }
2638
2639 /* Open debugfs lttng */
2640 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2641 if (kernel_tracer_fd < 0) {
2642 DBG("Failed to open %s", module_proc_lttng);
2643 ret = -1;
2644 goto error_open;
2645 }
2646
2647 /* Validate kernel version */
2648 ret = kernel_validate_version(kernel_tracer_fd);
2649 if (ret < 0) {
2650 goto error_version;
2651 }
2652
2653 ret = modprobe_lttng_data();
2654 if (ret < 0) {
2655 goto error_modules;
2656 }
2657
2658 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2659 return 0;
2660
2661 error_version:
2662 modprobe_remove_lttng_control();
2663 ret = close(kernel_tracer_fd);
2664 if (ret) {
2665 PERROR("close");
2666 }
2667 kernel_tracer_fd = -1;
2668 return LTTNG_ERR_KERN_VERSION;
2669
2670 error_modules:
2671 ret = close(kernel_tracer_fd);
2672 if (ret) {
2673 PERROR("close");
2674 }
2675
2676 error_open:
2677 modprobe_remove_lttng_control();
2678
2679 error:
2680 WARN("No kernel tracer available");
2681 kernel_tracer_fd = -1;
2682 if (!is_root) {
2683 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2684 } else {
2685 return LTTNG_ERR_KERN_NA;
2686 }
2687 }
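
/*
 * Illustrative sketch (not part of the original source): the layered
 * goto-unwinding idiom used by init_kernel_tracer() above. Each
 * error label undoes exactly the steps that succeeded before the
 * failure, in reverse order, so nothing is leaked and nothing is
 * released twice.
 */
static int acquire_two_fds(void)
{
	int fd_a, fd_b;

	fd_a = open("/dev/null", O_RDONLY);	/* step 1 */
	if (fd_a < 0) {
		goto error;
	}
	fd_b = open("/dev/null", O_RDONLY);	/* step 2 */
	if (fd_b < 0) {
		goto error_close_a;
	}
	/* Success: use both, then release in reverse order. */
	(void) close(fd_b);
	(void) close(fd_a);
	return 0;

error_close_a:
	(void) close(fd_a);	/* Undo step 1 only. */
error:
	return -1;
}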
2688
2689
2690 /*
2691 * Copy consumer output from the tracing session to the domain session. The
2692  * function also applies the right modifications, on a per-domain basis, to
2693  * the trace files' destination directory.
2694 *
2695 * Should *NOT* be called with RCU read-side lock held.
2696 */
2697 static int copy_session_consumer(int domain, struct ltt_session *session)
2698 {
2699 int ret;
2700 const char *dir_name;
2701 struct consumer_output *consumer;
2702
2703 assert(session);
2704 assert(session->consumer);
2705
2706 switch (domain) {
2707 case LTTNG_DOMAIN_KERNEL:
2708 DBG3("Copying tracing session consumer output in kernel session");
2709 /*
2710 * XXX: We should audit the session creation and what this function
2711 * does "extra" in order to avoid a destroy since this function is used
2712 * in the domain session creation (kernel and ust) only. Same for UST
2713 * domain.
2714 */
2715 if (session->kernel_session->consumer) {
2716 consumer_destroy_output(session->kernel_session->consumer);
2717 }
2718 session->kernel_session->consumer =
2719 consumer_copy_output(session->consumer);
2720 /* Ease our life a bit for the next part */
2721 consumer = session->kernel_session->consumer;
2722 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2723 break;
2724 case LTTNG_DOMAIN_JUL:
2725 case LTTNG_DOMAIN_LOG4J:
2726 case LTTNG_DOMAIN_PYTHON:
2727 case LTTNG_DOMAIN_UST:
2728 DBG3("Copying tracing session consumer output in UST session");
2729 if (session->ust_session->consumer) {
2730 consumer_destroy_output(session->ust_session->consumer);
2731 }
2732 session->ust_session->consumer =
2733 consumer_copy_output(session->consumer);
2734 /* Ease our life a bit for the next part */
2735 consumer = session->ust_session->consumer;
2736 dir_name = DEFAULT_UST_TRACE_DIR;
2737 break;
2738 default:
2739 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2740 goto error;
2741 }
2742
2743 /* Append correct directory to subdir */
2744 strncat(consumer->subdir, dir_name,
2745 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2746 DBG3("Copy session consumer subdir %s", consumer->subdir);
2747
2748 ret = LTTNG_OK;
2749
2750 error:
2751 return ret;
2752 }
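
/*
 * Illustrative sketch (not part of the original source): the
 * "sizeof(buf) - strlen(buf) - 1" strncat() idiom used above to
 * append the per-domain directory to consumer->subdir. The third
 * argument is the remaining free space, minus the one byte strncat()
 * always keeps for the terminating NUL; overlong input is silently
 * truncated rather than overflowing.
 */
static void append_bounded(char *buf, size_t bufsz, const char *suffix)
{
	/* Assumes buf is NUL-terminated and bufsz > 0. */
	strncat(buf, suffix, bufsz - strlen(buf) - 1);
}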
2753
2754 /*
2755 * Create an UST session and add it to the session ust list.
2756 *
2757 * Should *NOT* be called with RCU read-side lock held.
2758 */
2759 static int create_ust_session(struct ltt_session *session,
2760 struct lttng_domain *domain)
2761 {
2762 int ret;
2763 struct ltt_ust_session *lus = NULL;
2764
2765 assert(session);
2766 assert(domain);
2767 assert(session->consumer);
2768
2769 switch (domain->type) {
2770 case LTTNG_DOMAIN_JUL:
2771 case LTTNG_DOMAIN_LOG4J:
2772 case LTTNG_DOMAIN_PYTHON:
2773 case LTTNG_DOMAIN_UST:
2774 break;
2775 default:
2776 ERR("Unknown UST domain on create session %d", domain->type);
2777 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2778 goto error;
2779 }
2780
2781 DBG("Creating UST session");
2782
2783 lus = trace_ust_create_session(session->id);
2784 if (lus == NULL) {
2785 ret = LTTNG_ERR_UST_SESS_FAIL;
2786 goto error;
2787 }
2788
2789 lus->uid = session->uid;
2790 lus->gid = session->gid;
2791 lus->output_traces = session->output_traces;
2792 lus->snapshot_mode = session->snapshot_mode;
2793 lus->live_timer_interval = session->live_timer;
2794 session->ust_session = lus;
2795 if (session->shm_path[0]) {
2796 strncpy(lus->root_shm_path, session->shm_path,
2797 sizeof(lus->root_shm_path));
2798 lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
2799 strncpy(lus->shm_path, session->shm_path,
2800 sizeof(lus->shm_path));
2801 lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
2802 strncat(lus->shm_path, "/ust",
2803 sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
2804 }
2805 /* Copy session output to the newly created UST session */
2806 ret = copy_session_consumer(domain->type, session);
2807 if (ret != LTTNG_OK) {
2808 goto error;
2809 }
2810
2811 return LTTNG_OK;
2812
2813 error:
2814 free(lus);
2815 session->ust_session = NULL;
2816 return ret;
2817 }
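
/*
 * Illustrative sketch (assumption, not original code): the shm_path
 * construction above, re-expressed as one snprintf() with explicit
 * truncation detection. snprintf() returns the length the output
 * would have had, so a return value >= the buffer size means the
 * path was cut short.
 */
static int build_ust_shm_path(char *dst, size_t dstsz, const char *shm_path)
{
	int ret;

	ret = snprintf(dst, dstsz, "%s/ust", shm_path);
	if (ret < 0 || (size_t) ret >= dstsz) {
		return -1;	/* Error or truncated path. */
	}
	return 0;
}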
2818
2819 /*
2820 * Create a kernel tracer session then create the default channel.
2821 */
2822 static int create_kernel_session(struct ltt_session *session)
2823 {
2824 int ret;
2825
2826 DBG("Creating kernel session");
2827
2828 ret = kernel_create_session(session, kernel_tracer_fd);
2829 if (ret < 0) {
2830 ret = LTTNG_ERR_KERN_SESS_FAIL;
2831 goto error;
2832 }
2833
2834 /* Code flow safety */
2835 assert(session->kernel_session);
2836
2837 /* Copy session output to the newly created Kernel session */
2838 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2839 if (ret != LTTNG_OK) {
2840 goto error;
2841 }
2842
2843 /* Create directory(ies) on local filesystem. */
2844 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2845 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2846 ret = run_as_mkdir_recursive(
2847 session->kernel_session->consumer->dst.trace_path,
2848 S_IRWXU | S_IRWXG, session->uid, session->gid);
2849 if (ret < 0) {
2850 if (ret != -EEXIST) {
2851 ERR("Trace directory creation error");
2852 goto error;
2853 }
2854 }
2855 }
2856
2857 session->kernel_session->uid = session->uid;
2858 session->kernel_session->gid = session->gid;
2859 session->kernel_session->output_traces = session->output_traces;
2860 session->kernel_session->snapshot_mode = session->snapshot_mode;
2861
2862 return LTTNG_OK;
2863
2864 error:
2865 trace_kernel_destroy_session(session->kernel_session);
2866 session->kernel_session = NULL;
2867 return ret;
2868 }
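
/*
 * Illustrative sketch (assumption, not original code): the essential
 * behaviour create_kernel_session() expects from
 * run_as_mkdir_recursive() above: create each path component in turn
 * and treat EEXIST as success, so an already-present trace directory
 * is not an error. Assumes errno/EEXIST come in through the common
 * headers.
 */
static int mkdir_recursive_sketch(const char *path, mode_t mode)
{
	char buf[PATH_MAX];
	char *p;

	if (strlen(path) >= sizeof(buf)) {
		return -1;
	}
	strcpy(buf, path);
	for (p = buf + 1; *p; p++) {
		if (*p != '/') {
			continue;
		}
		*p = '\0';		/* Cut at this component... */
		if (mkdir(buf, mode) < 0 && errno != EEXIST) {
			return -1;
		}
		*p = '/';		/* ...and restore the separator. */
	}
	/* Create the final component itself. */
	if (mkdir(buf, mode) < 0 && errno != EEXIST) {
		return -1;
	}
	return 0;
}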
2869
2870 /*
2871  * Count the number of sessions permitted by uid/gid.
2872 */
2873 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2874 {
2875 unsigned int i = 0;
2876 struct ltt_session *session;
2877
2878 DBG("Counting number of available session for UID %d GID %d",
2879 uid, gid);
2880 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2881 /*
2882 * Only list the sessions the user can control.
2883 */
2884 if (!session_access_ok(session, uid, gid)) {
2885 continue;
2886 }
2887 i++;
2888 }
2889 return i;
2890 }
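
/*
 * Illustrative sketch (not part of the original source): the
 * receive-variable-length-payload pattern that process_client_msg()
 * below repeats for exclusions, filter expressions, bytecode and
 * URIs: validate the announced length (against a maximum where one
 * exists), allocate, then read exactly that many bytes from the
 * client socket in one message.
 */
static void *recv_var_len_payload(int sock, size_t announced_len,
		size_t max_len, int *sock_error)
{
	void *buf;

	if (announced_len == 0 || announced_len > max_len) {
		return NULL;	/* Bogus length announced by the client. */
	}
	buf = zmalloc(announced_len);
	if (!buf) {
		return NULL;
	}
	if (lttcomm_recv_unix_sock(sock, buf, announced_len) <= 0) {
		/* Socket is in a bad state; caller must close it. */
		*sock_error = 1;
		free(buf);
		return NULL;
	}
	return buf;
}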
2891
2892 /*
2893 * Process the command requested by the lttng client within the command
2894  * context structure. This function makes sure that the return structure (llm)
2895 * is set and ready for transmission before returning.
2896 *
2897 * Return any error encountered or 0 for success.
2898 *
2899 * "sock" is only used for special-case var. len data.
2900 *
2901 * Should *NOT* be called with RCU read-side lock held.
2902 */
2903 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2904 int *sock_error)
2905 {
2906 int ret = LTTNG_OK;
2907 int need_tracing_session = 1;
2908 int need_domain;
2909
2910 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2911
2912 *sock_error = 0;
2913
2914 switch (cmd_ctx->lsm->cmd_type) {
2915 case LTTNG_CREATE_SESSION:
2916 case LTTNG_CREATE_SESSION_SNAPSHOT:
2917 case LTTNG_CREATE_SESSION_LIVE:
2918 case LTTNG_DESTROY_SESSION:
2919 case LTTNG_LIST_SESSIONS:
2920 case LTTNG_LIST_DOMAINS:
2921 case LTTNG_START_TRACE:
2922 case LTTNG_STOP_TRACE:
2923 case LTTNG_DATA_PENDING:
2924 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2925 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2926 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2927 case LTTNG_SNAPSHOT_RECORD:
2928 case LTTNG_SAVE_SESSION:
2929 case LTTNG_SET_SESSION_SHM_PATH:
2930 need_domain = 0;
2931 break;
2932 default:
2933 need_domain = 1;
2934 }
2935
2936 if (opt_no_kernel && need_domain
2937 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2938 if (!is_root) {
2939 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2940 } else {
2941 ret = LTTNG_ERR_KERN_NA;
2942 }
2943 goto error;
2944 }
2945
2946 /* Deny register consumer if we already have a spawned consumer. */
2947 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2948 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2949 if (kconsumer_data.pid > 0) {
2950 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2951 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2952 goto error;
2953 }
2954 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2955 }
2956
2957 /*
2958	 * Check for commands that don't need to allocate a returned payload. We do
2959 * this here so we don't have to make the call for no payload at each
2960 * command.
2961 */
2962	switch (cmd_ctx->lsm->cmd_type) {
2963 case LTTNG_LIST_SESSIONS:
2964 case LTTNG_LIST_TRACEPOINTS:
2965 case LTTNG_LIST_TRACEPOINT_FIELDS:
2966 case LTTNG_LIST_DOMAINS:
2967 case LTTNG_LIST_CHANNELS:
2968 case LTTNG_LIST_EVENTS:
2969 case LTTNG_LIST_SYSCALLS:
2970 case LTTNG_LIST_TRACKER_PIDS:
2971 break;
2972 default:
2973 /* Setup lttng message with no payload */
2974 ret = setup_lttng_msg(cmd_ctx, 0);
2975 if (ret < 0) {
2976 /* This label does not try to unlock the session */
2977 goto init_setup_error;
2978 }
2979 }
2980
2981 /* Commands that DO NOT need a session. */
2982 switch (cmd_ctx->lsm->cmd_type) {
2983 case LTTNG_CREATE_SESSION:
2984 case LTTNG_CREATE_SESSION_SNAPSHOT:
2985 case LTTNG_CREATE_SESSION_LIVE:
2986 case LTTNG_CALIBRATE:
2987 case LTTNG_LIST_SESSIONS:
2988 case LTTNG_LIST_TRACEPOINTS:
2989 case LTTNG_LIST_SYSCALLS:
2990 case LTTNG_LIST_TRACEPOINT_FIELDS:
2991 case LTTNG_SAVE_SESSION:
2992 need_tracing_session = 0;
2993 break;
2994 default:
2995 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2996 /*
2997 * We keep the session list lock across _all_ commands
2998 * for now, because the per-session lock does not
2999 * handle teardown properly.
3000 */
3001 session_lock_list();
3002 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
3003 if (cmd_ctx->session == NULL) {
3004 ret = LTTNG_ERR_SESS_NOT_FOUND;
3005 goto error;
3006 } else {
3007 /* Acquire lock for the session */
3008 session_lock(cmd_ctx->session);
3009 }
3010 break;
3011 }
3012
3013 /*
3014 * Commands that need a valid session but should NOT create one if none
3015 * exists. Instead of creating one and destroying it when the command is
3016	 * handled, check that right before so we save a round trip through useless
3017	 * code paths.
3018 */
3019 switch (cmd_ctx->lsm->cmd_type) {
3020 case LTTNG_DISABLE_CHANNEL:
3021 case LTTNG_DISABLE_EVENT:
3022 switch (cmd_ctx->lsm->domain.type) {
3023 case LTTNG_DOMAIN_KERNEL:
3024 if (!cmd_ctx->session->kernel_session) {
3025 ret = LTTNG_ERR_NO_CHANNEL;
3026 goto error;
3027 }
3028 break;
3029 case LTTNG_DOMAIN_JUL:
3030 case LTTNG_DOMAIN_LOG4J:
3031 case LTTNG_DOMAIN_PYTHON:
3032 case LTTNG_DOMAIN_UST:
3033 if (!cmd_ctx->session->ust_session) {
3034 ret = LTTNG_ERR_NO_CHANNEL;
3035 goto error;
3036 }
3037 break;
3038 default:
3039 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
3040 goto error;
3041 }
3042 default:
3043 break;
3044 }
3045
3046 if (!need_domain) {
3047 goto skip_domain;
3048 }
3049
3050 /*
3051 * Check domain type for specific "pre-action".
3052 */
3053 switch (cmd_ctx->lsm->domain.type) {
3054 case LTTNG_DOMAIN_KERNEL:
3055 if (!is_root) {
3056 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
3057 goto error;
3058 }
3059
3060 /* Kernel tracer check */
3061 if (kernel_tracer_fd == -1) {
3062 /* Basically, load kernel tracer modules */
3063 ret = init_kernel_tracer();
3064 if (ret != 0) {
3065 goto error;
3066 }
3067 }
3068
3069 /* Consumer is in an ERROR state. Report back to client */
3070 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
3071 ret = LTTNG_ERR_NO_KERNCONSUMERD;
3072 goto error;
3073 }
3074
3075 /* Need a session for kernel command */
3076 if (need_tracing_session) {
3077 if (cmd_ctx->session->kernel_session == NULL) {
3078 ret = create_kernel_session(cmd_ctx->session);
3079 if (ret < 0) {
3080 ret = LTTNG_ERR_KERN_SESS_FAIL;
3081 goto error;
3082 }
3083 }
3084
3085 /* Start the kernel consumer daemon */
3086 pthread_mutex_lock(&kconsumer_data.pid_mutex);
3087 if (kconsumer_data.pid == 0 &&
3088 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3089 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
3090 ret = start_consumerd(&kconsumer_data);
3091 if (ret < 0) {
3092 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
3093 goto error;
3094 }
3095 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
3096 } else {
3097 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
3098 }
3099
3100 /*
3101 * The consumer was just spawned so we need to add the socket to
3102			 * the consumer output of the session, if it exists.
3103 */
3104 ret = consumer_create_socket(&kconsumer_data,
3105 cmd_ctx->session->kernel_session->consumer);
3106 if (ret < 0) {
3107 goto error;
3108 }
3109 }
3110
3111 break;
3112 case LTTNG_DOMAIN_JUL:
3113 case LTTNG_DOMAIN_LOG4J:
3114 case LTTNG_DOMAIN_PYTHON:
3115 case LTTNG_DOMAIN_UST:
3116 {
3117 if (!ust_app_supported()) {
3118 ret = LTTNG_ERR_NO_UST;
3119 goto error;
3120 }
3121 /* Consumer is in an ERROR state. Report back to client */
3122 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
3123 ret = LTTNG_ERR_NO_USTCONSUMERD;
3124 goto error;
3125 }
3126
3127 if (need_tracing_session) {
3128 /* Create UST session if none exist. */
3129 if (cmd_ctx->session->ust_session == NULL) {
3130 ret = create_ust_session(cmd_ctx->session,
3131 &cmd_ctx->lsm->domain);
3132 if (ret != LTTNG_OK) {
3133 goto error;
3134 }
3135 }
3136
3137 /* Start the UST consumer daemons */
3138 /* 64-bit */
3139 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
3140 if (consumerd64_bin[0] != '\0' &&
3141 ustconsumer64_data.pid == 0 &&
3142 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3143 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
3144 ret = start_consumerd(&ustconsumer64_data);
3145 if (ret < 0) {
3146 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
3147 uatomic_set(&ust_consumerd64_fd, -EINVAL);
3148 goto error;
3149 }
3150
3151 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
3152 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
3153 } else {
3154 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
3155 }
3156
3157 /*
3158 * Setup socket for consumer 64 bit. No need for atomic access
3159 * since it was set above and can ONLY be set in this thread.
3160 */
3161 ret = consumer_create_socket(&ustconsumer64_data,
3162 cmd_ctx->session->ust_session->consumer);
3163 if (ret < 0) {
3164 goto error;
3165 }
3166
3167 /* 32-bit */
3168 pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
3169 if (consumerd32_bin[0] != '\0' &&
3170 ustconsumer32_data.pid == 0 &&
3171 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3172 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
3173 ret = start_consumerd(&ustconsumer32_data);
3174 if (ret < 0) {
3175 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
3176 uatomic_set(&ust_consumerd32_fd, -EINVAL);
3177 goto error;
3178 }
3179
3180 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
3181 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
3182 } else {
3183 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
3184 }
3185
3186 /*
3187			 * Setup socket for consumer 32 bit. No need for atomic access
3188 * since it was set above and can ONLY be set in this thread.
3189 */
3190 ret = consumer_create_socket(&ustconsumer32_data,
3191 cmd_ctx->session->ust_session->consumer);
3192 if (ret < 0) {
3193 goto error;
3194 }
3195 }
3196 break;
3197 }
3198 default:
3199 break;
3200 }
3201 skip_domain:
3202
3203 /* Validate consumer daemon state when start/stop trace command */
3204 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
3205 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
3206 switch (cmd_ctx->lsm->domain.type) {
3207 case LTTNG_DOMAIN_JUL:
3208 case LTTNG_DOMAIN_LOG4J:
3209 case LTTNG_DOMAIN_PYTHON:
3210 case LTTNG_DOMAIN_UST:
3211 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
3212 ret = LTTNG_ERR_NO_USTCONSUMERD;
3213 goto error;
3214 }
3215 break;
3216 case LTTNG_DOMAIN_KERNEL:
3217 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
3218 ret = LTTNG_ERR_NO_KERNCONSUMERD;
3219 goto error;
3220 }
3221 break;
3222 }
3223 }
3224
3225 /*
3226 * Check that the UID or GID match that of the tracing session.
3227 * The root user can interact with all sessions.
3228 */
3229 if (need_tracing_session) {
3230 if (!session_access_ok(cmd_ctx->session,
3231 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3232 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
3233 ret = LTTNG_ERR_EPERM;
3234 goto error;
3235 }
3236 }
3237
3238 /*
3239 * Send relayd information to consumer as soon as we have a domain and a
3240 * session defined.
3241 */
3242 if (cmd_ctx->session && need_domain) {
3243 /*
3244 * Setup relayd if not done yet. If the relayd information was already
3245 * sent to the consumer, this call will gracefully return.
3246 */
3247 ret = cmd_setup_relayd(cmd_ctx->session);
3248 if (ret != LTTNG_OK) {
3249 goto error;
3250 }
3251 }
3252
3253 /* Process by command type */
3254 switch (cmd_ctx->lsm->cmd_type) {
3255 case LTTNG_ADD_CONTEXT:
3256 {
3257 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3258 cmd_ctx->lsm->u.context.channel_name,
3259 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
3260 break;
3261 }
3262 case LTTNG_DISABLE_CHANNEL:
3263 {
3264 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3265 cmd_ctx->lsm->u.disable.channel_name);
3266 break;
3267 }
3268 case LTTNG_DISABLE_EVENT:
3269 {
3270 /* FIXME: passing packed structure to non-packed pointer */
3271 /* TODO: handle filter */
3272 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3273 cmd_ctx->lsm->u.disable.channel_name,
3274 &cmd_ctx->lsm->u.disable.event);
3275 break;
3276 }
3277 case LTTNG_ENABLE_CHANNEL:
3278 {
3279 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
3280 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
3281 break;
3282 }
3283 case LTTNG_TRACK_PID:
3284 {
3285 ret = cmd_track_pid(cmd_ctx->session,
3286 cmd_ctx->lsm->domain.type,
3287 cmd_ctx->lsm->u.pid_tracker.pid);
3288 break;
3289 }
3290 case LTTNG_UNTRACK_PID:
3291 {
3292 ret = cmd_untrack_pid(cmd_ctx->session,
3293 cmd_ctx->lsm->domain.type,
3294 cmd_ctx->lsm->u.pid_tracker.pid);
3295 break;
3296 }
3297 case LTTNG_ENABLE_EVENT:
3298 {
3299 struct lttng_event_exclusion *exclusion = NULL;
3300 struct lttng_filter_bytecode *bytecode = NULL;
3301 char *filter_expression = NULL;
3302
3303		/* Handle exclusion events and receive them from the client. */
3304 if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
3305 size_t count = cmd_ctx->lsm->u.enable.exclusion_count;
3306
3307 exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
3308 (count * LTTNG_SYMBOL_NAME_LEN));
3309 if (!exclusion) {
3310 ret = LTTNG_ERR_EXCLUSION_NOMEM;
3311 goto error;
3312 }
3313
3314 DBG("Receiving var len exclusion event list from client ...");
3315 exclusion->count = count;
3316 ret = lttcomm_recv_unix_sock(sock, exclusion->names,
3317 count * LTTNG_SYMBOL_NAME_LEN);
3318 if (ret <= 0) {
3319 DBG("Nothing recv() from client var len data... continuing");
3320 *sock_error = 1;
3321 free(exclusion);
3322 ret = LTTNG_ERR_EXCLUSION_INVAL;
3323 goto error;
3324 }
3325 }
3326
3327 /* Get filter expression from client. */
3328 if (cmd_ctx->lsm->u.enable.expression_len > 0) {
3329 size_t expression_len =
3330 cmd_ctx->lsm->u.enable.expression_len;
3331
3332 if (expression_len > LTTNG_FILTER_MAX_LEN) {
3333 ret = LTTNG_ERR_FILTER_INVAL;
3334 free(exclusion);
3335 goto error;
3336 }
3337
3338 filter_expression = zmalloc(expression_len);
3339 if (!filter_expression) {
3340 free(exclusion);
3341 ret = LTTNG_ERR_FILTER_NOMEM;
3342 goto error;
3343 }
3344
3345 /* Receive var. len. data */
3346 DBG("Receiving var len filter's expression from client ...");
3347 ret = lttcomm_recv_unix_sock(sock, filter_expression,
3348 expression_len);
3349 if (ret <= 0) {
3350 DBG("Nothing recv() from client car len data... continuing");
3351 *sock_error = 1;
3352 free(filter_expression);
3353 free(exclusion);
3354 ret = LTTNG_ERR_FILTER_INVAL;
3355 goto error;
3356 }
3357 }
3358
3359 /* Handle filter and get bytecode from client. */
3360 if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
3361 size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
3362
3363 if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
3364 ret = LTTNG_ERR_FILTER_INVAL;
3365 free(filter_expression);
3366 free(exclusion);
3367 goto error;
3368 }
3369
3370 bytecode = zmalloc(bytecode_len);
3371 if (!bytecode) {
3372 free(filter_expression);
3373 free(exclusion);
3374 ret = LTTNG_ERR_FILTER_NOMEM;
3375 goto error;
3376 }
3377
3378 /* Receive var. len. data */
3379 DBG("Receiving var len filter's bytecode from client ...");
3380 ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
3381 if (ret <= 0) {
3382 DBG("Nothing recv() from client car len data... continuing");
3383 *sock_error = 1;
3384 free(filter_expression);
3385 free(bytecode);
3386 free(exclusion);
3387 ret = LTTNG_ERR_FILTER_INVAL;
3388 goto error;
3389 }
3390
3391 if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
3392 free(filter_expression);
3393 free(bytecode);
3394 free(exclusion);
3395 ret = LTTNG_ERR_FILTER_INVAL;
3396 goto error;
3397 }
3398 }
3399
3400 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3401 cmd_ctx->lsm->u.enable.channel_name,
3402 &cmd_ctx->lsm->u.enable.event,
3403 filter_expression, bytecode, exclusion,
3404 kernel_poll_pipe[1]);
3405 break;
3406 }
3407 case LTTNG_LIST_TRACEPOINTS:
3408 {
3409 struct lttng_event *events;
3410 ssize_t nb_events;
3411
3412 session_lock_list();
3413 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3414 session_unlock_list();
3415 if (nb_events < 0) {
3416 /* Return value is a negative lttng_error_code. */
3417 ret = -nb_events;
3418 goto error;
3419 }
3420
3421 /*
3422 * Setup lttng message with payload size set to the event list size in
3423 * bytes and then copy list into the llm payload.
3424 */
3425 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3426 if (ret < 0) {
3427 free(events);
3428 goto setup_error;
3429 }
3430
3431 /* Copy event list into message payload */
3432 memcpy(cmd_ctx->llm->payload, events,
3433 sizeof(struct lttng_event) * nb_events);
3434
3435 free(events);
3436
3437 ret = LTTNG_OK;
3438 break;
3439 }
3440 case LTTNG_LIST_TRACEPOINT_FIELDS:
3441 {
3442 struct lttng_event_field *fields;
3443 ssize_t nb_fields;
3444
3445 session_lock_list();
3446 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
3447 &fields);
3448 session_unlock_list();
3449 if (nb_fields < 0) {
3450 /* Return value is a negative lttng_error_code. */
3451 ret = -nb_fields;
3452 goto error;
3453 }
3454
3455 /*
3456		 * Setup lttng message with payload size set to the field list size in
3457 * bytes and then copy list into the llm payload.
3458 */
3459 ret = setup_lttng_msg(cmd_ctx,
3460 sizeof(struct lttng_event_field) * nb_fields);
3461 if (ret < 0) {
3462 free(fields);
3463 goto setup_error;
3464 }
3465
3466		/* Copy field list into message payload */
3467 memcpy(cmd_ctx->llm->payload, fields,
3468 sizeof(struct lttng_event_field) * nb_fields);
3469
3470 free(fields);
3471
3472 ret = LTTNG_OK;
3473 break;
3474 }
3475 case LTTNG_LIST_SYSCALLS:
3476 {
3477 struct lttng_event *events;
3478 ssize_t nb_events;
3479
3480 nb_events = cmd_list_syscalls(&events);
3481 if (nb_events < 0) {
3482 /* Return value is a negative lttng_error_code. */
3483 ret = -nb_events;
3484 goto error;
3485 }
3486
3487 /*
3488 * Setup lttng message with payload size set to the event list size in
3489 * bytes and then copy list into the llm payload.
3490 */
3491 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3492 if (ret < 0) {
3493 free(events);
3494 goto setup_error;
3495 }
3496
3497 /* Copy event list into message payload */
3498 memcpy(cmd_ctx->llm->payload, events,
3499 sizeof(struct lttng_event) * nb_events);
3500
3501 free(events);
3502
3503 ret = LTTNG_OK;
3504 break;
3505 }
3506 case LTTNG_LIST_TRACKER_PIDS:
3507 {
3508 int32_t *pids = NULL;
3509 ssize_t nr_pids;
3510
3511 nr_pids = cmd_list_tracker_pids(cmd_ctx->session,
3512 cmd_ctx->lsm->domain.type, &pids);
3513 if (nr_pids < 0) {
3514 /* Return value is a negative lttng_error_code. */
3515 ret = -nr_pids;
3516 goto error;
3517 }
3518
3519 /*
3520		 * Setup lttng message with payload size set to the pid list size in
3521 * bytes and then copy list into the llm payload.
3522 */
3523 ret = setup_lttng_msg(cmd_ctx, sizeof(int32_t) * nr_pids);
3524 if (ret < 0) {
3525 free(pids);
3526 goto setup_error;
3527 }
3528
3529		/* Copy pid list into message payload */
3530 memcpy(cmd_ctx->llm->payload, pids,
3531				sizeof(int32_t) * nr_pids);
3532
3533 free(pids);
3534
3535 ret = LTTNG_OK;
3536 break;
3537 }
3538 case LTTNG_SET_CONSUMER_URI:
3539 {
3540 size_t nb_uri, len;
3541 struct lttng_uri *uris;
3542
3543 nb_uri = cmd_ctx->lsm->u.uri.size;
3544 len = nb_uri * sizeof(struct lttng_uri);
3545
3546 if (nb_uri == 0) {
3547 ret = LTTNG_ERR_INVALID;
3548 goto error;
3549 }
3550
3551 uris = zmalloc(len);
3552 if (uris == NULL) {
3553 ret = LTTNG_ERR_FATAL;
3554 goto error;
3555 }
3556
3557 /* Receive variable len data */
3558 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3559 ret = lttcomm_recv_unix_sock(sock, uris, len);
3560 if (ret <= 0) {
3561 DBG("No URIs received from client... continuing");
3562 *sock_error = 1;
3563 ret = LTTNG_ERR_SESSION_FAIL;
3564 free(uris);
3565 goto error;
3566 }
3567
3568 ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
3569 free(uris);
3570 if (ret != LTTNG_OK) {
3571 goto error;
3572 }
3573
3574
3575 break;
3576 }
3577 case LTTNG_START_TRACE:
3578 {
3579 ret = cmd_start_trace(cmd_ctx->session);
3580 break;
3581 }
3582 case LTTNG_STOP_TRACE:
3583 {
3584 ret = cmd_stop_trace(cmd_ctx->session);
3585 break;
3586 }
3587 case LTTNG_CREATE_SESSION:
3588 {
3589 size_t nb_uri, len;
3590 struct lttng_uri *uris = NULL;
3591
3592 nb_uri = cmd_ctx->lsm->u.uri.size;
3593 len = nb_uri * sizeof(struct lttng_uri);
3594
3595 if (nb_uri > 0) {
3596 uris = zmalloc(len);
3597 if (uris == NULL) {
3598 ret = LTTNG_ERR_FATAL;
3599 goto error;
3600 }
3601
3602 /* Receive variable len data */
3603 DBG("Waiting for %zu URIs from client ...", nb_uri);
3604 ret = lttcomm_recv_unix_sock(sock, uris, len);
3605 if (ret <= 0) {
3606 DBG("No URIs received from client... continuing");
3607 *sock_error = 1;
3608 ret = LTTNG_ERR_SESSION_FAIL;
3609 free(uris);
3610 goto error;
3611 }
3612
3613 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3614 DBG("Creating session with ONE network URI is a bad call");
3615 ret = LTTNG_ERR_SESSION_FAIL;
3616 free(uris);
3617 goto error;
3618 }
3619 }
3620
3621 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3622 &cmd_ctx->creds, 0);
3623
3624 free(uris);
3625
3626 break;
3627 }
3628 case LTTNG_DESTROY_SESSION:
3629 {
3630 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3631
3632 /* Set session to NULL so we do not unlock it after free. */
3633 cmd_ctx->session = NULL;
3634 break;
3635 }
3636 case LTTNG_LIST_DOMAINS:
3637 {
3638 ssize_t nb_dom;
3639 struct lttng_domain *domains = NULL;
3640
3641 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3642 if (nb_dom < 0) {
3643 /* Return value is a negative lttng_error_code. */
3644 ret = -nb_dom;
3645 goto error;
3646 }
3647
3648 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3649 if (ret < 0) {
3650 free(domains);
3651 goto setup_error;
3652 }
3653
3654		/* Copy domain list into message payload */
3655 memcpy(cmd_ctx->llm->payload, domains,
3656 nb_dom * sizeof(struct lttng_domain));
3657
3658 free(domains);
3659
3660 ret = LTTNG_OK;
3661 break;
3662 }
3663 case LTTNG_LIST_CHANNELS:
3664 {
3665 int nb_chan;
3666 struct lttng_channel *channels = NULL;
3667
3668 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3669 cmd_ctx->session, &channels);
3670 if (nb_chan < 0) {
3671 /* Return value is a negative lttng_error_code. */
3672 ret = -nb_chan;
3673 goto error;
3674 }
3675
3676 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3677 if (ret < 0) {
3678 free(channels);
3679 goto setup_error;
3680 }
3681
3682		/* Copy channel list into message payload */
3683 memcpy(cmd_ctx->llm->payload, channels,
3684 nb_chan * sizeof(struct lttng_channel));
3685
3686 free(channels);
3687
3688 ret = LTTNG_OK;
3689 break;
3690 }
3691 case LTTNG_LIST_EVENTS:
3692 {
3693 ssize_t nb_event;
3694 struct lttng_event *events = NULL;
3695
3696 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3697 cmd_ctx->lsm->u.list.channel_name, &events);
3698 if (nb_event < 0) {
3699 /* Return value is a negative lttng_error_code. */
3700 ret = -nb_event;
3701 goto error;
3702 }
3703
3704 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3705 if (ret < 0) {
3706 free(events);
3707 goto setup_error;
3708 }
3709
3710 /* Copy event list into message payload */
3711 memcpy(cmd_ctx->llm->payload, events,
3712 nb_event * sizeof(struct lttng_event));
3713
3714 free(events);
3715
3716 ret = LTTNG_OK;
3717 break;
3718 }
3719 case LTTNG_LIST_SESSIONS:
3720 {
3721 unsigned int nr_sessions;
3722
3723 session_lock_list();
3724 nr_sessions = lttng_sessions_count(
3725 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3726 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3727
3728 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3729 if (ret < 0) {
3730 session_unlock_list();
3731 goto setup_error;
3732 }
3733
3734		/* Fill the session array */
3735 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3736 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3737 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3738
3739 session_unlock_list();