Add JUL registration thread
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <inttypes.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/socket.h>
42 #include <common/defaults.h>
43 #include <common/kernel-consumer/kernel-consumer.h>
44 #include <common/futex.h>
45 #include <common/relayd/relayd.h>
46 #include <common/utils.h>
47
48 #include "lttng-sessiond.h"
49 #include "buffer-registry.h"
50 #include "channel.h"
51 #include "cmd.h"
52 #include "consumer.h"
53 #include "context.h"
54 #include "event.h"
55 #include "kernel.h"
56 #include "kernel-consumer.h"
57 #include "modprobe.h"
58 #include "shm.h"
59 #include "ust-ctl.h"
60 #include "ust-consumer.h"
61 #include "utils.h"
62 #include "fd-limit.h"
63 #include "health-sessiond.h"
64 #include "testpoint.h"
65 #include "ust-thread.h"
66 #include "jul-thread.h"
67
68 #define CONSUMERD_FILE "lttng-consumerd"
69
70 const char *progname;
71 static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
72 static const char *opt_pidfile;
73 static int opt_sig_parent;
74 static int opt_verbose_consumer;
75 static int opt_daemon;
76 static int opt_no_kernel;
77 static int is_root; /* Set to 1 if the daemon is running as root */
78 static pid_t ppid; /* Parent PID for --sig-parent option */
79 static char *rundir;
80
81 /*
82 * Consumer daemon specific control data. Every value not initialized here is
83 * set to 0 by the static definition.
84 */
85 static struct consumer_data kconsumer_data = {
86 .type = LTTNG_CONSUMER_KERNEL,
87 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
88 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
89 .err_sock = -1,
90 .cmd_sock = -1,
91 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
92 .lock = PTHREAD_MUTEX_INITIALIZER,
93 .cond = PTHREAD_COND_INITIALIZER,
94 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
95 };
96 static struct consumer_data ustconsumer64_data = {
97 .type = LTTNG_CONSUMER64_UST,
98 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
99 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
100 .err_sock = -1,
101 .cmd_sock = -1,
102 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
103 .lock = PTHREAD_MUTEX_INITIALIZER,
104 .cond = PTHREAD_COND_INITIALIZER,
105 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
106 };
107 static struct consumer_data ustconsumer32_data = {
108 .type = LTTNG_CONSUMER32_UST,
109 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
110 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
111 .err_sock = -1,
112 .cmd_sock = -1,
113 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
114 .lock = PTHREAD_MUTEX_INITIALIZER,
115 .cond = PTHREAD_COND_INITIALIZER,
116 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
117 };
118
119 /* Shared between threads */
120 static int dispatch_thread_exit;
121
122 /* Global application Unix socket path */
123 static char apps_unix_sock_path[PATH_MAX];
124 /* Global client Unix socket path */
125 static char client_unix_sock_path[PATH_MAX];
126 /* global wait shm path for UST */
127 static char wait_shm_path[PATH_MAX];
128 /* Global health check unix path */
129 static char health_unix_sock_path[PATH_MAX];
130
131 /* Sockets and FDs */
132 static int client_sock = -1;
133 static int apps_sock = -1;
134 int kernel_tracer_fd = -1;
135 static int kernel_poll_pipe[2] = { -1, -1 };
136
137 /*
138 * Quit pipe for all threads. This permits a single cancellation point
139 * for all threads when receiving an event on the pipe.
140 */
141 static int thread_quit_pipe[2] = { -1, -1 };
142
143 /*
144 * This pipe is used to inform the thread managing application communication
145 * that a command is queued and ready to be processed.
146 */
147 static int apps_cmd_pipe[2] = { -1, -1 };
148
149 int apps_cmd_notify_pipe[2] = { -1, -1 };
150
151 /* Pthread, Mutexes and Semaphores */
152 static pthread_t apps_thread;
153 static pthread_t apps_notify_thread;
154 static pthread_t reg_apps_thread;
155 static pthread_t client_thread;
156 static pthread_t kernel_thread;
157 static pthread_t dispatch_thread;
158 static pthread_t health_thread;
159 static pthread_t ht_cleanup_thread;
160 static pthread_t jul_reg_thread;
161
162 /*
163 * UST registration command queue. This queue is tied with a futex and uses an
164 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h.
165 *
166 * The thread_manage_apps and thread_dispatch_ust_registration interact with
167 * this queue and the wait/wake scheme.
168 */
169 static struct ust_cmd_queue ust_cmd_queue;
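/*
 * A condensed sketch of the wake/wait pairing around this queue, pulled from
 * thread_registration_apps() and thread_dispatch_ust_registration() further
 * down in this file (no new API, just the two sides shown together):
 *
 *   // Waker (registration thread): publish the command, then wake.
 *   cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *   futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   // Waiter (dispatch thread): prepare, drain the queue, then sleep.
 *   futex_nto1_prepare(&ust_cmd_queue.futex);
 *   while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue)) != NULL) {
 *       ... dispatch the command ...
 *   }
 *   futex_nto1_wait(&ust_cmd_queue.futex);
 */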
170
171 /*
172 * Pointer initialized before thread creation.
173 *
174 * This points to the tracing session list containing the session count and a
175 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
176 * MUST NOT be taken if you call a public function in session.c.
177 *
178 * The lock is nested inside the structure: session_list_ptr->lock. Please use
179 * session_lock_list and session_unlock_list for lock acquisition.
180 */
181 static struct ltt_session_list *session_list_ptr;
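/*
 * A minimal sketch of the iteration discipline described above, mirroring
 * how update_kernel_poll() below walks the list (error handling trimmed):
 *
 *   session_lock_list();
 *   cds_list_for_each_entry(session, &session_list_ptr->head, list) {
 *       session_lock(session);
 *       ... use session->kernel_session / session->ust_session ...
 *       session_unlock(session);
 *   }
 *   session_unlock_list();
 */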
182
183 int ust_consumerd64_fd = -1;
184 int ust_consumerd32_fd = -1;
185
186 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
187 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
188 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
189 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
190
191 static const char *module_proc_lttng = "/proc/lttng";
192
193 /*
194 * Consumer daemon state which is changed when spawning it, killing it or in
195 * case of a fatal error.
196 */
197 enum consumerd_state {
198 CONSUMER_STARTED = 1,
199 CONSUMER_STOPPED = 2,
200 CONSUMER_ERROR = 3,
201 };
202
203 /*
204 * This consumer daemon state is used to validate if a client command will be
205 * able to reach the consumer. If not, the client is informed. For instance,
206 * doing a "lttng start" when the consumer state is set to ERROR will return an
207 * error to the client.
208 *
209 * The following example shows a possible race condition of this scheme:
210 *
211 * consumer thread error happens
212 * client cmd arrives
213 * client cmd checks state -> still OK
214 * consumer thread exit, sets error
215 * client cmd try to talk to consumer
216 * ...
217 *
218 * However, since the consumer is a different daemon, we have no way of making
219 * sure the command will reach it safely even with this state flag. This is why
220 * we consider that up to the state validation during command processing, the
221 * command is safe. After that, we can not guarantee the correctness of the
222 * client request vis-a-vis the consumer.
223 */
224 static enum consumerd_state ust_consumerd_state;
225 static enum consumerd_state kernel_consumerd_state;
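/*
 * A minimal sketch of how this state is consulted before touching the
 * consumer, following the same pattern as update_ust_app() below; as noted
 * above, this check is only a best-effort guard, not a guarantee:
 *
 *   if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
 *       // Report an error to the client instead of contacting the consumer.
 *       return -1;
 *   }
 */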
226
227 /*
228 * Socket timeout for receiving and sending in seconds.
229 */
230 static int app_socket_timeout;
231
232 /* Set in main() with the current page size. */
233 long page_size;
234
235 /* Application health monitoring */
236 struct health_app *health_sessiond;
237
238 /* JUL TCP port for registration. Used by the JUL thread. */
239 unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;
240
241 static
242 void setup_consumerd_path(void)
243 {
244 const char *bin, *libdir;
245
246 /*
247 * Allow INSTALL_BIN_PATH to be used as a target path for the
248 * consumer matching the native architecture bitness if CONFIG_CONSUMER*_PATH
249 * has not been defined.
250 */
251 #if (CAA_BITS_PER_LONG == 32)
252 if (!consumerd32_bin[0]) {
253 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
254 }
255 if (!consumerd32_libdir[0]) {
256 consumerd32_libdir = INSTALL_LIB_PATH;
257 }
258 #elif (CAA_BITS_PER_LONG == 64)
259 if (!consumerd64_bin[0]) {
260 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
261 }
262 if (!consumerd64_libdir[0]) {
263 consumerd64_libdir = INSTALL_LIB_PATH;
264 }
265 #else
266 #error "Unknown bitness"
267 #endif
268
269 /*
270 * runtime env. var. overrides the build default.
271 */
272 bin = getenv("LTTNG_CONSUMERD32_BIN");
273 if (bin) {
274 consumerd32_bin = bin;
275 }
276 bin = getenv("LTTNG_CONSUMERD64_BIN");
277 if (bin) {
278 consumerd64_bin = bin;
279 }
280 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
281 if (libdir) {
282 consumerd32_libdir = libdir;
283 }
284 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
285 if (libdir) {
286 consumerd64_libdir = libdir;
287 }
288 }
289
290 /*
291 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
292 */
293 int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
294 {
295 int ret;
296
297 assert(events);
298
299 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
300 if (ret < 0) {
301 goto error;
302 }
303
304 /* Add quit pipe */
305 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
306 if (ret < 0) {
307 goto error;
308 }
309
310 return 0;
311
312 error:
313 return ret;
314 }
315
316 /*
317 * Check if the thread quit pipe was triggered.
318 *
319 * Return 1 if it was triggered, else 0.
320 */
321 int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
322 {
323 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
324 return 1;
325 }
326
327 return 0;
328 }
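/*
 * A condensed sketch of how the worker threads below combine the two helpers
 * above ("my_fd" stands for whatever socket or pipe the thread monitors;
 * error handling trimmed):
 *
 *   ret = sessiond_set_thread_pollset(&events, 2);
 *   ret = lttng_poll_add(&events, my_fd, LPOLLIN | LPOLLRDHUP);
 *   while (1) {
 *       ret = lttng_poll_wait(&events, -1);
 *       for (i = 0; i < ret; i++) {
 *           revents = LTTNG_POLL_GETEV(&events, i);
 *           pollfd = LTTNG_POLL_GETFD(&events, i);
 *           if (sessiond_check_thread_quit_pipe(pollfd, revents)) {
 *               goto exit;        // quit pipe fired, tear the thread down
 *           }
 *           ... handle my_fd events ...
 *       }
 *   }
 */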
329
330 /*
331 * Init thread quit pipe.
332 *
333 * Return -1 on error or 0 if all pipes are created.
334 */
335 static int init_thread_quit_pipe(void)
336 {
337 int ret, i;
338
339 ret = pipe(thread_quit_pipe);
340 if (ret < 0) {
341 PERROR("thread quit pipe");
342 goto error;
343 }
344
345 for (i = 0; i < 2; i++) {
346 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
347 if (ret < 0) {
348 PERROR("fcntl");
349 goto error;
350 }
351 }
352
353 error:
354 return ret;
355 }
356
357 /*
358 * Stop all threads by closing the thread quit pipe.
359 */
360 static void stop_threads(void)
361 {
362 int ret;
363
364 /* Stopping all threads */
365 DBG("Terminating all threads");
366 ret = notify_thread_pipe(thread_quit_pipe[1]);
367 if (ret < 0) {
368 ERR("write error on thread quit pipe");
369 }
370
371 /* Dispatch thread */
372 CMM_STORE_SHARED(dispatch_thread_exit, 1);
373 futex_nto1_wake(&ust_cmd_queue.futex);
374 }
375
376 /*
377 * Close every consumer socket.
378 */
379 static void close_consumer_sockets(void)
380 {
381 int ret;
382
383 if (kconsumer_data.err_sock >= 0) {
384 ret = close(kconsumer_data.err_sock);
385 if (ret < 0) {
386 PERROR("kernel consumer err_sock close");
387 }
388 }
389 if (ustconsumer32_data.err_sock >= 0) {
390 ret = close(ustconsumer32_data.err_sock);
391 if (ret < 0) {
392 PERROR("UST consumerd32 err_sock close");
393 }
394 }
395 if (ustconsumer64_data.err_sock >= 0) {
396 ret = close(ustconsumer64_data.err_sock);
397 if (ret < 0) {
398 PERROR("UST consumerd64 err_sock close");
399 }
400 }
401 if (kconsumer_data.cmd_sock >= 0) {
402 ret = close(kconsumer_data.cmd_sock);
403 if (ret < 0) {
404 PERROR("kernel consumer cmd_sock close");
405 }
406 }
407 if (ustconsumer32_data.cmd_sock >= 0) {
408 ret = close(ustconsumer32_data.cmd_sock);
409 if (ret < 0) {
410 PERROR("UST consumerd32 cmd_sock close");
411 }
412 }
413 if (ustconsumer64_data.cmd_sock >= 0) {
414 ret = close(ustconsumer64_data.cmd_sock);
415 if (ret < 0) {
416 PERROR("UST consumerd64 cmd_sock close");
417 }
418 }
419 }
420
421 /*
422 * Clean up the daemon.
423 */
424 static void cleanup(void)
425 {
426 int ret;
427 struct ltt_session *sess, *stmp;
428 char path[PATH_MAX];
429
430 DBG("Cleaning up");
431
432 /*
433 * Close the thread quit pipe. It has already done its job,
434 * since we are now called.
435 */
436 utils_close_pipe(thread_quit_pipe);
437
438 /*
439 * If opt_pidfile is undefined, the default file will be wiped when
440 * removing the rundir.
441 */
442 if (opt_pidfile) {
443 ret = remove(opt_pidfile);
444 if (ret < 0) {
445 PERROR("remove pidfile %s", opt_pidfile);
446 }
447 }
448
449 DBG("Removing sessiond and consumerd content of directory %s", rundir);
450
451 /* sessiond */
452 snprintf(path, PATH_MAX,
453 "%s/%s",
454 rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
455 DBG("Removing %s", path);
456 (void) unlink(path);
457
458 /* kconsumerd */
459 snprintf(path, PATH_MAX,
460 DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
461 rundir);
462 DBG("Removing %s", path);
463 (void) unlink(path);
464
465 snprintf(path, PATH_MAX,
466 DEFAULT_KCONSUMERD_PATH,
467 rundir);
468 DBG("Removing directory %s", path);
469 (void) rmdir(path);
470
471 /* ust consumerd 32 */
472 snprintf(path, PATH_MAX,
473 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
474 rundir);
475 DBG("Removing %s", path);
476 (void) unlink(path);
477
478 snprintf(path, PATH_MAX,
479 DEFAULT_USTCONSUMERD32_PATH,
480 rundir);
481 DBG("Removing directory %s", path);
482 (void) rmdir(path);
483
484 /* ust consumerd 64 */
485 snprintf(path, PATH_MAX,
486 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
487 rundir);
488 DBG("Removing %s", path);
489 (void) unlink(path);
490
491 snprintf(path, PATH_MAX,
492 DEFAULT_USTCONSUMERD64_PATH,
493 rundir);
494 DBG("Removing directory %s", path);
495 (void) rmdir(path);
496
497 free(rundir);
498
499 DBG("Cleaning up all sessions");
500
501 /* Destroy session list mutex */
502 if (session_list_ptr != NULL) {
503 pthread_mutex_destroy(&session_list_ptr->lock);
504
505 /* Cleanup ALL session */
506 cds_list_for_each_entry_safe(sess, stmp,
507 &session_list_ptr->head, list) {
508 cmd_destroy_session(sess, kernel_poll_pipe[1]);
509 }
510 }
511
512 DBG("Closing all UST sockets");
513 ust_app_clean_list();
514 buffer_reg_destroy_registries();
515
516 if (is_root && !opt_no_kernel) {
517 DBG2("Closing kernel fd");
518 if (kernel_tracer_fd >= 0) {
519 ret = close(kernel_tracer_fd);
520 if (ret) {
521 PERROR("close");
522 }
523 }
524 DBG("Unloading kernel modules");
525 modprobe_remove_lttng_all();
526 }
527
528 close_consumer_sockets();
529
530 /* <fun> */
531 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
532 "Matthew, BEET driven development works!%c[%dm",
533 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
534 /* </fun> */
535 }
536
537 /*
538 * Send data on a unix socket using the liblttsessiondcomm API.
539 *
540 * Return lttcomm error code.
541 */
542 static int send_unix_sock(int sock, void *buf, size_t len)
543 {
544 /* Check valid length */
545 if (len == 0) {
546 return -1;
547 }
548
549 return lttcomm_send_unix_sock(sock, buf, len);
550 }
551
552 /*
553 * Free memory of a command context structure.
554 */
555 static void clean_command_ctx(struct command_ctx **cmd_ctx)
556 {
557 DBG("Clean command context structure");
558 if (*cmd_ctx) {
559 if ((*cmd_ctx)->llm) {
560 free((*cmd_ctx)->llm);
561 }
562 if ((*cmd_ctx)->lsm) {
563 free((*cmd_ctx)->lsm);
564 }
565 free(*cmd_ctx);
566 *cmd_ctx = NULL;
567 }
568 }
569
570 /*
571 * Notify UST applications using the shm mmap futex.
572 */
573 static int notify_ust_apps(int active)
574 {
575 char *wait_shm_mmap;
576
577 DBG("Notifying applications of session daemon state: %d", active);
578
579 /* See shm.c for this call implying mmap, shm and futex calls */
580 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
581 if (wait_shm_mmap == NULL) {
582 goto error;
583 }
584
585 /* Wake waiting process */
586 futex_wait_update((int32_t *) wait_shm_mmap, active);
587
588 /* Apps notified successfully */
589 return 0;
590
591 error:
592 return -1;
593 }
594
595 /*
596 * Setup the outgoing data buffer for the response (llm) by allocating the
597 * right amount of memory and copying the original information from the lsm
598 * structure.
599 *
600 * Return the size of the payload buffer on success, or a negative value on error.
601 */
602 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
603 {
604 int ret, buf_size;
605
606 buf_size = size;
607
608 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
609 if (cmd_ctx->llm == NULL) {
610 PERROR("zmalloc");
611 ret = -ENOMEM;
612 goto error;
613 }
614
615 /* Copy common data */
616 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
617 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
618
619 cmd_ctx->llm->data_size = size;
620 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
621
622 return buf_size;
623
624 error:
625 return ret;
626 }
627
628 /*
629 * Update the kernel poll set with every channel fd available across all tracing
630 * sessions. Add the wakeup pipe at the end of the set.
631 */
632 static int update_kernel_poll(struct lttng_poll_event *events)
633 {
634 int ret;
635 struct ltt_session *session;
636 struct ltt_kernel_channel *channel;
637
638 DBG("Updating kernel poll set");
639
640 session_lock_list();
641 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
642 session_lock(session);
643 if (session->kernel_session == NULL) {
644 session_unlock(session);
645 continue;
646 }
647
648 cds_list_for_each_entry(channel,
649 &session->kernel_session->channel_list.head, list) {
650 /* Add channel fd to the kernel poll set */
651 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
652 if (ret < 0) {
653 session_unlock(session);
654 goto error;
655 }
656 DBG("Channel fd %d added to kernel set", channel->fd);
657 }
658 session_unlock(session);
659 }
660 session_unlock_list();
661
662 return 0;
663
664 error:
665 session_unlock_list();
666 return -1;
667 }
668
669 /*
670 * Find the channel fd from 'fd' across all tracing sessions. When found, check
671 * for new channel streams and send those stream fds to the kernel consumer.
672 *
673 * Useful for the CPU hotplug feature.
674 */
675 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
676 {
677 int ret = 0;
678 struct ltt_session *session;
679 struct ltt_kernel_session *ksess;
680 struct ltt_kernel_channel *channel;
681
682 DBG("Updating kernel streams for channel fd %d", fd);
683
684 session_lock_list();
685 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
686 session_lock(session);
687 if (session->kernel_session == NULL) {
688 session_unlock(session);
689 continue;
690 }
691 ksess = session->kernel_session;
692
693 cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
694 if (channel->fd == fd) {
695 DBG("Channel found, updating kernel streams");
696 ret = kernel_open_channel_stream(channel);
697 if (ret < 0) {
698 goto error;
699 }
700 /* Update the stream global counter */
701 ksess->stream_count_global += ret;
702
703 /*
704 * Have we already sent fds to the consumer? If yes, it means
705 * that tracing is started so it is safe to send our updated
706 * stream fds.
707 */
708 if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
709 struct lttng_ht_iter iter;
710 struct consumer_socket *socket;
711
712 rcu_read_lock();
713 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
714 &iter.iter, socket, node.node) {
715 pthread_mutex_lock(socket->lock);
716 ret = kernel_consumer_send_channel_stream(socket,
717 channel, ksess,
718 session->output_traces ? 1 : 0);
719 pthread_mutex_unlock(socket->lock);
720 if (ret < 0) {
721 rcu_read_unlock();
722 goto error;
723 }
724 }
725 rcu_read_unlock();
726 }
727 goto error;
728 }
729 }
730 session_unlock(session);
731 }
732 session_unlock_list();
733 return ret;
734
735 error:
736 session_unlock(session);
737 session_unlock_list();
738 return ret;
739 }
740
741 /*
742 * For each tracing session, update newly registered apps. The session list
743 * lock MUST be acquired before calling this.
744 */
745 static void update_ust_app(int app_sock)
746 {
747 struct ltt_session *sess, *stmp;
748
749 /* Consumer is in an ERROR state. Stop any application update. */
750 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
751 /* Stop the update process since the consumer is dead. */
752 return;
753 }
754
755 /* For all tracing session(s) */
756 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
757 session_lock(sess);
758 if (sess->ust_session) {
759 ust_app_global_update(sess->ust_session, app_sock);
760 }
761 session_unlock(sess);
762 }
763 }
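/*
 * A minimal sketch of the calling convention stated above (the dispatch
 * thread below follows it): the session list lock is taken by the caller,
 * not by update_ust_app() itself.
 *
 *   session_lock_list();
 *   update_ust_app(app->sock);
 *   session_unlock_list();
 */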
764
765 /*
766 * This thread manages events coming from the kernel.
767 *
768 * Features supported in this thread:
769 * -) CPU Hotplug
770 */
771 static void *thread_manage_kernel(void *data)
772 {
773 int ret, i, pollfd, update_poll_flag = 1, err = -1;
774 uint32_t revents, nb_fd;
775 char tmp;
776 struct lttng_poll_event events;
777
778 DBG("[thread] Thread manage kernel started");
779
780 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
781
782 /*
783 * The first step of the while loop cleans this structure, which could free
784 * non-NULL pointers, so initialize it before the loop.
785 */
786 lttng_poll_init(&events);
787
788 if (testpoint(thread_manage_kernel)) {
789 goto error_testpoint;
790 }
791
792 health_code_update();
793
794 if (testpoint(thread_manage_kernel_before_loop)) {
795 goto error_testpoint;
796 }
797
798 while (1) {
799 health_code_update();
800
801 if (update_poll_flag == 1) {
802 /* Clean events object. We are about to populate it again. */
803 lttng_poll_clean(&events);
804
805 ret = sessiond_set_thread_pollset(&events, 2);
806 if (ret < 0) {
807 goto error_poll_create;
808 }
809
810 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
811 if (ret < 0) {
812 goto error;
813 }
814
815 /* This will add the available kernel channel if any. */
816 ret = update_kernel_poll(&events);
817 if (ret < 0) {
818 goto error;
819 }
820 update_poll_flag = 0;
821 }
822
823 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
824
825 /* Poll infinite value of time */
826 restart:
827 health_poll_entry();
828 ret = lttng_poll_wait(&events, -1);
829 health_poll_exit();
830 if (ret < 0) {
831 /*
832 * Restart interrupted system call.
833 */
834 if (errno == EINTR) {
835 goto restart;
836 }
837 goto error;
838 } else if (ret == 0) {
839 /* Should not happen since timeout is infinite */
840 ERR("Return value of poll is 0 with an infinite timeout.\n"
841 "This should not have happened! Continuing...");
842 continue;
843 }
844
845 nb_fd = ret;
846
847 for (i = 0; i < nb_fd; i++) {
848 /* Fetch once the poll data */
849 revents = LTTNG_POLL_GETEV(&events, i);
850 pollfd = LTTNG_POLL_GETFD(&events, i);
851
852 health_code_update();
853
854 /* Thread quit pipe has been closed. Killing thread. */
855 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
856 if (ret) {
857 err = 0;
858 goto exit;
859 }
860
861 /* Check for data on kernel pipe */
862 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
863 do {
864 ret = read(kernel_poll_pipe[0], &tmp, 1);
865 } while (ret < 0 && errno == EINTR);
866 /*
867 * The return value is not useful here: if this pipe sees any activity, an
868 * update is required anyway.
869 */
870 update_poll_flag = 1;
871 continue;
872 } else {
873 /*
874 * New CPU detected by the kernel. Add the kernel stream to the
875 * kernel session and update the kernel consumer.
876 */
877 if (revents & LPOLLIN) {
878 ret = update_kernel_stream(&kconsumer_data, pollfd);
879 if (ret < 0) {
880 continue;
881 }
882 break;
883 /*
884 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
885 * and unregister kernel stream at this point.
886 */
887 }
888 }
889 }
890 }
891
892 exit:
893 error:
894 lttng_poll_clean(&events);
895 error_poll_create:
896 error_testpoint:
897 utils_close_pipe(kernel_poll_pipe);
898 kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
899 if (err) {
900 health_error();
901 ERR("Health error occurred in %s", __func__);
902 WARN("Kernel thread died unexpectedly. "
903 "Kernel tracing can continue but CPU hotplug is disabled.");
904 }
905 health_unregister(health_sessiond);
906 DBG("Kernel thread dying");
907 return NULL;
908 }
909
910 /*
911 * Signal the consumer data's pthread condition to report the consumer thread state.
912 */
913 static void signal_consumer_condition(struct consumer_data *data, int state)
914 {
915 pthread_mutex_lock(&data->cond_mutex);
916
917 /*
918 * The state is set before signaling. It can be any value; it is the waiter's
919 * job to correctly interpret this condition variable associated with the
920 * consumer pthread_cond.
921 *
922 * A value of 0 means that the corresponding thread of the consumer data
923 * was not started. 1 indicates that the thread has started and is ready
924 * for action. A negative value means that there was an error during the
925 * thread bootstrap.
926 */
927 data->consumer_thread_is_ready = state;
928 (void) pthread_cond_signal(&data->cond);
929
930 pthread_mutex_unlock(&data->cond_mutex);
931 }
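/*
 * The waiter side of this handshake lives in spawn_consumer_thread() below;
 * a condensed sketch of the pairing (error handling trimmed):
 *
 *   // Consumer management thread, once its sockets are connected:
 *   signal_consumer_condition(consumer_data, 1);
 *
 *   // Spawning thread, waiting for readiness with a timeout:
 *   pthread_mutex_lock(&consumer_data->cond_mutex);
 *   while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
 *       ret = pthread_cond_timedwait(&consumer_data->cond,
 *               &consumer_data->cond_mutex, &timeout);
 *   }
 *   pthread_mutex_unlock(&consumer_data->cond_mutex);
 */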
932
933 /*
934 * This thread manages the consumer errors sent back to the session daemon.
935 */
936 static void *thread_manage_consumer(void *data)
937 {
938 int sock = -1, i, ret, pollfd, err = -1;
939 uint32_t revents, nb_fd;
940 enum lttcomm_return_code code;
941 struct lttng_poll_event events;
942 struct consumer_data *consumer_data = data;
943
944 DBG("[thread] Manage consumer started");
945
946 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
947
948 health_code_update();
949
950 /*
951 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
952 * metadata_sock. Nothing more will be added to this poll set.
953 */
954 ret = sessiond_set_thread_pollset(&events, 3);
955 if (ret < 0) {
956 goto error_poll;
957 }
958
959 /*
960 * The error socket here is already in a listening state which was done
961 * just before spawning this thread to avoid a race between the consumer
962 * daemon exec trying to connect and the listen() call.
963 */
964 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
965 if (ret < 0) {
966 goto error;
967 }
968
969 health_code_update();
970
971 /* Infinite blocking call, waiting for transmission */
972 restart:
973 health_poll_entry();
974
975 if (testpoint(thread_manage_consumer)) {
976 goto error;
977 }
978
979 ret = lttng_poll_wait(&events, -1);
980 health_poll_exit();
981 if (ret < 0) {
982 /*
983 * Restart interrupted system call.
984 */
985 if (errno == EINTR) {
986 goto restart;
987 }
988 goto error;
989 }
990
991 nb_fd = ret;
992
993 for (i = 0; i < nb_fd; i++) {
994 /* Fetch once the poll data */
995 revents = LTTNG_POLL_GETEV(&events, i);
996 pollfd = LTTNG_POLL_GETFD(&events, i);
997
998 health_code_update();
999
1000 /* Thread quit pipe has been closed. Killing thread. */
1001 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1002 if (ret) {
1003 err = 0;
1004 goto exit;
1005 }
1006
1007 /* Event on the registration socket */
1008 if (pollfd == consumer_data->err_sock) {
1009 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1010 ERR("consumer err socket poll error");
1011 goto error;
1012 }
1013 }
1014 }
1015
1016 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
1017 if (sock < 0) {
1018 goto error;
1019 }
1020
1021 /*
1022 * Set the CLOEXEC flag. Return code is useless because either way, the
1023 * show must go on.
1024 */
1025 (void) utils_set_fd_cloexec(sock);
1026
1027 health_code_update();
1028
1029 DBG2("Receiving code from consumer err_sock");
1030
1031 /* Getting status code from kconsumerd */
1032 ret = lttcomm_recv_unix_sock(sock, &code,
1033 sizeof(enum lttcomm_return_code));
1034 if (ret <= 0) {
1035 goto error;
1036 }
1037
1038 health_code_update();
1039
1040 if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
1041 /* Connect both socket, command and metadata. */
1042 consumer_data->cmd_sock =
1043 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1044 consumer_data->metadata_fd =
1045 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1046 if (consumer_data->cmd_sock < 0
1047 || consumer_data->metadata_fd < 0) {
1048 PERROR("consumer connect cmd socket");
1049 /* On error, signal condition and quit. */
1050 signal_consumer_condition(consumer_data, -1);
1051 goto error;
1052 }
1053 consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
1054 /* Create metadata socket lock. */
1055 consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
1056 if (consumer_data->metadata_sock.lock == NULL) {
1057 PERROR("zmalloc pthread mutex");
1058 ret = -1;
1059 goto error;
1060 }
1061 pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
1062
1063 signal_consumer_condition(consumer_data, 1);
1064 DBG("Consumer command socket ready (fd: %d", consumer_data->cmd_sock);
1065 DBG("Consumer metadata socket ready (fd: %d)",
1066 consumer_data->metadata_fd);
1067 } else {
1068 ERR("consumer error when waiting for SOCK_READY : %s",
1069 lttcomm_get_readable_code(-code));
1070 goto error;
1071 }
1072
1073 /* Remove the consumerd error sock since we've established a connection */
1074 ret = lttng_poll_del(&events, consumer_data->err_sock);
1075 if (ret < 0) {
1076 goto error;
1077 }
1078
1079 /* Add new accepted error socket. */
1080 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
1081 if (ret < 0) {
1082 goto error;
1083 }
1084
1085 /* Add metadata socket that is successfully connected. */
1086 ret = lttng_poll_add(&events, consumer_data->metadata_fd,
1087 LPOLLIN | LPOLLRDHUP);
1088 if (ret < 0) {
1089 goto error;
1090 }
1091
1092 health_code_update();
1093
1094 /* Infinite blocking call, waiting for transmission */
1095 restart_poll:
1096 while (1) {
1097 health_poll_entry();
1098 ret = lttng_poll_wait(&events, -1);
1099 health_poll_exit();
1100 if (ret < 0) {
1101 /*
1102 * Restart interrupted system call.
1103 */
1104 if (errno == EINTR) {
1105 goto restart_poll;
1106 }
1107 goto error;
1108 }
1109
1110 nb_fd = ret;
1111
1112 for (i = 0; i < nb_fd; i++) {
1113 /* Fetch once the poll data */
1114 revents = LTTNG_POLL_GETEV(&events, i);
1115 pollfd = LTTNG_POLL_GETFD(&events, i);
1116
1117 health_code_update();
1118
1119 /* Thread quit pipe has been closed. Killing thread. */
1120 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1121 if (ret) {
1122 err = 0;
1123 goto exit;
1124 }
1125
1126 if (pollfd == sock) {
1127 /* Event on the consumerd socket */
1128 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1129 ERR("consumer err socket second poll error");
1130 goto error;
1131 }
1132 health_code_update();
1133 /* Wait for any kconsumerd error */
1134 ret = lttcomm_recv_unix_sock(sock, &code,
1135 sizeof(enum lttcomm_return_code));
1136 if (ret <= 0) {
1137 ERR("consumer closed the command socket");
1138 goto error;
1139 }
1140
1141 ERR("consumer return code : %s",
1142 lttcomm_get_readable_code(-code));
1143
1144 goto exit;
1145 } else if (pollfd == consumer_data->metadata_fd) {
1146 /* UST metadata requests */
1147 ret = ust_consumer_metadata_request(
1148 &consumer_data->metadata_sock);
1149 if (ret < 0) {
1150 ERR("Handling metadata request");
1151 goto error;
1152 }
1153 break;
1154 } else {
1155 ERR("Unknown pollfd");
1156 goto error;
1157 }
1158 }
1159 health_code_update();
1160 }
1161
1162 exit:
1163 error:
1164 /*
1165 * We lock here because we are about to close the sockets and some other
1166 * thread might be using them, so get exclusive access, which will abort all
1167 * other consumer commands from other threads.
1168 */
1169 pthread_mutex_lock(&consumer_data->lock);
1170
1171 /* Immediately set the consumerd state to stopped */
1172 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1173 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1174 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1175 consumer_data->type == LTTNG_CONSUMER32_UST) {
1176 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1177 } else {
1178 /* Code flow error... */
1179 assert(0);
1180 }
1181
1182 if (consumer_data->err_sock >= 0) {
1183 ret = close(consumer_data->err_sock);
1184 if (ret) {
1185 PERROR("close");
1186 }
1187 consumer_data->err_sock = -1;
1188 }
1189 if (consumer_data->cmd_sock >= 0) {
1190 ret = close(consumer_data->cmd_sock);
1191 if (ret) {
1192 PERROR("close");
1193 }
1194 consumer_data->cmd_sock = -1;
1195 }
1196 if (*consumer_data->metadata_sock.fd_ptr >= 0) {
1197 ret = close(*consumer_data->metadata_sock.fd_ptr);
1198 if (ret) {
1199 PERROR("close");
1200 }
1201 }
1202
1203 if (sock >= 0) {
1204 ret = close(sock);
1205 if (ret) {
1206 PERROR("close");
1207 }
1208 }
1209
1210 unlink(consumer_data->err_unix_sock_path);
1211 unlink(consumer_data->cmd_unix_sock_path);
1212 consumer_data->pid = 0;
1213 pthread_mutex_unlock(&consumer_data->lock);
1214
1215 /* Cleanup metadata socket mutex. */
1216 pthread_mutex_destroy(consumer_data->metadata_sock.lock);
1217 free(consumer_data->metadata_sock.lock);
1218
1219 lttng_poll_clean(&events);
1220 error_poll:
1221 if (err) {
1222 health_error();
1223 ERR("Health error occurred in %s", __func__);
1224 }
1225 health_unregister(health_sessiond);
1226 DBG("consumer thread cleanup completed");
1227
1228 return NULL;
1229 }
1230
1231 /*
1232 * This thread manages application communication.
1233 */
1234 static void *thread_manage_apps(void *data)
1235 {
1236 int i, ret, pollfd, err = -1;
1237 uint32_t revents, nb_fd;
1238 struct lttng_poll_event events;
1239
1240 DBG("[thread] Manage application started");
1241
1242 rcu_register_thread();
1243 rcu_thread_online();
1244
1245 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);
1246
1247 if (testpoint(thread_manage_apps)) {
1248 goto error_testpoint;
1249 }
1250
1251 health_code_update();
1252
1253 ret = sessiond_set_thread_pollset(&events, 2);
1254 if (ret < 0) {
1255 goto error_poll_create;
1256 }
1257
1258 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1259 if (ret < 0) {
1260 goto error;
1261 }
1262
1263 if (testpoint(thread_manage_apps_before_loop)) {
1264 goto error;
1265 }
1266
1267 health_code_update();
1268
1269 while (1) {
1270 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
1271
1272 /* Infinite blocking call, waiting for transmission */
1273 restart:
1274 health_poll_entry();
1275 ret = lttng_poll_wait(&events, -1);
1276 health_poll_exit();
1277 if (ret < 0) {
1278 /*
1279 * Restart interrupted system call.
1280 */
1281 if (errno == EINTR) {
1282 goto restart;
1283 }
1284 goto error;
1285 }
1286
1287 nb_fd = ret;
1288
1289 for (i = 0; i < nb_fd; i++) {
1290 /* Fetch once the poll data */
1291 revents = LTTNG_POLL_GETEV(&events, i);
1292 pollfd = LTTNG_POLL_GETFD(&events, i);
1293
1294 health_code_update();
1295
1296 /* Thread quit pipe has been closed. Killing thread. */
1297 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1298 if (ret) {
1299 err = 0;
1300 goto exit;
1301 }
1302
1303 /* Inspect the apps cmd pipe */
1304 if (pollfd == apps_cmd_pipe[0]) {
1305 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1306 ERR("Apps command pipe error");
1307 goto error;
1308 } else if (revents & LPOLLIN) {
1309 int sock;
1310
1311 /* Empty pipe */
1312 do {
1313 ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
1314 } while (ret < 0 && errno == EINTR);
1315 if (ret < 0 || ret < sizeof(sock)) {
1316 PERROR("read apps cmd pipe");
1317 goto error;
1318 }
1319
1320 health_code_update();
1321
1322 /*
1323 * We only monitor the error events of the socket. This
1324 * thread does not handle any incoming data from UST
1325 * (POLLIN).
1326 */
1327 ret = lttng_poll_add(&events, sock,
1328 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
1329 if (ret < 0) {
1330 goto error;
1331 }
1332
1333 DBG("Apps with sock %d added to poll set", sock);
1334
1335 health_code_update();
1336
1337 break;
1338 }
1339 } else {
1340 /*
1341 * At this point, we know that a registered application triggered
1342 * the event seen by poll_wait.
1343 */
1344 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1345 /* Removing from the poll set */
1346 ret = lttng_poll_del(&events, pollfd);
1347 if (ret < 0) {
1348 goto error;
1349 }
1350
1351 /* Socket closed on remote end. */
1352 ust_app_unregister(pollfd);
1353 break;
1354 }
1355 }
1356
1357 health_code_update();
1358 }
1359 }
1360
1361 exit:
1362 error:
1363 lttng_poll_clean(&events);
1364 error_poll_create:
1365 error_testpoint:
1366 utils_close_pipe(apps_cmd_pipe);
1367 apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
1368
1369 /*
1370 * We don't clean the UST app hash table here since already registered
1371 * applications can still be controlled so let them be until the session
1372 * daemon dies or the applications stop.
1373 */
1374
1375 if (err) {
1376 health_error();
1377 ERR("Health error occurred in %s", __func__);
1378 }
1379 health_unregister(health_sessiond);
1380 DBG("Application communication apps thread cleanup complete");
1381 rcu_thread_offline();
1382 rcu_unregister_thread();
1383 return NULL;
1384 }
1385
1386 /*
1387 * Send a socket to a thread. This is called from the dispatch UST registration
1388 * thread once all sockets are set for the application.
1389 *
1390 * The sock value can be invalid; we don't really care since the thread will
1391 * handle it and do the necessary cleanup if needed.
1392 *
1393 * On success return 0, else a negative value which is the errno from the
1394 * write().
1395 */
1396 static int send_socket_to_thread(int fd, int sock)
1397 {
1398 int ret;
1399
1400 /*
1401 * It's possible that the FD is concurrently set to -1 (invalid) just
1402 * before calling this function, e.g. when the thread is shutting down.
1403 */
1404 if (fd < 0) {
1405 ret = -EBADF;
1406 goto error;
1407 }
1408
1409 do {
1410 ret = write(fd, &sock, sizeof(sock));
1411 } while (ret < 0 && errno == EINTR);
1412 if (ret < 0 || ret != sizeof(sock)) {
1413 PERROR("write apps pipe %d", fd);
1414 if (ret < 0) {
1415 ret = -errno;
1416 }
1417 goto error;
1418 }
1419
1420 /* All good. Don't send back the write positive ret value. */
1421 ret = 0;
1422 error:
1423 return ret;
1424 }
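/*
 * The receiving end of this write is the read() loop in thread_manage_apps()
 * above; a condensed sketch of the pairing across the pipe:
 *
 *   // Dispatch thread:
 *   ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
 *
 *   // Manage-apps thread, on LPOLLIN for apps_cmd_pipe[0]:
 *   do {
 *       ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
 *   } while (ret < 0 && errno == EINTR);
 */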
1425
1426 /*
1427 * Sanitize the wait queue of the dispatch registration thread, i.e. remove
1428 * invalid nodes from it. This avoids memory leaks in case the UST notify
1429 * socket is never received.
1430 */
1431 static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
1432 {
1433 int ret, nb_fd = 0, i;
1434 unsigned int fd_added = 0;
1435 struct lttng_poll_event events;
1436 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1437
1438 assert(wait_queue);
1439
1440 lttng_poll_init(&events);
1441
1442 /* Just skip everything for an empty queue. */
1443 if (!wait_queue->count) {
1444 goto end;
1445 }
1446
1447 ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
1448 if (ret < 0) {
1449 goto error_create;
1450 }
1451
1452 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1453 &wait_queue->head, head) {
1454 assert(wait_node->app);
1455 ret = lttng_poll_add(&events, wait_node->app->sock,
1456 LPOLLHUP | LPOLLERR);
1457 if (ret < 0) {
1458 goto error;
1459 }
1460
1461 fd_added = 1;
1462 }
1463
1464 if (!fd_added) {
1465 goto end;
1466 }
1467
1468 /*
1469 * Poll but don't block so we can quickly identify the faulty events and
1470 * clean them afterwards from the wait queue.
1471 */
1472 ret = lttng_poll_wait(&events, 0);
1473 if (ret < 0) {
1474 goto error;
1475 }
1476 nb_fd = ret;
1477
1478 for (i = 0; i < nb_fd; i++) {
1479 /* Get faulty FD. */
1480 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
1481 int pollfd = LTTNG_POLL_GETFD(&events, i);
1482
1483 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1484 &wait_queue->head, head) {
1485 if (pollfd == wait_node->app->sock &&
1486 (revents & (LPOLLHUP | LPOLLERR))) {
1487 cds_list_del(&wait_node->head);
1488 wait_queue->count--;
1489 ust_app_destroy(wait_node->app);
1490 free(wait_node);
1491 break;
1492 }
1493 }
1494 }
1495
1496 if (nb_fd > 0) {
1497 DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
1498 }
1499
1500 end:
1501 lttng_poll_clean(&events);
1502 return;
1503
1504 error:
1505 lttng_poll_clean(&events);
1506 error_create:
1507 ERR("Unable to sanitize wait queue");
1508 return;
1509 }
1510
1511 /*
1512 * Dispatch requests from the registration threads to the application
1513 * communication thread.
1514 */
1515 static void *thread_dispatch_ust_registration(void *data)
1516 {
1517 int ret, err = -1;
1518 struct cds_wfq_node *node;
1519 struct ust_command *ust_cmd = NULL;
1520 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1521 struct ust_reg_wait_queue wait_queue = {
1522 .count = 0,
1523 };
1524
1525 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
1526
1527 health_code_update();
1528
1529 CDS_INIT_LIST_HEAD(&wait_queue.head);
1530
1531 DBG("[thread] Dispatch UST command started");
1532
1533 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
1534 health_code_update();
1535
1536 /* Atomically prepare the queue futex */
1537 futex_nto1_prepare(&ust_cmd_queue.futex);
1538
1539 do {
1540 struct ust_app *app = NULL;
1541 ust_cmd = NULL;
1542
1543 /*
1544 * Make sure we don't have node(s) that have hung up before receiving
1545 * the notify socket. This cleans the list in order to avoid memory
1546 * leaks from notify sockets that are never seen.
1547 */
1548 sanitize_wait_queue(&wait_queue);
1549
1550 health_code_update();
1551 /* Dequeue command for registration */
1552 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1553 if (node == NULL) {
1554 DBG("Woken up but nothing in the UST command queue");
1555 /* Continue thread execution */
1556 break;
1557 }
1558
1559 ust_cmd = caa_container_of(node, struct ust_command, node);
1560
1561 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1562 " gid:%d sock:%d name:%s (version %d.%d)",
1563 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1564 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1565 ust_cmd->sock, ust_cmd->reg_msg.name,
1566 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1567
1568 if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
1569 wait_node = zmalloc(sizeof(*wait_node));
1570 if (!wait_node) {
1571 PERROR("zmalloc wait_node dispatch");
1572 ret = close(ust_cmd->sock);
1573 if (ret < 0) {
1574 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1575 }
1576 lttng_fd_put(1, LTTNG_FD_APPS);
1577 free(ust_cmd);
1578 goto error;
1579 }
1580 CDS_INIT_LIST_HEAD(&wait_node->head);
1581
1582 /* Create application object if socket is CMD. */
1583 wait_node->app = ust_app_create(&ust_cmd->reg_msg,
1584 ust_cmd->sock);
1585 if (!wait_node->app) {
1586 ret = close(ust_cmd->sock);
1587 if (ret < 0) {
1588 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1589 }
1590 lttng_fd_put(1, LTTNG_FD_APPS);
1591 free(wait_node);
1592 free(ust_cmd);
1593 continue;
1594 }
1595 /*
1596 * Add application to the wait queue so we can set the notify
1597 * socket before putting this object in the global ht.
1598 */
1599 cds_list_add(&wait_node->head, &wait_queue.head);
1600 wait_queue.count++;
1601
1602 free(ust_cmd);
1603 /*
1604 * We have to continue here since we don't have the notify
1605 * socket and the application MUST be added to the hash table
1606 * only at that moment.
1607 */
1608 continue;
1609 } else {
1610 /*
1611 * Look for the application in the local wait queue and set the
1612 * notify socket if found.
1613 */
1614 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1615 &wait_queue.head, head) {
1616 health_code_update();
1617 if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
1618 wait_node->app->notify_sock = ust_cmd->sock;
1619 cds_list_del(&wait_node->head);
1620 wait_queue.count--;
1621 app = wait_node->app;
1622 free(wait_node);
1623 DBG3("UST app notify socket %d is set", ust_cmd->sock);
1624 break;
1625 }
1626 }
1627
1628 /*
1629 * With no application at this stage the received socket is
1630 * basically useless so close it before we free the cmd data
1631 * structure for good.
1632 */
1633 if (!app) {
1634 ret = close(ust_cmd->sock);
1635 if (ret < 0) {
1636 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1637 }
1638 lttng_fd_put(1, LTTNG_FD_APPS);
1639 }
1640 free(ust_cmd);
1641 }
1642
1643 if (app) {
1644 /*
1645 * @session_lock_list
1646 *
1647 * Lock the global session list so from the register up to the
1648 * registration done message, no thread can see the application
1649 * and change its state.
1650 */
1651 session_lock_list();
1652 rcu_read_lock();
1653
1654 /*
1655 * Add application to the global hash table. This needs to be
1656 * done before the update to the UST registry can locate the
1657 * application.
1658 */
1659 ust_app_add(app);
1660
1661 /* Set app version. This call will print an error if needed. */
1662 (void) ust_app_version(app);
1663
1664 /* Send notify socket through the notify pipe. */
1665 ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
1666 app->notify_sock);
1667 if (ret < 0) {
1668 rcu_read_unlock();
1669 session_unlock_list();
1670 /*
1671 * No notify thread, stop the UST tracing. However, this is
1672 * not an internal error of this thread, thus setting
1673 * the health error code to a normal exit.
1674 */
1675 err = 0;
1676 goto error;
1677 }
1678
1679 /*
1680 * Update the newly registered application with the tracing
1681 * registry information that is already enabled.
1682 */
1683 update_ust_app(app->sock);
1684
1685 /*
1686 * Don't care about return value. Let the manage apps threads
1687 * handle app unregistration upon socket close.
1688 */
1689 (void) ust_app_register_done(app->sock);
1690
1691 /*
1692 * Even if the application socket has been closed, send the app
1693 * to the thread and unregistration will take place at that
1694 * place.
1695 */
1696 ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
1697 if (ret < 0) {
1698 rcu_read_unlock();
1699 session_unlock_list();
1700 /*
1701 * No apps thread, stop the UST tracing. However, this is
1702 * not an internal error of this thread, thus setting
1703 * the health error code to a normal exit.
1704 */
1705 err = 0;
1706 goto error;
1707 }
1708
1709 rcu_read_unlock();
1710 session_unlock_list();
1711 }
1712 } while (node != NULL);
1713
1714 health_poll_entry();
1715 /* Futex wait on queue. Blocking call on futex() */
1716 futex_nto1_wait(&ust_cmd_queue.futex);
1717 health_poll_exit();
1718 }
1719 /* Normal exit, no error */
1720 err = 0;
1721
1722 error:
1723 /* Clean up wait queue. */
1724 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1725 &wait_queue.head, head) {
1726 cds_list_del(&wait_node->head);
1727 wait_queue.count--;
1728 free(wait_node);
1729 }
1730
1731 DBG("Dispatch thread dying");
1732 if (err) {
1733 health_error();
1734 ERR("Health error occurred in %s", __func__);
1735 }
1736 health_unregister(health_sessiond);
1737 return NULL;
1738 }
1739
1740 /*
1741 * This thread manages application registration.
1742 */
1743 static void *thread_registration_apps(void *data)
1744 {
1745 int sock = -1, i, ret, pollfd, err = -1;
1746 uint32_t revents, nb_fd;
1747 struct lttng_poll_event events;
1748 /*
1749 * Allocated in this thread, enqueued to a global queue, dequeued and
1750 * freed in the manage-apps thread.
1751 */
1752 struct ust_command *ust_cmd = NULL;
1753
1754 DBG("[thread] Manage application registration started");
1755
1756 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
1757
1758 if (testpoint(thread_registration_apps)) {
1759 goto error_testpoint;
1760 }
1761
1762 ret = lttcomm_listen_unix_sock(apps_sock);
1763 if (ret < 0) {
1764 goto error_listen;
1765 }
1766
1767 /*
1768 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1769 * more will be added to this poll set.
1770 */
1771 ret = sessiond_set_thread_pollset(&events, 2);
1772 if (ret < 0) {
1773 goto error_create_poll;
1774 }
1775
1776 /* Add the application registration socket */
1777 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1778 if (ret < 0) {
1779 goto error_poll_add;
1780 }
1781
1782 /* Notify all applications to register */
1783 ret = notify_ust_apps(1);
1784 if (ret < 0) {
1785 ERR("Failed to notify applications or create the wait shared memory.\n"
1786 "Execution continues but there might be problem for already\n"
1787 "running applications that wishes to register.");
1788 }
1789
1790 while (1) {
1791 DBG("Accepting application registration");
1792
1793 /* Infinite blocking call, waiting for transmission */
1794 restart:
1795 health_poll_entry();
1796 ret = lttng_poll_wait(&events, -1);
1797 health_poll_exit();
1798 if (ret < 0) {
1799 /*
1800 * Restart interrupted system call.
1801 */
1802 if (errno == EINTR) {
1803 goto restart;
1804 }
1805 goto error;
1806 }
1807
1808 nb_fd = ret;
1809
1810 for (i = 0; i < nb_fd; i++) {
1811 health_code_update();
1812
1813 /* Fetch once the poll data */
1814 revents = LTTNG_POLL_GETEV(&events, i);
1815 pollfd = LTTNG_POLL_GETFD(&events, i);
1816
1817 /* Thread quit pipe has been closed. Killing thread. */
1818 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1819 if (ret) {
1820 err = 0;
1821 goto exit;
1822 }
1823
1824 /* Event on the registration socket */
1825 if (pollfd == apps_sock) {
1826 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1827 ERR("Register apps socket poll error");
1828 goto error;
1829 } else if (revents & LPOLLIN) {
1830 sock = lttcomm_accept_unix_sock(apps_sock);
1831 if (sock < 0) {
1832 goto error;
1833 }
1834
1835 /*
1836 * Set socket timeout for both receiving and sending.
1837 * app_socket_timeout is in seconds, whereas
1838 * lttcomm_setsockopt_rcv_timeout and
1839 * lttcomm_setsockopt_snd_timeout expect msec as
1840 * parameter.
1841 */
1842 (void) lttcomm_setsockopt_rcv_timeout(sock,
1843 app_socket_timeout * 1000);
1844 (void) lttcomm_setsockopt_snd_timeout(sock,
1845 app_socket_timeout * 1000);
1846
1847 /*
1848 * Set the CLOEXEC flag. Return code is useless because
1849 * either way, the show must go on.
1850 */
1851 (void) utils_set_fd_cloexec(sock);
1852
1853 /* Create UST registration command for enqueuing */
1854 ust_cmd = zmalloc(sizeof(struct ust_command));
1855 if (ust_cmd == NULL) {
1856 PERROR("ust command zmalloc");
1857 goto error;
1858 }
1859
1860 /*
1861 * Using message-based transmissions to ensure we don't
1862 * have to deal with partially received messages.
1863 */
1864 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1865 if (ret < 0) {
1866 ERR("Exhausted file descriptors allowed for applications.");
1867 free(ust_cmd);
1868 ret = close(sock);
1869 if (ret) {
1870 PERROR("close");
1871 }
1872 sock = -1;
1873 continue;
1874 }
1875
1876 health_code_update();
1877 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
1878 if (ret < 0) {
1879 free(ust_cmd);
1880 /* Close socket of the application. */
1881 ret = close(sock);
1882 if (ret) {
1883 PERROR("close");
1884 }
1885 lttng_fd_put(LTTNG_FD_APPS, 1);
1886 sock = -1;
1887 continue;
1888 }
1889 health_code_update();
1890
1891 ust_cmd->sock = sock;
1892 sock = -1;
1893
1894 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1895 " gid:%d sock:%d name:%s (version %d.%d)",
1896 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1897 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1898 ust_cmd->sock, ust_cmd->reg_msg.name,
1899 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1900
1901 /*
1902 * Lock-free enqueue of the registration request. The red pill
1903 * has been taken! This app will be part of the *system*.
1904 */
1905 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1906
1907 /*
1908 * Wake the registration queue futex. Implicit memory
1909 * barrier with the exchange in cds_wfq_enqueue.
1910 */
1911 futex_nto1_wake(&ust_cmd_queue.futex);
1912 }
1913 }
1914 }
1915 }
1916
1917 exit:
1918 error:
1919 if (err) {
1920 health_error();
1921 ERR("Health error occurred in %s", __func__);
1922 }
1923
1924 /* Notify that the registration thread is gone */
1925 notify_ust_apps(0);
1926
1927 if (apps_sock >= 0) {
1928 ret = close(apps_sock);
1929 if (ret) {
1930 PERROR("close");
1931 }
1932 }
1933 if (sock >= 0) {
1934 ret = close(sock);
1935 if (ret) {
1936 PERROR("close");
1937 }
1938 lttng_fd_put(LTTNG_FD_APPS, 1);
1939 }
1940 unlink(apps_unix_sock_path);
1941
1942 error_poll_add:
1943 lttng_poll_clean(&events);
1944 error_listen:
1945 error_create_poll:
1946 error_testpoint:
1947 DBG("UST Registration thread cleanup complete");
1948 health_unregister(health_sessiond);
1949
1950 return NULL;
1951 }
1952
1953 /*
1954 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1955 * exec or it will fail.
1956 */
1957 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1958 {
1959 int ret, clock_ret;
1960 struct timespec timeout;
1961
1962 /* Make sure we set the readiness flag to 0 because we are NOT ready */
1963 consumer_data->consumer_thread_is_ready = 0;
1964
1965 /* Setup pthread condition */
1966 ret = pthread_condattr_init(&consumer_data->condattr);
1967 if (ret != 0) {
1968 errno = ret;
1969 PERROR("pthread_condattr_init consumer data");
1970 goto error;
1971 }
1972
1973 /*
1974 * Set the monotonic clock in order to make sure we DO NOT jump in time
1975 * between the clock_gettime() call and the timedwait call. See bug #324
1976 * for more details on how we noticed it.
1977 */
1978 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
1979 if (ret != 0) {
1980 errno = ret;
1981 PERROR("pthread_condattr_setclock consumer data");
1982 goto error;
1983 }
1984
1985 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
1986 if (ret != 0) {
1987 errno = ret;
1988 PERROR("pthread_cond_init consumer data");
1989 goto error;
1990 }
1991
1992 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
1993 consumer_data);
1994 if (ret != 0) {
1995 PERROR("pthread_create consumer");
1996 ret = -1;
1997 goto error;
1998 }
1999
2000 /* We are about to wait on a pthread condition */
2001 pthread_mutex_lock(&consumer_data->cond_mutex);
2002
2003 /* Get time for sem_timedwait absolute timeout */
2004 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
2005 /*
2006 * Set the timeout for the condition timed wait even if the clock gettime
2007 * call fails since we might loop on that call and we want to avoid
2008 * incrementing the timeout too many times.
2009 */
2010 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
2011
2012 /*
2013 * The following loop COULD be skipped under some conditions, which is why we
2014 * set ret to 0 in order to make sure at least one round of the loop is
2015 * done.
2016 */
2017 ret = 0;
2018
2019 /*
2020 * Loop until the condition is reached or a timeout occurs. Note
2021 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2022 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2023 * possible. This loop does not take any chances and works with both of
2024 * them.
2025 */
2026 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2027 if (clock_ret < 0) {
2028 PERROR("clock_gettime spawn consumer");
2029 /* Infinite wait for the consumerd thread to be ready */
2030 ret = pthread_cond_wait(&consumer_data->cond,
2031 &consumer_data->cond_mutex);
2032 } else {
2033 ret = pthread_cond_timedwait(&consumer_data->cond,
2034 &consumer_data->cond_mutex, &timeout);
2035 }
2036 }
2037
2038 /* Release the pthread condition */
2039 pthread_mutex_unlock(&consumer_data->cond_mutex);
2040
2041 if (ret != 0) {
2042 errno = ret;
2043 if (ret == ETIMEDOUT) {
2044 /*
2045 * Call has timed out so we kill the kconsumerd_thread and return
2046 * an error.
2047 */
2048 ERR("Condition timed out. The consumer thread was never ready."
2049 " Killing it");
2050 ret = pthread_cancel(consumer_data->thread);
2051 if (ret < 0) {
2052 PERROR("pthread_cancel consumer thread");
2053 }
2054 } else {
2055 PERROR("pthread_cond_wait failed consumer thread");
2056 }
2057 goto error;
2058 }
2059
2060 pthread_mutex_lock(&consumer_data->pid_mutex);
2061 if (consumer_data->pid == 0) {
2062 ERR("Consumerd did not start");
2063 pthread_mutex_unlock(&consumer_data->pid_mutex);
2064 goto error;
2065 }
2066 pthread_mutex_unlock(&consumer_data->pid_mutex);
2067
2068 return 0;
2069
2070 error:
2071 return ret;
2072 }
2073
2074 /*
2075 * Join consumer thread
2076 */
2077 static int join_consumer_thread(struct consumer_data *consumer_data)
2078 {
2079 void *status;
2080
2081 /* Consumer pid must be a real one. */
2082 if (consumer_data->pid > 0) {
2083 int ret;
2084 ret = kill(consumer_data->pid, SIGTERM);
2085 if (ret) {
2086 ERR("Error killing consumer daemon");
2087 return ret;
2088 }
2089 return pthread_join(consumer_data->thread, &status);
2090 } else {
2091 return 0;
2092 }
2093 }
2094
2095 /*
2096 * Fork and exec a consumer daemon (consumerd).
2097 *
2098 * Return pid if successful else -1.
2099 */
2100 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2101 {
2102 int ret;
2103 pid_t pid;
2104 const char *consumer_to_use;
2105 const char *verbosity;
2106 struct stat st;
2107
2108 DBG("Spawning consumerd");
2109
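/*
 * fork() and exec the consumerd in the child; the parent only returns the
 * child pid to the caller.
 */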
2110 pid = fork();
2111 if (pid == 0) {
2112 /*
2113 * Exec consumerd.
2114 */
2115 if (opt_verbose_consumer) {
2116 verbosity = "--verbose";
2117 } else {
2118 verbosity = "--quiet";
2119 }
2120 switch (consumer_data->type) {
2121 case LTTNG_CONSUMER_KERNEL:
2122 /*
2123 * Find out which consumerd to execute. We will first try the
2124 * 64-bit path, then the sessiond's installation directory, and
2125 * fallback on the 32-bit one,
2126 */
2127 DBG3("Looking for a kernel consumer at these locations:");
2128 DBG3(" 1) %s", consumerd64_bin);
2129 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2130 DBG3(" 3) %s", consumerd32_bin);
2131 if (stat(consumerd64_bin, &st) == 0) {
2132 DBG3("Found location #1");
2133 consumer_to_use = consumerd64_bin;
2134 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2135 DBG3("Found location #2");
2136 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2137 } else if (stat(consumerd32_bin, &st) == 0) {
2138 DBG3("Found location #3");
2139 consumer_to_use = consumerd32_bin;
2140 } else {
2141 DBG("Could not find any valid consumerd executable");
2142 break;
2143 }
2144 DBG("Using kernel consumer at: %s", consumer_to_use);
2145 execl(consumer_to_use,
2146 "lttng-consumerd", verbosity, "-k",
2147 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2148 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2149 "--group", tracing_group_name,
2150 NULL);
2151 break;
2152 case LTTNG_CONSUMER64_UST:
2153 {
2154 char *tmpnew = NULL;
2155
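/*
 * Prepend the 64-bit consumer library directory to LD_LIBRARY_PATH so the
 * exec'd consumerd resolves its own libraries first.
 */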
2156 if (consumerd64_libdir[0] != '\0') {
2157 char *tmp;
2158 size_t tmplen;
2159
2160 tmp = getenv("LD_LIBRARY_PATH");
2161 if (!tmp) {
2162 tmp = "";
2163 }
2164 tmplen = strlen("LD_LIBRARY_PATH=")
2165 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2166 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2167 if (!tmpnew) {
2168 ret = -ENOMEM;
2169 goto error;
2170 }
2171 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2172 strcat(tmpnew, consumerd64_libdir);
2173 if (tmp[0] != '\0') {
2174 strcat(tmpnew, ":");
2175 strcat(tmpnew, tmp);
2176 }
2177 ret = putenv(tmpnew);
2178 if (ret) {
2179 ret = -errno;
2180 free(tmpnew);
2181 goto error;
2182 }
2183 }
2184 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2185 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2186 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2187 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2188 "--group", tracing_group_name,
2189 NULL);
2190 if (consumerd64_libdir[0] != '\0') {
2191 free(tmpnew);
2192 }
2193 if (ret) {
2194 goto error;
2195 }
2196 break;
2197 }
2198 case LTTNG_CONSUMER32_UST:
2199 {
2200 char *tmpnew = NULL;
2201
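/*
 * Prepend the 32-bit consumer library directory to LD_LIBRARY_PATH so the
 * exec'd consumerd resolves its own libraries first.
 */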
2202 if (consumerd32_libdir[0] != '\0') {
2203 char *tmp;
2204 size_t tmplen;
2205
2206 tmp = getenv("LD_LIBRARY_PATH");
2207 if (!tmp) {
2208 tmp = "";
2209 }
2210 tmplen = strlen("LD_LIBRARY_PATH=")
2211 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2212 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2213 if (!tmpnew) {
2214 ret = -ENOMEM;
2215 goto error;
2216 }
2217 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2218 strcat(tmpnew, consumerd32_libdir);
2219 if (tmp[0] != '\0') {
2220 strcat(tmpnew, ":");
2221 strcat(tmpnew, tmp);
2222 }
2223 ret = putenv(tmpnew);
2224 if (ret) {
2225 ret = -errno;
2226 free(tmpnew);
2227 goto error;
2228 }
2229 }
2230 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2231 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2232 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2233 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2234 "--group", tracing_group_name,
2235 NULL);
2236 if (consumerd32_libdir[0] != '\0') {
2237 free(tmpnew);
2238 }
2239 if (ret) {
2240 goto error;
2241 }
2242 break;
2243 }
2244 default:
2245 PERROR("unknown consumer type");
2246 exit(EXIT_FAILURE);
2247 }
2248 if (errno != 0) {
2249 PERROR("kernel start consumer exec");
2250 }
2251 exit(EXIT_FAILURE);
2252 } else if (pid > 0) {
2253 ret = pid;
2254 } else {
2255 PERROR("start consumer fork");
2256 ret = -errno;
2257 }
2258 error:
2259 return ret;
2260 }
2261
2262 /*
2263 * Spawn the consumerd daemon and session daemon thread.
2264 */
2265 static int start_consumerd(struct consumer_data *consumer_data)
2266 {
2267 int ret;
2268
2269 /*
2270 * Set the listen() state on the socket since there is a possible race
2271 * between the exec() of the consumer daemon and this call if placed in the
2272 * consumer thread. See bug #366 for more details.
2273 */
2274 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2275 if (ret < 0) {
2276 goto error;
2277 }
2278
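/*
 * Bail out early if this consumer was already spawned by another caller
 * (pid already set).
 */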
2279 pthread_mutex_lock(&consumer_data->pid_mutex);
2280 if (consumer_data->pid != 0) {
2281 pthread_mutex_unlock(&consumer_data->pid_mutex);
2282 goto end;
2283 }
2284
2285 ret = spawn_consumerd(consumer_data);
2286 if (ret < 0) {
2287 ERR("Spawning consumerd failed");
2288 pthread_mutex_unlock(&consumer_data->pid_mutex);
2289 goto error;
2290 }
2291
2292 /* Setting up the consumer_data pid */
2293 consumer_data->pid = ret;
2294 DBG2("Consumer pid %d", consumer_data->pid);
2295 pthread_mutex_unlock(&consumer_data->pid_mutex);
2296
2297 DBG2("Spawning consumer control thread");
2298 ret = spawn_consumer_thread(consumer_data);
2299 if (ret < 0) {
2300 ERR("Fatal error spawning consumer control thread");
2301 goto error;
2302 }
2303
2304 end:
2305 return 0;
2306
2307 error:
2308 /* Cleanup already created sockets on error. */
2309 if (consumer_data->err_sock >= 0) {
2310 int err;
2311
2312 err = close(consumer_data->err_sock);
2313 if (err < 0) {
2314 PERROR("close consumer data error socket");
2315 }
2316 }
2317 return ret;
2318 }
2319
2320 /*
2321 * Setup necessary data for kernel tracer action.
2322 */
2323 static int init_kernel_tracer(void)
2324 {
2325 int ret;
2326
2327 /* Modprobe lttng kernel modules */
2328 ret = modprobe_lttng_control();
2329 if (ret < 0) {
2330 goto error;
2331 }
2332
2333 /* Open the lttng kernel tracer proc interface */
2334 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2335 if (kernel_tracer_fd < 0) {
2336 DBG("Failed to open %s", module_proc_lttng);
2337 ret = -1;
2338 goto error_open;
2339 }
2340
2341 /* Validate kernel version */
2342 ret = kernel_validate_version(kernel_tracer_fd);
2343 if (ret < 0) {
2344 goto error_version;
2345 }
2346
2347 ret = modprobe_lttng_data();
2348 if (ret < 0) {
2349 goto error_modules;
2350 }
2351
2352 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2353 return 0;
2354
2355 error_version:
2356 modprobe_remove_lttng_control();
2357 ret = close(kernel_tracer_fd);
2358 if (ret) {
2359 PERROR("close");
2360 }
2361 kernel_tracer_fd = -1;
2362 return LTTNG_ERR_KERN_VERSION;
2363
2364 error_modules:
2365 ret = close(kernel_tracer_fd);
2366 if (ret) {
2367 PERROR("close");
2368 }
2369
2370 error_open:
2371 modprobe_remove_lttng_control();
2372
2373 error:
2374 WARN("No kernel tracer available");
2375 kernel_tracer_fd = -1;
2376 if (!is_root) {
2377 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2378 } else {
2379 return LTTNG_ERR_KERN_NA;
2380 }
2381 }
2382
2383
2384 /*
2385 * Copy consumer output from the tracing session to the domain session. The
2386 * function also applies the per-domain modifications to the trace files'
2387 * destination directory.
2388 *
2389 * Should *NOT* be called with RCU read-side lock held.
2390 */
2391 static int copy_session_consumer(int domain, struct ltt_session *session)
2392 {
2393 int ret;
2394 const char *dir_name;
2395 struct consumer_output *consumer;
2396
2397 assert(session);
2398 assert(session->consumer);
2399
2400 switch (domain) {
2401 case LTTNG_DOMAIN_KERNEL:
2402 DBG3("Copying tracing session consumer output in kernel session");
2403 /*
2404 * XXX: We should audit the session creation and what this function
2405 * does "extra" in order to avoid a destroy since this function is used
2406 * in the domain session creation (kernel and ust) only. Same for UST
2407 * domain.
2408 */
2409 if (session->kernel_session->consumer) {
2410 consumer_destroy_output(session->kernel_session->consumer);
2411 }
2412 session->kernel_session->consumer =
2413 consumer_copy_output(session->consumer);
2414 /* Ease our life a bit for the next part */
2415 consumer = session->kernel_session->consumer;
2416 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2417 break;
2418 case LTTNG_DOMAIN_UST:
2419 DBG3("Copying tracing session consumer output in UST session");
2420 if (session->ust_session->consumer) {
2421 consumer_destroy_output(session->ust_session->consumer);
2422 }
2423 session->ust_session->consumer =
2424 consumer_copy_output(session->consumer);
2425 /* Ease our life a bit for the next part */
2426 consumer = session->ust_session->consumer;
2427 dir_name = DEFAULT_UST_TRACE_DIR;
2428 break;
2429 default:
2430 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2431 goto error;
2432 }
2433
2434 /* Append correct directory to subdir */
2435 strncat(consumer->subdir, dir_name,
2436 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2437 DBG3("Copy session consumer subdir %s", consumer->subdir);
2438
2439 ret = LTTNG_OK;
2440
2441 error:
2442 return ret;
2443 }
2444
2445 /*
2446 * Create a UST session and add it to the session ust list.
2447 *
2448 * Should *NOT* be called with RCU read-side lock held.
2449 */
2450 static int create_ust_session(struct ltt_session *session,
2451 struct lttng_domain *domain)
2452 {
2453 int ret;
2454 struct ltt_ust_session *lus = NULL;
2455
2456 assert(session);
2457 assert(domain);
2458 assert(session->consumer);
2459
2460 switch (domain->type) {
2461 case LTTNG_DOMAIN_UST:
2462 break;
2463 default:
2464 ERR("Unknown UST domain on create session %d", domain->type);
2465 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2466 goto error;
2467 }
2468
2469 DBG("Creating UST session");
2470
2471 lus = trace_ust_create_session(session->id);
2472 if (lus == NULL) {
2473 ret = LTTNG_ERR_UST_SESS_FAIL;
2474 goto error;
2475 }
2476
2477 lus->uid = session->uid;
2478 lus->gid = session->gid;
2479 lus->output_traces = session->output_traces;
2480 lus->snapshot_mode = session->snapshot_mode;
2481 lus->live_timer_interval = session->live_timer;
2482 session->ust_session = lus;
2483
2484 /* Copy session output to the newly created UST session */
2485 ret = copy_session_consumer(domain->type, session);
2486 if (ret != LTTNG_OK) {
2487 goto error;
2488 }
2489
2490 return LTTNG_OK;
2491
2492 error:
2493 free(lus);
2494 session->ust_session = NULL;
2495 return ret;
2496 }
2497
2498 /*
2499 * Create a kernel tracer session then create the default channel.
2500 */
2501 static int create_kernel_session(struct ltt_session *session)
2502 {
2503 int ret;
2504
2505 DBG("Creating kernel session");
2506
2507 ret = kernel_create_session(session, kernel_tracer_fd);
2508 if (ret < 0) {
2509 ret = LTTNG_ERR_KERN_SESS_FAIL;
2510 goto error;
2511 }
2512
2513 /* Code flow safety */
2514 assert(session->kernel_session);
2515
2516 /* Copy session output to the newly created Kernel session */
2517 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2518 if (ret != LTTNG_OK) {
2519 goto error;
2520 }
2521
2522 /* Create directory(ies) on local filesystem. */
2523 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2524 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2525 ret = run_as_mkdir_recursive(
2526 session->kernel_session->consumer->dst.trace_path,
2527 S_IRWXU | S_IRWXG, session->uid, session->gid);
2528 if (ret < 0) {
2529 if (ret != -EEXIST) {
2530 ERR("Trace directory creation error");
2531 goto error;
2532 }
2533 }
2534 }
2535
2536 session->kernel_session->uid = session->uid;
2537 session->kernel_session->gid = session->gid;
2538 session->kernel_session->output_traces = session->output_traces;
2539 session->kernel_session->snapshot_mode = session->snapshot_mode;
2540
2541 return LTTNG_OK;
2542
2543 error:
2544 trace_kernel_destroy_session(session->kernel_session);
2545 session->kernel_session = NULL;
2546 return ret;
2547 }
2548
2549 /*
2550 * Count the number of sessions accessible by the given uid/gid.
2551 */
2552 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2553 {
2554 unsigned int i = 0;
2555 struct ltt_session *session;
2556
2557 DBG("Counting number of available session for UID %d GID %d",
2558 uid, gid);
2559 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2560 /*
2561 * Only list the sessions the user can control.
2562 */
2563 if (!session_access_ok(session, uid, gid)) {
2564 continue;
2565 }
2566 i++;
2567 }
2568 return i;
2569 }
2570
2571 /*
2572 * Process the command requested by the lttng client within the command
2573 * context structure. This function make sure that the return structure (llm)
2574 * is set and ready for transmission before returning.
2575 *
2576 * Return any error encountered or 0 for success.
2577 *
2578 * "sock" is only used for special-case var. len data.
2579 *
2580 * Should *NOT* be called with RCU read-side lock held.
2581 */
2582 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2583 int *sock_error)
2584 {
2585 int ret = LTTNG_OK;
2586 int need_tracing_session = 1;
2587 int need_domain;
2588
2589 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2590
2591 *sock_error = 0;
2592
2593 switch (cmd_ctx->lsm->cmd_type) {
2594 case LTTNG_CREATE_SESSION:
2595 case LTTNG_CREATE_SESSION_SNAPSHOT:
2596 case LTTNG_CREATE_SESSION_LIVE:
2597 case LTTNG_DESTROY_SESSION:
2598 case LTTNG_LIST_SESSIONS:
2599 case LTTNG_LIST_DOMAINS:
2600 case LTTNG_START_TRACE:
2601 case LTTNG_STOP_TRACE:
2602 case LTTNG_DATA_PENDING:
2603 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2604 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2605 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2606 case LTTNG_SNAPSHOT_RECORD:
2607 need_domain = 0;
2608 break;
2609 default:
2610 need_domain = 1;
2611 }
2612
2613 if (opt_no_kernel && need_domain
2614 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2615 if (!is_root) {
2616 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2617 } else {
2618 ret = LTTNG_ERR_KERN_NA;
2619 }
2620 goto error;
2621 }
2622
2623 /* Deny register consumer if we already have a spawned consumer. */
2624 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2625 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2626 if (kconsumer_data.pid > 0) {
2627 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2628 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2629 goto error;
2630 }
2631 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2632 }
2633
2634 /*
2635 * Check for commands that don't need to allocate a returned payload. We do
2636 * this here so we don't have to make the call for no payload at each
2637 * command.
2638 */
2639 switch (cmd_ctx->lsm->cmd_type) {
2640 case LTTNG_LIST_SESSIONS:
2641 case LTTNG_LIST_TRACEPOINTS:
2642 case LTTNG_LIST_TRACEPOINT_FIELDS:
2643 case LTTNG_LIST_DOMAINS:
2644 case LTTNG_LIST_CHANNELS:
2645 case LTTNG_LIST_EVENTS:
2646 break;
2647 default:
2648 /* Setup lttng message with no payload */
2649 ret = setup_lttng_msg(cmd_ctx, 0);
2650 if (ret < 0) {
2651 /* This label does not try to unlock the session */
2652 goto init_setup_error;
2653 }
2654 }
2655
2656 /* Commands that DO NOT need a session. */
2657 switch (cmd_ctx->lsm->cmd_type) {
2658 case LTTNG_CREATE_SESSION:
2659 case LTTNG_CREATE_SESSION_SNAPSHOT:
2660 case LTTNG_CREATE_SESSION_LIVE:
2661 case LTTNG_CALIBRATE:
2662 case LTTNG_LIST_SESSIONS:
2663 case LTTNG_LIST_TRACEPOINTS:
2664 case LTTNG_LIST_TRACEPOINT_FIELDS:
2665 need_tracing_session = 0;
2666 break;
2667 default:
2668 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2669 /*
2670 * We keep the session list lock across _all_ commands
2671 * for now, because the per-session lock does not
2672 * handle teardown properly.
2673 */
2674 session_lock_list();
2675 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2676 if (cmd_ctx->session == NULL) {
2677 ret = LTTNG_ERR_SESS_NOT_FOUND;
2678 goto error;
2679 } else {
2680 /* Acquire lock for the session */
2681 session_lock(cmd_ctx->session);
2682 }
2683 break;
2684 }
2685
2686 if (!need_domain) {
2687 goto skip_domain;
2688 }
2689
2690 /*
2691 * Check domain type for specific "pre-action".
2692 */
2693 switch (cmd_ctx->lsm->domain.type) {
2694 case LTTNG_DOMAIN_KERNEL:
2695 if (!is_root) {
2696 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2697 goto error;
2698 }
2699
2700 /* Kernel tracer check */
2701 if (kernel_tracer_fd == -1) {
2702 /* Basically, load kernel tracer modules */
2703 ret = init_kernel_tracer();
2704 if (ret != 0) {
2705 goto error;
2706 }
2707 }
2708
2709 /* Consumer is in an ERROR state. Report back to client */
2710 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2711 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2712 goto error;
2713 }
2714
2715 /* Need a session for kernel command */
2716 if (need_tracing_session) {
2717 if (cmd_ctx->session->kernel_session == NULL) {
2718 ret = create_kernel_session(cmd_ctx->session);
2719 if (ret < 0) {
2720 ret = LTTNG_ERR_KERN_SESS_FAIL;
2721 goto error;
2722 }
2723 }
2724
2725 /* Start the kernel consumer daemon */
2726 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2727 if (kconsumer_data.pid == 0 &&
2728 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2729 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2730 ret = start_consumerd(&kconsumer_data);
2731 if (ret < 0) {
2732 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2733 goto error;
2734 }
2735 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2736 } else {
2737 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2738 }
2739
2740 /*
2741 * The consumer was just spawned so we need to add the socket to
2742 * the consumer output of the session if it exists.
2743 */
2744 ret = consumer_create_socket(&kconsumer_data,
2745 cmd_ctx->session->kernel_session->consumer);
2746 if (ret < 0) {
2747 goto error;
2748 }
2749 }
2750
2751 break;
2752 case LTTNG_DOMAIN_JUL:
2753 {
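/* JUL domain commands are not routed through this path yet. */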
2754 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2755 goto error;
2756 }
2757 case LTTNG_DOMAIN_UST:
2758 {
2759 if (!ust_app_supported()) {
2760 ret = LTTNG_ERR_NO_UST;
2761 goto error;
2762 }
2763 /* Consumer is in an ERROR state. Report back to client */
2764 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2765 ret = LTTNG_ERR_NO_USTCONSUMERD;
2766 goto error;
2767 }
2768
2769 if (need_tracing_session) {
2770 /* Create UST session if none exist. */
2771 if (cmd_ctx->session->ust_session == NULL) {
2772 ret = create_ust_session(cmd_ctx->session,
2773 &cmd_ctx->lsm->domain);
2774 if (ret != LTTNG_OK) {
2775 goto error;
2776 }
2777 }
2778
2779 /* Start the UST consumer daemons */
2780 /* 64-bit */
2781 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2782 if (consumerd64_bin[0] != '\0' &&
2783 ustconsumer64_data.pid == 0 &&
2784 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2785 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2786 ret = start_consumerd(&ustconsumer64_data);
2787 if (ret < 0) {
2788 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2789 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2790 goto error;
2791 }
2792
2793 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2794 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2795 } else {
2796 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2797 }
2798
2799 /*
2800 * Setup socket for consumer 64 bit. No need for atomic access
2801 * since it was set above and can ONLY be set in this thread.
2802 */
2803 ret = consumer_create_socket(&ustconsumer64_data,
2804 cmd_ctx->session->ust_session->consumer);
2805 if (ret < 0) {
2806 goto error;
2807 }
2808
2809 /* 32-bit */
/* Take the pid mutex before checking it, mirroring the 64-bit path above. */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
2810 if (consumerd32_bin[0] != '\0' &&
2811 ustconsumer32_data.pid == 0 &&
2812 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2813 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2814 ret = start_consumerd(&ustconsumer32_data);
2815 if (ret < 0) {
2816 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2817 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2818 goto error;
2819 }
2820
2821 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2822 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2823 } else {
2824 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2825 }
2826
2827 /*
2828 * Setup socket for consumer 32 bit. No need for atomic access
2829 * since it was set above and can ONLY be set in this thread.
2830 */
2831 ret = consumer_create_socket(&ustconsumer32_data,
2832 cmd_ctx->session->ust_session->consumer);
2833 if (ret < 0) {
2834 goto error;
2835 }
2836 }
2837 break;
2838 }
2839 default:
2840 break;
2841 }
2842 skip_domain:
2843
2844 /* Validate consumer daemon state when start/stop trace command */
2845 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2846 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2847 switch (cmd_ctx->lsm->domain.type) {
2848 case LTTNG_DOMAIN_JUL:
2849 case LTTNG_DOMAIN_UST:
2850 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2851 ret = LTTNG_ERR_NO_USTCONSUMERD;
2852 goto error;
2853 }
2854 break;
2855 case LTTNG_DOMAIN_KERNEL:
2856 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2857 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2858 goto error;
2859 }
2860 break;
2861 }
2862 }
2863
2864 /*
2865 * Check that the UID or GID match that of the tracing session.
2866 * The root user can interact with all sessions.
2867 */
2868 if (need_tracing_session) {
2869 if (!session_access_ok(cmd_ctx->session,
2870 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2871 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2872 ret = LTTNG_ERR_EPERM;
2873 goto error;
2874 }
2875 }
2876
2877 /*
2878 * Send relayd information to consumer as soon as we have a domain and a
2879 * session defined.
2880 */
2881 if (cmd_ctx->session && need_domain) {
2882 /*
2883 * Setup relayd if not done yet. If the relayd information was already
2884 * sent to the consumer, this call will gracefully return.
2885 */
2886 ret = cmd_setup_relayd(cmd_ctx->session);
2887 if (ret != LTTNG_OK) {
2888 goto error;
2889 }
2890 }
2891
2892 /* Process by command type */
2893 switch (cmd_ctx->lsm->cmd_type) {
2894 case LTTNG_ADD_CONTEXT:
2895 {
2896 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2897 cmd_ctx->lsm->u.context.channel_name,
2898 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
2899 break;
2900 }
2901 case LTTNG_DISABLE_CHANNEL:
2902 {
2903 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2904 cmd_ctx->lsm->u.disable.channel_name);
2905 break;
2906 }
2907 case LTTNG_DISABLE_EVENT:
2908 {
2909 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2910 cmd_ctx->lsm->u.disable.channel_name,
2911 cmd_ctx->lsm->u.disable.name);
2912 break;
2913 }
2914 case LTTNG_DISABLE_ALL_EVENT:
2915 {
2916 DBG("Disabling all events");
2917
2918 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2919 cmd_ctx->lsm->u.disable.channel_name);
2920 break;
2921 }
2922 case LTTNG_ENABLE_CHANNEL:
2923 {
2924 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
2925 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
2926 break;
2927 }
2928 case LTTNG_ENABLE_EVENT:
2929 {
2930 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
2931 cmd_ctx->lsm->u.enable.channel_name,
2932 &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
2933 break;
2934 }
2935 case LTTNG_ENABLE_ALL_EVENT:
2936 {
2937 DBG("Enabling all events");
2938
2939 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
2940 cmd_ctx->lsm->u.enable.channel_name,
2941 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
2942 break;
2943 }
2944 case LTTNG_LIST_TRACEPOINTS:
2945 {
2946 struct lttng_event *events;
2947 ssize_t nb_events;
2948
2949 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
2950 if (nb_events < 0) {
2951 /* Return value is a negative lttng_error_code. */
2952 ret = -nb_events;
2953 goto error;
2954 }
2955
2956 /*
2957 * Setup lttng message with payload size set to the event list size in
2958 * bytes and then copy list into the llm payload.
2959 */
2960 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
2961 if (ret < 0) {
2962 free(events);
2963 goto setup_error;
2964 }
2965
2966 /* Copy event list into message payload */
2967 memcpy(cmd_ctx->llm->payload, events,
2968 sizeof(struct lttng_event) * nb_events);
2969
2970 free(events);
2971
2972 ret = LTTNG_OK;
2973 break;
2974 }
2975 case LTTNG_LIST_TRACEPOINT_FIELDS:
2976 {
2977 struct lttng_event_field *fields;
2978 ssize_t nb_fields;
2979
2980 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
2981 &fields);
2982 if (nb_fields < 0) {
2983 /* Return value is a negative lttng_error_code. */
2984 ret = -nb_fields;
2985 goto error;
2986 }
2987
2988 /*
2989 * Setup lttng message with payload size set to the event list size in
2990 * bytes and then copy list into the llm payload.
2991 */
2992 ret = setup_lttng_msg(cmd_ctx,
2993 sizeof(struct lttng_event_field) * nb_fields);
2994 if (ret < 0) {
2995 free(fields);
2996 goto setup_error;
2997 }
2998
2999 /* Copy event list into message payload */
3000 memcpy(cmd_ctx->llm->payload, fields,
3001 sizeof(struct lttng_event_field) * nb_fields);
3002
3003 free(fields);
3004
3005 ret = LTTNG_OK;
3006 break;
3007 }
3008 case LTTNG_SET_CONSUMER_URI:
3009 {
3010 size_t nb_uri, len;
3011 struct lttng_uri *uris;
3012
3013 nb_uri = cmd_ctx->lsm->u.uri.size;
3014 len = nb_uri * sizeof(struct lttng_uri);
3015
3016 if (nb_uri == 0) {
3017 ret = LTTNG_ERR_INVALID;
3018 goto error;
3019 }
3020
3021 uris = zmalloc(len);
3022 if (uris == NULL) {
3023 ret = LTTNG_ERR_FATAL;
3024 goto error;
3025 }
3026
3027 /* Receive variable len data */
3028 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3029 ret = lttcomm_recv_unix_sock(sock, uris, len);
3030 if (ret <= 0) {
3031 DBG("No URIs received from client... continuing");
3032 *sock_error = 1;
3033 ret = LTTNG_ERR_SESSION_FAIL;
3034 free(uris);
3035 goto error;
3036 }
3037
3038 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3039 nb_uri, uris);
3040 if (ret != LTTNG_OK) {
3041 free(uris);
3042 goto error;
3043 }
3044
3045 /*
3046 * XXX: 0 means that this URI should be applied on the session. Should
3047 * be a DOMAIN enuam.
3048 */
3049 if (cmd_ctx->lsm->domain.type == 0) {
3050 /* Add the URI for the UST session if a consumer is present. */
3051 if (cmd_ctx->session->ust_session &&
3052 cmd_ctx->session->ust_session->consumer) {
3053 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3054 nb_uri, uris);
3055 } else if (cmd_ctx->session->kernel_session &&
3056 cmd_ctx->session->kernel_session->consumer) {
3057 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3058 cmd_ctx->session, nb_uri, uris);
3059 }
3060 }
3061
3062 free(uris);
3063
3064 break;
3065 }
3066 case LTTNG_START_TRACE:
3067 {
3068 ret = cmd_start_trace(cmd_ctx->session);
3069 break;
3070 }
3071 case LTTNG_STOP_TRACE:
3072 {
3073 ret = cmd_stop_trace(cmd_ctx->session);
3074 break;
3075 }
3076 case LTTNG_CREATE_SESSION:
3077 {
3078 size_t nb_uri, len;
3079 struct lttng_uri *uris = NULL;
3080
3081 nb_uri = cmd_ctx->lsm->u.uri.size;
3082 len = nb_uri * sizeof(struct lttng_uri);
3083
3084 if (nb_uri > 0) {
3085 uris = zmalloc(len);
3086 if (uris == NULL) {
3087 ret = LTTNG_ERR_FATAL;
3088 goto error;
3089 }
3090
3091 /* Receive variable len data */
3092 DBG("Waiting for %zu URIs from client ...", nb_uri);
3093 ret = lttcomm_recv_unix_sock(sock, uris, len);
3094 if (ret <= 0) {
3095 DBG("No URIs received from client... continuing");
3096 *sock_error = 1;
3097 ret = LTTNG_ERR_SESSION_FAIL;
3098 free(uris);
3099 goto error;
3100 }
3101
3102 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3103 DBG("Creating session with ONE network URI is a bad call");
3104 ret = LTTNG_ERR_SESSION_FAIL;
3105 free(uris);
3106 goto error;
3107 }
3108 }
3109
3110 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3111 &cmd_ctx->creds, 0);
3112
3113 free(uris);
3114
3115 break;
3116 }
3117 case LTTNG_DESTROY_SESSION:
3118 {
3119 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3120
3121 /* Set session to NULL so we do not unlock it after free. */
3122 cmd_ctx->session = NULL;
3123 break;
3124 }
3125 case LTTNG_LIST_DOMAINS:
3126 {
3127 ssize_t nb_dom;
3128 struct lttng_domain *domains;
3129
3130 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3131 if (nb_dom < 0) {
3132 /* Return value is a negative lttng_error_code. */
3133 ret = -nb_dom;
3134 goto error;
3135 }
3136
3137 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3138 if (ret < 0) {
3139 free(domains);
3140 goto setup_error;
3141 }
3142
3143 /* Copy event list into message payload */
3144 memcpy(cmd_ctx->llm->payload, domains,
3145 nb_dom * sizeof(struct lttng_domain));
3146
3147 free(domains);
3148
3149 ret = LTTNG_OK;
3150 break;
3151 }
3152 case LTTNG_LIST_CHANNELS:
3153 {
3154 int nb_chan;
3155 struct lttng_channel *channels;
3156
3157 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3158 cmd_ctx->session, &channels);
3159 if (nb_chan < 0) {
3160 /* Return value is a negative lttng_error_code. */
3161 ret = -nb_chan;
3162 goto error;
3163 }
3164
3165 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3166 if (ret < 0) {
3167 free(channels);
3168 goto setup_error;
3169 }
3170
3171 /* Copy event list into message payload */
3172 memcpy(cmd_ctx->llm->payload, channels,
3173 nb_chan * sizeof(struct lttng_channel));
3174
3175 free(channels);
3176
3177 ret = LTTNG_OK;
3178 break;
3179 }
3180 case LTTNG_LIST_EVENTS:
3181 {
3182 ssize_t nb_event;
3183 struct lttng_event *events = NULL;
3184
3185 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3186 cmd_ctx->lsm->u.list.channel_name, &events);
3187 if (nb_event < 0) {
3188 /* Return value is a negative lttng_error_code. */
3189 ret = -nb_event;
3190 goto error;
3191 }
3192
3193 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3194 if (ret < 0) {
3195 free(events);
3196 goto setup_error;
3197 }
3198
3199 /* Copy event list into message payload */
3200 memcpy(cmd_ctx->llm->payload, events,
3201 nb_event * sizeof(struct lttng_event));
3202
3203 free(events);
3204
3205 ret = LTTNG_OK;
3206 break;
3207 }
3208 case LTTNG_LIST_SESSIONS:
3209 {
3210 unsigned int nr_sessions;
3211
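/*
 * Hold the session list lock across both the count and the copy below so
 * the reported list stays consistent.
 */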
3212 session_lock_list();
3213 nr_sessions = lttng_sessions_count(
3214 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3215 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3216
3217 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3218 if (ret < 0) {
3219 session_unlock_list();
3220 goto setup_error;
3221 }
3222
3223 /* Fill the session array */
3224 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3225 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3226 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3227
3228 session_unlock_list();
3229
3230 ret = LTTNG_OK;
3231 break;
3232 }
3233 case LTTNG_CALIBRATE:
3234 {
3235 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3236 &cmd_ctx->lsm->u.calibrate);
3237 break;
3238 }
3239 case LTTNG_REGISTER_CONSUMER:
3240 {
3241 struct consumer_data *cdata;
3242
3243 switch (cmd_ctx->lsm->domain.type) {
3244 case LTTNG_DOMAIN_KERNEL:
3245 cdata = &kconsumer_data;
3246 break;
3247 default:
3248 ret = LTTNG_ERR_UND;
3249 goto error;
3250 }
3251
3252 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3253 cmd_ctx->lsm->u.reg.path, cdata);
3254 break;
3255 }
3256 case LTTNG_ENABLE_EVENT_WITH_FILTER:
3257 {
3258 struct lttng_filter_bytecode *bytecode;
3259
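/*
 * Refuse empty or oversized filter bytecode before allocating a reception
 * buffer for it.
 */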
3260 if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
3261 ret = LTTNG_ERR_FILTER_INVAL;
3262 goto error;
3263 }
3264 if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
3265 ret = LTTNG_ERR_FILTER_INVAL;
3266 goto error;
3267 }
3268 bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
3269 if (!bytecode) {
3270 ret = LTTNG_ERR_FILTER_NOMEM;
3271 goto error;
3272 }
3273 /* Receive var. len. data */
3274 DBG("Receiving var len data from client ...");
3275 ret = lttcomm_recv_unix_sock(sock, bytecode,
3276 cmd_ctx->lsm->u.enable.bytecode_len);
3277 if (ret <= 0) {
3278 DBG("Nothing recv() from client var len data... continuing");
3279 *sock_error = 1;
3280 ret = LTTNG_ERR_FILTER_INVAL;
3281 goto error;
3282 }
3283
3284 if (bytecode->len + sizeof(*bytecode)
3285 != cmd_ctx->lsm->u.enable.bytecode_len) {
3286 free(bytecode);
3287 ret = LTTNG_ERR_FILTER_INVAL;
3288 goto error;
3289 }
3290
3291 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3292 cmd_ctx->lsm->u.enable.channel_name,
3293 &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
3294 break;
3295 }
3296 case LTTNG_DATA_PENDING:
3297 {
3298 ret = cmd_data_pending(cmd_ctx->session);
3299 break;
3300 }
3301 case LTTNG_SNAPSHOT_ADD_OUTPUT:
3302 {
3303 struct lttcomm_lttng_output_id reply;
3304
3305 ret = cmd_snapshot_add_output(cmd_ctx->session,
3306 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
3307 if (ret != LTTNG_OK) {
3308 goto error;
3309 }
3310
3311 ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
3312 if (ret < 0) {
3313 goto setup_error;
3314 }
3315
3316 /* Copy output list into message payload */
3317 memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
3318 ret = LTTNG_OK;
3319 break;
3320 }
3321 case LTTNG_SNAPSHOT_DEL_OUTPUT:
3322 {
3323 ret = cmd_snapshot_del_output(cmd_ctx->session,
3324 &cmd_ctx->lsm->u.snapshot_output.output);
3325 break;
3326 }
3327 case LTTNG_SNAPSHOT_LIST_OUTPUT:
3328 {
3329 ssize_t nb_output;
3330 struct lttng_snapshot_output *outputs = NULL;
3331
3332 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
3333 if (nb_output < 0) {
3334 ret = -nb_output;
3335 goto error;
3336 }
3337
3338 ret = setup_lttng_msg(cmd_ctx,
3339 nb_output * sizeof(struct lttng_snapshot_output));
3340 if (ret < 0) {
3341 free(outputs);
3342 goto setup_error;
3343 }
3344
3345 if (outputs) {
3346 /* Copy output list into message payload */
3347 memcpy(cmd_ctx->llm->payload, outputs,
3348 nb_output * sizeof(struct lttng_snapshot_output));
3349 free(outputs);
3350 }
3351
3352 ret = LTTNG_OK;
3353 break;
3354 }
3355 case LTTNG_SNAPSHOT_RECORD:
3356 {
3357 ret = cmd_snapshot_record(cmd_ctx->session,
3358 &cmd_ctx->lsm->u.snapshot_record.output,
3359 cmd_ctx->lsm->u.snapshot_record.wait);
3360 break;
3361 }
3362 case LTTNG_CREATE_SESSION_SNAPSHOT:
3363 {
3364 size_t nb_uri, len;
3365 struct lttng_uri *uris = NULL;
3366
3367 nb_uri = cmd_ctx->lsm->u.uri.size;
3368 len = nb_uri * sizeof(struct lttng_uri);
3369
3370 if (nb_uri > 0) {
3371 uris = zmalloc(len);
3372 if (uris == NULL) {
3373 ret = LTTNG_ERR_FATAL;
3374 goto error;
3375 }
3376
3377 /* Receive variable len data */
3378 DBG("Waiting for %zu URIs from client ...", nb_uri);
3379 ret = lttcomm_recv_unix_sock(sock, uris, len);
3380 if (ret <= 0) {
3381 DBG("No URIs received from client... continuing");
3382 *sock_error = 1;
3383 ret = LTTNG_ERR_SESSION_FAIL;
3384 free(uris);
3385 goto error;
3386 }
3387
3388 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3389 DBG("Creating session with ONE network URI is a bad call");
3390 ret = LTTNG_ERR_SESSION_FAIL;
3391 free(uris);
3392 goto error;
3393 }
3394 }
3395
3396 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
3397 nb_uri, &cmd_ctx->creds);
3398 free(uris);
3399 break;
3400 }
3401 case LTTNG_CREATE_SESSION_LIVE:
3402 {
3403 size_t nb_uri, len;
3404 struct lttng_uri *uris = NULL;
3405
3406 nb_uri = cmd_ctx->lsm->u.uri.size;
3407 len = nb_uri * sizeof(struct lttng_uri);
3408
3409 if (nb_uri > 0) {
3410 uris = zmalloc(len);
3411 if (uris == NULL) {
3412 ret = LTTNG_ERR_FATAL;
3413 goto error;
3414 }
3415
3416 /* Receive variable len data */
3417 DBG("Waiting for %zu URIs from client ...", nb_uri);
3418 ret = lttcomm_recv_unix_sock(sock, uris, len);
3419 if (ret <= 0) {
3420 DBG("No URIs received from client... continuing");
3421 *sock_error = 1;
3422 ret = LTTNG_ERR_SESSION_FAIL;
3423 free(uris);
3424 goto error;
3425 }
3426
3427 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3428 DBG("Creating session with ONE network URI is a bad call");
3429 ret = LTTNG_ERR_SESSION_FAIL;
3430 free(uris);
3431 goto error;
3432 }
3433 }
3434
3435 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
3436 nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
3437 free(uris);
3438 break;
3439 }
3440 default:
3441 ret = LTTNG_ERR_UND;
3442 break;
3443 }
3444
3445 error:
3446 if (cmd_ctx->llm == NULL) {
3447 DBG("Missing llm structure. Allocating one.");
3448 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3449 goto setup_error;
3450 }
3451 }
3452 /* Set return code */
3453 cmd_ctx->llm->ret_code = ret;
3454 setup_error:
3455 if (cmd_ctx->session) {
3456 session_unlock(cmd_ctx->session);
3457 }
3458 if (need_tracing_session) {
3459 session_unlock_list();
3460 }
3461 init_setup_error:
3462 return ret;
3463 }
3464
3465 /*
3466 * Thread managing health check socket.
3467 */
3468 static void *thread_manage_health(void *data)
3469 {
3470 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3471 uint32_t revents, nb_fd;
3472 struct lttng_poll_event events;
3473 struct health_comm_msg msg;
3474 struct health_comm_reply reply;
3475
3476 DBG("[thread] Manage health check started");
3477
3478 rcu_register_thread();
3479
3480 /* We might hit an error path before this is created. */
3481 lttng_poll_init(&events);
3482
3483 /* Create unix socket */
3484 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3485 if (sock < 0) {
3486 ERR("Unable to create health check Unix socket");
3487 ret = -1;
3488 goto error;
3489 }
3490
3491 if (is_root) {
3492 /* lttng health client socket path permissions */
3493 ret = chown(health_unix_sock_path, 0,
3494 utils_get_group_id(tracing_group_name));
3495 if (ret < 0) {
3496 ERR("Unable to set group on %s", health_unix_sock_path);
3497 PERROR("chown");
3498 ret = -1;
3499 goto error;
3500 }
3501
3502 ret = chmod(health_unix_sock_path,
3503 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3504 if (ret < 0) {
3505 ERR("Unable to set permissions on %s", health_unix_sock_path);
3506 PERROR("chmod");
3507 ret = -1;
3508 goto error;
3509 }
3510 }
3511
3512 /*
3513 * Set the CLOEXEC flag. Return code is useless because either way, the
3514 * show must go on.
3515 */
3516 (void) utils_set_fd_cloexec(sock);
3517
3518 ret = lttcomm_listen_unix_sock(sock);
3519 if (ret < 0) {
3520 goto error;
3521 }
3522
3523 /*
3524 * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
3525 * more will be added to this poll set.
3526 */
3527 ret = sessiond_set_thread_pollset(&events, 2);
3528 if (ret < 0) {
3529 goto error;
3530 }
3531
3532 /* Add the health check socket */
3533 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3534 if (ret < 0) {
3535 goto error;
3536 }
3537
3538 while (1) {
3539 DBG("Health check ready");
3540
3541 /* Infinite blocking call, waiting for transmission */
3542 restart:
3543 ret = lttng_poll_wait(&events, -1);
3544 if (ret < 0) {
3545 /*
3546 * Restart interrupted system call.
3547 */
3548 if (errno == EINTR) {
3549 goto restart;
3550 }
3551 goto error;
3552 }
3553
3554 nb_fd = ret;
3555
3556 for (i = 0; i < nb_fd; i++) {
3557 /* Fetch once the poll data */
3558 revents = LTTNG_POLL_GETEV(&events, i);
3559 pollfd = LTTNG_POLL_GETFD(&events, i);
3560
3561 /* Thread quit pipe has been closed. Killing thread. */
3562 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3563 if (ret) {
3564 err = 0;
3565 goto exit;
3566 }
3567
3568 /* Event on the health check socket */
3569 if (pollfd == sock) {
3570 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3571 ERR("Health socket poll error");
3572 goto error;
3573 }
3574 }
3575 }
3576
3577 new_sock = lttcomm_accept_unix_sock(sock);
3578 if (new_sock < 0) {
3579 goto error;
3580 }
3581
3582 /*
3583 * Set the CLOEXEC flag. Return code is useless because either way, the
3584 * show must go on.
3585 */
3586 (void) utils_set_fd_cloexec(new_sock);
3587
3588 DBG("Receiving data from client for health...");
3589 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3590 if (ret <= 0) {
3591 DBG("Nothing recv() from client... continuing");
3592 ret = close(new_sock);
3593 if (ret) {
3594 PERROR("close");
3595 }
3596 new_sock = -1;
3597 continue;
3598 }
3599
3600 rcu_thread_online();
3601
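/*
 * Build a bitmask of unhealthy components: bit i is set when the health
 * check for thread type i fails.
 */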
3602 reply.ret_code = 0;
3603 for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
3604 /*
3605 * health_check_state returns 0 if health is
3606 * bad.
3607 */
3608 if (!health_check_state(health_sessiond, i)) {
3609 reply.ret_code |= 1ULL << i;
3610 }
3611 }
3612
3613 DBG2("Health check return value %" PRIx64, reply.ret_code);
3614
3615 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3616 if (ret < 0) {
3617 ERR("Failed to send health data back to client");
3618 }
3619
3620 /* End of transmission */
3621 ret = close(new_sock);
3622 if (ret) {
3623 PERROR("close");
3624 }
3625 new_sock = -1;
3626 }
3627
3628 exit:
3629 error:
3630 if (err) {
3631 ERR("Health error occurred in %s", __func__);
3632 }
3633 DBG("Health check thread dying");
3634 unlink(health_unix_sock_path);
3635 if (sock >= 0) {
3636 ret = close(sock);
3637 if (ret) {
3638 PERROR("close");
3639 }
3640 }
3641
3642 lttng_poll_clean(&events);
3643
3644 rcu_unregister_thread();
3645 return NULL;
3646 }
3647
3648 /*
3649 * This thread manages all client requests using the unix client socket for
3650 * communication.
3651 */
3652 static void *thread_manage_clients(void *data)
3653 {
3654 int sock = -1, ret, i, pollfd, err = -1;
3655 int sock_error;
3656 uint32_t revents, nb_fd;
3657 struct command_ctx *cmd_ctx = NULL;
3658 struct lttng_poll_event events;
3659
3660 DBG("[thread] Manage client started");
3661
3662 rcu_register_thread();
3663
3664 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
3665
3666 if (testpoint(thread_manage_clients)) {
3667 goto error_testpoint;
3668 }
3669
3670 health_code_update();
3671
3672 ret = lttcomm_listen_unix_sock(client_sock);
3673 if (ret < 0) {
3674 goto error_listen;
3675 }
3676
3677 /*
3678 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3679 * more will be added to this poll set.
3680 */
3681 ret = sessiond_set_thread_pollset(&events, 2);
3682 if (ret < 0) {
3683 goto error_create_poll;
3684 }
3685
3686 /* Add the client command socket */
3687 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3688 if (ret < 0) {
3689 goto error;
3690 }
3691
3692 /*
3693 * Notify parent pid that we are ready to accept commands on the client side.
3694 */
3695 if (opt_sig_parent) {
3696 kill(ppid, SIGUSR1);
3697 }
3698
3699 if (testpoint(thread_manage_clients_before_loop)) {
3700 goto error;
3701 }
3702
3703 health_code_update();
3704
3705 while (1) {
3706 DBG("Accepting client command ...");
3707
3708 /* Infinite blocking call, waiting for transmission */
3709 restart:
3710 health_poll_entry();
3711 ret = lttng_poll_wait(&events, -1);
3712 health_poll_exit();
3713 if (ret < 0) {
3714 /*
3715 * Restart interrupted system call.
3716 */
3717 if (errno == EINTR) {
3718 goto restart;
3719 }
3720 goto error;
3721 }
3722
3723 nb_fd = ret;
3724
3725 for (i = 0; i < nb_fd; i++) {
3726 /* Fetch once the poll data */
3727 revents = LTTNG_POLL_GETEV(&events, i);
3728 pollfd = LTTNG_POLL_GETFD(&events, i);
3729
3730 health_code_update();
3731
3732 /* Thread quit pipe has been closed. Killing thread. */
3733 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3734 if (ret) {
3735 err = 0;
3736 goto exit;
3737 }
3738
3739 /* Event on the client socket */
3740 if (pollfd == client_sock) {
3741 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3742 ERR("Client socket poll error");
3743 goto error;
3744 }
3745 }
3746 }
3747
3748 DBG("Wait for client response");
3749
3750 health_code_update();
3751
3752 sock = lttcomm_accept_unix_sock(client_sock);
3753 if (sock < 0) {
3754 goto error;
3755 }
3756
3757 /*
3758 * Set the CLOEXEC flag. Return code is useless because either way, the
3759 * show must go on.
3760 */
3761 (void) utils_set_fd_cloexec(sock);
3762
3763 /* Set socket option for credentials retrieval */
3764 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3765 if (ret < 0) {
3766 goto error;
3767 }
3768
3769 /* Allocate context command to process the client request */
3770 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3771 if (cmd_ctx == NULL) {
3772 PERROR("zmalloc cmd_ctx");
3773 goto error;
3774 }
3775
3776 /* Allocate data buffer for reception */
3777 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3778 if (cmd_ctx->lsm == NULL) {
3779 PERROR("zmalloc cmd_ctx->lsm");
3780 goto error;
3781 }
3782
3783 cmd_ctx->llm = NULL;
3784 cmd_ctx->session = NULL;
3785
3786 health_code_update();
3787
3788 /*
3789 * Data is received from the lttng client. The struct
3790 * lttcomm_session_msg (lsm) contains the command and data request of
3791 * the client.
3792 */
3793 DBG("Receiving data from client ...");
3794 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3795 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3796 if (ret <= 0) {
3797 DBG("Nothing recv() from client... continuing");
3798 ret = close(sock);
3799 if (ret) {
3800 PERROR("close");
3801 }
3802 sock = -1;
3803 clean_command_ctx(&cmd_ctx);
3804 continue;
3805 }
3806
3807 health_code_update();
3808
3809 // TODO: Validate cmd_ctx including sanity check for
3810 // security purpose.
3811
3812 rcu_thread_online();
3813 /*
3814 * This function dispatches the work to the kernel or userspace tracer
3815 * libs and fills the lttcomm_lttng_msg data structure with all the
3816 * information needed by the client. The command context struct contains
3817 * everything this function may need.
3818 */
3819 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3820 rcu_thread_offline();
3821 if (ret < 0) {
3822 ret = close(sock);
3823 if (ret) {
3824 PERROR("close");
3825 }
3826 sock = -1;
3827 /*
3828 * TODO: Inform client somehow of the fatal error. At
3829 * this point, ret < 0 means that a zmalloc failed
3830 * (ENOMEM). Error detected but still accept
3831 * commands, unless a socket error has been
3832 * detected.
3833 */
3834 clean_command_ctx(&cmd_ctx);
3835 continue;
3836 }
3837
3838 health_code_update();
3839
3840 DBG("Sending response (size: %d, retcode: %s)",
3841 cmd_ctx->lttng_msg_size,
3842 lttng_strerror(-cmd_ctx->llm->ret_code));
3843 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3844 if (ret < 0) {
3845 ERR("Failed to send data back to client");
3846 }
3847
3848 /* End of transmission */
3849 ret = close(sock);
3850 if (ret) {
3851 PERROR("close");
3852 }
3853 sock = -1;
3854
3855 clean_command_ctx(&cmd_ctx);
3856
3857 health_code_update();
3858 }
3859
3860 exit:
3861 error:
3862 if (sock >= 0) {
3863 ret = close(sock);
3864 if (ret) {
3865 PERROR("close");
3866 }
3867 }
3868
3869 lttng_poll_clean(&events);
3870 clean_command_ctx(&cmd_ctx);
3871
3872 error_listen:
3873 error_create_poll:
3874 error_testpoint:
3875 unlink(client_unix_sock_path);
3876 if (client_sock >= 0) {
3877 ret = close(client_sock);
3878 if (ret) {
3879 PERROR("close");
3880 }
3881 }
3882
3883 if (err) {
3884 health_error();
3885 ERR("Health error occurred in %s", __func__);
3886 }
3887
3888 health_unregister(health_sessiond);
3889
3890 DBG("Client thread dying");
3891
3892 rcu_unregister_thread();
3893 return NULL;
3894 }
3895
3896
3897 /*
3898 * usage function on stderr
3899 */
3900 static void usage(void)
3901 {
3902 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3903 fprintf(stderr, " -h, --help Display this usage.\n");
3904 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3905 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3906 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3907 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3908 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
3909 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
3910 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
3911 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
3912 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
3913 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
3914 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
3915 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
3916 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3917 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3918 fprintf(stderr, " -V, --version Show version number.\n");
3919 fprintf(stderr, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
3920 fprintf(stderr, " -q, --quiet No output at all.\n");
3921 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3922 fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
3923 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3924 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
3925 fprintf(stderr, " --jul-tcp-port JUL application registration TCP port\n");
3926 }
3927
3928 /*
3929 * daemon argument parsing
3930 */
3931 static int parse_args(int argc, char **argv)
3932 {
3933 int c;
3934
3935 static struct option long_options[] = {
3936 { "client-sock", 1, 0, 'c' },
3937 { "apps-sock", 1, 0, 'a' },
3938 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3939 { "kconsumerd-err-sock", 1, 0, 'E' },
3940 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
3941 { "ustconsumerd32-err-sock", 1, 0, 'H' },
3942 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
3943 { "ustconsumerd64-err-sock", 1, 0, 'F' },
3944 { "consumerd32-path", 1, 0, 'u' },
3945 { "consumerd32-libdir", 1, 0, 'U' },
3946 { "consumerd64-path", 1, 0, 't' },
3947 { "consumerd64-libdir", 1, 0, 'T' },
3948 { "daemonize", 0, 0, 'd' },
3949 { "sig-parent", 0, 0, 'S' },
3950 { "help", 0, 0, 'h' },
3951 { "group", 1, 0, 'g' },
3952 { "version", 0, 0, 'V' },
3953 { "quiet", 0, 0, 'q' },
3954 { "verbose", 0, 0, 'v' },
3955 { "verbose-consumer", 0, 0, 'Z' },
3956 { "no-kernel", 0, 0, 'N' },
3957 { "pidfile", 1, 0, 'p' },
3958 { "jul-tcp-port", 1, 0, 'J' },
3959 { NULL, 0, 0, 0 }
3960 };
3961
3962 while (1) {
3963 int option_index = 0;
3964 c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:J:",
3965 long_options, &option_index);
3966 if (c == -1) {
3967 break;
3968 }
3969
3970 switch (c) {
3971 case 0:
3972 fprintf(stderr, "option %s", long_options[option_index].name);
3973 if (optarg) {
3974 fprintf(stderr, " with arg %s\n", optarg);
3975 }
3976 break;
3977 case 'c':
3978 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
3979 break;
3980 case 'a':
3981 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
3982 break;
3983 case 'd':
3984 opt_daemon = 1;
3985 break;
3986 case 'g':
3987 tracing_group_name = optarg;
3988 break;
3989 case 'h':
3990 usage();
3991 exit(EXIT_FAILURE);
3992 case 'V':
3993 fprintf(stdout, "%s\n", VERSION);
3994 exit(EXIT_SUCCESS);
3995 case 'S':
3996 opt_sig_parent = 1;
3997 break;
3998 case 'E':
3999 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4000 break;
4001 case 'C':
4002 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4003 break;
4004 case 'F':
4005 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4006 break;
4007 case 'D':
4008 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4009 break;
4010 case 'H':
4011 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4012 break;
4013 case 'G':
4014 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4015 break;
4016 case 'N':
4017 opt_no_kernel = 1;
4018 break;
4019 case 'q':
4020 lttng_opt_quiet = 1;
4021 break;
4022 case 'v':
4023 /* Verbose level can increase using multiple -v */
4024 lttng_opt_verbose += 1;
4025 break;
4026 case 'Z':
4027 opt_verbose_consumer += 1;
4028 break;
4029 case 'u':
4030 consumerd32_bin = optarg;
4031 break;
4032 case 'U':
4033 consumerd32_libdir = optarg;
4034 break;
4035 case 't':
4036 consumerd64_bin = optarg;
4037 break;
4038 case 'T':
4039 consumerd64_libdir = optarg;
4040 break;
4041 case 'p':
4042 opt_pidfile = optarg;
4043 break;
4044 case 'J': /* JUL TCP port. */
4045 {
4046 unsigned long v;
4047
4048 errno = 0;
4049 v = strtoul(optarg, NULL, 0);
4050 if (errno != 0 || !isdigit(optarg[0])) {
4051 ERR("Wrong value in --jul-tcp-port parameter: %s", optarg);
4052 return -1;
4053 }
4054 if (v == 0 || v >= 65535) {
4055 ERR("Port overflow in --jul-tcp-port parameter: %s", optarg);
4056 return -1;
4057 }
4058 jul_tcp_port = (uint32_t) v;
4059 DBG3("JUL TCP port set to non default: %u", jul_tcp_port);
4060 break;
4061 }
4062 default:
4063 /* Unknown option or other error.
4064 * Error is printed by getopt, just return */
4065 return -1;
4066 }
4067 }
4068
4069 return 0;
4070 }
4071
4072 /*
4073 * Creates the two sockets needed by the daemon.
4074 * apps_sock - The communication socket for all UST apps.
4075 * client_sock - The communication socket for the cli tool (lttng).
4076 */
4077 static int init_daemon_socket(void)
4078 {
4079 int ret = 0;
4080 mode_t old_umask;
4081
4082 old_umask = umask(0);
4083
4084 /* Create client tool unix socket */
4085 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
4086 if (client_sock < 0) {
4087 ERR("Create unix sock failed: %s", client_unix_sock_path);
4088 ret = -1;
4089 goto end;
4090 }
4091
4092 /* Set the cloexec flag */
4093 ret = utils_set_fd_cloexec(client_sock);
4094 if (ret < 0) {
4095 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
4096 "Continuing but note that the consumer daemon will have a "
4097 "reference to this socket on exec()", client_sock);
4098 }
4099
4100 /* File permission MUST be 660 */
4101 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4102 if (ret < 0) {
4103 ERR("Set file permissions failed: %s", client_unix_sock_path);
4104 PERROR("chmod");
4105 goto end;
4106 }
4107
4108 /* Create the application unix socket */
4109 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
4110 if (apps_sock < 0) {
4111 ERR("Create unix sock failed: %s", apps_unix_sock_path);
4112 ret = -1;
4113 goto end;
4114 }
4115
4116 /* Set the cloexec flag */
4117 ret = utils_set_fd_cloexec(apps_sock);
4118 if (ret < 0) {
4119 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
4120 "Continuing but note that the consumer daemon will have a "
4121 "reference to this socket on exec()", apps_sock);
4122 }
4123
4124 /* File permission MUST be 666 */
4125 ret = chmod(apps_unix_sock_path,
4126 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
4127 if (ret < 0) {
4128 ERR("Set file permissions failed: %s", apps_unix_sock_path);
4129 PERROR("chmod");
4130 goto end;
4131 }
4132
4133 DBG3("Session daemon client socket %d and application socket %d created",
4134 client_sock, apps_sock);
4135
4136 end:
4137 umask(old_umask);
4138 return ret;
4139 }
4140
4141 /*
4142 * Check if the global socket is available and whether a daemon is answering on
4143 * the other side. If so, an error is returned.
4144 */
4145 static int check_existing_daemon(void)
4146 {
4147 /* Is there anybody out there ? */
4148 if (lttng_session_daemon_alive()) {
4149 return -EEXIST;
4150 }
4151
4152 return 0;
4153 }
4154
4155 /*
4156 * Set the tracing group gid onto the client socket.
4157 *
4158 * Race window between mkdir and chown is OK because we are going from more
4159 * permissive (root.root) to less permissive (root.tracing).
4160 */
4161 static int set_permissions(char *rundir)
4162 {
4163 int ret;
4164 gid_t gid;
4165
4166 gid = utils_get_group_id(tracing_group_name);
4167
4168 /* Set lttng run dir */
4169 ret = chown(rundir, 0, gid);
4170 if (ret < 0) {
4171 ERR("Unable to set group on %s", rundir);
4172 PERROR("chown");
4173 }
4174
4175 /*
4176 * Ensure all applications and tracing group can search the run
4177 * dir. Allow everyone to read the directory, since it does not
4178 * buy us anything to hide its content.
4179 */
4180 ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
4181 if (ret < 0) {
4182 ERR("Unable to set permissions on %s", rundir);
4183 PERROR("chmod");
4184 }
4185
4186 /* lttng client socket path */
4187 ret = chown(client_unix_sock_path, 0, gid);
4188 if (ret < 0) {
4189 ERR("Unable to set group on %s", client_unix_sock_path);
4190 PERROR("chown");
4191 }
4192
4193 /* kconsumer error socket path */
4194 ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
4195 if (ret < 0) {
4196 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
4197 PERROR("chown");
4198 }
4199
4200 /* 64-bit ustconsumer error socket path */
4201 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
4202 if (ret < 0) {
4203 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
4204 PERROR("chown");
4205 }
4206
4207 /* 32-bit ustconsumer compat32 error socket path */
4208 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
4209 if (ret < 0) {
4210 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
4211 PERROR("chown");
4212 }
4213
4214 DBG("All permissions are set");
4215
4216 return ret;
4217 }
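
/*
 * Illustration of the lookup presumably done by utils_get_group_id() above:
 * resolve the tracing group name to a gid with getgrnam_r(), falling back to
 * the root group when the name is unknown. Sketch only; the real helper
 * comes from common/utils.h.
 */
static gid_t example_get_group_id(const char *name)
{
	int ret;
	char buf[1024];
	struct group grp;
	struct group *result = NULL;

	ret = getgrnam_r(name, &grp, buf, sizeof(buf), &result);
	if (ret != 0 || result == NULL) {
		/* Group not found or lookup error: fall back to gid 0. */
		return 0;
	}
	return grp.gr_gid;
}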
4218
4219 /*
4220 * Create the lttng run directory needed for all global sockets and pipe.
4221 */
4222 static int create_lttng_rundir(const char *rundir)
4223 {
4224 int ret;
4225
4226 DBG3("Creating LTTng run directory: %s", rundir);
4227
4228 ret = mkdir(rundir, S_IRWXU);
4229 if (ret < 0) {
4230 if (errno != EEXIST) {
4231 ERR("Unable to create %s", rundir);
4232 goto error;
4233 } else {
4234 ret = 0;
4235 }
4236 }
4237
4238 error:
4239 return ret;
4240 }
4241
4242 /*
4243 * Set up the socket and directory needed for the consumer daemon (kernel or
4244 * UST) to communicate with the session daemon.
4245 */
4246 static int set_consumer_sockets(struct consumer_data *consumer_data,
4247 const char *rundir)
4248 {
4249 int ret;
4250 char path[PATH_MAX];
4251
4252 switch (consumer_data->type) {
4253 case LTTNG_CONSUMER_KERNEL:
4254 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
4255 break;
4256 case LTTNG_CONSUMER64_UST:
4257 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
4258 break;
4259 case LTTNG_CONSUMER32_UST:
4260 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
4261 break;
4262 default:
4263 ERR("Consumer type unknown");
4264 ret = -EINVAL;
4265 goto error;
4266 }
4267
4268 DBG2("Creating consumer directory: %s", path);
4269
4270 ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
4271 if (ret < 0) {
4272 if (errno != EEXIST) {
4273 PERROR("mkdir");
4274 ERR("Failed to create %s", path);
4275 goto error;
4276 }
4277 ret = -1;
4278 }
4279 if (is_root) {
4280 ret = chown(path, 0, utils_get_group_id(tracing_group_name));
4281 if (ret < 0) {
4282 ERR("Unable to set group on %s", path);
4283 PERROR("chown");
4284 goto error;
4285 }
4286 }
4287
4288 /* Create the consumerd error unix socket */
4289 consumer_data->err_sock =
4290 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
4291 if (consumer_data->err_sock < 0) {
4292 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
4293 ret = -1;
4294 goto error;
4295 }
4296
4297 /*
4298 * Set the CLOEXEC flag. Return code is useless because either way, the
4299 * show must go on.
4300 */
4301 ret = utils_set_fd_cloexec(consumer_data->err_sock);
4302 if (ret < 0) {
4303 PERROR("utils_set_fd_cloexec");
4304 /* continue anyway */
4305 }
4306
4307 /* File permission MUST be 660 */
4308 ret = chmod(consumer_data->err_unix_sock_path,
4309 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4310 if (ret < 0) {
4311 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
4312 PERROR("chmod");
4313 goto error;
4314 }
4315
4316 error:
4317 return ret;
4318 }
4319
4320 /*
4321 * Signal handler for the daemon
4322 *
4323 * Simply stop all worker threads, letting main() return gracefully after
4324 * joining all threads and calling cleanup().
4325 */
4326 static void sighandler(int sig)
4327 {
4328 switch (sig) {
4329 case SIGPIPE:
4330 DBG("SIGPIPE caught");
4331 return;
4332 case SIGINT:
4333 DBG("SIGINT caught");
4334 stop_threads();
4335 break;
4336 case SIGTERM:
4337 DBG("SIGTERM caught");
4338 stop_threads();
4339 break;
4340 default:
4341 break;
4342 }
4343 }
4344
4345 /*
4346 * Set up the signal handler for:
4347 * SIGINT, SIGTERM and SIGPIPE.
4348 */
4349 static int set_signal_handler(void)
4350 {
4351 int ret = 0;
4352 struct sigaction sa;
4353 sigset_t sigset;
4354
4355 if ((ret = sigemptyset(&sigset)) < 0) {
4356 PERROR("sigemptyset");
4357 return ret;
4358 }
4359
4360 sa.sa_handler = sighandler;
4361 sa.sa_mask = sigset;
4362 sa.sa_flags = 0;
4363 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
4364 PERROR("sigaction");
4365 return ret;
4366 }
4367
4368 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
4369 PERROR("sigaction");
4370 return ret;
4371 }
4372
4373 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
4374 PERROR("sigaction");
4375 return ret;
4376 }
4377
4378 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
4379
4380 return ret;
4381 }
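
/*
 * Note on the handler above: stop_threads() is expected to wake the worker
 * threads up. A conventional, async-signal-safe way to do that is a single
 * write() on a quit pipe that every poll loop watches, presumably what the
 * thread quit pipe created in main() is for. Hedged sketch using a
 * hypothetical pipe variable:
 */
static int example_quit_pipe[2] = { -1, -1 };

static void example_notify_quit(void)
{
	int saved_errno = errno;

	/* write(2) is async-signal-safe; restore errno for the interrupted code. */
	(void) write(example_quit_pipe[1], "!", 1);
	errno = saved_errno;
}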
4382
4383 /*
4384 * Raise the open files limit. This daemon can open a large number of file
4385 * descriptors in order to consume multiple kernel traces.
4386 */
4387 static void set_ulimit(void)
4388 {
4389 int ret;
4390 struct rlimit lim;
4391
4392 /* The kernel does not allow an infinite limit for open files */
4393 lim.rlim_cur = 65535;
4394 lim.rlim_max = 65535;
4395
4396 ret = setrlimit(RLIMIT_NOFILE, &lim);
4397 if (ret < 0) {
4398 PERROR("failed to set open files limit");
4399 }
4400 }
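
/*
 * set_ulimit() above is only called when running as root, since raising the
 * hard limit requires privileges. For completeness, a hedged sketch of an
 * unprivileged fallback: raise only the soft limit up to the existing hard
 * limit.
 */
static void example_raise_soft_nofile(void)
{
	int ret;
	struct rlimit lim;

	ret = getrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("getrlimit");
		return;
	}
	if (lim.rlim_cur < lim.rlim_max) {
		lim.rlim_cur = lim.rlim_max;
		ret = setrlimit(RLIMIT_NOFILE, &lim);
		if (ret < 0) {
			PERROR("failed to raise soft open files limit");
		}
	}
}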
4401
4402 /*
4403 * Write pidfile using the rundir and opt_pidfile.
4404 */
4405 static void write_pidfile(void)
4406 {
4407 int ret;
4408 char pidfile_path[PATH_MAX];
4409
4410 assert(rundir);
4411
4412 if (opt_pidfile) {
4413 strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
4414 } else {
4415 /* Build the pidfile path from rundir and the default pidfile name. */
4416 ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
4417 DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
4418 if (ret < 0) {
4419 PERROR("snprintf pidfile path");
4420 goto error;
4421 }
4422 }
4423
4424 /*
4425 * Create pid file in rundir. Return value is of no importance. The
4426 * execution will continue even though we are not able to write the file.
4427 */
4428 (void) utils_create_pid_file(getpid(), pidfile_path);
4429
4430 error:
4431 return;
4432 }
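
/*
 * Illustration of what utils_create_pid_file() above is assumed to do: write
 * the pid followed by a newline to the given path. Sketch only; the real
 * helper comes from common/utils.h.
 */
static int example_create_pid_file(pid_t pid, const char *filepath)
{
	int ret;
	FILE *fp;

	fp = fopen(filepath, "w");
	if (fp == NULL) {
		PERROR("open pid file %s", filepath);
		return -1;
	}

	ret = fprintf(fp, "%d\n", (int) pid);
	if (ret < 0) {
		PERROR("fprintf pid file");
	}

	if (fclose(fp)) {
		PERROR("fclose pid file");
	}
	return ret < 0 ? -1 : 0;
}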
4433
4434 /*
4435 * main
4436 */
4437 int main(int argc, char **argv)
4438 {
4439 int ret = 0;
4440 void *status;
4441 const char *home_path, *env_app_timeout;
4442
4443 init_kernel_workarounds();
4444
4445 rcu_register_thread();
4446
4447 setup_consumerd_path();
4448
4449 page_size = sysconf(_SC_PAGESIZE);
4450 if (page_size < 0) {
4451 PERROR("sysconf _SC_PAGESIZE");
4452 page_size = LONG_MAX;
4453 WARN("Fallback page size to %ld", page_size);
4454 }
4455
4456 /* Parse arguments */
4457 progname = argv[0];
4458 if ((ret = parse_args(argc, argv)) < 0) {
4459 goto error;
4460 }
4461
4462 /* Daemonize */
4463 if (opt_daemon) {
4464 int i;
4465
4466 /*
4467 * fork
4468 * child: setsid, close FD 0, 1, 2, chdir /
4469 * parent: exit (if fork is successful)
4470 */
4471 ret = daemon(0, 0);
4472 if (ret < 0) {
4473 PERROR("daemon");
4474 goto error;
4475 }
4476 /*
4477 * We are in the child. Make sure all other file
4478 * descriptors are closed, in case we are called with
4479 * more opened file descriptors than the standard ones.
4480 */
4481 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4482 (void) close(i);
4483 }
4484 }
4485
4486 /* Create thread quit pipe */
4487 if ((ret = init_thread_quit_pipe()) < 0) {
4488 goto error;
4489 }
4490
4491 /* Check if daemon is UID = 0 */
4492 is_root = !getuid();
4493
4494 if (is_root) {
4495 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
4496
4497 /* Create global run dir with root access */
4498 ret = create_lttng_rundir(rundir);
4499 if (ret < 0) {
4500 goto error;
4501 }
4502
4503 if (strlen(apps_unix_sock_path) == 0) {
4504 snprintf(apps_unix_sock_path, PATH_MAX,
4505 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
4506 }
4507
4508 if (strlen(client_unix_sock_path) == 0) {
4509 snprintf(client_unix_sock_path, PATH_MAX,
4510 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
4511 }
4512
4513 /* Set global SHM for ust */
4514 if (strlen(wait_shm_path) == 0) {
4515 snprintf(wait_shm_path, PATH_MAX,
4516 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
4517 }
4518
4519 if (strlen(health_unix_sock_path) == 0) {
4520 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4521 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
4522 }
4523
4524 /* Setup kernel consumerd path */
4525 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
4526 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
4527 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
4528 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
4529
4530 DBG2("Kernel consumer err path: %s",
4531 kconsumer_data.err_unix_sock_path);
4532 DBG2("Kernel consumer cmd path: %s",
4533 kconsumer_data.cmd_unix_sock_path);
4534 } else {
4535 home_path = utils_get_home_dir();
4536 if (home_path == NULL) {
4537 /* TODO: Add --socket PATH option */
4538 ERR("Can't get HOME directory for sockets creation.");
4539 ret = -EPERM;
4540 goto error;
4541 }
4542
4543 /*
4544 * Create rundir from home path. This will create something like
4545 * $HOME/.lttng
4546 */
4547 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
4548 if (ret < 0) {
4549 ret = -ENOMEM;
4550 goto error;
4551 }
4552
4553 ret = create_lttng_rundir(rundir);
4554 if (ret < 0) {
4555 goto error;
4556 }
4557
4558 if (strlen(apps_unix_sock_path) == 0) {
4559 snprintf(apps_unix_sock_path, PATH_MAX,
4560 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
4561 }
4562
4563 /* Set the cli tool unix socket path */
4564 if (strlen(client_unix_sock_path) == 0) {
4565 snprintf(client_unix_sock_path, PATH_MAX,
4566 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
4567 }
4568
4569 /* Set global SHM for ust */
4570 if (strlen(wait_shm_path) == 0) {
4571 snprintf(wait_shm_path, PATH_MAX,
4572 DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
4573 }
4574
4575 /* Set health check Unix path */
4576 if (strlen(health_unix_sock_path) == 0) {
4577 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4578 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
4579 }
4580 }
4581
4582 /* Set consumer initial state */
4583 kernel_consumerd_state = CONSUMER_STOPPED;
4584 ust_consumerd_state = CONSUMER_STOPPED;
4585
4586 DBG("Client socket path %s", client_unix_sock_path);
4587 DBG("Application socket path %s", apps_unix_sock_path);
4588 DBG("Application wait path %s", wait_shm_path);
4589 DBG("LTTng run directory path: %s", rundir);
4590
4591 /* 32 bits consumerd path setup */
4592 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
4593 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4594 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4595 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4596
4597 DBG2("UST consumer 32 bits err path: %s",
4598 ustconsumer32_data.err_unix_sock_path);
4599 DBG2("UST consumer 32 bits cmd path: %s",
4600 ustconsumer32_data.cmd_unix_sock_path);
4601
4602 /* 64 bits consumerd path setup */
4603 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4604 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4605 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4606 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4607
4608 DBG2("UST consumer 64 bits err path: %s",
4609 ustconsumer64_data.err_unix_sock_path);
4610 DBG2("UST consumer 64 bits cmd path: %s",
4611 ustconsumer64_data.cmd_unix_sock_path);
4612
4613 /*
4614 * See if a daemon is already running.
4615 */
4616 if ((ret = check_existing_daemon()) < 0) {
4617 ERR("Already running daemon.\n");
4618 /*
4619 * We do not goto exit because we must not call cleanup();
4620 * another daemon is already running.
4621 */
4622 goto error;
4623 }
4624
4625 /*
4626 * Init UST app hash table. Alloc hash table before this point since
4627 * cleanup() can get called after that point.
4628 */
4629 ust_app_ht_alloc();
4630
4631 /* After this point, we can safely call cleanup() with "goto exit" */
4632
4633 /*
4634 * These actions must be executed as root. We do that *after* setting up
4635 * the socket paths because we MUST check for another daemon using those
4636 * paths *before* trying to set up the kernel consumer sockets and init the
4637 * kernel tracer.
4638 */
4639 if (is_root) {
4640 ret = set_consumer_sockets(&kconsumer_data, rundir);
4641 if (ret < 0) {
4642 goto exit;
4643 }
4644
4645 /* Setup kernel tracer */
4646 if (!opt_no_kernel) {
4647 init_kernel_tracer();
4648 }
4649
4650 /* Set ulimit for open files */
4651 set_ulimit();
4652 }
4653 /* init lttng_fd tracking must be done after set_ulimit. */
4654 lttng_fd_init();
4655
4656 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4657 if (ret < 0) {
4658 goto exit;
4659 }
4660
4661 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4662 if (ret < 0) {
4663 goto exit;
4664 }
4665
4666 if ((ret = set_signal_handler()) < 0) {
4667 goto exit;
4668 }
4669
4670 /* Setup the needed unix socket */
4671 if ((ret = init_daemon_socket()) < 0) {
4672 goto exit;
4673 }
4674
4675 /* Set credentials to socket */
4676 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4677 goto exit;
4678 }
4679
4680 /* Get parent pid if -S, --sig-parent is specified. */
4681 if (opt_sig_parent) {
4682 ppid = getppid();
4683 }
4684
4685 /* Setup the kernel pipe for waking up the kernel thread */
4686 if (is_root && !opt_no_kernel) {
4687 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
4688 goto exit;
4689 }
4690 }
4691
4692 /* Setup the thread ht_cleanup communication pipe. */
4693 if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
4694 goto exit;
4695 }
4696
4697 /* Setup the thread apps communication pipe. */
4698 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
4699 goto exit;
4700 }
4701
4702 /* Setup the thread apps notify communication pipe. */
4703 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
4704 goto exit;
4705 }
4706
4707 /* Initialize global buffer per UID and PID registry. */
4708 buffer_reg_init_uid_registry();
4709 buffer_reg_init_pid_registry();
4710
4711 /* Init UST command queue. */
4712 cds_wfq_init(&ust_cmd_queue.queue);
4713
4714 /*
4715 * Get session list pointer. This pointer MUST NOT be free(). This list is
4716 * statically declared in session.c
4717 */
4718 session_list_ptr = session_get_list();
4719
4720 /* Set up max poll set size */
4721 lttng_poll_set_max_size();
4722
4723 cmd_init();
4724
4725 /* Check for the application socket timeout env variable. */
4726 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
4727 if (env_app_timeout) {
4728 app_socket_timeout = atoi(env_app_timeout);
4729 } else {
4730 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
4731 }
4732
4733 write_pidfile();
4734
4735 /* Initialize communication library */
4736 lttcomm_init();
4737 /* This is to get the TCP timeout value. */
4738 lttcomm_inet_init();
4739
4740 /*
4741 * Initialize the health check subsystem. This call should set the
4742 * appropriate time values.
4743 */
4744 health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
4745 if (!health_sessiond) {
4746 PERROR("health_app_create error");
4747 goto exit_health_sessiond_cleanup;
4748 }
4749
4750 /* Create thread to clean up the RCU hash tables. */
4751 ret = pthread_create(&ht_cleanup_thread, NULL,
4752 thread_ht_cleanup, (void *) NULL);
4753 if (ret != 0) {
4754 PERROR("pthread_create ht_cleanup");
4755 goto exit_ht_cleanup;
4756 }
4757
4758 /* Create thread to manage the health check socket. */
4759 ret = pthread_create(&health_thread, NULL,
4760 thread_manage_health, (void *) NULL);
4761 if (ret != 0) {
4762 PERROR("pthread_create health");
4763 goto exit_health;
4764 }
4765
4766 /* Create thread to manage the client socket */
4767 ret = pthread_create(&client_thread, NULL,
4768 thread_manage_clients, (void *) NULL);
4769 if (ret != 0) {
4770 PERROR("pthread_create clients");
4771 goto exit_client;
4772 }
4773
4774 /* Create thread to dispatch registration */
4775 ret = pthread_create(&dispatch_thread, NULL,
4776 thread_dispatch_ust_registration, (void *) NULL);
4777 if (ret != 0) {
4778 PERROR("pthread_create dispatch");
4779 goto exit_dispatch;
4780 }
4781
4782 /* Create thread to manage application registration. */
4783 ret = pthread_create(&reg_apps_thread, NULL,
4784 thread_registration_apps, (void *) NULL);
4785 if (ret != 0) {
4786 PERROR("pthread_create registration");
4787 goto exit_reg_apps;
4788 }
4789
4790 /* Create thread to manage application socket */
4791 ret = pthread_create(&apps_thread, NULL,
4792 thread_manage_apps, (void *) NULL);
4793 if (ret != 0) {
4794 PERROR("pthread_create apps");
4795 goto exit_apps;
4796 }
4797
4798 /* Create thread to manage application notify socket */
4799 ret = pthread_create(&apps_notify_thread, NULL,
4800 ust_thread_manage_notify, (void *) NULL);
4801 if (ret != 0) {
4802 PERROR("pthread_create apps");
4803 goto exit_apps_notify;
4804 }
4805
4806 /* Create JUL registration thread. */
4807 ret = pthread_create(&jul_reg_thread, NULL,
4808 jul_thread_manage_registration, (void *) NULL);
4809 if (ret != 0) {
4810 PERROR("pthread_create apps");
4811 goto exit_jul_reg;
4812 }
4813
4814 /* Don't start this thread if kernel tracing is not requested or we are not root. */
4815 if (is_root && !opt_no_kernel) {
4816 /* Create kernel thread to manage kernel event */
4817 ret = pthread_create(&kernel_thread, NULL,
4818 thread_manage_kernel, (void *) NULL);
4819 if (ret != 0) {
4820 PERROR("pthread_create kernel");
4821 goto exit_kernel;
4822 }
4823
4824 ret = pthread_join(kernel_thread, &status);
4825 if (ret != 0) {
4826 PERROR("pthread_join");
4827 goto error; /* join error, exit without cleanup */
4828 }
4829 }
4830
4831 exit_kernel:
4832 ret = pthread_join(jul_reg_thread, &status);
4833 if (ret != 0) {
4834 PERROR("pthread_join JUL");
4835 goto error; /* join error, exit without cleanup */
4836 }
4837
4838 exit_jul_reg:
4839 ret = pthread_join(apps_notify_thread, &status);
4840 if (ret != 0) {
4841 PERROR("pthread_join apps notify");
4842 goto error; /* join error, exit without cleanup */
4843 }
4844
4845 exit_apps_notify:
4846 ret = pthread_join(apps_thread, &status);
4847 if (ret != 0) {
4848 PERROR("pthread_join apps");
4849 goto error; /* join error, exit without cleanup */
4850 }
4851
4852
4853 exit_apps:
4854 ret = pthread_join(reg_apps_thread, &status);
4855 if (ret != 0) {
4856 PERROR("pthread_join");
4857 goto error; /* join error, exit without cleanup */
4858 }
4859
4860 exit_reg_apps:
4861 ret = pthread_join(dispatch_thread, &status);
4862 if (ret != 0) {
4863 PERROR("pthread_join");
4864 goto error; /* join error, exit without cleanup */
4865 }
4866
4867 exit_dispatch:
4868 ret = pthread_join(client_thread, &status);
4869 if (ret != 0) {
4870 PERROR("pthread_join");
4871 goto error; /* join error, exit without cleanup */
4872 }
4873
4874 ret = join_consumer_thread(&kconsumer_data);
4875 if (ret != 0) {
4876 PERROR("join_consumer");
4877 goto error; /* join error, exit without cleanup */
4878 }
4879
4880 ret = join_consumer_thread(&ustconsumer32_data);
4881 if (ret != 0) {
4882 PERROR("join_consumer ust32");
4883 goto error; /* join error, exit without cleanup */
4884 }
4885
4886 ret = join_consumer_thread(&ustconsumer64_data);
4887 if (ret != 0) {
4888 PERROR("join_consumer ust64");
4889 goto error; /* join error, exit without cleanup */
4890 }
4891
4892 exit_client:
4893 ret = pthread_join(health_thread, &status);
4894 if (ret != 0) {
4895 PERROR("pthread_join health thread");
4896 goto error; /* join error, exit without cleanup */
4897 }
4898
4899 exit_health:
4900 ret = pthread_join(ht_cleanup_thread, &status);
4901 if (ret != 0) {
4902 PERROR("pthread_join ht cleanup thread");
4903 goto error; /* join error, exit without cleanup */
4904 }
4905 exit_ht_cleanup:
4906 health_app_destroy(health_sessiond);
4907 exit_health_sessiond_cleanup:
4908 exit:
4909 /*
4910 * cleanup() is called when no other thread is running.
4911 */
4912 rcu_thread_online();
4913 cleanup();
4914 rcu_thread_offline();
4915 rcu_unregister_thread();
4916 if (!ret) {
4917 exit(EXIT_SUCCESS);
4918 }
4919 error:
4920 exit(EXIT_FAILURE);
4921 }