src/bin/lttng-sessiond/main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <semaphore.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/poll.h>
42 #include <common/compat/socket.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/futex.h>
46 #include <common/relayd/relayd.h>
47 #include <common/utils.h>
48
49 #include "lttng-sessiond.h"
50 #include "channel.h"
51 #include "consumer.h"
52 #include "context.h"
53 #include "event.h"
54 #include "kernel.h"
55 #include "kernel-consumer.h"
56 #include "modprobe.h"
57 #include "shm.h"
58 #include "ust-ctl.h"
59 #include "ust-consumer.h"
60 #include "utils.h"
61 #include "fd-limit.h"
62 #include "filter.h"
63 #include "health.h"
64
65 #define CONSUMERD_FILE "lttng-consumerd"
66
67 /* Const values */
68 const char default_home_dir[] = DEFAULT_HOME_DIR;
69 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
70 const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
71 const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
72
73 const char *progname;
74 const char *opt_tracing_group;
75 static int opt_sig_parent;
76 static int opt_verbose_consumer;
77 static int opt_daemon;
78 static int opt_no_kernel;
79 static int is_root; /* Set to 1 if the daemon is running as root */
80 static pid_t ppid; /* Parent PID for --sig-parent option */
81 static char *rundir;
82
83 /* Consumer daemon specific control data */
84 static struct consumer_data kconsumer_data = {
85 .type = LTTNG_CONSUMER_KERNEL,
86 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
87 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
88 .err_sock = -1,
89 .cmd_sock = -1,
90 };
91 static struct consumer_data ustconsumer64_data = {
92 .type = LTTNG_CONSUMER64_UST,
93 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
94 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
95 .err_sock = -1,
96 .cmd_sock = -1,
97 };
98 static struct consumer_data ustconsumer32_data = {
99 .type = LTTNG_CONSUMER32_UST,
100 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
101 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
102 .err_sock = -1,
103 .cmd_sock = -1,
104 };
105
106 static int dispatch_thread_exit;
107
108 /* Global application Unix socket path */
109 static char apps_unix_sock_path[PATH_MAX];
110 /* Global client Unix socket path */
111 static char client_unix_sock_path[PATH_MAX];
113 /* Global wait shm path for UST */
113 static char wait_shm_path[PATH_MAX];
114 /* Global health check unix path */
115 static char health_unix_sock_path[PATH_MAX];
116
117 /* Sockets and FDs */
118 static int client_sock = -1;
119 static int apps_sock = -1;
120 static int kernel_tracer_fd = -1;
121 static int kernel_poll_pipe[2] = { -1, -1 };
122
123 /*
124 * Quit pipe for all threads. This permits a single cancellation point
125 * for all threads when receiving an event on the pipe.
126 */
127 static int thread_quit_pipe[2] = { -1, -1 };
128
129 /*
130 * This pipe is used to inform the thread managing application communication
131 * that a command is queued and ready to be processed.
132 */
133 static int apps_cmd_pipe[2] = { -1, -1 };
134
135 /* Pthread, Mutexes and Semaphores */
136 static pthread_t apps_thread;
137 static pthread_t reg_apps_thread;
138 static pthread_t client_thread;
139 static pthread_t kernel_thread;
140 static pthread_t dispatch_thread;
141 static pthread_t health_thread;
142
143 /*
144 * UST registration command queue. This queue is tied to a futex and uses an
145 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h.
146 *
147 * The thread_manage_apps and thread_dispatch_ust_registration interact with
148 * this queue and the wait/wake scheme.
149 */
150 static struct ust_cmd_queue ust_cmd_queue;
151
152 /*
153 * Pointer initialized before thread creation.
154 *
155 * This points to the tracing session list containing the session count and a
156 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
157 * MUST NOT be taken if you call a public function in session.c.
158 *
159 * The lock is nested inside the structure: session_list_ptr->lock. Please use
160 * session_lock_list and session_unlock_list for lock acquisition.
161 */
162 static struct ltt_session_list *session_list_ptr;
163
164 int ust_consumerd64_fd = -1;
165 int ust_consumerd32_fd = -1;
166
167 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
168 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
169 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
170 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
171
172 /*
173 * Consumer daemon state which is changed when spawning it, killing it or in
174 * case of a fatal error.
175 */
176 enum consumerd_state {
177 CONSUMER_STARTED = 1,
178 CONSUMER_STOPPED = 2,
179 CONSUMER_ERROR = 3,
180 };
181
182 /*
183 * This consumer daemon state is used to validate if a client command will be
184 * able to reach the consumer. If not, the client is informed. For instance,
185 * doing a "lttng start" when the consumer state is set to ERROR will return an
186 * error to the client.
187 *
188 * The following example shows a possible race condition of this scheme:
189 *
190 * consumer thread error happens
191 * client cmd arrives
192 * client cmd checks state -> still OK
193 * consumer thread exit, sets error
194 * client cmd try to talk to consumer
195 * ...
196 *
197 * However, since the consumer is a different daemon, we have no way of making
198 * sure the command will reach it safely even with this state flag. This is why
199 * we consider that up to the state validation during command processing, the
200 * command is safe. After that, we cannot guarantee the correctness of the
201 * client request vis-a-vis the consumer.
202 */
203 static enum consumerd_state ust_consumerd_state;
204 static enum consumerd_state kernel_consumerd_state;
205
206 /*
207 * Used to keep a unique index for each relayd socket created. This value is
208 * associated with streams on the consumer so the consumer can match the right
209 * relayd to send to.
210 *
211 * This value should be incremented atomically for safety purposes and future
212 * possible concurrent access.
213 */
214 static unsigned int relayd_net_seq_idx;
215
216 /* Used for the health monitoring of the session daemon. See health.h */
217 struct health_state health_thread_cmd;
218 struct health_state health_thread_app_reg;
219 struct health_state health_thread_kernel;
220
221 static
222 void setup_consumerd_path(void)
223 {
224 const char *bin, *libdir;
225
226 /*
227 * Allow INSTALL_BIN_PATH to be used as a target path for the
228 * consumer matching the native architecture bitness if the
229 * CONFIG_CONSUMERD* paths have not been defined.
230 */
231 #if (CAA_BITS_PER_LONG == 32)
232 if (!consumerd32_bin[0]) {
233 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
234 }
235 if (!consumerd32_libdir[0]) {
236 consumerd32_libdir = INSTALL_LIB_PATH;
237 }
238 #elif (CAA_BITS_PER_LONG == 64)
239 if (!consumerd64_bin[0]) {
240 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
241 }
242 if (!consumerd64_libdir[0]) {
243 consumerd64_libdir = INSTALL_LIB_PATH;
244 }
245 #else
246 #error "Unknown bitness"
247 #endif
248
249 /*
250 * Runtime environment variables override the build defaults.
251 */
252 bin = getenv("LTTNG_CONSUMERD32_BIN");
253 if (bin) {
254 consumerd32_bin = bin;
255 }
256 bin = getenv("LTTNG_CONSUMERD64_BIN");
257 if (bin) {
258 consumerd64_bin = bin;
259 }
260 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
261 if (libdir) {
262 consumerd32_libdir = libdir;
263 }
264 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
265 if (libdir) {
266 consumerd64_libdir = libdir;
267 }
268 }
269
270 /*
271 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
272 */
273 static int create_thread_poll_set(struct lttng_poll_event *events,
274 unsigned int size)
275 {
276 int ret;
277
278 if (events == NULL || size == 0) {
279 ret = -1;
280 goto error;
281 }
282
283 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
284 if (ret < 0) {
285 goto error;
286 }
287
288 /* Add quit pipe */
289 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
290 if (ret < 0) {
291 goto error;
292 }
293
294 return 0;
295
296 error:
297 return ret;
298 }
299
300 /*
301 * Check if the thread quit pipe was triggered.
302 *
303 * Return 1 if it was triggered, else 0.
304 */
305 static int check_thread_quit_pipe(int fd, uint32_t events)
306 {
307 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
308 return 1;
309 }
310
311 return 0;
312 }
313
314 /*
315 * Return group ID of the tracing group or -1 if not found.
316 */
317 static gid_t allowed_group(void)
318 {
319 struct group *grp;
320
321 if (opt_tracing_group) {
322 grp = getgrnam(opt_tracing_group);
323 } else {
324 grp = getgrnam(default_tracing_group);
325 }
326 if (!grp) {
327 return -1;
328 } else {
329 return grp->gr_gid;
330 }
331 }
332
333 /*
334 * Init thread quit pipe.
335 *
336 * Return -1 on error or 0 if all pipes are created.
337 */
338 static int init_thread_quit_pipe(void)
339 {
340 int ret, i;
341
342 ret = pipe(thread_quit_pipe);
343 if (ret < 0) {
344 PERROR("thread quit pipe");
345 goto error;
346 }
347
348 for (i = 0; i < 2; i++) {
349 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
350 if (ret < 0) {
351 PERROR("fcntl");
352 goto error;
353 }
354 }
355
356 error:
357 return ret;
358 }
359
360 /*
361 * Complete teardown of a kernel session. This frees all data structures
362 * related to a kernel session and updates the counter.
363 */
364 static void teardown_kernel_session(struct ltt_session *session)
365 {
366 if (!session->kernel_session) {
367 DBG3("No kernel session when tearing down session");
368 return;
369 }
370
371 DBG("Tearing down kernel session");
372
373 /*
374 * If a custom kernel consumer was registered, close the socket before
375 * tearing down the complete kernel session structure
376 */
377 if (kconsumer_data.cmd_sock >= 0 &&
378 session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
379 lttcomm_close_unix_sock(session->kernel_session->consumer_fd);
380 }
381
382 trace_kernel_destroy_session(session->kernel_session);
383 }
384
385 /*
386 * Complete teardown of all UST sessions. This will free everything in its path
387 * and destroy the core essence of all UST sessions :)
388 */
389 static void teardown_ust_session(struct ltt_session *session)
390 {
391 int ret;
392
393 if (!session->ust_session) {
394 DBG3("No UST session when tearing down session");
395 return;
396 }
397
398 DBG("Tearing down UST session(s)");
399
400 ret = ust_app_destroy_trace_all(session->ust_session);
401 if (ret) {
402 ERR("Error in ust_app_destroy_trace_all");
403 }
404
405 trace_ust_destroy_session(session->ust_session);
406 }
407
408 /*
409 * Stop all threads by closing the thread quit pipe.
410 */
411 static void stop_threads(void)
412 {
413 int ret;
414
415 /* Stopping all threads */
416 DBG("Terminating all threads");
417 ret = notify_thread_pipe(thread_quit_pipe[1]);
418 if (ret < 0) {
419 ERR("write error on thread quit pipe");
420 }
421
422 /* Dispatch thread */
423 dispatch_thread_exit = 1;
424 futex_nto1_wake(&ust_cmd_queue.futex);
425 }
426
427 /*
428 * Clean up the daemon.
429 */
430 static void cleanup(void)
431 {
432 int ret;
433 char *cmd;
434 struct ltt_session *sess, *stmp;
435
436 DBG("Cleaning up");
437
438 DBG("Removing %s directory", rundir);
439 ret = asprintf(&cmd, "rm -rf %s", rundir);
440 if (ret < 0) {
441 ERR("asprintf failed. Something is really wrong!");
442 }
443
444 /* Remove lttng run directory */
445 ret = system(cmd);
446 if (ret < 0) {
447 ERR("Unable to clean %s", rundir);
448 }
449 free(cmd);
450
451 DBG("Cleaning up all sessions");
452
453 /* Destroy session list mutex */
454 if (session_list_ptr != NULL) {
455 pthread_mutex_destroy(&session_list_ptr->lock);
456
457 /* Clean up ALL sessions */
458 cds_list_for_each_entry_safe(sess, stmp,
459 &session_list_ptr->head, list) {
460 teardown_kernel_session(sess);
461 teardown_ust_session(sess);
462 free(sess);
463 }
464 }
465
466 DBG("Closing all UST sockets");
467 ust_app_clean_list();
468
469 pthread_mutex_destroy(&kconsumer_data.pid_mutex);
470
471 if (is_root && !opt_no_kernel) {
472 DBG2("Closing kernel fd");
473 if (kernel_tracer_fd >= 0) {
474 ret = close(kernel_tracer_fd);
475 if (ret) {
476 PERROR("close");
477 }
478 }
479 DBG("Unloading kernel modules");
480 modprobe_remove_lttng_all();
481 }
482 utils_close_pipe(kernel_poll_pipe);
483 utils_close_pipe(thread_quit_pipe);
484 utils_close_pipe(apps_cmd_pipe);
485
486 /* <fun> */
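/* Note: 27 is the ASCII ESC character; the %c[%d;%dm patterns below emit ANSI colour escape sequences. */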
487 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
488 "Matthew, BEET driven development works!%c[%dm",
489 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
490 /* </fun> */
491 }
492
493 /*
494 * Send data on a unix socket using the liblttsessiondcomm API.
495 *
496 * Return lttcomm error code.
497 */
498 static int send_unix_sock(int sock, void *buf, size_t len)
499 {
500 /* Check valid length */
501 if (len <= 0) {
502 return -1;
503 }
504
505 return lttcomm_send_unix_sock(sock, buf, len);
506 }
507
508 /*
509 * Free memory of a command context structure.
510 */
511 static void clean_command_ctx(struct command_ctx **cmd_ctx)
512 {
513 DBG("Clean command context structure");
514 if (*cmd_ctx) {
515 if ((*cmd_ctx)->llm) {
516 free((*cmd_ctx)->llm);
517 }
518 if ((*cmd_ctx)->lsm) {
519 free((*cmd_ctx)->lsm);
520 }
521 free(*cmd_ctx);
522 *cmd_ctx = NULL;
523 }
524 }
525
526 /*
527 * Notify UST applications using the shm mmap futex.
528 */
529 static int notify_ust_apps(int active)
530 {
531 char *wait_shm_mmap;
532
533 DBG("Notifying applications of session daemon state: %d", active);
534
535 /* See shm.c for this call implying mmap, shm and futex calls */
536 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
537 if (wait_shm_mmap == NULL) {
538 goto error;
539 }
540
541 /* Wake waiting process */
542 futex_wait_update((int32_t *) wait_shm_mmap, active);
543
544 /* Apps notified successfully */
545 return 0;
546
547 error:
548 return -1;
549 }
550
551 /*
552 * Setup the outgoing data buffer for the response (llm) by allocating the
553 * right amount of memory and copying the original information from the lsm
554 * structure.
555 *
556 * Return the payload buffer size on success or a negative error code.
557 */
558 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
559 {
560 int ret, buf_size;
561
562 buf_size = size;
563
564 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
565 if (cmd_ctx->llm == NULL) {
566 PERROR("zmalloc");
567 ret = -ENOMEM;
568 goto error;
569 }
570
571 /* Copy common data */
572 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
573 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
574
575 cmd_ctx->llm->data_size = size;
576 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
577
578 return buf_size;
579
580 error:
581 return ret;
582 }
583
584 /*
585 * Update the kernel poll set with all channel fds available across all tracing
586 * sessions. Add the wakeup pipe at the end of the set.
587 */
588 static int update_kernel_poll(struct lttng_poll_event *events)
589 {
590 int ret;
591 struct ltt_session *session;
592 struct ltt_kernel_channel *channel;
593
594 DBG("Updating kernel poll set");
595
596 session_lock_list();
597 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
598 session_lock(session);
599 if (session->kernel_session == NULL) {
600 session_unlock(session);
601 continue;
602 }
603
604 cds_list_for_each_entry(channel,
605 &session->kernel_session->channel_list.head, list) {
606 /* Add channel fd to the kernel poll set */
607 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
608 if (ret < 0) {
609 session_unlock(session);
610 goto error;
611 }
612 DBG("Channel fd %d added to kernel set", channel->fd);
613 }
614 session_unlock(session);
615 }
616 session_unlock_list();
617
618 return 0;
619
620 error:
621 session_unlock_list();
622 return -1;
623 }
624
625 /*
626 * Find the channel fd matching 'fd' across all tracing sessions. When found, check
627 * for new channel stream and send those stream fds to the kernel consumer.
628 *
629 * Useful for CPU hotplug feature.
630 */
631 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
632 {
633 int ret = 0;
634 struct ltt_session *session;
635 struct ltt_kernel_channel *channel;
636
637 DBG("Updating kernel streams for channel fd %d", fd);
638
639 session_lock_list();
640 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
641 session_lock(session);
642 if (session->kernel_session == NULL) {
643 session_unlock(session);
644 continue;
645 }
646
647 /* This is not supposed to be -1 but this is an extra security check */
648 if (session->kernel_session->consumer_fd < 0) {
649 session->kernel_session->consumer_fd = consumer_data->cmd_sock;
650 }
651
652 cds_list_for_each_entry(channel,
653 &session->kernel_session->channel_list.head, list) {
654 if (channel->fd == fd) {
655 DBG("Channel found, updating kernel streams");
656 ret = kernel_open_channel_stream(channel);
657 if (ret < 0) {
658 goto error;
659 }
660
661 /*
662 * Have we already sent fds to the consumer? If yes, it means
663 * that tracing is started so it is safe to send our updated
664 * stream fds.
665 */
666 if (session->kernel_session->consumer_fds_sent == 1 &&
667 session->kernel_session->consumer != NULL) {
668 ret = kernel_consumer_send_channel_stream(
669 session->kernel_session->consumer_fd, channel,
670 session->kernel_session);
671 if (ret < 0) {
672 goto error;
673 }
674 }
675 goto error;
676 }
677 }
678 session_unlock(session);
679 }
680 session_unlock_list();
681 return ret;
682
683 error:
684 session_unlock(session);
685 session_unlock_list();
686 return ret;
687 }
688
689 /*
690 * For each tracing session, update newly registered apps.
691 */
692 static void update_ust_app(int app_sock)
693 {
694 struct ltt_session *sess, *stmp;
695
696 session_lock_list();
697
698 /* For all tracing session(s) */
699 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
700 session_lock(sess);
701 if (sess->ust_session) {
702 ust_app_global_update(sess->ust_session, app_sock);
703 }
704 session_unlock(sess);
705 }
706
707 session_unlock_list();
708 }
709
710 /*
711 * This thread manages events coming from the kernel.
712 *
713 * Features supported in this thread:
714 * -) CPU Hotplug
715 */
716 static void *thread_manage_kernel(void *data)
717 {
718 int ret, i, pollfd, update_poll_flag = 1;
719 uint32_t revents, nb_fd;
720 char tmp;
721 struct lttng_poll_event events;
722
723 DBG("Thread manage kernel started");
724
725 health_code_update(&health_thread_kernel);
726
727 ret = create_thread_poll_set(&events, 2);
728 if (ret < 0) {
729 goto error_poll_create;
730 }
731
732 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
733 if (ret < 0) {
734 goto error;
735 }
736
737 while (1) {
738 health_code_update(&health_thread_kernel);
739
740 if (update_poll_flag == 1) {
741 /*
742 * Reset number of fd in the poll set. Always 2 since there is the thread
743 * quit pipe and the kernel pipe.
744 */
745 events.nb_fd = 2;
746
747 ret = update_kernel_poll(&events);
748 if (ret < 0) {
749 goto error;
750 }
751 update_poll_flag = 0;
752 }
753
754 nb_fd = LTTNG_POLL_GETNB(&events);
755
756 DBG("Thread kernel polling on %d fds", nb_fd);
757
758 /* Zero the poll events */
759 lttng_poll_reset(&events);
760
761 /* Poll with an infinite timeout */
762 restart:
763 health_poll_update(&health_thread_kernel);
764 ret = lttng_poll_wait(&events, -1);
765 health_poll_update(&health_thread_kernel);
766 if (ret < 0) {
767 /*
768 * Restart interrupted system call.
769 */
770 if (errno == EINTR) {
771 goto restart;
772 }
773 goto error;
774 } else if (ret == 0) {
775 /* Should not happen since timeout is infinite */
776 ERR("Return value of poll is 0 with an infinite timeout.\n"
777 "This should not have happened! Continuing...");
778 continue;
779 }
780
781 for (i = 0; i < nb_fd; i++) {
782 /* Fetch once the poll data */
783 revents = LTTNG_POLL_GETEV(&events, i);
784 pollfd = LTTNG_POLL_GETFD(&events, i);
785
786 health_code_update(&health_thread_kernel);
787
788 /* Thread quit pipe has been closed. Killing thread. */
789 ret = check_thread_quit_pipe(pollfd, revents);
790 if (ret) {
791 goto error;
792 }
793
794 /* Check for data on kernel pipe */
795 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
796 ret = read(kernel_poll_pipe[0], &tmp, 1);
797 update_poll_flag = 1;
798 continue;
799 } else {
800 /*
801 * New CPU detected by the kernel. Adding kernel stream to
802 * kernel session and updating the kernel consumer
803 */
804 if (revents & LPOLLIN) {
805 ret = update_kernel_stream(&kconsumer_data, pollfd);
806 if (ret < 0) {
807 continue;
808 }
809 break;
810 /*
811 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
812 * and unregister kernel stream at this point.
813 */
814 }
815 }
816 }
817 }
818
819 error:
820 lttng_poll_clean(&events);
821 error_poll_create:
822 health_reset(&health_thread_kernel);
823 DBG("Kernel thread dying");
824 return NULL;
825 }
826
827 /*
828 * This thread manages consumer errors sent back to the session daemon.
829 */
830 static void *thread_manage_consumer(void *data)
831 {
832 int sock = -1, i, ret, pollfd;
833 uint32_t revents, nb_fd;
834 enum lttcomm_return_code code;
835 struct lttng_poll_event events;
836 struct consumer_data *consumer_data = data;
837
838 DBG("[thread] Manage consumer started");
839
840 health_code_update(&consumer_data->health);
841
842 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
843 if (ret < 0) {
844 goto error_listen;
845 }
846
847 /*
848 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
849 * Nothing more will be added to this poll set.
850 */
851 ret = create_thread_poll_set(&events, 2);
852 if (ret < 0) {
853 goto error_poll;
854 }
855
856 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
857 if (ret < 0) {
858 goto error;
859 }
860
861 nb_fd = LTTNG_POLL_GETNB(&events);
862
863 health_code_update(&consumer_data->health);
864
865 /* Infinite blocking call, waiting for the consumerd to connect on the error socket */
866 restart:
867 health_poll_update(&consumer_data->health);
868 ret = lttng_poll_wait(&events, -1);
869 health_poll_update(&consumer_data->health);
870 if (ret < 0) {
871 /*
872 * Restart interrupted system call.
873 */
874 if (errno == EINTR) {
875 goto restart;
876 }
877 goto error;
878 }
879
880 for (i = 0; i < nb_fd; i++) {
881 /* Fetch once the poll data */
882 revents = LTTNG_POLL_GETEV(&events, i);
883 pollfd = LTTNG_POLL_GETFD(&events, i);
884
885 health_code_update(&consumer_data->health);
886
887 /* Thread quit pipe has been closed. Killing thread. */
888 ret = check_thread_quit_pipe(pollfd, revents);
889 if (ret) {
890 goto error;
891 }
892
893 /* Event on the registration socket */
894 if (pollfd == consumer_data->err_sock) {
895 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
896 ERR("consumer err socket poll error");
897 goto error;
898 }
899 }
900 }
901
902 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
903 if (sock < 0) {
904 goto error;
905 }
906
907 health_code_update(&consumer_data->health);
908
909 DBG2("Receiving code from consumer err_sock");
910
911 /* Getting status code from kconsumerd */
912 ret = lttcomm_recv_unix_sock(sock, &code,
913 sizeof(enum lttcomm_return_code));
914 if (ret <= 0) {
915 goto error;
916 }
917
918 health_code_update(&consumer_data->health);
919
920 if (code == CONSUMERD_COMMAND_SOCK_READY) {
921 consumer_data->cmd_sock =
922 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
923 if (consumer_data->cmd_sock < 0) {
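/* Wake the spawning thread even on failure so it does not block until its timeout. */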
924 sem_post(&consumer_data->sem);
925 PERROR("consumer connect");
926 goto error;
927 }
928 /* Signal condition to tell that the kconsumerd is ready */
929 sem_post(&consumer_data->sem);
930 DBG("consumer command socket ready");
931 } else {
932 ERR("consumer error when waiting for SOCK_READY : %s",
933 lttcomm_get_readable_code(-code));
934 goto error;
935 }
936
937 /* Remove the kconsumerd error sock since we've established a connection */
938 ret = lttng_poll_del(&events, consumer_data->err_sock);
939 if (ret < 0) {
940 goto error;
941 }
942
943 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
944 if (ret < 0) {
945 goto error;
946 }
947
948 health_code_update(&consumer_data->health);
949
950 /* Update number of fd */
951 nb_fd = LTTNG_POLL_GETNB(&events);
952
953 /* Infinite blocking call, waiting for transmission */
954 restart_poll:
955 health_poll_update(&consumer_data->health);
956 ret = lttng_poll_wait(&events, -1);
957 health_poll_update(&consumer_data->health);
958 if (ret < 0) {
959 /*
960 * Restart interrupted system call.
961 */
962 if (errno == EINTR) {
963 goto restart_poll;
964 }
965 goto error;
966 }
967
968 for (i = 0; i < nb_fd; i++) {
969 /* Fetch once the poll data */
970 revents = LTTNG_POLL_GETEV(&events, i);
971 pollfd = LTTNG_POLL_GETFD(&events, i);
972
973 health_code_update(&consumer_data->health);
974
975 /* Thread quit pipe has been closed. Killing thread. */
976 ret = check_thread_quit_pipe(pollfd, revents);
977 if (ret) {
978 goto error;
979 }
980
981 /* Event on the kconsumerd socket */
982 if (pollfd == sock) {
983 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
984 ERR("consumer err socket second poll error");
985 goto error;
986 }
987 }
988 }
989
990 health_code_update(&consumer_data->health);
991
992 /* Wait for any kconsumerd error */
993 ret = lttcomm_recv_unix_sock(sock, &code,
994 sizeof(enum lttcomm_return_code));
995 if (ret <= 0) {
996 ERR("consumer closed the command socket");
997 goto error;
998 }
999
1000 ERR("consumer return code : %s", lttcomm_get_readable_code(-code));
1001
1002 error:
1003 /* Immediately set the consumerd state to stopped */
1004 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1005 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1006 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1007 consumer_data->type == LTTNG_CONSUMER32_UST) {
1008 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1009 } else {
1010 /* Code flow error... */
1011 assert(0);
1012 }
1013
1014 if (consumer_data->err_sock >= 0) {
1015 ret = close(consumer_data->err_sock);
1016 if (ret) {
1017 PERROR("close");
1018 }
1019 }
1020 if (consumer_data->cmd_sock >= 0) {
1021 ret = close(consumer_data->cmd_sock);
1022 if (ret) {
1023 PERROR("close");
1024 }
1025 }
1026 if (sock >= 0) {
1027 ret = close(sock);
1028 if (ret) {
1029 PERROR("close");
1030 }
1031 }
1032
1033 unlink(consumer_data->err_unix_sock_path);
1034 unlink(consumer_data->cmd_unix_sock_path);
1035 consumer_data->pid = 0;
1036
1037 lttng_poll_clean(&events);
1038 error_poll:
1039 error_listen:
1040 health_reset(&consumer_data->health);
1041 DBG("consumer thread cleanup completed");
1042
1043 return NULL;
1044 }
1045
1046 /*
1047 * This thread manages application communication.
1048 */
1049 static void *thread_manage_apps(void *data)
1050 {
1051 int i, ret, pollfd;
1052 uint32_t revents, nb_fd;
1053 struct ust_command ust_cmd;
1054 struct lttng_poll_event events;
1055
1056 DBG("[thread] Manage application started");
1057
1058 rcu_register_thread();
1059 rcu_thread_online();
1060
1061 health_code_update(&health_thread_app_reg);
1062
1063 ret = create_thread_poll_set(&events, 2);
1064 if (ret < 0) {
1065 goto error_poll_create;
1066 }
1067
1068 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1069 if (ret < 0) {
1070 goto error;
1071 }
1072
1073 health_code_update(&health_thread_app_reg);
1074
1075 while (1) {
1076 /* Zero the events structure */
1077 lttng_poll_reset(&events);
1078
1079 nb_fd = LTTNG_POLL_GETNB(&events);
1080
1081 DBG("Apps thread polling on %d fds", nb_fd);
1082
1083 /* Infinite blocking call, waiting for transmission */
1084 restart:
1085 health_poll_update(&health_thread_app_reg);
1086 ret = lttng_poll_wait(&events, -1);
1087 health_poll_update(&health_thread_app_reg);
1088 if (ret < 0) {
1089 /*
1090 * Restart interrupted system call.
1091 */
1092 if (errno == EINTR) {
1093 goto restart;
1094 }
1095 goto error;
1096 }
1097
1098 for (i = 0; i < nb_fd; i++) {
1099 /* Fetch once the poll data */
1100 revents = LTTNG_POLL_GETEV(&events, i);
1101 pollfd = LTTNG_POLL_GETFD(&events, i);
1102
1103 health_code_update(&health_thread_app_reg);
1104
1105 /* Thread quit pipe has been closed. Killing thread. */
1106 ret = check_thread_quit_pipe(pollfd, revents);
1107 if (ret) {
1108 goto error;
1109 }
1110
1111 /* Inspect the apps cmd pipe */
1112 if (pollfd == apps_cmd_pipe[0]) {
1113 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1114 ERR("Apps command pipe error");
1115 goto error;
1116 } else if (revents & LPOLLIN) {
1117 /* Empty pipe */
1118 ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
1119 if (ret < 0 || ret < sizeof(ust_cmd)) {
1120 PERROR("read apps cmd pipe");
1121 goto error;
1122 }
1123
1124 health_code_update(&health_thread_app_reg);
1125
1126 /* Register the application to the session daemon */
1127 ret = ust_app_register(&ust_cmd.reg_msg,
1128 ust_cmd.sock);
1129 if (ret == -ENOMEM) {
1130 goto error;
1131 } else if (ret < 0) {
1132 break;
1133 }
1134
1135 health_code_update(&health_thread_app_reg);
1136
1137 /*
1138 * Validate UST version compatibility.
1139 */
1140 ret = ust_app_validate_version(ust_cmd.sock);
1141 if (ret >= 0) {
1142 /*
1143 * Add channel(s) and event(s) to newly registered apps
1144 * from lttng global UST domain.
1145 */
1146 update_ust_app(ust_cmd.sock);
1147 }
1148
1149 health_code_update(&health_thread_app_reg);
1150
1151 ret = ust_app_register_done(ust_cmd.sock);
1152 if (ret < 0) {
1153 /*
1154 * If the registration is not possible, we simply
1155 * unregister the apps and continue
1156 */
1157 ust_app_unregister(ust_cmd.sock);
1158 } else {
1159 /*
1160 * Here we only need to monitor the close of the UST
1161 * socket, which the poll set does by default.
1162 * Listen on POLLIN (even if we never expect any
1163 * data) to ensure that a hangup wakes us.
1164 */
1165 ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);
1166 if (ret < 0) {
1167 goto error;
1168 }
1169
1170 DBG("Apps with sock %d added to poll set",
1171 ust_cmd.sock);
1172 }
1173
1174 health_code_update(&health_thread_app_reg);
1175
1176 break;
1177 }
1178 } else {
1179 /*
1180 * At this point, we know that a registered application
1181 * triggered the event at poll_wait.
1182 */
1183 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1184 /* Removing from the poll set */
1185 ret = lttng_poll_del(&events, pollfd);
1186 if (ret < 0) {
1187 goto error;
1188 }
1189
1190 /* Socket closed on remote end. */
1191 ust_app_unregister(pollfd);
1192 break;
1193 }
1194 }
1195
1196 health_code_update(&health_thread_app_reg);
1197 }
1198 }
1199
1200 error:
1201 lttng_poll_clean(&events);
1202 error_poll_create:
1203 health_reset(&health_thread_app_reg);
1204 DBG("Application communication apps thread cleanup complete");
1205 rcu_thread_offline();
1206 rcu_unregister_thread();
1207 return NULL;
1208 }
1209
1210 /*
1211 * Dispatch requests from the registration thread to the application
1212 * communication thread.
1213 */
1214 static void *thread_dispatch_ust_registration(void *data)
1215 {
1216 int ret;
1217 struct cds_wfq_node *node;
1218 struct ust_command *ust_cmd = NULL;
1219
1220 DBG("[thread] Dispatch UST command started");
1221
1222 while (!dispatch_thread_exit) {
1223 /* Atomically prepare the queue futex */
1224 futex_nto1_prepare(&ust_cmd_queue.futex);
1225
1226 do {
1227 /* Dequeue command for registration */
1228 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1229 if (node == NULL) {
1230 DBG("Woken up but nothing in the UST command queue");
1231 /* Continue thread execution */
1232 break;
1233 }
1234
1235 ust_cmd = caa_container_of(node, struct ust_command, node);
1236
1237 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1238 " gid:%d sock:%d name:%s (version %d.%d)",
1239 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1240 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1241 ust_cmd->sock, ust_cmd->reg_msg.name,
1242 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1243 /*
1244 * Inform apps thread of the new application registration. This
1245 * call is blocking so we can be assured that the data will be read
1246 * at some point in time or wait until the end of the world :)
1247 */
1248 ret = write(apps_cmd_pipe[1], ust_cmd,
1249 sizeof(struct ust_command));
1250 if (ret < 0) {
1251 PERROR("write apps cmd pipe");
1252 if (errno == EBADF) {
1253 /*
1254 * We can't inform the application thread to process
1255 * registration. We will exit or else application
1256 * registration will not occur and tracing will never
1257 * start.
1258 */
1259 goto error;
1260 }
1261 }
1262 free(ust_cmd);
1263 } while (node != NULL);
1264
1265 /* Futex wait on queue. Blocking call on futex() */
1266 futex_nto1_wait(&ust_cmd_queue.futex);
1267 }
1268
1269 error:
1270 DBG("Dispatch thread dying");
1271 return NULL;
1272 }
1273
1274 /*
1275 * This thread manages application registration.
1276 */
1277 static void *thread_registration_apps(void *data)
1278 {
1279 int sock = -1, i, ret, pollfd;
1280 uint32_t revents, nb_fd;
1281 struct lttng_poll_event events;
1282 /*
1283 * Allocated in this thread, enqueued to a global queue, dequeued and
1284 * freed in the manage apps thread.
1285 */
1286 struct ust_command *ust_cmd = NULL;
1287
1288 DBG("[thread] Manage application registration started");
1289
1290 ret = lttcomm_listen_unix_sock(apps_sock);
1291 if (ret < 0) {
1292 goto error_listen;
1293 }
1294
1295 /*
1296 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1297 * more will be added to this poll set.
1298 */
1299 ret = create_thread_poll_set(&events, 2);
1300 if (ret < 0) {
1301 goto error_create_poll;
1302 }
1303
1304 /* Add the application registration socket */
1305 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1306 if (ret < 0) {
1307 goto error_poll_add;
1308 }
1309
1310 /* Notify all applications to register */
1311 ret = notify_ust_apps(1);
1312 if (ret < 0) {
1313 ERR("Failed to notify applications or create the wait shared memory.\n"
1314 "Execution continues but there might be a problem for already\n"
1315 "running applications that wish to register.");
1316 }
1317
1318 while (1) {
1319 DBG("Accepting application registration");
1320
1321 nb_fd = LTTNG_POLL_GETNB(&events);
1322
1323 /* Infinite blocking call, waiting for transmission */
1324 restart:
1325 ret = lttng_poll_wait(&events, -1);
1326 if (ret < 0) {
1327 /*
1328 * Restart interrupted system call.
1329 */
1330 if (errno == EINTR) {
1331 goto restart;
1332 }
1333 goto error;
1334 }
1335
1336 for (i = 0; i < nb_fd; i++) {
1337 /* Fetch once the poll data */
1338 revents = LTTNG_POLL_GETEV(&events, i);
1339 pollfd = LTTNG_POLL_GETFD(&events, i);
1340
1341 /* Thread quit pipe has been closed. Killing thread. */
1342 ret = check_thread_quit_pipe(pollfd, revents);
1343 if (ret) {
1344 goto error;
1345 }
1346
1347 /* Event on the registration socket */
1348 if (pollfd == apps_sock) {
1349 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1350 ERR("Register apps socket poll error");
1351 goto error;
1352 } else if (revents & LPOLLIN) {
1353 sock = lttcomm_accept_unix_sock(apps_sock);
1354 if (sock < 0) {
1355 goto error;
1356 }
1357
1358 /* Create UST registration command for enqueuing */
1359 ust_cmd = zmalloc(sizeof(struct ust_command));
1360 if (ust_cmd == NULL) {
1361 PERROR("ust command zmalloc");
1362 goto error;
1363 }
1364
1365 /*
1366 * Using message-based transmissions to ensure we don't
1367 * have to deal with partially received messages.
1368 */
1369 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1370 if (ret < 0) {
1371 ERR("Exhausted file descriptors allowed for applications.");
1372 free(ust_cmd);
1373 ret = close(sock);
1374 if (ret) {
1375 PERROR("close");
1376 }
1377 sock = -1;
1378 continue;
1379 }
1380 ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
1381 sizeof(struct ust_register_msg));
1382 if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
1383 if (ret < 0) {
1384 PERROR("lttcomm_recv_unix_sock register apps");
1385 } else {
1386 ERR("Wrong size received on apps register");
1387 }
1388 free(ust_cmd);
1389 ret = close(sock);
1390 if (ret) {
1391 PERROR("close");
1392 }
1393 lttng_fd_put(LTTNG_FD_APPS, 1);
1394 sock = -1;
1395 continue;
1396 }
1397
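/* Transfer ownership of the fd to ust_cmd; clear sock so the error path will not close it. */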
1398 ust_cmd->sock = sock;
1399 sock = -1;
1400
1401 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1402 " gid:%d sock:%d name:%s (version %d.%d)",
1403 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1404 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1405 ust_cmd->sock, ust_cmd->reg_msg.name,
1406 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1407
1408 /*
1409 * Lock-free enqueue of the registration request. The red pill
1410 * has been taken! This app will be part of the *system*.
1411 */
1412 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1413
1414 /*
1415 * Wake the registration queue futex. Implicit memory
1416 * barrier with the exchange in cds_wfq_enqueue.
1417 */
1418 futex_nto1_wake(&ust_cmd_queue.futex);
1419 }
1420 }
1421 }
1422 }
1423
1424 error:
1425 /* Notify that the registration thread is gone */
1426 notify_ust_apps(0);
1427
1428 if (apps_sock >= 0) {
1429 ret = close(apps_sock);
1430 if (ret) {
1431 PERROR("close");
1432 }
1433 }
1434 if (sock >= 0) {
1435 ret = close(sock);
1436 if (ret) {
1437 PERROR("close");
1438 }
1439 lttng_fd_put(LTTNG_FD_APPS, 1);
1440 }
1441 unlink(apps_unix_sock_path);
1442
1443 error_poll_add:
1444 lttng_poll_clean(&events);
1445 error_listen:
1446 error_create_poll:
1447 DBG("UST Registration thread cleanup complete");
1448
1449 return NULL;
1450 }
1451
1452 /*
1453 * Start the thread_manage_consumer. This must be done after the lttng-consumerd
1454 * exec or it will fail.
1455 */
1456 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1457 {
1458 int ret;
1459 struct timespec timeout;
1460
1461 timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
1462 timeout.tv_nsec = 0;
1463
1464 /* Setup semaphore */
1465 ret = sem_init(&consumer_data->sem, 0, 0);
1466 if (ret < 0) {
1467 PERROR("sem_init consumer semaphore");
1468 goto error;
1469 }
1470
1471 ret = pthread_create(&consumer_data->thread, NULL,
1472 thread_manage_consumer, consumer_data);
1473 if (ret != 0) {
1474 PERROR("pthread_create consumer");
1475 ret = -1;
1476 goto error;
1477 }
1478
1479 /* Get time for sem_timedwait absolute timeout */
1480 ret = clock_gettime(CLOCK_REALTIME, &timeout);
1481 if (ret < 0) {
1482 PERROR("clock_gettime spawn consumer");
1483 /* Infinite wait for the kconsumerd thread to be ready */
1484 ret = sem_wait(&consumer_data->sem);
1485 } else {
1486 /* Normal timeout if the gettime was successful */
1487 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1488 ret = sem_timedwait(&consumer_data->sem, &timeout);
1489 }
1490
1491 if (ret < 0) {
1492 if (errno == ETIMEDOUT) {
1493 /*
1494 * Call has timed out so we kill the kconsumerd_thread and return
1495 * an error.
1496 */
1497 ERR("The consumer thread was never ready. Killing it");
1498 ret = pthread_cancel(consumer_data->thread);
1499 if (ret < 0) {
1500 PERROR("pthread_cancel consumer thread");
1501 }
1502 } else {
1503 PERROR("semaphore wait failed consumer thread");
1504 }
1505 goto error;
1506 }
1507
1508 pthread_mutex_lock(&consumer_data->pid_mutex);
1509 if (consumer_data->pid == 0) {
1510 ERR("Kconsumerd did not start");
1511 pthread_mutex_unlock(&consumer_data->pid_mutex);
1512 goto error;
1513 }
1514 pthread_mutex_unlock(&consumer_data->pid_mutex);
1515
1516 return 0;
1517
1518 error:
1519 return ret;
1520 }
1521
1522 /*
1523 * Join consumer thread
1524 */
1525 static int join_consumer_thread(struct consumer_data *consumer_data)
1526 {
1527 void *status;
1528 int ret;
1529
1530 if (consumer_data->pid != 0) {
1531 ret = kill(consumer_data->pid, SIGTERM);
1532 if (ret) {
1533 ERR("Error killing consumer daemon");
1534 return ret;
1535 }
1536 return pthread_join(consumer_data->thread, &status);
1537 } else {
1538 return 0;
1539 }
1540 }
1541
1542 /*
1543 * Fork and exec a consumer daemon (consumerd).
1544 *
1545 * Return pid if successful else -1.
1546 */
1547 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
1548 {
1549 int ret;
1550 pid_t pid;
1551 const char *consumer_to_use;
1552 const char *verbosity;
1553 struct stat st;
1554
1555 DBG("Spawning consumerd");
1556
1557 pid = fork();
1558 if (pid == 0) {
1559 /*
1560 * Exec consumerd.
1561 */
1562 if (opt_verbose_consumer) {
1563 verbosity = "--verbose";
1564 } else {
1565 verbosity = "--quiet";
1566 }
1567 switch (consumer_data->type) {
1568 case LTTNG_CONSUMER_KERNEL:
1569 /*
1570 * Find out which consumerd to execute. We will first try the
1571 * 64-bit path, then the sessiond's installation directory, and
1572 * fall back to the 32-bit one.
1573 */
1574 DBG3("Looking for a kernel consumer at these locations:");
1575 DBG3(" 1) %s", consumerd64_bin);
1576 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
1577 DBG3(" 3) %s", consumerd32_bin);
1578 if (stat(consumerd64_bin, &st) == 0) {
1579 DBG3("Found location #1");
1580 consumer_to_use = consumerd64_bin;
1581 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
1582 DBG3("Found location #2");
1583 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
1584 } else if (stat(consumerd32_bin, &st) == 0) {
1585 DBG3("Found location #3");
1586 consumer_to_use = consumerd32_bin;
1587 } else {
1588 DBG("Could not find any valid consumerd executable");
1589 break;
1590 }
1591 DBG("Using kernel consumer at: %s", consumer_to_use);
1592 execl(consumer_to_use,
1593 "lttng-consumerd", verbosity, "-k",
1594 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1595 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1596 NULL);
1597 break;
1598 case LTTNG_CONSUMER64_UST:
1599 {
1600 char *tmpnew = NULL;
1601
1602 if (consumerd64_libdir[0] != '\0') {
1603 char *tmp;
1604 size_t tmplen;
1605
1606 tmp = getenv("LD_LIBRARY_PATH");
1607 if (!tmp) {
1608 tmp = "";
1609 }
1610 tmplen = strlen("LD_LIBRARY_PATH=")
1611 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
1612 tmpnew = zmalloc(tmplen + 1 /* \0 */);
1613 if (!tmpnew) {
1614 ret = -ENOMEM;
1615 goto error;
1616 }
1617 strcpy(tmpnew, "LD_LIBRARY_PATH=");
1618 strcat(tmpnew, consumerd64_libdir);
1619 if (tmp[0] != '\0') {
1620 strcat(tmpnew, ":");
1621 strcat(tmpnew, tmp);
1622 }
1623 ret = putenv(tmpnew);
1624 if (ret) {
1625 ret = -errno;
1626 goto error;
1627 }
1628 }
1629 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
1630 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
1631 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1632 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1633 NULL);
1634 if (consumerd64_libdir[0] != '\0') {
1635 free(tmpnew);
1636 }
1637 if (ret) {
1638 goto error;
1639 }
1640 break;
1641 }
1642 case LTTNG_CONSUMER32_UST:
1643 {
1644 char *tmpnew = NULL;
1645
1646 if (consumerd32_libdir[0] != '\0') {
1647 char *tmp;
1648 size_t tmplen;
1649
1650 tmp = getenv("LD_LIBRARY_PATH");
1651 if (!tmp) {
1652 tmp = "";
1653 }
1654 tmplen = strlen("LD_LIBRARY_PATH=")
1655 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
1656 tmpnew = zmalloc(tmplen + 1 /* \0 */);
1657 if (!tmpnew) {
1658 ret = -ENOMEM;
1659 goto error;
1660 }
1661 strcpy(tmpnew, "LD_LIBRARY_PATH=");
1662 strcat(tmpnew, consumerd32_libdir);
1663 if (tmp[0] != '\0') {
1664 strcat(tmpnew, ":");
1665 strcat(tmpnew, tmp);
1666 }
1667 ret = putenv(tmpnew);
1668 if (ret) {
1669 ret = -errno;
1670 goto error;
1671 }
1672 }
1673 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
1674 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
1675 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1676 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1677 NULL);
1678 if (consumerd32_libdir[0] != '\0') {
1679 free(tmpnew);
1680 }
1681 if (ret) {
1682 goto error;
1683 }
1684 break;
1685 }
1686 default:
1687 PERROR("unknown consumer type");
1688 exit(EXIT_FAILURE);
1689 }
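/* execl() only returns on failure; this point is also reached when no suitable consumerd binary was found. */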
1690 if (errno != 0) {
1691 PERROR("kernel start consumer exec");
1692 }
1693 exit(EXIT_FAILURE);
1694 } else if (pid > 0) {
1695 ret = pid;
1696 } else {
1697 PERROR("start consumer fork");
1698 ret = -errno;
1699 }
1700 error:
1701 return ret;
1702 }
1703
1704 /*
1705 * Spawn the consumerd daemon and session daemon thread.
1706 */
1707 static int start_consumerd(struct consumer_data *consumer_data)
1708 {
1709 int ret;
1710
1711 pthread_mutex_lock(&consumer_data->pid_mutex);
1712 if (consumer_data->pid != 0) {
1713 pthread_mutex_unlock(&consumer_data->pid_mutex);
1714 goto end;
1715 }
1716
1717 ret = spawn_consumerd(consumer_data);
1718 if (ret < 0) {
1719 ERR("Spawning consumerd failed");
1720 pthread_mutex_unlock(&consumer_data->pid_mutex);
1721 goto error;
1722 }
1723
1724 /* Setting up the consumer_data pid */
1725 consumer_data->pid = ret;
1726 DBG2("Consumer pid %d", consumer_data->pid);
1727 pthread_mutex_unlock(&consumer_data->pid_mutex);
1728
1729 DBG2("Spawning consumer control thread");
1730 ret = spawn_consumer_thread(consumer_data);
1731 if (ret < 0) {
1732 ERR("Fatal error spawning consumer control thread");
1733 goto error;
1734 }
1735
1736 end:
1737 return 0;
1738
1739 error:
1740 return ret;
1741 }
1742
1743 /*
1744 * Compute health status of each consumer.
1745 */
1746 static int check_consumer_health(void)
1747 {
1748 int ret;
1749
1750 ret =
1751 health_check_state(&kconsumer_data.health) &
1752 health_check_state(&ustconsumer32_data.health) &
1753 health_check_state(&ustconsumer64_data.health);
1754
1755 DBG3("Health consumer check %d", ret);
1756
1757 return ret;
1758 }
1759
1760 /*
1761 * Check version of the lttng-modules.
1762 */
1763 static int validate_lttng_modules_version(void)
1764 {
1765 return kernel_validate_version(kernel_tracer_fd);
1766 }
1767
1768 /*
1769 * Setup necessary data for kernel tracer action.
1770 */
1771 static int init_kernel_tracer(void)
1772 {
1773 int ret;
1774
1775 /* Modprobe lttng kernel modules */
1776 ret = modprobe_lttng_control();
1777 if (ret < 0) {
1778 goto error;
1779 }
1780
1781 /* Open debugfs lttng */
1782 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
1783 if (kernel_tracer_fd < 0) {
1784 DBG("Failed to open %s", module_proc_lttng);
1785 ret = -1;
1786 goto error_open;
1787 }
1788
1789 /* Validate kernel version */
1790 ret = validate_lttng_modules_version();
1791 if (ret < 0) {
1792 goto error_version;
1793 }
1794
1795 ret = modprobe_lttng_data();
1796 if (ret < 0) {
1797 goto error_modules;
1798 }
1799
1800 DBG("Kernel tracer fd %d", kernel_tracer_fd);
1801 return 0;
1802
1803 error_version:
1804 modprobe_remove_lttng_control();
1805 ret = close(kernel_tracer_fd);
1806 if (ret) {
1807 PERROR("close");
1808 }
1809 kernel_tracer_fd = -1;
1810 return LTTCOMM_KERN_VERSION;
1811
1812 error_modules:
1813 ret = close(kernel_tracer_fd);
1814 if (ret) {
1815 PERROR("close");
1816 }
1817
1818 error_open:
1819 modprobe_remove_lttng_control();
1820
1821 error:
1822 WARN("No kernel tracer available");
1823 kernel_tracer_fd = -1;
1824 if (!is_root) {
1825 return LTTCOMM_NEED_ROOT_SESSIOND;
1826 } else {
1827 return LTTCOMM_KERN_NA;
1828 }
1829 }
1830
1831 /*
1832 * Init tracing by creating the trace directory and sending fds to the kernel consumer.
1833 */
1834 static int init_kernel_tracing(struct ltt_kernel_session *session)
1835 {
1836 int ret = 0;
1837
1838 if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
1839 /*
1840 * Assign default kernel consumer socket if no consumer assigned to the
1841 * kernel session. At this point, it's NOT supposed to be -1 but this is
1842 * an extra security check.
1843 */
1844 if (session->consumer_fd < 0) {
1845 session->consumer_fd = kconsumer_data.cmd_sock;
1846 }
1847
1848 ret = kernel_consumer_send_session(session->consumer_fd, session);
1849 if (ret < 0) {
1850 ret = LTTCOMM_KERN_CONSUMER_FAIL;
1851 goto error;
1852 }
1853 }
1854
1855 error:
1856 return ret;
1857 }
1858
1859 /*
1860 * Create a socket to the relayd using the URI.
1861 *
1862 * On success, the relayd_sock pointer is set to the created socket.
1863 * Else, it is untouched and an lttcomm error code is returned.
1864 */
1865 static int create_connect_relayd(struct consumer_output *output,
1866 const char *session_name, struct lttng_uri *uri,
1867 struct lttcomm_sock **relayd_sock)
1868 {
1869 int ret;
1870 struct lttcomm_sock *sock;
1871
1872 /* Create socket object from URI */
1873 sock = lttcomm_alloc_sock_from_uri(uri);
1874 if (sock == NULL) {
1875 ret = LTTCOMM_FATAL;
1876 goto error;
1877 }
1878
1879 ret = lttcomm_create_sock(sock);
1880 if (ret < 0) {
1881 ret = LTTCOMM_FATAL;
1882 goto error;
1883 }
1884
1885 /* Connect to relayd so we can proceed with a session creation. */
1886 ret = relayd_connect(sock);
1887 if (ret < 0) {
1888 ERR("Unable to reach lttng-relayd");
1889 ret = LTTCOMM_RELAYD_SESSION_FAIL;
1890 goto free_sock;
1891 }
1892
1893 /* Create socket for control stream. */
1894 if (uri->stype == LTTNG_STREAM_CONTROL) {
1895 DBG3("Creating relayd stream socket from URI");
1896
1897 /* Check relayd version */
1898 ret = relayd_version_check(sock, LTTNG_UST_COMM_MAJOR, 0);
1899 if (ret < 0) {
1900 ret = LTTCOMM_RELAYD_VERSION_FAIL;
1901 goto close_sock;
1902 }
1903 } else if (uri->stype == LTTNG_STREAM_DATA) {
1904 DBG3("Creating relayd data socket from URI");
1905 } else {
1906 /* Command is not valid */
1907 ERR("Relayd invalid stream type: %d", uri->stype);
1908 ret = LTTCOMM_INVALID;
1909 goto close_sock;
1910 }
1911
1912 *relayd_sock = sock;
1913
1914 return LTTCOMM_OK;
1915
1916 close_sock:
1917 if (sock) {
1918 (void) relayd_close(sock);
1919 }
1920 free_sock:
1921 if (sock) {
1922 lttcomm_destroy_sock(sock);
1923 }
1924 error:
1925 return ret;
1926 }
1927
1928 /*
1929 * Connect to the relayd using URI and send the socket to the right consumer.
1930 */
1931 static int send_socket_relayd_consumer(int domain, struct ltt_session *session,
1932 struct lttng_uri *relayd_uri, struct consumer_output *consumer,
1933 int consumer_fd)
1934 {
1935 int ret;
1936 struct lttcomm_sock *sock = NULL;
1937
1938 /* Set the network sequence index if not set. */
1939 if (consumer->net_seq_index == -1) {
1940 /*
1941 * Increment net_seq_idx because we are about to transfer the
1942 * new relayd socket to the consumer.
1943 */
1944 uatomic_inc(&relayd_net_seq_idx);
1945 /* Assign unique key so the consumer can match streams */
1946 consumer->net_seq_index = uatomic_read(&relayd_net_seq_idx);
1947 }
1948
1949 /* Connect to the relayd and perform a version check if the URI is the control one. */
1950 ret = create_connect_relayd(consumer, session->name, relayd_uri, &sock);
1951 if (ret != LTTCOMM_OK) {
1952 goto close_sock;
1953 }
1954
1955 /* If the control socket is connected, network session is ready */
1956 if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
1957 session->net_handle = 1;
1958 }
1959
1960 switch (domain) {
1961 case LTTNG_DOMAIN_KERNEL:
1962 /* Send relayd socket to consumer. */
1963 ret = kernel_consumer_send_relayd_socket(consumer_fd, sock,
1964 consumer, relayd_uri->stype);
1965 if (ret < 0) {
1966 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
1967 goto close_sock;
1968 }
1969 break;
1970 case LTTNG_DOMAIN_UST:
1971 /* Send relayd socket to consumer. */
1972 ret = ust_consumer_send_relayd_socket(consumer_fd, sock,
1973 consumer, relayd_uri->stype);
1974 if (ret < 0) {
1975 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
1976 goto close_sock;
1977 }
1978 break;
1979 }
1980
1981 ret = LTTCOMM_OK;
1982
1983 /*
1984 * Close the socket which was dup()'d on the consumer side. The session daemon
1985 * does NOT keep track of the relayd socket(s) once transferred to the consumer.
1986 */
1987
1988 close_sock:
1989 if (sock) {
1990 (void) relayd_close(sock);
1991 lttcomm_destroy_sock(sock);
1992 }
1993
1994 return ret;
1995 }
1996
1997 /*
1998 * Send both relayd sockets to a specific consumer and domain. This is a
1999 * helper function to facilitate sending the information to the consumer for a
2000 * session.
2001 */
2002 static int send_sockets_relayd_consumer(int domain,
2003 struct ltt_session *session, struct consumer_output *consumer, int fd)
2004 {
2005 int ret;
2006
2007 /* Sending control relayd socket. */
2008 ret = send_socket_relayd_consumer(domain, session,
2009 &consumer->dst.net.control, consumer, fd);
2010 if (ret != LTTCOMM_OK) {
2011 goto error;
2012 }
2013
2014 /* Sending data relayd socket. */
2015 ret = send_socket_relayd_consumer(domain, session,
2016 &consumer->dst.net.data, consumer, fd);
2017 if (ret != LTTCOMM_OK) {
2018 goto error;
2019 }
2020
2021 error:
2022 return ret;
2023 }
2024
2025 /*
2026 * Setup relayd connections for a tracing session. First creates the socket to
2027 * the relayd and send them to the right domain consumer. Consumer type MUST be
2028 * network.
2029 */
2030 static int setup_relayd(struct ltt_session *session)
2031 {
2032 int ret = LTTCOMM_OK;
2033 struct ltt_ust_session *usess;
2034 struct ltt_kernel_session *ksess;
2035
2036 assert(session);
2037
2038 usess = session->ust_session;
2039 ksess = session->kernel_session;
2040
2041 DBG2("Setting relayd for session %s", session->name);
2042
2043 if (usess && usess->consumer->sock == -1 &&
2044 usess->consumer->type == CONSUMER_DST_NET &&
2045 usess->consumer->enabled) {
2046 /* Setup relayd for 64 bits consumer */
2047 if (ust_consumerd64_fd >= 0) {
2048 ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
2049 usess->consumer, ust_consumerd64_fd);
2050 if (ret != LTTCOMM_OK) {
2051 goto error;
2052 }
2053 }
2054
2055 /* Setup relayd for 32 bits consumer */
2056 if (ust_consumerd32_fd >= 0) {
2057 ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
2058 usess->consumer, ust_consumerd32_fd);
2059 if (ret != LTTCOMM_OK) {
2060 goto error;
2061 }
2062 }
2063 } else if (ksess && ksess->consumer->sock == -1 &&
2064 ksess->consumer->type == CONSUMER_DST_NET &&
2065 ksess->consumer->enabled) {
2066 ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_KERNEL, session,
2067 ksess->consumer, ksess->consumer_fd);
2068 if (ret != LTTCOMM_OK) {
2069 goto error;
2070 }
2071 }
2072
2073 error:
2074 return ret;
2075 }
2076
2077 /*
2078 * Copy consumer output from the tracing session to the domain session. The
2079 * function also applies the correct per-domain modification to the trace files'
2080 * destination directory.
2081 */
2082 static int copy_session_consumer(int domain, struct ltt_session *session)
2083 {
2084 int ret;
2085 const char *dir_name;
2086 struct consumer_output *consumer;
2087
2088 switch (domain) {
2089 case LTTNG_DOMAIN_KERNEL:
2090 DBG3("Copying tracing session consumer output in kernel session");
2091 session->kernel_session->consumer =
2092 consumer_copy_output(session->consumer);
2093 /* Ease our life a bit for the next part */
2094 consumer = session->kernel_session->consumer;
2095 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2096 break;
2097 case LTTNG_DOMAIN_UST:
2098 DBG3("Copying tracing session consumer output in UST session");
2099 session->ust_session->consumer =
2100 consumer_copy_output(session->consumer);
2101 /* Ease our life a bit for the next part */
2102 consumer = session->ust_session->consumer;
2103 dir_name = DEFAULT_UST_TRACE_DIR;
2104 break;
2105 default:
2106 ret = LTTCOMM_UNKNOWN_DOMAIN;
2107 goto error;
2108 }
2109
2110 /* Append correct directory to subdir */
2111 strncat(consumer->subdir, dir_name, sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2112 DBG3("Copy session consumer subdir %s", consumer->subdir);
2113
2114 /* Add default trace directory name */
2115 if (consumer->type == CONSUMER_DST_LOCAL) {
2116 strncat(consumer->dst.trace_path, dir_name,
2117 sizeof(consumer->dst.trace_path) - strlen(consumer->dst.trace_path) - 1);
2118 }
2119
2120 ret = LTTCOMM_OK;
2121
2122 error:
2123 return ret;
2124 }
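
/*
 * Example of the resulting paths (hypothetical values, and assuming
 * DEFAULT_KERNEL_TRACE_DIR expands to a "/kernel"-style suffix): for a local
 * session created with path "/home/joe/lttng-traces/mysess",
 * copy_session_consumer(LTTNG_DOMAIN_KERNEL, session) leaves the kernel
 * consumer copy with:
 *
 *	consumer->subdir         : previous subdir + "/kernel"
 *	consumer->dst.trace_path : "/home/joe/lttng-traces/mysess/kernel"
 *
 * The UST domain gets the same treatment with DEFAULT_UST_TRACE_DIR.
 */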
2125
2126 /*
2127 * Create a UST session and add it to the session ust list.
2128 */
2129 static int create_ust_session(struct ltt_session *session,
2130 struct lttng_domain *domain)
2131 {
2132 int ret;
2133 struct ltt_ust_session *lus = NULL;
2134
2135 assert(session);
2136 assert(session->consumer);
2137
2138 switch (domain->type) {
2139 case LTTNG_DOMAIN_UST:
2140 break;
2141 default:
2142 ERR("Unknown UST domain on create session %d", domain->type);
2143 ret = LTTCOMM_UNKNOWN_DOMAIN;
2144 goto error;
2145 }
2146
2147 DBG("Creating UST session");
2148
2149 lus = trace_ust_create_session(session->path, session->id, domain);
2150 if (lus == NULL) {
2151 ret = LTTCOMM_UST_SESS_FAIL;
2152 goto error;
2153 }
2154
2155 if (session->consumer->type == CONSUMER_DST_LOCAL) {
2156 ret = run_as_mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
2157 session->uid, session->gid);
2158 if (ret < 0) {
2159 if (ret != -EEXIST) {
2160 ERR("Trace directory creation error");
2161 ret = LTTCOMM_UST_SESS_FAIL;
2162 goto error;
2163 }
2164 }
2165 }
2166
2167 lus->uid = session->uid;
2168 lus->gid = session->gid;
2169 session->ust_session = lus;
2170
2171 /* Copy session output to the newly created UST session */
2172 ret = copy_session_consumer(domain->type, session);
2173 if (ret != LTTCOMM_OK) {
2174 goto error;
2175 }
2176
2177 return LTTCOMM_OK;
2178
2179 error:
2180 free(lus);
2181 session->ust_session = NULL;
2182 return ret;
2183 }
2184
2185 /*
2186 * Create a kernel tracer session then create the default channel.
2187 */
2188 static int create_kernel_session(struct ltt_session *session)
2189 {
2190 int ret;
2191
2192 DBG("Creating kernel session");
2193
2194 ret = kernel_create_session(session, kernel_tracer_fd);
2195 if (ret < 0) {
2196 ret = LTTCOMM_KERN_SESS_FAIL;
2197 goto error;
2198 }
2199
2200 /* Set kernel consumer socket fd */
2201 if (kconsumer_data.cmd_sock >= 0) {
2202 session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
2203 }
2204
2205 /* Copy session output to the newly created Kernel session */
2206 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2207 if (ret != LTTCOMM_OK) {
2208 goto error;
2209 }
2210
2211 /* Create directory(ies) on local filesystem. */
2212 if (session->consumer->type == CONSUMER_DST_LOCAL) {
2213 ret = run_as_mkdir_recursive(
2214 session->kernel_session->consumer->dst.trace_path,
2215 S_IRWXU | S_IRWXG, session->uid, session->gid);
2216 if (ret < 0) {
2217 if (ret != -EEXIST) {
2218 ERR("Trace directory creation error");
2219 goto error;
2220 }
2221 }
2222 }
2223
2224 session->kernel_session->uid = session->uid;
2225 session->kernel_session->gid = session->gid;
2226
2227 return LTTCOMM_OK;
2228
2229 error:
2230 trace_kernel_destroy_session(session->kernel_session);
2231 session->kernel_session = NULL;
2232 return ret;
2233 }
2234
2235 /*
2236 * Check if the UID or GID match the session. Root user has access to all
2237 * sessions.
2238 */
2239 static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
2240 {
2241 if (uid != session->uid && gid != session->gid && uid != 0) {
2242 return 0;
2243 } else {
2244 return 1;
2245 }
2246 }
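
/*
 * Quick illustration with hypothetical credentials, for a session created
 * with uid 1000 and gid 1000:
 *
 *	session_access_ok(session, 1000, 0);     returns 1 (uid matches)
 *	session_access_ok(session, 0, 0);        returns 1 (root always allowed)
 *	session_access_ok(session, 1001, 1001);  returns 0 (no match, not root)
 */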
2247
2248 /*
2249 * Count the number of sessions permitted for the given uid/gid.
2250 */
2251 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2252 {
2253 unsigned int i = 0;
2254 struct ltt_session *session;
2255
2256 DBG("Counting number of available session for UID %d GID %d",
2257 uid, gid);
2258 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2259 /*
2260 * Only list the sessions the user can control.
2261 */
2262 if (!session_access_ok(session, uid, gid)) {
2263 continue;
2264 }
2265 i++;
2266 }
2267 return i;
2268 }
2269
2270 /*
2271 * Using the session list, fill an lttng_session array to send back to the
2272 * client for session listing.
2273 *
2274 * The session list lock MUST be acquired before calling this function. Use
2275 * session_lock_list() and session_unlock_list().
2276 */
2277 static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
2278 gid_t gid)
2279 {
2280 unsigned int i = 0;
2281 struct ltt_session *session;
2282
2283 DBG("Getting all available session for UID %d GID %d",
2284 uid, gid);
2285 /*
2286 * Iterate over session list and append data after the control struct in
2287 * the buffer.
2288 */
2289 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2290 /*
2291 * Only list the sessions the user can control.
2292 */
2293 if (!session_access_ok(session, uid, gid)) {
2294 continue;
2295 }
2296 strncpy(sessions[i].path, session->path, PATH_MAX);
2297 sessions[i].path[PATH_MAX - 1] = '\0';
2298 strncpy(sessions[i].name, session->name, NAME_MAX);
2299 sessions[i].name[NAME_MAX - 1] = '\0';
2300 sessions[i].enabled = session->enabled;
2301 i++;
2302 }
2303 }
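
/*
 * Minimal sketch of the intended count-then-fill pattern for the two helpers
 * above (illustrative only; the actual LTTNG_LIST_SESSIONS handler further
 * down in process_client_msg() fills the client reply payload instead of a
 * standalone array). The session list lock must cover both the count and the
 * fill so they stay consistent:
 *
 *	session_lock_list();
 *	nr_sessions = lttng_sessions_count(uid, gid);
 *	sessions = zmalloc(nr_sessions * sizeof(struct lttng_session));
 *	if (sessions != NULL) {
 *		list_lttng_sessions(sessions, uid, gid);
 *	}
 *	session_unlock_list();
 */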
2304
2305 /*
2306 * Fill lttng_channel array of all channels.
2307 */
2308 static void list_lttng_channels(int domain, struct ltt_session *session,
2309 struct lttng_channel *channels)
2310 {
2311 int i = 0;
2312 struct ltt_kernel_channel *kchan;
2313
2314 DBG("Listing channels for session %s", session->name);
2315
2316 switch (domain) {
2317 case LTTNG_DOMAIN_KERNEL:
2318 /* Kernel channels */
2319 if (session->kernel_session != NULL) {
2320 cds_list_for_each_entry(kchan,
2321 &session->kernel_session->channel_list.head, list) {
2322 /* Copy lttng_channel struct to array */
2323 memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
2324 channels[i].enabled = kchan->enabled;
2325 i++;
2326 }
2327 }
2328 break;
2329 case LTTNG_DOMAIN_UST:
2330 {
2331 struct lttng_ht_iter iter;
2332 struct ltt_ust_channel *uchan;
2333
2334 cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
2335 &iter.iter, uchan, node.node) {
2336 strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
channels[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
2337 channels[i].attr.overwrite = uchan->attr.overwrite;
2338 channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
2339 channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
2340 channels[i].attr.switch_timer_interval =
2341 uchan->attr.switch_timer_interval;
2342 channels[i].attr.read_timer_interval =
2343 uchan->attr.read_timer_interval;
2344 channels[i].enabled = uchan->enabled;
2345 switch (uchan->attr.output) {
2346 case LTTNG_UST_MMAP:
2347 default:
2348 channels[i].attr.output = LTTNG_EVENT_MMAP;
2349 break;
2350 }
2351 i++;
2352 }
2353 break;
2354 }
2355 default:
2356 break;
2357 }
2358 }
2359
2360 /*
2361 * Create a list of ust global domain events.
2362 */
2363 static int list_lttng_ust_global_events(char *channel_name,
2364 struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
2365 {
2366 int i = 0, ret = 0;
2367 unsigned int nb_event = 0;
2368 struct lttng_ht_iter iter;
2369 struct lttng_ht_node_str *node;
2370 struct ltt_ust_channel *uchan;
2371 struct ltt_ust_event *uevent;
2372 struct lttng_event *tmp;
2373
2374 DBG("Listing UST global events for channel %s", channel_name);
2375
2376 rcu_read_lock();
2377
2378 lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);
2379 node = lttng_ht_iter_get_node_str(&iter);
2380 if (node == NULL) {
2381 ret = -LTTCOMM_UST_CHAN_NOT_FOUND;
2382 goto error;
2383 }
2384
2385 uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);
2386
2387 nb_event += lttng_ht_get_count(uchan->events);
2388
2389 if (nb_event == 0) {
2390 ret = nb_event;
2391 goto error;
2392 }
2393
2394 DBG3("Listing UST global %d events", nb_event);
2395
2396 tmp = zmalloc(nb_event * sizeof(struct lttng_event));
2397 if (tmp == NULL) {
2398 ret = -LTTCOMM_FATAL;
2399 goto error;
2400 }
2401
2402 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
2403 strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
2404 tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
2405 tmp[i].enabled = uevent->enabled;
2406 switch (uevent->attr.instrumentation) {
2407 case LTTNG_UST_TRACEPOINT:
2408 tmp[i].type = LTTNG_EVENT_TRACEPOINT;
2409 break;
2410 case LTTNG_UST_PROBE:
2411 tmp[i].type = LTTNG_EVENT_PROBE;
2412 break;
2413 case LTTNG_UST_FUNCTION:
2414 tmp[i].type = LTTNG_EVENT_FUNCTION;
2415 break;
2416 }
2417 tmp[i].loglevel = uevent->attr.loglevel;
2418 switch (uevent->attr.loglevel_type) {
2419 case LTTNG_UST_LOGLEVEL_ALL:
2420 tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
2421 break;
2422 case LTTNG_UST_LOGLEVEL_RANGE:
2423 tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
2424 break;
2425 case LTTNG_UST_LOGLEVEL_SINGLE:
2426 tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
2427 break;
2428 }
2429 if (uevent->filter) {
2430 tmp[i].filter = 1;
2431 }
2432 i++;
2433 }
2434
2435 ret = nb_event;
2436 *events = tmp;
2437
2438 error:
2439 rcu_read_unlock();
2440 return ret;
2441 }
2442
2443 /*
2444 * Fill lttng_event array of all kernel events in the channel.
2445 */
2446 static int list_lttng_kernel_events(char *channel_name,
2447 struct ltt_kernel_session *kernel_session, struct lttng_event **events)
2448 {
2449 int i = 0, ret;
2450 unsigned int nb_event;
2451 struct ltt_kernel_event *event;
2452 struct ltt_kernel_channel *kchan;
2453
2454 kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
2455 if (kchan == NULL) {
2456 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2457 goto error;
2458 }
2459
2460 nb_event = kchan->event_count;
2461
2462 DBG("Listing events for channel %s", kchan->channel->name);
2463
2464 if (nb_event == 0) {
2465 ret = nb_event;
2466 goto error;
2467 }
2468
2469 *events = zmalloc(nb_event * sizeof(struct lttng_event));
2470 if (*events == NULL) {
2471 ret = LTTCOMM_FATAL;
2472 goto error;
2473 }
2474
2475 /* Kernel channels */
2476 cds_list_for_each_entry(event, &kchan->events_list.head , list) {
2477 strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
2478 (*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
2479 (*events)[i].enabled = event->enabled;
2480 switch (event->event->instrumentation) {
2481 case LTTNG_KERNEL_TRACEPOINT:
2482 (*events)[i].type = LTTNG_EVENT_TRACEPOINT;
2483 break;
2484 case LTTNG_KERNEL_KPROBE:
2485 case LTTNG_KERNEL_KRETPROBE:
2486 (*events)[i].type = LTTNG_EVENT_PROBE;
2487 memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
2488 sizeof(struct lttng_kernel_kprobe));
2489 break;
2490 case LTTNG_KERNEL_FUNCTION:
2491 (*events)[i].type = LTTNG_EVENT_FUNCTION;
2492 memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
2493 sizeof(struct lttng_kernel_function));
2494 break;
2495 case LTTNG_KERNEL_NOOP:
2496 (*events)[i].type = LTTNG_EVENT_NOOP;
2497 break;
2498 case LTTNG_KERNEL_SYSCALL:
2499 (*events)[i].type = LTTNG_EVENT_SYSCALL;
2500 break;
2501 case LTTNG_KERNEL_ALL:
2502 assert(0);
2503 break;
2504 }
2505 i++;
2506 }
2507
2508 return nb_event;
2509
2510 error:
2511 return ret;
2512 }
2513
2514 /*
2515 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
2516 */
2517 static int cmd_disable_channel(struct ltt_session *session,
2518 int domain, char *channel_name)
2519 {
2520 int ret;
2521 struct ltt_ust_session *usess;
2522
2523 usess = session->ust_session;
2524
2525 switch (domain) {
2526 case LTTNG_DOMAIN_KERNEL:
2527 {
2528 ret = channel_kernel_disable(session->kernel_session,
2529 channel_name);
2530 if (ret != LTTCOMM_OK) {
2531 goto error;
2532 }
2533
2534 kernel_wait_quiescent(kernel_tracer_fd);
2535 break;
2536 }
2537 case LTTNG_DOMAIN_UST:
2538 {
2539 struct ltt_ust_channel *uchan;
2540 struct lttng_ht *chan_ht;
2541
2542 chan_ht = usess->domain_global.channels;
2543
2544 uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
2545 if (uchan == NULL) {
2546 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2547 goto error;
2548 }
2549
2550 ret = channel_ust_disable(usess, domain, uchan);
2551 if (ret != LTTCOMM_OK) {
2552 goto error;
2553 }
2554 break;
2555 }
2556 #if 0
2557 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2558 case LTTNG_DOMAIN_UST_EXEC_NAME:
2559 case LTTNG_DOMAIN_UST_PID:
2560 #endif
2561 default:
2562 ret = LTTCOMM_UNKNOWN_DOMAIN;
2563 goto error;
2564 }
2565
2566 ret = LTTCOMM_OK;
2567
2568 error:
2569 return ret;
2570 }
2571
2572 /*
2573 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
2574 */
2575 static int cmd_enable_channel(struct ltt_session *session,
2576 int domain, struct lttng_channel *attr)
2577 {
2578 int ret;
2579 struct ltt_ust_session *usess = session->ust_session;
2580 struct lttng_ht *chan_ht;
2581
2582 DBG("Enabling channel %s for session %s", attr->name, session->name);
2583
2584 switch (domain) {
2585 case LTTNG_DOMAIN_KERNEL:
2586 {
2587 struct ltt_kernel_channel *kchan;
2588
2589 kchan = trace_kernel_get_channel_by_name(attr->name,
2590 session->kernel_session);
2591 if (kchan == NULL) {
2592 ret = channel_kernel_create(session->kernel_session,
2593 attr, kernel_poll_pipe[1]);
2594 } else {
2595 ret = channel_kernel_enable(session->kernel_session, kchan);
2596 }
2597
2598 if (ret != LTTCOMM_OK) {
2599 goto error;
2600 }
2601
2602 kernel_wait_quiescent(kernel_tracer_fd);
2603 break;
2604 }
2605 case LTTNG_DOMAIN_UST:
2606 {
2607 struct ltt_ust_channel *uchan;
2608
2609 chan_ht = usess->domain_global.channels;
2610
2611 uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
2612 if (uchan == NULL) {
2613 ret = channel_ust_create(usess, domain, attr);
2614 } else {
2615 ret = channel_ust_enable(usess, domain, uchan);
2616 }
2617 break;
2618 }
2619 #if 0
2620 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2621 case LTTNG_DOMAIN_UST_EXEC_NAME:
2622 case LTTNG_DOMAIN_UST_PID:
2623 #endif
2624 default:
2625 ret = LTTCOMM_UNKNOWN_DOMAIN;
2626 goto error;
2627 }
2628
2629 error:
2630 return ret;
2631 }
2632
2633 /*
2634 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2635 */
2636 static int cmd_disable_event(struct ltt_session *session, int domain,
2637 char *channel_name, char *event_name)
2638 {
2639 int ret;
2640
2641 switch (domain) {
2642 case LTTNG_DOMAIN_KERNEL:
2643 {
2644 struct ltt_kernel_channel *kchan;
2645 struct ltt_kernel_session *ksess;
2646
2647 ksess = session->kernel_session;
2648
2649 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
2650 if (kchan == NULL) {
2651 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2652 goto error;
2653 }
2654
2655 ret = event_kernel_disable_tracepoint(ksess, kchan, event_name);
2656 if (ret != LTTCOMM_OK) {
2657 goto error;
2658 }
2659
2660 kernel_wait_quiescent(kernel_tracer_fd);
2661 break;
2662 }
2663 case LTTNG_DOMAIN_UST:
2664 {
2665 struct ltt_ust_channel *uchan;
2666 struct ltt_ust_session *usess;
2667
2668 usess = session->ust_session;
2669
2670 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2671 channel_name);
2672 if (uchan == NULL) {
2673 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2674 goto error;
2675 }
2676
2677 ret = event_ust_disable_tracepoint(usess, domain, uchan, event_name);
2678 if (ret != LTTCOMM_OK) {
2679 goto error;
2680 }
2681
2682 DBG3("Disable UST event %s in channel %s completed", event_name,
2683 channel_name);
2684 break;
2685 }
2686 #if 0
2687 case LTTNG_DOMAIN_UST_EXEC_NAME:
2688 case LTTNG_DOMAIN_UST_PID:
2689 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2690 #endif
2691 default:
2692 ret = LTTCOMM_UND;
2693 goto error;
2694 }
2695
2696 ret = LTTCOMM_OK;
2697
2698 error:
2699 return ret;
2700 }
2701
2702 /*
2703 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
2704 */
2705 static int cmd_disable_event_all(struct ltt_session *session, int domain,
2706 char *channel_name)
2707 {
2708 int ret;
2709
2710 switch (domain) {
2711 case LTTNG_DOMAIN_KERNEL:
2712 {
2713 struct ltt_kernel_session *ksess;
2714 struct ltt_kernel_channel *kchan;
2715
2716 ksess = session->kernel_session;
2717
2718 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
2719 if (kchan == NULL) {
2720 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2721 goto error;
2722 }
2723
2724 ret = event_kernel_disable_all(ksess, kchan);
2725 if (ret != LTTCOMM_OK) {
2726 goto error;
2727 }
2728
2729 kernel_wait_quiescent(kernel_tracer_fd);
2730 break;
2731 }
2732 case LTTNG_DOMAIN_UST:
2733 {
2734 struct ltt_ust_session *usess;
2735 struct ltt_ust_channel *uchan;
2736
2737 usess = session->ust_session;
2738
2739 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2740 channel_name);
2741 if (uchan == NULL) {
2742 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2743 goto error;
2744 }
2745
2746 ret = event_ust_disable_all_tracepoints(usess, domain, uchan);
2747 if (ret != 0) {
2748 goto error;
2749 }
2750
2751 DBG3("Disable all UST events in channel %s completed", channel_name);
2752
2753 break;
2754 }
2755 #if 0
2756 case LTTNG_DOMAIN_UST_EXEC_NAME:
2757 case LTTNG_DOMAIN_UST_PID:
2758 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2759 #endif
2760 default:
2761 ret = LTTCOMM_UND;
2762 goto error;
2763 }
2764
2765 ret = LTTCOMM_OK;
2766
2767 error:
2768 return ret;
2769 }
2770
2771 /*
2772 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2773 */
2774 static int cmd_add_context(struct ltt_session *session, int domain,
2775 char *channel_name, char *event_name, struct lttng_event_context *ctx)
2776 {
2777 int ret;
2778
2779 switch (domain) {
2780 case LTTNG_DOMAIN_KERNEL:
2781 /* Add kernel context to kernel tracer */
2782 ret = context_kernel_add(session->kernel_session, ctx,
2783 event_name, channel_name);
2784 if (ret != LTTCOMM_OK) {
2785 goto error;
2786 }
2787 break;
2788 case LTTNG_DOMAIN_UST:
2789 {
2790 struct ltt_ust_session *usess = session->ust_session;
2791
2792 ret = context_ust_add(usess, domain, ctx, event_name, channel_name);
2793 if (ret != LTTCOMM_OK) {
2794 goto error;
2795 }
2796 break;
2797 }
2798 #if 0
2799 case LTTNG_DOMAIN_UST_EXEC_NAME:
2800 case LTTNG_DOMAIN_UST_PID:
2801 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2802 #endif
2803 default:
2804 ret = LTTCOMM_UND;
2805 goto error;
2806 }
2807
2808 ret = LTTCOMM_OK;
2809
2810 error:
2811 return ret;
2812 }
2813
2814 /*
2815 * Command LTTNG_SET_FILTER processed by the client thread.
2816 */
2817 static int cmd_set_filter(struct ltt_session *session, int domain,
2818 char *channel_name, char *event_name,
2819 struct lttng_filter_bytecode *bytecode)
2820 {
2821 int ret;
2822
2823 switch (domain) {
2824 case LTTNG_DOMAIN_KERNEL:
2825 ret = LTTCOMM_FATAL;
2826 break;
2827 case LTTNG_DOMAIN_UST:
2828 {
2829 struct ltt_ust_session *usess = session->ust_session;
2830
2831 ret = filter_ust_set(usess, domain, bytecode, event_name, channel_name);
2832 if (ret != LTTCOMM_OK) {
2833 goto error;
2834 }
2835 break;
2836 }
2837 #if 0
2838 case LTTNG_DOMAIN_UST_EXEC_NAME:
2839 case LTTNG_DOMAIN_UST_PID:
2840 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2841 #endif
2842 default:
2843 ret = LTTCOMM_UND;
2844 goto error;
2845 }
2846
2847 ret = LTTCOMM_OK;
2848
2849 error:
2850 return ret;
2851
2852 }
2853
2854 /*
2855 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2856 */
2857 static int cmd_enable_event(struct ltt_session *session, int domain,
2858 char *channel_name, struct lttng_event *event)
2859 {
2860 int ret;
2861 struct lttng_channel *attr;
2862 struct ltt_ust_session *usess = session->ust_session;
2863
2864 switch (domain) {
2865 case LTTNG_DOMAIN_KERNEL:
2866 {
2867 struct ltt_kernel_channel *kchan;
2868
2869 kchan = trace_kernel_get_channel_by_name(channel_name,
2870 session->kernel_session);
2871 if (kchan == NULL) {
2872 attr = channel_new_default_attr(domain);
2873 if (attr == NULL) {
2874 ret = LTTCOMM_FATAL;
2875 goto error;
2876 }
2877 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2878
2879 /* This call will notify the kernel thread */
2880 ret = channel_kernel_create(session->kernel_session,
2881 attr, kernel_poll_pipe[1]);
2882 if (ret != LTTCOMM_OK) {
2883 free(attr);
2884 goto error;
2885 }
2886 free(attr);
2887 }
2888
2889 /* Get the newly created kernel channel pointer */
2890 kchan = trace_kernel_get_channel_by_name(channel_name,
2891 session->kernel_session);
2892 if (kchan == NULL) {
2893 /* This should not happen... */
2894 ret = LTTCOMM_FATAL;
2895 goto error;
2896 }
2897
2898 ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
2899 event);
2900 if (ret != LTTCOMM_OK) {
2901 goto error;
2902 }
2903
2904 kernel_wait_quiescent(kernel_tracer_fd);
2905 break;
2906 }
2907 case LTTNG_DOMAIN_UST:
2908 {
2909 struct lttng_channel *attr;
2910 struct ltt_ust_channel *uchan;
2911
2912 /* Get channel from global UST domain */
2913 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2914 channel_name);
2915 if (uchan == NULL) {
2916 /* Create default channel */
2917 attr = channel_new_default_attr(domain);
2918 if (attr == NULL) {
2919 ret = LTTCOMM_FATAL;
2920 goto error;
2921 }
2922 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2923 attr->name[NAME_MAX - 1] = '\0';
2924
2925 ret = channel_ust_create(usess, domain, attr);
2926 if (ret != LTTCOMM_OK) {
2927 free(attr);
2928 goto error;
2929 }
2930 free(attr);
2931
2932 /* Get the newly created channel reference back */
2933 uchan = trace_ust_find_channel_by_name(
2934 usess->domain_global.channels, channel_name);
2935 if (uchan == NULL) {
2936 /* Something is really wrong */
2937 ret = LTTCOMM_FATAL;
2938 goto error;
2939 }
2940 }
2941
2942 /* At this point, the session and channel exist on the tracer */
2943 ret = event_ust_enable_tracepoint(usess, domain, uchan, event);
2944 if (ret != LTTCOMM_OK) {
2945 goto error;
2946 }
2947 break;
2948 }
2949 #if 0
2950 case LTTNG_DOMAIN_UST_EXEC_NAME:
2951 case LTTNG_DOMAIN_UST_PID:
2952 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2953 #endif
2954 default:
2955 ret = LTTCOMM_UND;
2956 goto error;
2957 }
2958
2959 ret = LTTCOMM_OK;
2960
2961 error:
2962 return ret;
2963 }
2964
2965 /*
2966 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
2967 */
2968 static int cmd_enable_event_all(struct ltt_session *session, int domain,
2969 char *channel_name, int event_type)
2970 {
2971 int ret;
2972 struct ltt_kernel_channel *kchan;
2973
2974 switch (domain) {
2975 case LTTNG_DOMAIN_KERNEL:
2976 kchan = trace_kernel_get_channel_by_name(channel_name,
2977 session->kernel_session);
2978 if (kchan == NULL) {
2979 /* This call will notify the kernel thread */
2980 ret = channel_kernel_create(session->kernel_session, NULL,
2981 kernel_poll_pipe[1]);
2982 if (ret != LTTCOMM_OK) {
2983 goto error;
2984 }
2985
2986 /* Get the newly created kernel channel pointer */
2987 kchan = trace_kernel_get_channel_by_name(channel_name,
2988 session->kernel_session);
2989 if (kchan == NULL) {
2990 /* This should not happen... */
2991 ret = LTTCOMM_FATAL;
2992 goto error;
2993 }
2994
2995 }
2996
2997 switch (event_type) {
2998 case LTTNG_EVENT_SYSCALL:
2999 ret = event_kernel_enable_all_syscalls(session->kernel_session,
3000 kchan, kernel_tracer_fd);
3001 break;
3002 case LTTNG_EVENT_TRACEPOINT:
3003 /*
3004 * This call enables all LTTNG_KERNEL_TRACEPOINT events as well as
3005 * those already registered to the channel.
3006 */
3007 ret = event_kernel_enable_all_tracepoints(session->kernel_session,
3008 kchan, kernel_tracer_fd);
3009 break;
3010 case LTTNG_EVENT_ALL:
3011 /* Enable syscalls and tracepoints */
3012 ret = event_kernel_enable_all(session->kernel_session,
3013 kchan, kernel_tracer_fd);
3014 break;
3015 default:
3016 ret = LTTCOMM_KERN_ENABLE_FAIL;
3017 goto error;
3018 }
3019
3020 /* Manage return value */
3021 if (ret != LTTCOMM_OK) {
3022 goto error;
3023 }
3024
3025 kernel_wait_quiescent(kernel_tracer_fd);
3026 break;
3027 case LTTNG_DOMAIN_UST:
3028 {
3029 struct lttng_channel *attr;
3030 struct ltt_ust_channel *uchan;
3031 struct ltt_ust_session *usess = session->ust_session;
3032
3033 /* Get channel from global UST domain */
3034 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
3035 channel_name);
3036 if (uchan == NULL) {
3037 /* Create default channel */
3038 attr = channel_new_default_attr(domain);
3039 if (attr == NULL) {
3040 ret = LTTCOMM_FATAL;
3041 goto error;
3042 }
3043 snprintf(attr->name, NAME_MAX, "%s", channel_name);
3044 attr->name[NAME_MAX - 1] = '\0';
3045
3046 /* Use the internal command enable channel */
3047 ret = channel_ust_create(usess, domain, attr);
3048 if (ret != LTTCOMM_OK) {
3049 free(attr);
3050 goto error;
3051 }
3052 free(attr);
3053
3054 /* Get the newly created channel reference back */
3055 uchan = trace_ust_find_channel_by_name(
3056 usess->domain_global.channels, channel_name);
3057 if (uchan == NULL) {
3058 /* Something is really wrong */
3059 ret = LTTCOMM_FATAL;
3060 goto error;
3061 }
3062 }
3063
3064 /* At this point, the session and channel exist on the tracer */
3065
3066 switch (event_type) {
3067 case LTTNG_EVENT_ALL:
3068 case LTTNG_EVENT_TRACEPOINT:
3069 ret = event_ust_enable_all_tracepoints(usess, domain, uchan);
3070 if (ret != LTTCOMM_OK) {
3071 goto error;
3072 }
3073 break;
3074 default:
3075 ret = LTTCOMM_UST_ENABLE_FAIL;
3076 goto error;
3077 }
3078
3079 /* Manage return value */
3080 if (ret != LTTCOMM_OK) {
3081 goto error;
3082 }
3083
3084 break;
3085 }
3086 #if 0
3087 case LTTNG_DOMAIN_UST_EXEC_NAME:
3088 case LTTNG_DOMAIN_UST_PID:
3089 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
3090 #endif
3091 default:
3092 ret = LTTCOMM_UND;
3093 goto error;
3094 }
3095
3096 ret = LTTCOMM_OK;
3097
3098 error:
3099 return ret;
3100 }
3101
3102 /*
3103 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
3104 */
3105 static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
3106 {
3107 int ret;
3108 ssize_t nb_events = 0;
3109
3110 switch (domain) {
3111 case LTTNG_DOMAIN_KERNEL:
3112 nb_events = kernel_list_events(kernel_tracer_fd, events);
3113 if (nb_events < 0) {
3114 ret = LTTCOMM_KERN_LIST_FAIL;
3115 goto error;
3116 }
3117 break;
3118 case LTTNG_DOMAIN_UST:
3119 nb_events = ust_app_list_events(events);
3120 if (nb_events < 0) {
3121 ret = LTTCOMM_UST_LIST_FAIL;
3122 goto error;
3123 }
3124 break;
3125 default:
3126 ret = LTTCOMM_UND;
3127 goto error;
3128 }
3129
3130 return nb_events;
3131
3132 error:
3133 /* Return negative value to differentiate return code */
3134 return -ret;
3135 }
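
/*
 * Note on the return convention used above: a non-negative value is the
 * number of events stored in *events, while an error comes back as the
 * negated LTTCOMM code. A caller flips the sign back to recover the error,
 * as the LTTNG_LIST_TRACEPOINTS handler in process_client_msg() does:
 *
 *	nb_events = cmd_list_tracepoints(domain, &events);
 *	if (nb_events < 0) {
 *		ret = -nb_events;
 *		goto error;
 *	}
 */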
3136
3137 /*
3138 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
3139 */
3140 static ssize_t cmd_list_tracepoint_fields(int domain,
3141 struct lttng_event_field **fields)
3142 {
3143 int ret;
3144 ssize_t nb_fields = 0;
3145
3146 switch (domain) {
3147 case LTTNG_DOMAIN_UST:
3148 nb_fields = ust_app_list_event_fields(fields);
3149 if (nb_fields < 0) {
3150 ret = LTTCOMM_UST_LIST_FAIL;
3151 goto error;
3152 }
3153 break;
3154 case LTTNG_DOMAIN_KERNEL: /* fall-through */
3155 default:
3156 ret = LTTCOMM_UND;
3157 goto error;
3158 }
3159
3160 return nb_fields;
3161
3162 error:
3163 /* Return negative value to differentiate return code */
3164 return -ret;
3165 }
3166
3167 /*
3168 * Command LTTNG_START_TRACE processed by the client thread.
3169 */
3170 static int cmd_start_trace(struct ltt_session *session)
3171 {
3172 int ret;
3173 struct ltt_kernel_session *ksession;
3174 struct ltt_ust_session *usess;
3175 struct ltt_kernel_channel *kchan;
3176
3177 /* Ease our life a bit ;) */
3178 ksession = session->kernel_session;
3179 usess = session->ust_session;
3180
3181 if (session->enabled) {
3182 /* Already started. */
3183 ret = LTTCOMM_TRACE_ALREADY_STARTED;
3184 goto error;
3185 }
3186
3187 session->enabled = 1;
3188
3189 ret = setup_relayd(session);
3190 if (ret != LTTCOMM_OK) {
3191 ERR("Error setting up relayd for session %s", session->name);
3192 goto error;
3193 }
3194
3195 /* Kernel tracing */
3196 if (ksession != NULL) {
3197 /* Open kernel metadata */
3198 if (ksession->metadata == NULL) {
3199 ret = kernel_open_metadata(ksession,
3200 ksession->consumer->dst.trace_path);
3201 if (ret < 0) {
3202 ret = LTTCOMM_KERN_META_FAIL;
3203 goto error;
3204 }
3205 }
3206
3207 /* Open kernel metadata stream */
3208 if (ksession->metadata_stream_fd < 0) {
3209 ret = kernel_open_metadata_stream(ksession);
3210 if (ret < 0) {
3211 ERR("Kernel create metadata stream failed");
3212 ret = LTTCOMM_KERN_STREAM_FAIL;
3213 goto error;
3214 }
3215 }
3216
3217 /* For each channel */
3218 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
3219 if (kchan->stream_count == 0) {
3220 ret = kernel_open_channel_stream(kchan);
3221 if (ret < 0) {
3222 ret = LTTCOMM_KERN_STREAM_FAIL;
3223 goto error;
3224 }
3225 /* Update the stream global counter */
3226 ksession->stream_count_global += ret;
3227 }
3228 }
3229
3230 /* Setup kernel consumer socket and send fds to it */
3231 ret = init_kernel_tracing(ksession);
3232 if (ret < 0) {
3233 ret = LTTCOMM_KERN_START_FAIL;
3234 goto error;
3235 }
3236
3237 /* This starts the kernel tracing */
3238 ret = kernel_start_session(ksession);
3239 if (ret < 0) {
3240 ret = LTTCOMM_KERN_START_FAIL;
3241 goto error;
3242 }
3243
3244 /* Quiescent wait after starting trace */
3245 kernel_wait_quiescent(kernel_tracer_fd);
3246 }
3247
3248 /* Flag session that trace should start automatically */
3249 if (usess) {
3250 usess->start_trace = 1;
3251
3252 ret = ust_app_start_trace_all(usess);
3253 if (ret < 0) {
3254 ret = LTTCOMM_UST_START_FAIL;
3255 goto error;
3256 }
3257 }
3258
3259 ret = LTTCOMM_OK;
3260
3261 error:
3262 return ret;
3263 }
3264
3265 /*
3266 * Command LTTNG_STOP_TRACE processed by the client thread.
3267 */
3268 static int cmd_stop_trace(struct ltt_session *session)
3269 {
3270 int ret;
3271 struct ltt_kernel_channel *kchan;
3272 struct ltt_kernel_session *ksession;
3273 struct ltt_ust_session *usess;
3274
3275 /* Short cut */
3276 ksession = session->kernel_session;
3277 usess = session->ust_session;
3278
3279 if (!session->enabled) {
3280 ret = LTTCOMM_TRACE_ALREADY_STOPPED;
3281 goto error;
3282 }
3283
3284 session->enabled = 0;
3285
3286 /* Kernel tracer */
3287 if (ksession != NULL) {
3288 DBG("Stop kernel tracing");
3289
3290 /* Flush metadata if it exists */
3291 if (ksession->metadata_stream_fd >= 0) {
3292 ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
3293 if (ret < 0) {
3294 ERR("Kernel metadata flush failed");
3295 }
3296 }
3297
3298 /* Flush all buffers before stopping */
3299 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
3300 ret = kernel_flush_buffer(kchan);
3301 if (ret < 0) {
3302 ERR("Kernel flush buffer error");
3303 }
3304 }
3305
3306 ret = kernel_stop_session(ksession);
3307 if (ret < 0) {
3308 ret = LTTCOMM_KERN_STOP_FAIL;
3309 goto error;
3310 }
3311
3312 kernel_wait_quiescent(kernel_tracer_fd);
3313 }
3314
3315 if (usess) {
3316 usess->start_trace = 0;
3317
3318 ret = ust_app_stop_trace_all(usess);
3319 if (ret < 0) {
3320 ret = LTTCOMM_UST_STOP_FAIL;
3321 goto error;
3322 }
3323 }
3324
3325 ret = LTTCOMM_OK;
3326
3327 error:
3328 return ret;
3329 }
3330
3331 /*
3332 * Command LTTNG_CREATE_SESSION_URI processed by the client thread.
3333 */
3334 static int cmd_create_session_uri(char *name, struct lttng_uri *ctrl_uri,
3335 struct lttng_uri *data_uri, unsigned int enable_consumer,
3336 lttng_sock_cred *creds)
3337 {
3338 int ret;
3339 char *path = NULL;
3340 struct ltt_session *session;
3341 struct consumer_output *consumer;
3342
3343 /* Verify if the session already exists */
3344 session = session_find_by_name(name);
3345 if (session != NULL) {
3346 ret = LTTCOMM_EXIST_SESS;
3347 goto error;
3348 }
3349
3350 /* TODO: validate URIs */
3351
3352 /* Create default consumer output */
3353 consumer = consumer_create_output(CONSUMER_DST_LOCAL);
3354 if (consumer == NULL) {
3355 ret = LTTCOMM_FATAL;
3356 goto error;
3357 }
3358 strncpy(consumer->subdir, ctrl_uri->subdir, sizeof(consumer->subdir));
3359 DBG2("Consumer subdir set to %s", consumer->subdir);
3360
3361 switch (ctrl_uri->dtype) {
3362 case LTTNG_DST_IPV4:
3363 case LTTNG_DST_IPV6:
3364 /* Set control URI into consumer output object */
3365 ret = consumer_set_network_uri(consumer, ctrl_uri);
3366 if (ret < 0) {
3367 ret = LTTCOMM_FATAL;
3368 goto error;
3369 }
3370
3371 /* Set data URI into consumer output object */
3372 ret = consumer_set_network_uri(consumer, data_uri);
3373 if (ret < 0) {
3374 ret = LTTCOMM_FATAL;
3375 goto error;
3376 }
3377
3378 /* Empty path since the session is network */
3379 path = "";
3380 break;
3381 case LTTNG_DST_PATH:
3382 /* Very volatile pointer. Only used for the create session. */
3383 path = ctrl_uri->dst.path;
3384 strncpy(consumer->dst.trace_path, path,
3385 sizeof(consumer->dst.trace_path));
3386 break;
3387 }
3388
3389 /* Set if the consumer is enabled or not */
3390 consumer->enabled = enable_consumer;
3391
3392 ret = session_create(name, path, LTTNG_SOCK_GET_UID_CRED(creds),
3393 LTTNG_SOCK_GET_GID_CRED(creds));
3394 if (ret != LTTCOMM_OK) {
3395 goto consumer_error;
3396 }
3397
3398 /* Get the newly created session pointer back */
3399 session = session_find_by_name(name);
3400 assert(session);
3401
3402 /* Assign consumer to session */
3403 session->consumer = consumer;
3404
3405 return LTTCOMM_OK;
3406
3407 consumer_error:
3408 consumer_destroy_output(consumer);
3409 error:
3410 return ret;
3411 }
3412
3413 /*
3414 * Command LTTNG_CREATE_SESSION processed by the client thread.
3415 */
3416 static int cmd_create_session(char *name, char *path, lttng_sock_cred *creds)
3417 {
3418 int ret;
3419 struct lttng_uri uri;
3420
3421 /* Zeroed temporary URI */
3422 memset(&uri, 0, sizeof(uri));
3423
3424 uri.dtype = LTTNG_DST_PATH;
3425 uri.utype = LTTNG_URI_DST;
3426 strncpy(uri.dst.path, path, sizeof(uri.dst.path));
3427
3428 /* TODO: Strip date-time from path and put it in uri's subdir */
3429
3430 ret = cmd_create_session_uri(name, &uri, NULL, 1, creds);
3431 if (ret != LTTCOMM_OK) {
3432 goto error;
3433 }
3434
3435 error:
3436 return ret;
3437 }
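
/*
 * For comparison, a network session is created through
 * cmd_create_session_uri() with a control and a data URI. Minimal sketch
 * with hypothetical values; the relayd address fields are elided since only
 * the dtype and subdir handling matters here, and "creds" stands for the
 * credentials received from the client socket:
 *
 *	struct lttng_uri ctrl, data;
 *
 *	memset(&ctrl, 0, sizeof(ctrl));
 *	memset(&data, 0, sizeof(data));
 *	ctrl.dtype = data.dtype = LTTNG_DST_IPV4;
 *	strncpy(ctrl.subdir, "mysess-subdir", sizeof(ctrl.subdir) - 1);
 *	ret = cmd_create_session_uri("mysess", &ctrl, &data, 1, creds);
 */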
3438
3439 /*
3440 * Command LTTNG_DESTROY_SESSION processed by the client thread.
3441 */
3442 static int cmd_destroy_session(struct ltt_session *session, char *name)
3443 {
3444 int ret;
3445
3446 /* Clean kernel session teardown */
3447 teardown_kernel_session(session);
3448 /* UST session teardown */
3449 teardown_ust_session(session);
3450
3451 /*
3452 * Must notify the kernel thread here to update its poll set in order
3453 * to remove the fds of the channel(s) just destroyed.
3454 */
3455 ret = notify_thread_pipe(kernel_poll_pipe[1]);
3456 if (ret < 0) {
3457 PERROR("write kernel poll pipe");
3458 }
3459
3460 ret = session_destroy(session);
3461
3462 return ret;
3463 }
3464
3465 /*
3466 * Command LTTNG_CALIBRATE processed by the client thread.
3467 */
3468 static int cmd_calibrate(int domain, struct lttng_calibrate *calibrate)
3469 {
3470 int ret;
3471
3472 switch (domain) {
3473 case LTTNG_DOMAIN_KERNEL:
3474 {
3475 struct lttng_kernel_calibrate kcalibrate;
3476
3477 kcalibrate.type = calibrate->type;
3478 ret = kernel_calibrate(kernel_tracer_fd, &kcalibrate);
3479 if (ret < 0) {
3480 ret = LTTCOMM_KERN_ENABLE_FAIL;
3481 goto error;
3482 }
3483 break;
3484 }
3485 case LTTNG_DOMAIN_UST:
3486 {
3487 struct lttng_ust_calibrate ucalibrate;
3488
3489 ucalibrate.type = calibrate->type;
3490 ret = ust_app_calibrate_glb(&ucalibrate);
3491 if (ret < 0) {
3492 ret = LTTCOMM_UST_CALIBRATE_FAIL;
3493 goto error;
3494 }
3495 break;
3496 }
3497 default:
3498 ret = LTTCOMM_UND;
3499 goto error;
3500 }
3501
3502 ret = LTTCOMM_OK;
3503
3504 error:
3505 return ret;
3506 }
3507
3508 /*
3509 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
3510 */
3511 static int cmd_register_consumer(struct ltt_session *session, int domain,
3512 char *sock_path)
3513 {
3514 int ret, sock;
3515
3516 switch (domain) {
3517 case LTTNG_DOMAIN_KERNEL:
3518 /* Can't register a consumer if there is already one */
3519 if (session->kernel_session->consumer_fds_sent != 0) {
3520 ret = LTTCOMM_KERN_CONSUMER_FAIL;
3521 goto error;
3522 }
3523
3524 sock = lttcomm_connect_unix_sock(sock_path);
3525 if (sock < 0) {
3526 ret = LTTCOMM_CONNECT_FAIL;
3527 goto error;
3528 }
3529
3530 session->kernel_session->consumer_fd = sock;
3531 break;
3532 default:
3533 /* TODO: Userspace tracing */
3534 ret = LTTCOMM_UND;
3535 goto error;
3536 }
3537
3538 ret = LTTCOMM_OK;
3539
3540 error:
3541 return ret;
3542 }
3543
3544 /*
3545 * Command LTTNG_LIST_DOMAINS processed by the client thread.
3546 */
3547 static ssize_t cmd_list_domains(struct ltt_session *session,
3548 struct lttng_domain **domains)
3549 {
3550 int ret, index = 0;
3551 ssize_t nb_dom = 0;
3552
3553 if (session->kernel_session != NULL) {
3554 DBG3("Listing domains found kernel domain");
3555 nb_dom++;
3556 }
3557
3558 if (session->ust_session != NULL) {
3559 DBG3("Listing domains found UST global domain");
3560 nb_dom++;
3561 }
3562
3563 *domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
3564 if (*domains == NULL) {
3565 ret = -LTTCOMM_FATAL;
3566 goto error;
3567 }
3568
3569 if (session->kernel_session != NULL) {
3570 (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
3571 index++;
3572 }
3573
3574 if (session->ust_session != NULL) {
3575 (*domains)[index].type = LTTNG_DOMAIN_UST;
3576 index++;
3577 }
3578
3579 return nb_dom;
3580
3581 error:
3582 return ret;
3583 }
3584
3585 /*
3586 * Command LTTNG_LIST_CHANNELS processed by the client thread.
3587 */
3588 static ssize_t cmd_list_channels(int domain, struct ltt_session *session,
3589 struct lttng_channel **channels)
3590 {
3591 int ret;
3592 ssize_t nb_chan = 0;
3593
3594 switch (domain) {
3595 case LTTNG_DOMAIN_KERNEL:
3596 if (session->kernel_session != NULL) {
3597 nb_chan = session->kernel_session->channel_count;
3598 }
3599 DBG3("Number of kernel channels %zd", nb_chan);
3600 break;
3601 case LTTNG_DOMAIN_UST:
3602 if (session->ust_session != NULL) {
3603 nb_chan = lttng_ht_get_count(
3604 session->ust_session->domain_global.channels);
3605 }
3606 DBG3("Number of UST global channels %zd", nb_chan);
3607 break;
3608 default:
3609 *channels = NULL;
3610 ret = -LTTCOMM_UND;
3611 goto error;
3612 }
3613
3614 if (nb_chan > 0) {
3615 *channels = zmalloc(nb_chan * sizeof(struct lttng_channel));
3616 if (*channels == NULL) {
3617 ret = -LTTCOMM_FATAL;
3618 goto error;
3619 }
3620
3621 list_lttng_channels(domain, session, *channels);
3622 } else {
3623 *channels = NULL;
3624 }
3625
3626 return nb_chan;
3627
3628 error:
3629 return ret;
3630 }
3631
3632 /*
3633 * Command LTTNG_LIST_EVENTS processed by the client thread.
3634 */
3635 static ssize_t cmd_list_events(int domain, struct ltt_session *session,
3636 char *channel_name, struct lttng_event **events)
3637 {
3638 int ret = 0;
3639 ssize_t nb_event = 0;
3640
3641 switch (domain) {
3642 case LTTNG_DOMAIN_KERNEL:
3643 if (session->kernel_session != NULL) {
3644 nb_event = list_lttng_kernel_events(channel_name,
3645 session->kernel_session, events);
3646 }
3647 break;
3648 case LTTNG_DOMAIN_UST:
3649 {
3650 if (session->ust_session != NULL) {
3651 nb_event = list_lttng_ust_global_events(channel_name,
3652 &session->ust_session->domain_global, events);
3653 }
3654 break;
3655 }
3656 default:
3657 ret = -LTTCOMM_UND;
3658 goto error;
3659 }
3660
3661 ret = nb_event;
3662
3663 error:
3664 return ret;
3665 }
3666
3667 /*
3668 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
3669 */
3670 static int cmd_set_consumer_uri(int domain, struct ltt_session *session,
3671 struct lttng_uri *uri)
3672 {
3673 int ret;
3674 struct ltt_kernel_session *ksess = session->kernel_session;
3675 struct ltt_ust_session *usess = session->ust_session;
3676 struct consumer_output *consumer;
3677
3678 /* Can't enable consumer after session started. */
3679 if (session->enabled) {
3680 ret = LTTCOMM_TRACE_ALREADY_STARTED;
3681 goto error;
3682 }
3683
3684 switch (domain) {
3685 case LTTNG_DOMAIN_KERNEL:
3686 /* Code flow error if we don't have a kernel session here. */
3687 assert(ksess);
3688
3689 /* Create consumer output if none exists */
3690 consumer = ksess->tmp_consumer;
3691 if (consumer == NULL) {
3692 consumer = consumer_copy_output(ksess->consumer);
3693 if (consumer == NULL) {
3694 ret = LTTCOMM_FATAL;
3695 goto error;
3696 }
3697 /* Reassign new pointer */
3698 ksess->tmp_consumer = consumer;
3699 }
3700
3701 switch (uri->dtype) {
3702 case LTTNG_DST_IPV4:
3703 case LTTNG_DST_IPV6:
3704 DBG2("Setting network URI for kernel session %s", session->name);
3705
3706 /* Set URI into consumer output object */
3707 ret = consumer_set_network_uri(consumer, uri);
3708 if (ret < 0) {
3709 ret = LTTCOMM_FATAL;
3710 goto error;
3711 }
3712
3713 /* On a new subdir, reappend the default trace dir. */
3714 if (strlen(uri->subdir) != 0) {
3715 strncat(consumer->subdir, DEFAULT_KERNEL_TRACE_DIR,
3716 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
3717 }
3718
3719 ret = send_socket_relayd_consumer(domain, session, uri, consumer,
3720 ksess->consumer_fd);
3721 if (ret != LTTCOMM_OK) {
3722 goto error;
3723 }
3724 break;
3725 case LTTNG_DST_PATH:
3726 DBG2("Setting trace directory path from URI to %s", uri->dst.path);
3727 memset(consumer->dst.trace_path, 0,
3728 sizeof(consumer->dst.trace_path));
3729 strncpy(consumer->dst.trace_path, uri->dst.path,
3730 sizeof(consumer->dst.trace_path));
3731 /* Append default kernel trace dir */
3732 strncat(consumer->dst.trace_path, DEFAULT_KERNEL_TRACE_DIR,
3733 sizeof(consumer->dst.trace_path) - strlen(consumer->dst.trace_path) - 1);
3734 break;
3735 }
3736
3737 /* All good! */
3738 break;
3739 case LTTNG_DOMAIN_UST:
3740 /* Code flow error if we don't have a UST session here. */
3741 assert(usess);
3742
3743 /* Create consumer output if none exists */
3744 consumer = usess->tmp_consumer;
3745 if (consumer == NULL) {
3746 consumer = consumer_copy_output(usess->consumer);
3747 if (consumer == NULL) {
3748 ret = LTTCOMM_FATAL;
3749 goto error;
3750 }
3751 /* Reassign new pointer */
3752 usess->tmp_consumer = consumer;
3753 }
3754
3755 switch (uri->dtype) {
3756 case LTTNG_DST_IPV4:
3757 case LTTNG_DST_IPV6:
3758 {
3759 DBG2("Setting network URI for UST session %s", session->name);
3760
3761 /* Set URI into consumer object */
3762 ret = consumer_set_network_uri(consumer, uri);
3763 if (ret < 0) {
3764 ret = LTTCOMM_FATAL;
3765 goto error;
3766 }
3767
3768 /* On a new subdir, reappend the default trace dir. */
3769 if (strlen(uri->subdir) != 0) {
3770 strncat(consumer->subdir, DEFAULT_UST_TRACE_DIR,
3771 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
3772 }
3773
3774 if (ust_consumerd64_fd >= 0) {
3775 ret = send_socket_relayd_consumer(domain, session, uri,
3776 consumer, ust_consumerd64_fd);
3777 if (ret != LTTCOMM_OK) {
3778 goto error;
3779 }
3780 }
3781
3782 if (ust_consumerd32_fd >= 0) {
3783 ret = send_socket_relayd_consumer(domain, session, uri,
3784 consumer, ust_consumerd32_fd);
3785 if (ret != LTTCOMM_OK) {
3786 goto error;
3787 }
3788 }
3789
3790 break;
3791 }
3792 case LTTNG_DST_PATH:
3793 DBG2("Setting trace directory path from URI to %s", uri->dst.path);
3794 memset(consumer->dst.trace_path, 0,
3795 sizeof(consumer->dst.trace_path));
3796 strncpy(consumer->dst.trace_path, uri->dst.path,
3797 sizeof(consumer->dst.trace_path));
3798 /* Append default UST trace dir */
3799 strncat(consumer->dst.trace_path, DEFAULT_UST_TRACE_DIR,
3800 sizeof(consumer->dst.trace_path) - strlen(consumer->dst.trace_path) - 1);
3801 break;
3802 }
3803 break;
3804 }
3805
3806 /* All good! */
3807 ret = LTTCOMM_OK;
3808
3809 error:
3810 return ret;
3811 }
3812
3813 /*
3814 * Command LTTNG_DISABLE_CONSUMER processed by the client thread.
3815 */
3816 static int cmd_disable_consumer(int domain, struct ltt_session *session)
3817 {
3818 int ret;
3819 struct ltt_kernel_session *ksess = session->kernel_session;
3820 struct ltt_ust_session *usess = session->ust_session;
3821 struct consumer_output *consumer;
3822
3823 if (session->enabled) {
3824 /* Can't disable consumer on an already started session */
3825 ret = LTTCOMM_TRACE_ALREADY_STARTED;
3826 goto error;
3827 }
3828
3829 switch (domain) {
3830 case LTTNG_DOMAIN_KERNEL:
3831 /* Code flow error if we don't have a kernel session here. */
3832 assert(ksess);
3833
3834 DBG("Disabling kernel consumer");
3835 consumer = ksess->consumer;
3836
3837 break;
3838 case LTTNG_DOMAIN_UST:
3839 /* Code flow error if we don't have a UST session here. */
3840 assert(usess);
3841
3842 DBG("Disabling UST consumer");
3843 consumer = usess->consumer;
3844
3845 break;
3846 default:
3847 ret = LTTCOMM_UNKNOWN_DOMAIN;
3848 goto error;
3849 }
3850
3851 assert(consumer);
3852 consumer->enabled = 0;
3853
3854 /* Success at this point */
3855 ret = LTTCOMM_OK;
3856
3857 error:
3858 return ret;
3859 }
3860
3861 /*
3862 * Command LTTNG_ENABLE_CONSUMER processed by the client thread.
3863 */
3864 static int cmd_enable_consumer(int domain, struct ltt_session *session)
3865 {
3866 int ret;
3867 struct ltt_kernel_session *ksess = session->kernel_session;
3868 struct ltt_ust_session *usess = session->ust_session;
3869 struct consumer_output *tmp_out;
3870
3871 /* Can't enable consumer after session started. */
3872 if (session->enabled) {
3873 ret = LTTCOMM_TRACE_ALREADY_STARTED;
3874 goto error;
3875 }
3876
3877 switch (domain) {
3878 case LTTNG_DOMAIN_KERNEL:
3879 /* Code flow error if we don't have a kernel session here. */
3880 assert(ksess);
3881
3882 /*
3883 * Check if we have already sent fds to the consumer. In that case,
3884 * the enable-consumer command can't be used because a start trace
3885 * had previously occurred.
3886 */
3887 if (ksess->consumer_fds_sent) {
3888 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
3889 goto error;
3890 }
3891
3892 tmp_out = ksess->tmp_consumer;
3893 if (tmp_out == NULL) {
3894 /* No temp. consumer output exists. Using the current one. */
3895 DBG3("No temporary consumer. Using default");
3896 ret = LTTCOMM_OK;
3897 goto error;
3898 }
3899
3900 switch (tmp_out->type) {
3901 case CONSUMER_DST_LOCAL:
3902 DBG2("Consumer output is local. Creating directory(ies)");
3903
3904 /* Create directory(ies) */
3905 ret = run_as_mkdir_recursive(tmp_out->dst.trace_path,
3906 S_IRWXU | S_IRWXG, session->uid, session->gid);
3907 if (ret < 0) {
3908 if (ret != -EEXIST) {
3909 ERR("Trace directory creation error");
3910 ret = LTTCOMM_FATAL;
3911 goto error;
3912 }
3913 }
3914 break;
3915 case CONSUMER_DST_NET:
3916 DBG2("Consumer output is network. Validating URIs");
3917 /* Validate if we have both control and data path set. */
3918 if (!tmp_out->dst.net.control_isset) {
3919 ret = LTTCOMM_URI_CTRL_MISS;
3920 goto error;
3921 }
3922
3923 if (!tmp_out->dst.net.data_isset) {
3924 ret = LTTCOMM_URI_DATA_MISS;
3925 goto error;
3926 }
3927
3928 /* Check established network session state */
3929 if (session->net_handle == 0) {
3930 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
3931 ERR("Session network handle is not set on enable-consumer");
3932 goto error;
3933 }
3934
3935 /* Append default kernel trace dir to subdir */
3936 strncat(ksess->consumer->subdir, DEFAULT_KERNEL_TRACE_DIR,
3937 sizeof(ksess->consumer->subdir) - strlen(ksess->consumer->subdir) - 1);
3938
3939 break;
3940 }
3941
3942 /*
3943 * @session-lock
3944 * This is race free for now since the session lock is acquired before
3945 * ending up in this function. No other threads can access this kernel
3946 * session without this lock hence freeing the consumer output object
3947 * is valid.
3948 */
3949 consumer_destroy_output(ksess->consumer);
3950 ksess->consumer = tmp_out;
3951 ksess->tmp_consumer = NULL;
3952
3953 break;
3954 case LTTNG_DOMAIN_UST:
3955 /* Code flow error if we don't have a UST session here. */
3956 assert(usess);
3957
3958 /*
3959 * Check if we have already sent fds to the consumer. In that case,
3960 * the enable-consumer command can't be used because a start trace
3961 * had previously occurred.
3962 */
3963 if (usess->start_trace) {
3964 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
3965 goto error;
3966 }
3967
3968 tmp_out = usess->tmp_consumer;
3969 if (tmp_out == NULL) {
3970 /* No temp. consumer output exists. Using the current one. */
3971 DBG3("No temporary consumer. Using default");
3972 ret = LTTCOMM_OK;
3973 goto error;
3974 }
3975
3976 switch (tmp_out->type) {
3977 case CONSUMER_DST_LOCAL:
3978 DBG2("Consumer output is local. Creating directory(ies)");
3979
3980 /* Create directory(ies) */
3981 ret = run_as_mkdir_recursive(tmp_out->dst.trace_path,
3982 S_IRWXU | S_IRWXG, session->uid, session->gid);
3983 if (ret < 0) {
3984 if (ret != -EEXIST) {
3985 ERR("Trace directory creation error");
3986 ret = LTTCOMM_FATAL;
3987 goto error;
3988 }
3989 }
3990 break;
3991 case CONSUMER_DST_NET:
3992 DBG2("Consumer output is network. Validating URIs");
3993 /* Validate if we have both control and data path set. */
3994 if (!tmp_out->dst.net.control_isset) {
3995 ret = LTTCOMM_URI_CTRL_MISS;
3996 goto error;
3997 }
3998
3999 if (!tmp_out->dst.net.data_isset) {
4000 ret = LTTCOMM_URI_DATA_MISS;
4001 goto error;
4002 }
4003
4004 /* Check established network session state */
4005 if (session->net_handle == 0) {
4006 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
4007 DBG2("Session network handle is not set on enable-consumer");
4008 goto error;
4009 }
4010
4011 if (tmp_out->net_seq_index == -1) {
4012 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
4013 DBG2("Network index is not set on the consumer");
4014 goto error;
4015 }
4016
4017 /* Append default UST trace dir to subdir */
4018 strncat(usess->consumer->subdir, DEFAULT_UST_TRACE_DIR,
4019 sizeof(usess->consumer->subdir) - strlen(usess->consumer->subdir) - 1);
4020
4021 break;
4022 }
4023
4024 /*
4025 * @session-lock
4026 * This is race free for now since the session lock is acquired before
4027 * ending up in this function. No other threads can access this UST
4028 * session without this lock hence freeing the consumer output object
4029 * is valid.
4030 */
4031 consumer_destroy_output(usess->consumer);
4032 usess->consumer = tmp_out;
4033 usess->tmp_consumer = NULL;
4034
4035 break;
4036 }
4037
4038 /* Success at this point */
4039 ret = LTTCOMM_OK;
4040
4041 error:
4042 return ret;
4043 }
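
/*
 * Rough lifecycle of the temporary consumer output handled by
 * cmd_set_consumer_uri() and cmd_enable_consumer() above (descriptive
 * summary, not additional code):
 *
 *	LTTNG_SET_CONSUMER_URI  -> cmd_set_consumer_uri() copies the session's
 *	                           consumer output into tmp_consumer and applies
 *	                           the new URI (network or path) to that copy.
 *	LTTNG_ENABLE_CONSUMER   -> cmd_enable_consumer() validates the copy
 *	                           (local directories created, or both control
 *	                           and data URIs set), then swaps it in with
 *	                           consumer_destroy_output() on the old output
 *	                           and clears tmp_consumer.
 *
 * Until LTTNG_ENABLE_CONSUMER succeeds, the session keeps using its original
 * consumer output.
 */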
4044
4045 /*
4046 * Process the command requested by the lttng client within the command
4047 * context structure. This function makes sure that the return structure (llm)
4048 * is set and ready for transmission before returning.
4049 *
4050 * Return any error encountered or 0 for success.
4051 *
4052 * "sock" is only used for special-case var. len data.
4053 */
4054 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
4055 int *sock_error)
4056 {
4057 int ret = LTTCOMM_OK;
4058 int need_tracing_session = 1;
4059 int need_domain;
4060
4061 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
4062
4063 *sock_error = 0;
4064
4065 switch (cmd_ctx->lsm->cmd_type) {
4066 case LTTNG_CREATE_SESSION:
4067 case LTTNG_CREATE_SESSION_URI:
4068 case LTTNG_DESTROY_SESSION:
4069 case LTTNG_LIST_SESSIONS:
4070 case LTTNG_LIST_DOMAINS:
4071 case LTTNG_START_TRACE:
4072 case LTTNG_STOP_TRACE:
4073 need_domain = 0;
4074 break;
4075 default:
4076 need_domain = 1;
4077 }
4078
4079 if (opt_no_kernel && need_domain
4080 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
4081 if (!is_root) {
4082 ret = LTTCOMM_NEED_ROOT_SESSIOND;
4083 } else {
4084 ret = LTTCOMM_KERN_NA;
4085 }
4086 goto error;
4087 }
4088
4089 /*
4090 * Check for commands that don't need to allocate a returned payload. We do
4091 * this here so we don't have to make the call for no payload at each
4092 * command.
4093 */
4094 switch(cmd_ctx->lsm->cmd_type) {
4095 case LTTNG_LIST_SESSIONS:
4096 case LTTNG_LIST_TRACEPOINTS:
4097 case LTTNG_LIST_TRACEPOINT_FIELDS:
4098 case LTTNG_LIST_DOMAINS:
4099 case LTTNG_LIST_CHANNELS:
4100 case LTTNG_LIST_EVENTS:
4101 break;
4102 default:
4103 /* Setup lttng message with no payload */
4104 ret = setup_lttng_msg(cmd_ctx, 0);
4105 if (ret < 0) {
4106 /* This label does not try to unlock the session */
4107 goto init_setup_error;
4108 }
4109 }
4110
4111 /* Commands that DO NOT need a session. */
4112 switch (cmd_ctx->lsm->cmd_type) {
4113 case LTTNG_CREATE_SESSION:
4114 case LTTNG_CREATE_SESSION_URI:
4115 case LTTNG_CALIBRATE:
4116 case LTTNG_LIST_SESSIONS:
4117 case LTTNG_LIST_TRACEPOINTS:
4118 case LTTNG_LIST_TRACEPOINT_FIELDS:
4119 need_tracing_session = 0;
4120 break;
4121 default:
4122 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
4123 /*
4124 * We keep the session list lock across _all_ commands
4125 * for now, because the per-session lock does not
4126 * handle teardown properly.
4127 */
4128 session_lock_list();
4129 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
4130 if (cmd_ctx->session == NULL) {
4131 if (cmd_ctx->lsm->session.name != NULL) {
4132 ret = LTTCOMM_SESS_NOT_FOUND;
4133 } else {
4134 /* If no session name specified */
4135 ret = LTTCOMM_SELECT_SESS;
4136 }
4137 goto error;
4138 } else {
4139 /* Acquire lock for the session */
4140 session_lock(cmd_ctx->session);
4141 }
4142 break;
4143 }
4144
4145 if (!need_domain) {
4146 goto skip_domain;
4147 }
4148 /*
4149 * Check domain type for specific "pre-action".
4150 */
4151 switch (cmd_ctx->lsm->domain.type) {
4152 case LTTNG_DOMAIN_KERNEL:
4153 if (!is_root) {
4154 ret = LTTCOMM_NEED_ROOT_SESSIOND;
4155 goto error;
4156 }
4157
4158 /* Kernel tracer check */
4159 if (kernel_tracer_fd == -1) {
4160 /* Basically, load kernel tracer modules */
4161 ret = init_kernel_tracer();
4162 if (ret != 0) {
4163 goto error;
4164 }
4165 }
4166
4167 /* Consumer is in an ERROR state. Report back to client */
4168 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
4169 ret = LTTCOMM_NO_KERNCONSUMERD;
4170 goto error;
4171 }
4172
4173 /* Need a session for kernel command */
4174 if (need_tracing_session) {
4175 if (cmd_ctx->session->kernel_session == NULL) {
4176 ret = create_kernel_session(cmd_ctx->session);
4177 if (ret < 0) {
4178 ret = LTTCOMM_KERN_SESS_FAIL;
4179 goto error;
4180 }
4181 }
4182
4183 /* Start the kernel consumer daemon */
4184 pthread_mutex_lock(&kconsumer_data.pid_mutex);
4185 if (kconsumer_data.pid == 0 &&
4186 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
4187 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
4188 ret = start_consumerd(&kconsumer_data);
4189 if (ret < 0) {
4190 ret = LTTCOMM_KERN_CONSUMER_FAIL;
4191 goto error;
4192 }
4193 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
4194
4195 /* Set consumer fd of the session */
4196 cmd_ctx->session->kernel_session->consumer_fd =
4197 kconsumer_data.cmd_sock;
4198 } else {
4199 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
4200 }
4201 }
4202
4203 break;
4204 case LTTNG_DOMAIN_UST:
4205 {
4206 /* Consumer is in an ERROR state. Report back to client */
4207 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
4208 ret = LTTCOMM_NO_USTCONSUMERD;
4209 goto error;
4210 }
4211
4212 if (need_tracing_session) {
4213 if (cmd_ctx->session->ust_session == NULL) {
4214 ret = create_ust_session(cmd_ctx->session,
4215 &cmd_ctx->lsm->domain);
4216 if (ret != LTTCOMM_OK) {
4217 goto error;
4218 }
4219 }
4220
4221 /* Start the UST consumer daemons */
4222 /* 64-bit */
4223 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
4224 if (consumerd64_bin[0] != '\0' &&
4225 ustconsumer64_data.pid == 0 &&
4226 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
4227 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
4228 ret = start_consumerd(&ustconsumer64_data);
4229 if (ret < 0) {
4230 ret = LTTCOMM_UST_CONSUMER64_FAIL;
4231 ust_consumerd64_fd = -EINVAL;
4232 goto error;
4233 }
4234
4235 ust_consumerd64_fd = ustconsumer64_data.cmd_sock;
4236 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
4237 } else {
4238 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
4239 }
4240 /* 32-bit */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
4241 if (consumerd32_bin[0] != '\0' &&
4242 ustconsumer32_data.pid == 0 &&
4243 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
4244 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
4245 ret = start_consumerd(&ustconsumer32_data);
4246 if (ret < 0) {
4247 ret = LTTCOMM_UST_CONSUMER32_FAIL;
4248 ust_consumerd32_fd = -EINVAL;
4249 goto error;
4250 }
4251
4252 ust_consumerd32_fd = ustconsumer32_data.cmd_sock;
4253 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
4254 } else {
4255 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
4256 }
4257 }
4258 break;
4259 }
4260 default:
4261 break;
4262 }
4263 skip_domain:
4264
4265 /* Validate consumer daemon state when start/stop trace command */
4266 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
4267 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
4268 switch (cmd_ctx->lsm->domain.type) {
4269 case LTTNG_DOMAIN_UST:
4270 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
4271 ret = LTTCOMM_NO_USTCONSUMERD;
4272 goto error;
4273 }
4274 break;
4275 case LTTNG_DOMAIN_KERNEL:
4276 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
4277 ret = LTTCOMM_NO_KERNCONSUMERD;
4278 goto error;
4279 }
4280 break;
4281 }
4282 }
4283
4284 /*
4285 * Check that the UID or GID match that of the tracing session.
4286 * The root user can interact with all sessions.
4287 */
4288 if (need_tracing_session) {
4289 if (!session_access_ok(cmd_ctx->session,
4290 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
4291 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
4292 ret = LTTCOMM_EPERM;
4293 goto error;
4294 }
4295 }
4296
4297 /* Process by command type */
4298 switch (cmd_ctx->lsm->cmd_type) {
4299 case LTTNG_ADD_CONTEXT:
4300 {
4301 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4302 cmd_ctx->lsm->u.context.channel_name,
4303 cmd_ctx->lsm->u.context.event_name,
4304 &cmd_ctx->lsm->u.context.ctx);
4305 break;
4306 }
4307 case LTTNG_DISABLE_CHANNEL:
4308 {
4309 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4310 cmd_ctx->lsm->u.disable.channel_name);
4311 break;
4312 }
4313 case LTTNG_DISABLE_EVENT:
4314 {
4315 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4316 cmd_ctx->lsm->u.disable.channel_name,
4317 cmd_ctx->lsm->u.disable.name);
4318 break;
4319 }
4320 case LTTNG_DISABLE_ALL_EVENT:
4321 {
4322 DBG("Disabling all events");
4323
4324 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4325 cmd_ctx->lsm->u.disable.channel_name);
4326 break;
4327 }
4328 case LTTNG_DISABLE_CONSUMER:
4329 {
4330 ret = cmd_disable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
4331 break;
4332 }
4333 case LTTNG_ENABLE_CHANNEL:
4334 {
4335 ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4336 &cmd_ctx->lsm->u.channel.chan);
4337 break;
4338 }
4339 case LTTNG_ENABLE_CONSUMER:
4340 {
4341 ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
4342 break;
4343 }
4344 case LTTNG_ENABLE_EVENT:
4345 {
4346 ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4347 cmd_ctx->lsm->u.enable.channel_name,
4348 &cmd_ctx->lsm->u.enable.event);
4349 break;
4350 }
4351 case LTTNG_ENABLE_ALL_EVENT:
4352 {
4353 DBG("Enabling all events");
4354
4355 ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4356 cmd_ctx->lsm->u.enable.channel_name,
4357 cmd_ctx->lsm->u.enable.event.type);
4358 break;
4359 }
4360 case LTTNG_LIST_TRACEPOINTS:
4361 {
4362 struct lttng_event *events;
4363 ssize_t nb_events;
4364
4365 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
4366 if (nb_events < 0) {
4367 ret = -nb_events;
4368 goto error;
4369 }
4370
4371 /*
4372 * Setup lttng message with payload size set to the event list size in
4373 * bytes and then copy list into the llm payload.
4374 */
4375 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
4376 if (ret < 0) {
4377 free(events);
4378 goto setup_error;
4379 }
4380
4381 /* Copy event list into message payload */
4382 memcpy(cmd_ctx->llm->payload, events,
4383 sizeof(struct lttng_event) * nb_events);
4384
4385 free(events);
4386
4387 ret = LTTCOMM_OK;
4388 break;
4389 }
4390 case LTTNG_LIST_TRACEPOINT_FIELDS:
4391 {
4392 struct lttng_event_field *fields;
4393 ssize_t nb_fields;
4394
4395 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type, &fields);
4396 if (nb_fields < 0) {
4397 ret = -nb_fields;
4398 goto error;
4399 }
4400
4401 /*
4402 * Setup lttng message with payload size set to the field list size in
4403 * bytes and then copy list into the llm payload.
4404 */
4405 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event_field) * nb_fields);
4406 if (ret < 0) {
4407 free(fields);
4408 goto setup_error;
4409 }
4410
4411 /* Copy field list into message payload */
4412 memcpy(cmd_ctx->llm->payload, fields,
4413 sizeof(struct lttng_event_field) * nb_fields);
4414
4415 free(fields);
4416
4417 ret = LTTCOMM_OK;
4418 break;
4419 }
4420 case LTTNG_SET_CONSUMER_URI:
4421 {
4422 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
4423 &cmd_ctx->lsm->u.uri);
4424 break;
4425 }
4426 case LTTNG_START_TRACE:
4427 {
4428 ret = cmd_start_trace(cmd_ctx->session);
4429 break;
4430 }
4431 case LTTNG_STOP_TRACE:
4432 {
4433 ret = cmd_stop_trace(cmd_ctx->session);
4434 break;
4435 }
4436 case LTTNG_CREATE_SESSION:
4437 {
4438 ret = cmd_create_session(cmd_ctx->lsm->session.name,
4439 cmd_ctx->lsm->session.path, &cmd_ctx->creds);
4440 break;
4441 }
4442 case LTTNG_CREATE_SESSION_URI:
4443 {
4444 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name,
4445 &cmd_ctx->lsm->u.create_uri.ctrl_uri,
4446 &cmd_ctx->lsm->u.create_uri.data_uri,
4447 cmd_ctx->lsm->u.create_uri.enable_consumer, &cmd_ctx->creds);
4448 break;
4449 }
4450 case LTTNG_DESTROY_SESSION:
4451 {
4452 ret = cmd_destroy_session(cmd_ctx->session,
4453 cmd_ctx->lsm->session.name);
4454 /*
4455 * Set session to NULL so we do not unlock it after
4456 * free.
4457 */
4458 cmd_ctx->session = NULL;
4459 break;
4460 }
4461 case LTTNG_LIST_DOMAINS:
4462 {
4463 ssize_t nb_dom;
4464 struct lttng_domain *domains;
4465
4466 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
4467 if (nb_dom < 0) {
4468 ret = -nb_dom;
4469 goto error;
4470 }
4471
4472 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
4473 if (ret < 0) {
4474 goto setup_error;
4475 }
4476
4477 /* Copy domain list into message payload */
4478 memcpy(cmd_ctx->llm->payload, domains,
4479 nb_dom * sizeof(struct lttng_domain));
4480
4481 free(domains);
4482
4483 ret = LTTCOMM_OK;
4484 break;
4485 }
4486 case LTTNG_LIST_CHANNELS:
4487 {
4488 int nb_chan;
4489 struct lttng_channel *channels;
4490
4491 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
4492 cmd_ctx->session, &channels);
4493 if (nb_chan < 0) {
4494 ret = -nb_chan;
4495 goto error;
4496 }
4497
4498 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
4499 if (ret < 0) {
4500 goto setup_error;
4501 }
4502
4503 /* Copy channel list into message payload */
4504 memcpy(cmd_ctx->llm->payload, channels,
4505 nb_chan * sizeof(struct lttng_channel));
4506
4507 free(channels);
4508
4509 ret = LTTCOMM_OK;
4510 break;
4511 }
4512 case LTTNG_LIST_EVENTS:
4513 {
4514 ssize_t nb_event;
4515 struct lttng_event *events = NULL;
4516
4517 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
4518 cmd_ctx->lsm->u.list.channel_name, &events);
4519 if (nb_event < 0) {
4520 ret = -nb_event;
4521 goto error;
4522 }
4523
4524 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
4525 if (ret < 0) {
4526 goto setup_error;
4527 }
4528
4529 /* Copy event list into message payload */
4530 memcpy(cmd_ctx->llm->payload, events,
4531 nb_event * sizeof(struct lttng_event));
4532
4533 free(events);
4534
4535 ret = LTTCOMM_OK;
4536 break;
4537 }
4538 case LTTNG_LIST_SESSIONS:
4539 {
4540 unsigned int nr_sessions;
4541
4542 session_lock_list();
4543 nr_sessions = lttng_sessions_count(
4544 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
4545 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
4546
4547 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
4548 if (ret < 0) {
4549 session_unlock_list();
4550 goto setup_error;
4551 }
4552
4553 /* Fill the session array */
4554 list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
4555 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
4556 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
4557
4558 session_unlock_list();
4559
4560 ret = LTTCOMM_OK;
4561 break;
4562 }
4563 case LTTNG_CALIBRATE:
4564 {
4565 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
4566 &cmd_ctx->lsm->u.calibrate);
4567 break;
4568 }
4569 case LTTNG_REGISTER_CONSUMER:
4570 {
4571 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4572 cmd_ctx->lsm->u.reg.path);
4573 break;
4574 }
4575 case LTTNG_SET_FILTER:
4576 {
4577 struct lttng_filter_bytecode *bytecode;
4578
4579 if (cmd_ctx->lsm->u.filter.bytecode_len > 65536) {
4580 ret = LTTCOMM_FILTER_INVAL;
4581 goto error;
4582 }
4583 bytecode = zmalloc(cmd_ctx->lsm->u.filter.bytecode_len);
4584 if (!bytecode) {
4585 ret = LTTCOMM_FILTER_NOMEM;
4586 goto error;
4587 }
4588 /* Receive var. len. data */
4589 DBG("Receiving var len data from client ...");
4590 ret = lttcomm_recv_unix_sock(sock, bytecode,
4591 cmd_ctx->lsm->u.filter.bytecode_len);
4592 if (ret <= 0) {
4593 DBG("Nothing recv() from client var len data... continuing");
4594 *sock_error = 1;
free(bytecode);
4595 ret = LTTCOMM_FILTER_INVAL;
4596 goto error;
4597 }
4598
4599 if (bytecode->len + sizeof(*bytecode)
4600 != cmd_ctx->lsm->u.filter.bytecode_len) {
4601 free(bytecode);
4602 ret = LTTCOMM_FILTER_INVAL;
4603 goto error;
4604 }
4605
4606 ret = cmd_set_filter(cmd_ctx->session, cmd_ctx->lsm->domain.type,
4607 cmd_ctx->lsm->u.filter.channel_name,
4608 cmd_ctx->lsm->u.filter.event_name,
4609 bytecode);
4610 break;
4611 }
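/*
 * A minimal sketch, not part of the original source, of the framing the
 * length check above expects from the sending side: the fixed-size lsm
 * header announces the total bytecode size, and the variable-length payload
 * is the struct lttng_filter_bytecode itself, whose len field only counts
 * the bytes following its fixed header. The "bc" variable and the plain
 * send() calls are illustrative.
 *
 *	struct lttng_filter_bytecode *bc = ...;		// bc->len already set
 *	lsm.u.filter.bytecode_len = sizeof(*bc) + bc->len;
 *	send(sock, &lsm, sizeof(lsm), 0);		// command header
 *	send(sock, bc, lsm.u.filter.bytecode_len, 0);	// variable-length part
 */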
4612 default:
4613 ret = LTTCOMM_UND;
4614 break;
4615 }
4616
4617 error:
4618 if (cmd_ctx->llm == NULL) {
4619 DBG("Missing llm structure. Allocating one.");
4620 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
4621 goto setup_error;
4622 }
4623 }
4624 /* Set return code */
4625 cmd_ctx->llm->ret_code = ret;
4626 setup_error:
4627 if (cmd_ctx->session) {
4628 session_unlock(cmd_ctx->session);
4629 }
4630 if (need_tracing_session) {
4631 session_unlock_list();
4632 }
4633 init_setup_error:
4634 return ret;
4635 }
4636
4637 /*
4638 * Thread managing health check socket.
4639 */
4640 static void *thread_manage_health(void *data)
4641 {
4642 int sock = -1, new_sock, ret, i, pollfd;
4643 uint32_t revents, nb_fd;
4644 struct lttng_poll_event events;
4645 struct lttcomm_health_msg msg;
4646 struct lttcomm_health_data reply;
4647
4648 DBG("[thread] Manage health check started");
4649
4650 rcu_register_thread();
4651
4652 /* Create unix socket */
4653 sock = lttcomm_create_unix_sock(health_unix_sock_path);
4654 if (sock < 0) {
4655 ERR("Unable to create health check Unix socket");
4656 ret = -1;
4657 goto error;
4658 }
4659
4660 ret = lttcomm_listen_unix_sock(sock);
4661 if (ret < 0) {
4662 goto error;
4663 }
4664
4665 /*
4666 * Pass 2 as size here for the thread quit pipe and the health socket.
4667 * Nothing more will be added to this poll set.
4668 */
4669 ret = create_thread_poll_set(&events, 2);
4670 if (ret < 0) {
4671 goto error;
4672 }
4673
4674 /* Add the health check socket to the poll set */
4675 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
4676 if (ret < 0) {
4677 goto error;
4678 }
4679
4680 while (1) {
4681 DBG("Health check ready");
4682
4683 nb_fd = LTTNG_POLL_GETNB(&events);
4684
4685 /* Infinite blocking call, waiting for transmission */
4686 restart:
4687 ret = lttng_poll_wait(&events, -1);
4688 if (ret < 0) {
4689 /*
4690 * Restart interrupted system call.
4691 */
4692 if (errno == EINTR) {
4693 goto restart;
4694 }
4695 goto error;
4696 }
4697
4698 for (i = 0; i < nb_fd; i++) {
4699 /* Fetch once the poll data */
4700 revents = LTTNG_POLL_GETEV(&events, i);
4701 pollfd = LTTNG_POLL_GETFD(&events, i);
4702
4703 /* Thread quit pipe has been closed. Killing thread. */
4704 ret = check_thread_quit_pipe(pollfd, revents);
4705 if (ret) {
4706 goto error;
4707 }
4708
4709 /* Event on the registration socket */
4710 if (pollfd == sock) {
4711 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
4712 ERR("Health socket poll error");
4713 goto error;
4714 }
4715 }
4716 }
4717
4718 new_sock = lttcomm_accept_unix_sock(sock);
4719 if (new_sock < 0) {
4720 goto error;
4721 }
4722
4723 DBG("Receiving data from client for health...");
4724 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
4725 if (ret <= 0) {
4726 DBG("Nothing recv() from client... continuing");
4727 ret = close(new_sock);
4728 if (ret) {
4729 PERROR("close");
4730 }
4731 new_sock = -1;
4732 continue;
4733 }
4734
4735 rcu_thread_online();
4736
4737 switch (msg.component) {
4738 case LTTNG_HEALTH_CMD:
4739 reply.ret_code = health_check_state(&health_thread_cmd);
4740 break;
4741 case LTTNG_HEALTH_APP_REG:
4742 reply.ret_code = health_check_state(&health_thread_app_reg);
4743 break;
4744 case LTTNG_HEALTH_KERNEL:
4745 reply.ret_code = health_check_state(&health_thread_kernel);
4746 break;
4747 case LTTNG_HEALTH_CONSUMER:
4748 reply.ret_code = check_consumer_health();
4749 break;
4750 case LTTNG_HEALTH_ALL:
4751 ret = check_consumer_health();
4752
4753 reply.ret_code =
4754 health_check_state(&health_thread_app_reg) &
4755 health_check_state(&health_thread_cmd) &
4756 health_check_state(&health_thread_kernel) &
4757 ret;
4758 break;
4759 default:
4760 reply.ret_code = LTTCOMM_UND;
4761 break;
4762 }
4763
4764 /*
4765 * Flip the value: for the client, 0 means good health and 1 means bad
4766 * health, which is the opposite of the convention used inside the
4767 * sessiond. This simply keeps the client-facing return code intuitive
4768 * and easy to check.
4769 */
4770 if (reply.ret_code == 0 || reply.ret_code == 1) {
4771 reply.ret_code = !reply.ret_code;
4772 }
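/*
 * For example, health_check_state() returning 1 (the thread is making
 * progress) is reported to the client as 0, and 0 (bad health) is reported
 * as 1, so the client can treat the reply like a classic "0 on success"
 * return code.
 */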
4773
4774 DBG2("Health check return value %d", reply.ret_code);
4775
4776 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
4777 if (ret < 0) {
4778 ERR("Failed to send health data back to client");
4779 }
4780
4781 /* End of transmission */
4782 ret = close(new_sock);
4783 if (ret) {
4784 PERROR("close");
4785 }
4786 new_sock = -1;
4787 }
4788
4789 error:
4790 DBG("Health check thread dying");
4791 unlink(health_unix_sock_path);
4792 if (sock >= 0) {
4793 ret = close(sock);
4794 if (ret) {
4795 PERROR("close");
4796 }
4797 }
4798 if (new_sock >= 0) {
4799 ret = close(new_sock);
4800 if (ret) {
4801 PERROR("close");
4802 }
4803 }
4804
4805 lttng_poll_clean(&events);
4806
4807 rcu_unregister_thread();
4808 return NULL;
4809 }
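/*
 * A minimal sketch, not part of the original source, of how an external
 * client could query the health socket served by the thread above. It only
 * relies on the lttcomm_health_msg / lttcomm_health_data layout used here;
 * the helper name and the plain connect()/write()/read() calls are
 * illustrative (needs <sys/socket.h>, <sys/un.h>, <string.h>, <unistd.h>).
 *
 *	static int query_sessiond_health(const char *path, int component)
 *	{
 *		int fd, ret = -1;
 *		struct sockaddr_un sun;
 *		struct lttcomm_health_msg msg;
 *		struct lttcomm_health_data reply;
 *
 *		fd = socket(PF_UNIX, SOCK_STREAM, 0);
 *		if (fd < 0) {
 *			return -1;
 *		}
 *		memset(&sun, 0, sizeof(sun));
 *		sun.sun_family = AF_UNIX;
 *		strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);
 *		if (connect(fd, (struct sockaddr *) &sun, sizeof(sun)) == 0) {
 *			memset(&msg, 0, sizeof(msg));
 *			msg.component = component;	// e.g. LTTNG_HEALTH_CMD
 *			if (write(fd, &msg, sizeof(msg)) == sizeof(msg) &&
 *					read(fd, &reply, sizeof(reply)) == sizeof(reply)) {
 *				// After the flip above: 0 = healthy, 1 = bad health.
 *				ret = (int) reply.ret_code;
 *			}
 *		}
 *		close(fd);
 *		return ret;
 *	}
 */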
4810
4811 /*
4812 * This thread manages all client requests using the client unix socket
4813 * for communication.
4814 */
4815 static void *thread_manage_clients(void *data)
4816 {
4817 int sock = -1, ret, i, pollfd;
4818 int sock_error;
4819 uint32_t revents, nb_fd;
4820 struct command_ctx *cmd_ctx = NULL;
4821 struct lttng_poll_event events;
4822
4823 DBG("[thread] Manage client started");
4824
4825 rcu_register_thread();
4826
4827 health_code_update(&health_thread_cmd);
4828
4829 ret = lttcomm_listen_unix_sock(client_sock);
4830 if (ret < 0) {
4831 goto error;
4832 }
4833
4834 /*
4835 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
4836 * more will be added to this poll set.
4837 */
4838 ret = create_thread_poll_set(&events, 2);
4839 if (ret < 0) {
4840 goto error;
4841 }
4842
4843 /* Add the client socket to the poll set */
4844 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
4845 if (ret < 0) {
4846 goto error;
4847 }
4848
4849 /*
4850 * Notify the parent pid that we are ready to accept client commands.
4851 */
4852 if (opt_sig_parent) {
4853 kill(ppid, SIGUSR1);
4854 }
4855
4856 health_code_update(&health_thread_cmd);
4857
4858 while (1) {
4859 DBG("Accepting client command ...");
4860
4861 nb_fd = LTTNG_POLL_GETNB(&events);
4862
4863 /* Infinite blocking call, waiting for transmission */
4864 restart:
4865 health_poll_update(&health_thread_cmd);
4866 ret = lttng_poll_wait(&events, -1);
4867 health_poll_update(&health_thread_cmd);
4868 if (ret < 0) {
4869 /*
4870 * Restart interrupted system call.
4871 */
4872 if (errno == EINTR) {
4873 goto restart;
4874 }
4875 goto error;
4876 }
4877
4878 for (i = 0; i < nb_fd; i++) {
4879 /* Fetch once the poll data */
4880 revents = LTTNG_POLL_GETEV(&events, i);
4881 pollfd = LTTNG_POLL_GETFD(&events, i);
4882
4883 health_code_update(&health_thread_cmd);
4884
4885 /* Thread quit pipe has been closed. Killing thread. */
4886 ret = check_thread_quit_pipe(pollfd, revents);
4887 if (ret) {
4888 goto error;
4889 }
4890
4891 /* Event on the registration socket */
4892 if (pollfd == client_sock) {
4893 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
4894 ERR("Client socket poll error");
4895 goto error;
4896 }
4897 }
4898 }
4899
4900 DBG("Wait for client response");
4901
4902 health_code_update(&health_thread_cmd);
4903
4904 sock = lttcomm_accept_unix_sock(client_sock);
4905 if (sock < 0) {
4906 goto error;
4907 }
4908
4909 /* Set socket option for credentials retrieval */
4910 ret = lttcomm_setsockopt_creds_unix_sock(sock);
4911 if (ret < 0) {
4912 goto error;
4913 }
4914
4915 /* Allocate context command to process the client request */
4916 cmd_ctx = zmalloc(sizeof(struct command_ctx));
4917 if (cmd_ctx == NULL) {
4918 PERROR("zmalloc cmd_ctx");
4919 goto error;
4920 }
4921
4922 /* Allocate data buffer for reception */
4923 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
4924 if (cmd_ctx->lsm == NULL) {
4925 PERROR("zmalloc cmd_ctx->lsm");
4926 goto error;
4927 }
4928
4929 cmd_ctx->llm = NULL;
4930 cmd_ctx->session = NULL;
4931
4932 health_code_update(&health_thread_cmd);
4933
4934 /*
4935 * Data is received from the lttng client. The struct
4936 * lttcomm_session_msg (lsm) contains the command and data request of
4937 * the client.
4938 */
4939 DBG("Receiving data from client ...");
4940 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
4941 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
4942 if (ret <= 0) {
4943 DBG("Nothing recv() from client... continuing");
4944 ret = close(sock);
4945 if (ret) {
4946 PERROR("close");
4947 }
4948 sock = -1;
4949 clean_command_ctx(&cmd_ctx);
4950 continue;
4951 }
4952
4953 health_code_update(&health_thread_cmd);
4954
4955 // TODO: Validate cmd_ctx including sanity check for
4956 // security purpose.
4957
4958 rcu_thread_online();
4959 /*
4960 * This function dispatches the work to the kernel or userspace tracer
4961 * libs and fills the lttcomm_lttng_msg data structure with all the
4962 * information needed by the client. The command context struct contains
4963 * everything this function may need.
4964 */
4965 ret = process_client_msg(cmd_ctx, sock, &sock_error);
4966 rcu_thread_offline();
4967 if (ret < 0) {
4968 if (sock_error) {
4969 ret = close(sock);
4970 if (ret) {
4971 PERROR("close");
4972 }
4973 sock = -1;
4974 }
4975 /*
4976 * TODO: Inform client somehow of the fatal error. At
4977 * this point, ret < 0 means that a zmalloc failed
4978 * (ENOMEM). Error detected but still accept
4979 * command, unless a socket error has been
4980 * detected.
4981 */
4982 clean_command_ctx(&cmd_ctx);
4983 continue;
4984 }
4985
4986 health_code_update(&health_thread_cmd);
4987
4988 DBG("Sending response (size: %d, retcode: %s)",
4989 cmd_ctx->lttng_msg_size,
4990 lttng_strerror(-cmd_ctx->llm->ret_code));
4991 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
4992 if (ret < 0) {
4993 ERR("Failed to send data back to client");
4994 }
4995
4996 /* End of transmission */
4997 ret = close(sock);
4998 if (ret) {
4999 PERROR("close");
5000 }
5001 sock = -1;
5002
5003 clean_command_ctx(&cmd_ctx);
5004
5005 health_code_update(&health_thread_cmd);
5006 }
5007
5008 error:
5009 health_reset(&health_thread_cmd);
5010
5011 DBG("Client thread dying");
5012 unlink(client_unix_sock_path);
5013 if (client_sock >= 0) {
5014 ret = close(client_sock);
5015 if (ret) {
5016 PERROR("close");
5017 }
5018 }
5019 if (sock >= 0) {
5020 ret = close(sock);
5021 if (ret) {
5022 PERROR("close");
5023 }
5024 }
5025
5026 lttng_poll_clean(&events);
5027 clean_command_ctx(&cmd_ctx);
5028
5029 rcu_unregister_thread();
5030 return NULL;
5031 }
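/*
 * A rough sketch, not from the original source, of the wire exchange this
 * thread implements as seen from a client (in practice liblttng-ctl does
 * this work): connect to client_unix_sock_path, send one fixed-size struct
 * lttcomm_session_msg, then read back a struct lttcomm_lttng_msg header
 * followed by its payload. Credentials travel implicitly through the
 * credentials socket option enabled above. The buffer size and the plain
 * write()/read() calls are illustrative.
 *
 *	struct lttcomm_session_msg lsm;
 *	char buf[4096];
 *	ssize_t len;
 *
 *	memset(&lsm, 0, sizeof(lsm));
 *	lsm.cmd_type = LTTNG_LIST_SESSIONS;
 *	write(fd, &lsm, sizeof(lsm));
 *	len = read(fd, buf, sizeof(buf));	// llm header + session array
 */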
5032
5033
5034 /*
5035 * usage function on stderr
5036 */
5037 static void usage(void)
5038 {
5039 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
5040 fprintf(stderr, " -h, --help Display this usage.\n");
5041 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
5042 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
5043 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
5044 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
5045 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
5046 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
5047 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
5048 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
5049 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
5050 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
5051 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
5052 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
5053 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
5054 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
5055 fprintf(stderr, " -V, --version Show version number.\n");
5056 fprintf(stderr, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
5057 fprintf(stderr, " -q, --quiet No output at all.\n");
5058 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
5059 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
5060 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
5061 }
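/*
 * Illustrative invocations only, using the options documented above:
 *
 *	# Run in the foreground, very verbose, without the kernel tracer:
 *	lttng-sessiond -vvv --no-kernel
 *
 *	# Daemonize as root with a custom tracing group:
 *	lttng-sessiond --daemonize --group=mytracers
 */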
5062
5063 /*
5064 * daemon argument parsing
5065 */
5066 static int parse_args(int argc, char **argv)
5067 {
5068 int c;
5069
5070 static struct option long_options[] = {
5071 { "client-sock", 1, 0, 'c' },
5072 { "apps-sock", 1, 0, 'a' },
5073 { "kconsumerd-cmd-sock", 1, 0, 'C' },
5074 { "kconsumerd-err-sock", 1, 0, 'E' },
5075 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
5076 { "ustconsumerd32-err-sock", 1, 0, 'H' },
5077 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
5078 { "ustconsumerd64-err-sock", 1, 0, 'F' },
5079 { "consumerd32-path", 1, 0, 'u' },
5080 { "consumerd32-libdir", 1, 0, 'U' },
5081 { "consumerd64-path", 1, 0, 't' },
5082 { "consumerd64-libdir", 1, 0, 'T' },
5083 { "daemonize", 0, 0, 'd' },
5084 { "sig-parent", 0, 0, 'S' },
5085 { "help", 0, 0, 'h' },
5086 { "group", 1, 0, 'g' },
5087 { "version", 0, 0, 'V' },
5088 { "quiet", 0, 0, 'q' },
5089 { "verbose", 0, 0, 'v' },
5090 { "verbose-consumer", 0, 0, 'Z' },
5091 { "no-kernel", 0, 0, 'N' },
5092 { NULL, 0, 0, 0 }
5093 };
5094
5095 while (1) {
5096 int option_index = 0;
5097 c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:",
5098 long_options, &option_index);
5099 if (c == -1) {
5100 break;
5101 }
5102
5103 switch (c) {
5104 case 0:
5105 fprintf(stderr, "option %s", long_options[option_index].name);
5106 if (optarg) {
5107 fprintf(stderr, " with arg %s\n", optarg);
5108 }
5109 break;
5110 case 'c':
5111 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
5112 break;
5113 case 'a':
5114 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
5115 break;
5116 case 'd':
5117 opt_daemon = 1;
5118 break;
5119 case 'g':
5120 opt_tracing_group = optarg;
5121 break;
5122 case 'h':
5123 usage();
5124 exit(EXIT_FAILURE);
5125 case 'V':
5126 fprintf(stdout, "%s\n", VERSION);
5127 exit(EXIT_SUCCESS);
5128 case 'S':
5129 opt_sig_parent = 1;
5130 break;
5131 case 'E':
5132 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
5133 break;
5134 case 'C':
5135 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
5136 break;
5137 case 'F':
5138 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
5139 break;
5140 case 'D':
5141 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
5142 break;
5143 case 'H':
5144 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
5145 break;
5146 case 'G':
5147 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
5148 break;
5149 case 'N':
5150 opt_no_kernel = 1;
5151 break;
5152 case 'q':
5153 lttng_opt_quiet = 1;
5154 break;
5155 case 'v':
5156 /* Verbose level can increase using multiple -v */
5157 lttng_opt_verbose += 1;
5158 break;
5159 case 'Z':
5160 opt_verbose_consumer += 1;
5161 break;
5162 case 'u':
5163 consumerd32_bin = optarg;
5164 break;
5165 case 'U':
5166 consumerd32_libdir = optarg;
5167 break;
5168 case 't':
5169 consumerd64_bin = optarg;
5170 break;
5171 case 'T':
5172 consumerd64_libdir = optarg;
5173 break;
5174 default:
5175 /* Unknown option or other error.
5176 * Error is printed by getopt, just return */
5177 return -1;
5178 }
5179 }
5180
5181 return 0;
5182 }
5183
5184 /*
5185 * Creates the two sockets needed by the daemon.
5186 * apps_sock - The communication socket for all UST apps.
5187 * client_sock - The communication socket for the lttng CLI tool.
5188 */
5189 static int init_daemon_socket(void)
5190 {
5191 int ret = 0;
5192 mode_t old_umask;
5193
5194 old_umask = umask(0);
5195
5196 /* Create client tool unix socket */
5197 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
5198 if (client_sock < 0) {
5199 ERR("Create unix sock failed: %s", client_unix_sock_path);
5200 ret = -1;
5201 goto end;
5202 }
5203
5204 /* File permission MUST be 660 */
5205 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
5206 if (ret < 0) {
5207 ERR("Set file permissions failed: %s", client_unix_sock_path);
5208 PERROR("chmod");
5209 goto end;
5210 }
5211
5212 /* Create the application unix socket */
5213 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
5214 if (apps_sock < 0) {
5215 ERR("Create unix sock failed: %s", apps_unix_sock_path);
5216 ret = -1;
5217 goto end;
5218 }
5219
5220 /* File permission MUST be 666 */
5221 ret = chmod(apps_unix_sock_path,
5222 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
5223 if (ret < 0) {
5224 ERR("Set file permissions failed: %s", apps_unix_sock_path);
5225 PERROR("chmod");
5226 goto end;
5227 }
5228
5229 end:
5230 umask(old_umask);
5231 return ret;
5232 }
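/*
 * For reference, the symbolic modes used above map to the octal values named
 * in the comments: S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP is 0660 (client
 * socket) and adding S_IROTH | S_IWOTH gives 0666 (application socket), so
 * chmod(client_unix_sock_path, 0660) would be an equivalent call.
 */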
5233
5234 /*
5235 * Check if the global socket is available and if a daemon is answering on
5236 * the other side. If yes, an error is returned.
5237 */
5238 static int check_existing_daemon(void)
5239 {
5240 /* Is there anybody out there ? */
5241 if (lttng_session_daemon_alive()) {
5242 return -EEXIST;
5243 }
5244
5245 return 0;
5246 }
5247
5248 /*
5249 * Set the tracing group gid on the run directory and the daemon sockets.
5250 *
5251 * Race window between mkdir and chown is OK because we are going from more
5252 * permissive (root.root) to less permissive (root.tracing).
5253 */
5254 static int set_permissions(char *rundir)
5255 {
5256 int ret;
5257 gid_t gid;
5258
5259 ret = allowed_group();
5260 if (ret < 0) {
5261 WARN("No tracing group detected");
5262 ret = 0;
5263 goto end;
5264 }
5265
5266 gid = ret;
5267
5268 /* Set lttng run dir */
5269 ret = chown(rundir, 0, gid);
5270 if (ret < 0) {
5271 ERR("Unable to set group on %s", rundir);
5272 PERROR("chown");
5273 }
5274
5275 /* Ensure tracing group can search the run dir */
5276 ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
5277 if (ret < 0) {
5278 ERR("Unable to set permissions on %s", rundir);
5279 PERROR("chmod");
5280 }
5281
5282 /* lttng client socket path */
5283 ret = chown(client_unix_sock_path, 0, gid);
5284 if (ret < 0) {
5285 ERR("Unable to set group on %s", client_unix_sock_path);
5286 PERROR("chown");
5287 }
5288
5289 /* kconsumer error socket path */
5290 ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
5291 if (ret < 0) {
5292 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
5293 PERROR("chown");
5294 }
5295
5296 /* 64-bit ustconsumer error socket path */
5297 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
5298 if (ret < 0) {
5299 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
5300 PERROR("chown");
5301 }
5302
5303 /* 32-bit ustconsumer compat32 error socket path */
5304 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
5305 if (ret < 0) {
5306 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
5307 PERROR("chown");
5308 }
5309
5310 DBG("All permissions are set");
5311
5312 end:
5313 return ret;
5314 }
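/*
 * Roughly speaking, and assuming the default "tracing" group, the calls above
 * amount to the following commands run as root ($rundir and
 * $client_unix_sock_path stand for the corresponding variables):
 *
 *	chown root:tracing "$rundir" && chmod 0711 "$rundir"
 *	chown root:tracing "$client_unix_sock_path"
 *	chown root:tracing <each consumer error socket path>
 */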
5315
5316 /*
5317 * Create the lttng run directory needed for all global sockets and pipe.
5318 */
5319 static int create_lttng_rundir(const char *rundir)
5320 {
5321 int ret;
5322
5323 DBG3("Creating LTTng run directory: %s", rundir);
5324
5325 ret = mkdir(rundir, S_IRWXU);
5326 if (ret < 0) {
5327 if (errno != EEXIST) {
5328 ERR("Unable to create %s", rundir);
5329 goto error;
5330 } else {
5331 ret = 0;
5332 }
5333 }
5334
5335 error:
5336 return ret;
5337 }
5338
5339 /*
5340 * Set up the error socket and directory needed for a consumer daemon
5341 * (kernel or UST) to communicate with the session daemon.
5342 */
5343 static int set_consumer_sockets(struct consumer_data *consumer_data,
5344 const char *rundir)
5345 {
5346 int ret;
5347 char path[PATH_MAX];
5348
5349 switch (consumer_data->type) {
5350 case LTTNG_CONSUMER_KERNEL:
5351 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
5352 break;
5353 case LTTNG_CONSUMER64_UST:
5354 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
5355 break;
5356 case LTTNG_CONSUMER32_UST:
5357 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
5358 break;
5359 default:
5360 ERR("Consumer type unknown");
5361 ret = -EINVAL;
5362 goto error;
5363 }
5364
5365 DBG2("Creating consumer directory: %s", path);
5366
5367 ret = mkdir(path, S_IRWXU);
5368 if (ret < 0) {
5369 if (errno != EEXIST) {
5370 PERROR("mkdir");
5371 ERR("Failed to create %s", path);
5372 goto error;
5373 }
5374 ret = 0;
5375 }
5376
5377 /* Create the consumer daemon error unix socket */
5378 consumer_data->err_sock =
5379 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
5380 if (consumer_data->err_sock < 0) {
5381 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
5382 ret = -1;
5383 goto error;
5384 }
5385
5386 /* File permission MUST be 660 */
5387 ret = chmod(consumer_data->err_unix_sock_path,
5388 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
5389 if (ret < 0) {
5390 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
5391 PERROR("chmod");
5392 goto error;
5393 }
5394
5395 error:
5396 return ret;
5397 }
5398
5399 /*
5400 * Signal handler for the daemon
5401 *
5402 * Simply stop all worker threads, letting main() return gracefully after
5403 * joining all threads and calling cleanup().
5404 */
5405 static void sighandler(int sig)
5406 {
5407 switch (sig) {
5408 case SIGPIPE:
5409 DBG("SIGPIPE caught");
5410 return;
5411 case SIGINT:
5412 DBG("SIGINT caught");
5413 stop_threads();
5414 break;
5415 case SIGTERM:
5416 DBG("SIGTERM caught");
5417 stop_threads();
5418 break;
5419 default:
5420 break;
5421 }
5422 }
5423
5424 /*
5425 * Setup signal handler for :
5426 * SIGINT, SIGTERM, SIGPIPE
5427 */
5428 static int set_signal_handler(void)
5429 {
5430 int ret = 0;
5431 struct sigaction sa;
5432 sigset_t sigset;
5433
5434 if ((ret = sigemptyset(&sigset)) < 0) {
5435 PERROR("sigemptyset");
5436 return ret;
5437 }
5438
5439 sa.sa_handler = sighandler;
5440 sa.sa_mask = sigset;
5441 sa.sa_flags = 0;
5442 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
5443 PERROR("sigaction");
5444 return ret;
5445 }
5446
5447 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
5448 PERROR("sigaction");
5449 return ret;
5450 }
5451
5452 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
5453 PERROR("sigaction");
5454 return ret;
5455 }
5456
5457 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
5458
5459 return ret;
5460 }
5461
5462 /*
5463 * Raise the open files limit. This daemon can open a large number of
5464 * file descriptors in order to consume multiple kernel traces.
5465 */
5466 static void set_ulimit(void)
5467 {
5468 int ret;
5469 struct rlimit lim;
5470
5471 /* The kernel does not allow an infinite limit for open files */
5472 lim.rlim_cur = 65535;
5473 lim.rlim_max = 65535;
5474
5475 ret = setrlimit(RLIMIT_NOFILE, &lim);
5476 if (ret < 0) {
5477 PERROR("failed to set open files limit");
5478 }
5479 }
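/*
 * The new limit can be verified from inside the daemon with
 * getrlimit(RLIMIT_NOFILE, &lim) or, from a shell spawned by it, with
 * "ulimit -n"; both should report 65535 after a successful setrlimit().
 */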
5480
5481 /*
5482 * main
5483 */
5484 int main(int argc, char **argv)
5485 {
5486 int ret = 0;
5487 void *status;
5488 const char *home_path;
5489
5490 init_kernel_workarounds();
5491
5492 rcu_register_thread();
5493
5494 setup_consumerd_path();
5495
5496 /* Parse arguments */
5497 progname = argv[0];
5498 if ((ret = parse_args(argc, argv)) < 0) {
5499 goto error;
5500 }
5501
5502 /* Daemonize */
5503 if (opt_daemon) {
5504 int i;
5505
5506 /*
5507 * fork
5508 * child: setsid, close FD 0, 1, 2, chdir /
5509 * parent: exit (if fork is successful)
5510 */
5511 ret = daemon(0, 0);
5512 if (ret < 0) {
5513 PERROR("daemon");
5514 goto error;
5515 }
5516 /*
5517 * We are in the child. Make sure all other file
5518 * descriptors are closed, in case we are called with
5519 * more opened file descriptors than the standard ones.
5520 */
5521 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
5522 (void) close(i);
5523 }
5524 }
5525
5526 /* Create thread quit pipe */
5527 if ((ret = init_thread_quit_pipe()) < 0) {
5528 goto error;
5529 }
5530
5531 /* Check if daemon is UID = 0 */
5532 is_root = !getuid();
5533
5534 if (is_root) {
5535 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
5536
5537 /* Create global run dir with root access */
5538 ret = create_lttng_rundir(rundir);
5539 if (ret < 0) {
5540 goto error;
5541 }
5542
5543 if (strlen(apps_unix_sock_path) == 0) {
5544 snprintf(apps_unix_sock_path, PATH_MAX,
5545 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
5546 }
5547
5548 if (strlen(client_unix_sock_path) == 0) {
5549 snprintf(client_unix_sock_path, PATH_MAX,
5550 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
5551 }
5552
5553 /* Set global SHM for ust */
5554 if (strlen(wait_shm_path) == 0) {
5555 snprintf(wait_shm_path, PATH_MAX,
5556 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
5557 }
5558
5559 if (strlen(health_unix_sock_path) == 0) {
5560 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
5561 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
5562 }
5563
5564 /* Setup kernel consumerd path */
5565 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
5566 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
5567 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
5568 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
5569
5570 DBG2("Kernel consumer err path: %s",
5571 kconsumer_data.err_unix_sock_path);
5572 DBG2("Kernel consumer cmd path: %s",
5573 kconsumer_data.cmd_unix_sock_path);
5574 } else {
5575 home_path = get_home_dir();
5576 if (home_path == NULL) {
5577 /* TODO: Add --socket PATH option */
5578 ERR("Can't get HOME directory for sockets creation.");
5579 ret = -EPERM;
5580 goto error;
5581 }
5582
5583 /*
5584 * Create rundir from home path. This will create something like
5585 * $HOME/.lttng
5586 */
5587 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
5588 if (ret < 0) {
5589 ret = -ENOMEM;
5590 goto error;
5591 }
5592
5593 ret = create_lttng_rundir(rundir);
5594 if (ret < 0) {
5595 goto error;
5596 }
5597
5598 if (strlen(apps_unix_sock_path) == 0) {
5599 snprintf(apps_unix_sock_path, PATH_MAX,
5600 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
5601 }
5602
5603 /* Set the cli tool unix socket path */
5604 if (strlen(client_unix_sock_path) == 0) {
5605 snprintf(client_unix_sock_path, PATH_MAX,
5606 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
5607 }
5608
5609 /* Set global SHM for ust */
5610 if (strlen(wait_shm_path) == 0) {
5611 snprintf(wait_shm_path, PATH_MAX,
5612 DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
5613 }
5614
5615 /* Set health check Unix path */
5616 if (strlen(health_unix_sock_path) == 0) {
5617 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
5618 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
5619 }
5620 }
5621
5622 /* Set consumer initial state */
5623 kernel_consumerd_state = CONSUMER_STOPPED;
5624 ust_consumerd_state = CONSUMER_STOPPED;
5625
5626 DBG("Client socket path %s", client_unix_sock_path);
5627 DBG("Application socket path %s", apps_unix_sock_path);
5628 DBG("LTTng run directory path: %s", rundir);
5629
5630 /* 32 bits consumerd path setup */
5631 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
5632 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
5633 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
5634 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
5635
5636 DBG2("UST consumer 32 bits err path: %s",
5637 ustconsumer32_data.err_unix_sock_path);
5638 DBG2("UST consumer 32 bits cmd path: %s",
5639 ustconsumer32_data.cmd_unix_sock_path);
5640
5641 /* 64 bits consumerd path setup */
5642 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
5643 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
5644 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
5645 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
5646
5647 DBG2("UST consumer 64 bits err path: %s",
5648 ustconsumer64_data.err_unix_sock_path);
5649 DBG2("UST consumer 64 bits cmd path: %s",
5650 ustconsumer64_data.cmd_unix_sock_path);
5651
5652 /*
5653 * See if a daemon is already running.
5654 */
5655 if ((ret = check_existing_daemon()) < 0) {
5656 ERR("A session daemon is already running.");
5657 /*
5658 * Do not goto exit; we must not call cleanup() since a
5659 * daemon is already running.
5660 */
5661 goto error;
5662 }
5663
5664 /*
5665 * Init UST app hash table. The hash table must be allocated before this
5666 * point since cleanup() can get called at any time after it.
5667 */
5668 ust_app_ht_alloc();
5669
5670 /* After this point, we can safely call cleanup() with "goto exit" */
5671
5672 /*
5673 * These actions must be executed as root. We do that *after* setting up
5674 * the sockets path because we MUST make the check for another daemon using
5675 * those paths *before* trying to set the kernel consumer sockets and init
5676 * kernel tracer.
5677 */
5678 if (is_root) {
5679 ret = set_consumer_sockets(&kconsumer_data, rundir);
5680 if (ret < 0) {
5681 goto exit;
5682 }
5683
5684 /* Setup kernel tracer */
5685 if (!opt_no_kernel) {
5686 init_kernel_tracer();
5687 }
5688
5689 /* Set ulimit for open files */
5690 set_ulimit();
5691 }
5692 /* init lttng_fd tracking must be done after set_ulimit. */
5693 lttng_fd_init();
5694
5695 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
5696 if (ret < 0) {
5697 goto exit;
5698 }
5699
5700 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
5701 if (ret < 0) {
5702 goto exit;
5703 }
5704
5705 if ((ret = set_signal_handler()) < 0) {
5706 goto exit;
5707 }
5708
5709 /* Setup the needed unix socket */
5710 if ((ret = init_daemon_socket()) < 0) {
5711 goto exit;
5712 }
5713
5714 /* Set credentials to socket */
5715 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
5716 goto exit;
5717 }
5718
5719 /* Get parent pid if -S, --sig-parent is specified. */
5720 if (opt_sig_parent) {
5721 ppid = getppid();
5722 }
5723
5724 /* Setup the kernel pipe for waking up the kernel thread */
5725 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
5726 goto exit;
5727 }
5728
5729 /* Setup the thread apps communication pipe. */
5730 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
5731 goto exit;
5732 }
5733
5734 /* Init UST command queue. */
5735 cds_wfq_init(&ust_cmd_queue.queue);
5736
5737 /*
5738 * Get session list pointer. This pointer MUST NOT be free(). This list is
5739 * statically declared in session.c
5740 */
5741 session_list_ptr = session_get_list();
5742
5743 /* Set up max poll set size */
5744 lttng_poll_set_max_size();
5745
5746 /*
5747 * Set network sequence index to 1 for streams to match a relayd socket on
5748 * the consumer side.
5749 */
5750 uatomic_set(&relayd_net_seq_idx, 1);
5751
5752 /* Init all health thread counters. */
5753 health_init(&health_thread_cmd);
5754 health_init(&health_thread_kernel);
5755 health_init(&health_thread_app_reg);
5756
5757 /*
5758 * Init health counters of the consumer threads. We do a quick hack here so
5759 * that the consumer health state reads as fine even if the thread is not
5760 * started. This simply eases our life and has no cost whatsoever.
5761 */
5762 health_init(&kconsumer_data.health);
5763 health_poll_update(&kconsumer_data.health);
5764 health_init(&ustconsumer32_data.health);
5765 health_poll_update(&ustconsumer32_data.health);
5766 health_init(&ustconsumer64_data.health);
5767 health_poll_update(&ustconsumer64_data.health);
5768
5769 /* Create thread to manage the health check socket */
5770 ret = pthread_create(&health_thread, NULL,
5771 thread_manage_health, (void *) NULL);
5772 if (ret != 0) {
5773 PERROR("pthread_create health");
5774 goto exit_health;
5775 }
5776
5777 /* Create thread to manage the client socket */
5778 ret = pthread_create(&client_thread, NULL,
5779 thread_manage_clients, (void *) NULL);
5780 if (ret != 0) {
5781 PERROR("pthread_create clients");
5782 goto exit_client;
5783 }
5784
5785 /* Create thread to dispatch registration */
5786 ret = pthread_create(&dispatch_thread, NULL,
5787 thread_dispatch_ust_registration, (void *) NULL);
5788 if (ret != 0) {
5789 PERROR("pthread_create dispatch");
5790 goto exit_dispatch;
5791 }
5792
5793 /* Create thread to manage application registration. */
5794 ret = pthread_create(&reg_apps_thread, NULL,
5795 thread_registration_apps, (void *) NULL);
5796 if (ret != 0) {
5797 PERROR("pthread_create registration");
5798 goto exit_reg_apps;
5799 }
5800
5801 /* Create thread to manage application socket */
5802 ret = pthread_create(&apps_thread, NULL,
5803 thread_manage_apps, (void *) NULL);
5804 if (ret != 0) {
5805 PERROR("pthread_create apps");
5806 goto exit_apps;
5807 }
5808
5809 /* Create kernel thread to manage kernel event */
5810 ret = pthread_create(&kernel_thread, NULL,
5811 thread_manage_kernel, (void *) NULL);
5812 if (ret != 0) {
5813 PERROR("pthread_create kernel");
5814 goto exit_kernel;
5815 }
5816
5817 ret = pthread_join(kernel_thread, &status);
5818 if (ret != 0) {
5819 PERROR("pthread_join");
5820 goto error; /* join error, exit without cleanup */
5821 }
5822
5823 exit_kernel:
5824 ret = pthread_join(apps_thread, &status);
5825 if (ret != 0) {
5826 PERROR("pthread_join");
5827 goto error; /* join error, exit without cleanup */
5828 }
5829
5830 exit_apps:
5831 ret = pthread_join(reg_apps_thread, &status);
5832 if (ret != 0) {
5833 PERROR("pthread_join");
5834 goto error; /* join error, exit without cleanup */
5835 }
5836
5837 exit_reg_apps:
5838 ret = pthread_join(dispatch_thread, &status);
5839 if (ret != 0) {
5840 PERROR("pthread_join");
5841 goto error; /* join error, exit without cleanup */
5842 }
5843
5844 exit_dispatch:
5845 ret = pthread_join(client_thread, &status);
5846 if (ret != 0) {
5847 PERROR("pthread_join");
5848 goto error; /* join error, exit without cleanup */
5849 }
5850
5851 ret = join_consumer_thread(&kconsumer_data);
5852 if (ret != 0) {
5853 PERROR("join_consumer");
5854 goto error; /* join error, exit without cleanup */
5855 }
5856
5857 exit_client:
5858 exit_health:
5859 exit:
5860 /*
5861 * cleanup() is called when no other thread is running.
5862 */
5863 rcu_thread_online();
5864 cleanup();
5865 rcu_thread_offline();
5866 rcu_unregister_thread();
5867 if (!ret) {
5868 exit(EXIT_SUCCESS);
5869 }
5870 error:
5871 exit(EXIT_FAILURE);
5872 }