85b960ee3c23be267d4b5558e9c1dc661539583e
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <semaphore.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/poll.h>
42 #include <common/compat/socket.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/futex.h>
46 #include <common/relayd/relayd.h>
47 #include <common/utils.h>
48
49 #include "lttng-sessiond.h"
50 #include "channel.h"
51 #include "consumer.h"
52 #include "context.h"
53 #include "event.h"
54 #include "kernel.h"
55 #include "kernel-consumer.h"
56 #include "modprobe.h"
57 #include "shm.h"
58 #include "ust-ctl.h"
59 #include "ust-consumer.h"
60 #include "utils.h"
61 #include "fd-limit.h"
62 #include "filter.h"
63 #include "health.h"
64
65 #define CONSUMERD_FILE "lttng-consumerd"
66
67 /* Const values */
68 const char default_home_dir[] = DEFAULT_HOME_DIR;
69 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
70 const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
71 const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
72
73 const char *progname;
74 const char *opt_tracing_group;
75 static int opt_sig_parent;
76 static int opt_verbose_consumer;
77 static int opt_daemon;
78 static int opt_no_kernel;
79 static int is_root; /* Set to 1 if the daemon is running as root */
80 static pid_t ppid; /* Parent PID for --sig-parent option */
81 static char *rundir;
82
83 /* Consumer daemon specific control data */
84 static struct consumer_data kconsumer_data = {
85 .type = LTTNG_CONSUMER_KERNEL,
86 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
87 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
88 .err_sock = -1,
89 .cmd_sock = -1,
90 };
91 static struct consumer_data ustconsumer64_data = {
92 .type = LTTNG_CONSUMER64_UST,
93 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
94 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
95 .err_sock = -1,
96 .cmd_sock = -1,
97 };
98 static struct consumer_data ustconsumer32_data = {
99 .type = LTTNG_CONSUMER32_UST,
100 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
101 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
102 .err_sock = -1,
103 .cmd_sock = -1,
104 };
105
106 static int dispatch_thread_exit;
107
108 /* Global application Unix socket path */
109 static char apps_unix_sock_path[PATH_MAX];
110 /* Global client Unix socket path */
111 static char client_unix_sock_path[PATH_MAX];
112 /* global wait shm path for UST */
113 static char wait_shm_path[PATH_MAX];
114 /* Global health check unix path */
115 static char health_unix_sock_path[PATH_MAX];
116
117 /* Sockets and FDs */
118 static int client_sock = -1;
119 static int apps_sock = -1;
120 static int kernel_tracer_fd = -1;
121 static int kernel_poll_pipe[2] = { -1, -1 };
122
123 /*
124 * Quit pipe for all threads. This permits a single cancellation point
125 * for all threads when receiving an event on the pipe.
126 */
127 static int thread_quit_pipe[2] = { -1, -1 };
128
129 /*
130 * This pipe is used to inform the thread managing application communication
131 * that a command is queued and ready to be processed.
132 */
133 static int apps_cmd_pipe[2] = { -1, -1 };
134
135 /* Pthread, Mutexes and Semaphores */
136 static pthread_t apps_thread;
137 static pthread_t reg_apps_thread;
138 static pthread_t client_thread;
139 static pthread_t kernel_thread;
140 static pthread_t dispatch_thread;
141 static pthread_t health_thread;
142
143 /*
144 * UST registration command queue. This queue is tied with a futex and uses a N
145 * wakers / 1 waiter implemented and detailed in futex.c/.h
146 *
147 * The thread_manage_apps and thread_dispatch_ust_registration interact with
148 * this queue and the wait/wake scheme.
149 */
150 static struct ust_cmd_queue ust_cmd_queue;
151
152 /*
153 * Pointer initialized before thread creation.
154 *
155 * This points to the tracing session list containing the session count and a
156 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
157 * MUST NOT be taken if you call a public function in session.c.
158 *
159 * The lock is nested inside the structure: session_list_ptr->lock. Please use
160 * session_lock_list and session_unlock_list for lock acquisition.
161 */
162 static struct ltt_session_list *session_list_ptr;
163
164 int ust_consumerd64_fd = -1;
165 int ust_consumerd32_fd = -1;
166
167 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
168 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
169 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
170 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
171
172 /*
173 * Consumer daemon state which is changed when spawning it, killing it or in
174 * case of a fatal error.
175 */
176 enum consumerd_state {
177 CONSUMER_STARTED = 1,
178 CONSUMER_STOPPED = 2,
179 CONSUMER_ERROR = 3,
180 };
181
182 /*
183 * This consumer daemon state is used to validate if a client command will be
184 * able to reach the consumer. If not, the client is informed. For instance,
185 * doing a "lttng start" when the consumer state is set to ERROR will return an
186 * error to the client.
187 *
188 * The following example shows a possible race condition of this scheme:
189 *
190 * consumer thread error happens
191 * client cmd arrives
192 * client cmd checks state -> still OK
193 * consumer thread exit, sets error
194 * client cmd try to talk to consumer
195 * ...
196 *
197 * However, since the consumer is a different daemon, we have no way of making
198 * sure the command will reach it safely even with this state flag. This is why
199 * we consider that up to the state validation during command processing, the
200 * command is safe. After that, we can not guarantee the correctness of the
201 * client request vis-a-vis the consumer.
202 */
203 static enum consumerd_state ust_consumerd_state;
204 static enum consumerd_state kernel_consumerd_state;
205
206 /*
207 * Used to keep a unique index for each relayd socket created where this value
208 * is associated with streams on the consumer so it can match the right relayd
209 * to send to.
210 *
211 * This value should be incremented atomically for safety purposes and future
212 * possible concurrent access.
213 */
214 static unsigned int relayd_net_seq_idx;
215
216 /* Used for the health monitoring of the session daemon. See health.h */
217 struct health_state health_thread_cmd;
218 struct health_state health_thread_app_reg;
219 struct health_state health_thread_kernel;
220
221 static
222 void setup_consumerd_path(void)
223 {
224 const char *bin, *libdir;
225
226 /*
227 * Allow INSTALL_BIN_PATH to be used as a target path for the
228 * native architecture size consumer if CONFIG_CONSUMER*_PATH
229 * has not been defined.
230 */
231 #if (CAA_BITS_PER_LONG == 32)
232 if (!consumerd32_bin[0]) {
233 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
234 }
235 if (!consumerd32_libdir[0]) {
236 consumerd32_libdir = INSTALL_LIB_PATH;
237 }
238 #elif (CAA_BITS_PER_LONG == 64)
239 if (!consumerd64_bin[0]) {
240 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
241 }
242 if (!consumerd64_libdir[0]) {
243 consumerd64_libdir = INSTALL_LIB_PATH;
244 }
245 #else
246 #error "Unknown bitness"
247 #endif
248
249 /*
250 * runtime env. var. overrides the build default.
251 */
252 bin = getenv("LTTNG_CONSUMERD32_BIN");
253 if (bin) {
254 consumerd32_bin = bin;
255 }
256 bin = getenv("LTTNG_CONSUMERD64_BIN");
257 if (bin) {
258 consumerd64_bin = bin;
259 }
260 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
261 if (libdir) {
262 consumerd32_libdir = libdir;
263 }
264 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
265 if (libdir) {
266 consumerd64_libdir = libdir;
267 }
268 }
269
270 /*
271 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
272 */
273 static int create_thread_poll_set(struct lttng_poll_event *events,
274 unsigned int size)
275 {
276 int ret;
277
278 if (events == NULL || size == 0) {
279 ret = -1;
280 goto error;
281 }
282
283 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
284 if (ret < 0) {
285 goto error;
286 }
287
288 /* Add quit pipe */
289 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
290 if (ret < 0) {
291 goto error;
292 }
293
294 return 0;
295
296 error:
297 return ret;
298 }
299
300 /*
301 * Check if the thread quit pipe was triggered.
302 *
303 * Return 1 if it was triggered else 0;
304 */
305 static int check_thread_quit_pipe(int fd, uint32_t events)
306 {
307 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
308 return 1;
309 }
310
311 return 0;
312 }
313
314 /*
315 * Return group ID of the tracing group or -1 if not found.
316 */
317 static gid_t allowed_group(void)
318 {
319 struct group *grp;
320
321 if (opt_tracing_group) {
322 grp = getgrnam(opt_tracing_group);
323 } else {
324 grp = getgrnam(default_tracing_group);
325 }
326 if (!grp) {
327 return -1;
328 } else {
329 return grp->gr_gid;
330 }
331 }
332
333 /*
334 * Init thread quit pipe.
335 *
336 * Return -1 on error or 0 if all pipes are created.
337 */
338 static int init_thread_quit_pipe(void)
339 {
340 int ret, i;
341
342 ret = pipe(thread_quit_pipe);
343 if (ret < 0) {
344 PERROR("thread quit pipe");
345 goto error;
346 }
347
348 for (i = 0; i < 2; i++) {
349 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
350 if (ret < 0) {
351 PERROR("fcntl");
352 goto error;
353 }
354 }
355
356 error:
357 return ret;
358 }
359
/*
 * Complete teardown of a kernel session. This free all data structure related
 * to a kernel session and update counter.
 *
 * No-op if the session has no kernel component.
 */
static void teardown_kernel_session(struct ltt_session *session)
{
	if (!session->kernel_session) {
		DBG3("No kernel session when tearing down session");
		return;
	}

	DBG("Tearing down kernel session");

	/*
	 * If a custom kernel consumer was registered, close the socket before
	 * tearing down the complete kernel session structure
	 */
	/*
	 * NOTE(review): a consumer_fd differing from kconsumer_data.cmd_sock is
	 * treated as session-owned and closed here; the shared cmd_sock itself
	 * is left open for other sessions — confirm ownership in consumer.c.
	 */
	if (kconsumer_data.cmd_sock >= 0 &&
			session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
		lttcomm_close_unix_sock(session->kernel_session->consumer_fd);
	}

	/* Frees the whole kernel session structure (channels, streams, ...). */
	trace_kernel_destroy_session(session->kernel_session);
}
384
385 /*
386 * Complete teardown of all UST sessions. This will free everything on his path
387 * and destroy the core essence of all ust sessions :)
388 */
389 static void teardown_ust_session(struct ltt_session *session)
390 {
391 int ret;
392
393 if (!session->ust_session) {
394 DBG3("No UST session when tearing down session");
395 return;
396 }
397
398 DBG("Tearing down UST session(s)");
399
400 ret = ust_app_destroy_trace_all(session->ust_session);
401 if (ret) {
402 ERR("Error in ust_app_destroy_trace_all");
403 }
404
405 trace_ust_destroy_session(session->ust_session);
406 }
407
408 /*
409 * Stop all threads by closing the thread quit pipe.
410 */
411 static void stop_threads(void)
412 {
413 int ret;
414
415 /* Stopping all threads */
416 DBG("Terminating all threads");
417 ret = notify_thread_pipe(thread_quit_pipe[1]);
418 if (ret < 0) {
419 ERR("write error on thread quit pipe");
420 }
421
422 /* Dispatch thread */
423 dispatch_thread_exit = 1;
424 futex_nto1_wake(&ust_cmd_queue.futex);
425 }
426
427 /*
428 * Cleanup the daemon
429 */
430 static void cleanup(void)
431 {
432 int ret;
433 char *cmd;
434 struct ltt_session *sess, *stmp;
435
436 DBG("Cleaning up");
437
438 DBG("Removing %s directory", rundir);
439 ret = asprintf(&cmd, "rm -rf %s", rundir);
440 if (ret < 0) {
441 ERR("asprintf failed. Something is really wrong!");
442 }
443
444 /* Remove lttng run directory */
445 ret = system(cmd);
446 if (ret < 0) {
447 ERR("Unable to clean %s", rundir);
448 }
449 free(cmd);
450
451 DBG("Cleaning up all sessions");
452
453 /* Destroy session list mutex */
454 if (session_list_ptr != NULL) {
455 pthread_mutex_destroy(&session_list_ptr->lock);
456
457 /* Cleanup ALL session */
458 cds_list_for_each_entry_safe(sess, stmp,
459 &session_list_ptr->head, list) {
460 teardown_kernel_session(sess);
461 teardown_ust_session(sess);
462 free(sess);
463 }
464 }
465
466 DBG("Closing all UST sockets");
467 ust_app_clean_list();
468
469 pthread_mutex_destroy(&kconsumer_data.pid_mutex);
470
471 if (is_root && !opt_no_kernel) {
472 DBG2("Closing kernel fd");
473 if (kernel_tracer_fd >= 0) {
474 ret = close(kernel_tracer_fd);
475 if (ret) {
476 PERROR("close");
477 }
478 }
479 DBG("Unloading kernel modules");
480 modprobe_remove_lttng_all();
481 }
482 utils_close_pipe(kernel_poll_pipe);
483 utils_close_pipe(thread_quit_pipe);
484 utils_close_pipe(apps_cmd_pipe);
485
486 /* <fun> */
487 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
488 "Matthew, BEET driven development works!%c[%dm",
489 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
490 /* </fun> */
491 }
492
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code, or -1 for an empty buffer.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/*
	 * len is unsigned, so the former "len <= 0" test could only ever
	 * match zero; make the empty-buffer check explicit.
	 */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
507
508 /*
509 * Free memory of a command context structure.
510 */
511 static void clean_command_ctx(struct command_ctx **cmd_ctx)
512 {
513 DBG("Clean command context structure");
514 if (*cmd_ctx) {
515 if ((*cmd_ctx)->llm) {
516 free((*cmd_ctx)->llm);
517 }
518 if ((*cmd_ctx)->lsm) {
519 free((*cmd_ctx)->lsm);
520 }
521 free(*cmd_ctx);
522 *cmd_ctx = NULL;
523 }
524 }
525
526 /*
527 * Notify UST applications using the shm mmap futex.
528 */
529 static int notify_ust_apps(int active)
530 {
531 char *wait_shm_mmap;
532
533 DBG("Notifying applications of session daemon state: %d", active);
534
535 /* See shm.c for this call implying mmap, shm and futex calls */
536 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
537 if (wait_shm_mmap == NULL) {
538 goto error;
539 }
540
541 /* Wake waiting process */
542 futex_wait_update((int32_t *) wait_shm_mmap, active);
543
544 /* Apps notified successfully */
545 return 0;
546
547 error:
548 return -1;
549 }
550
551 /*
552 * Setup the outgoing data buffer for the response (llm) by allocating the
553 * right amount of memory and copying the original information from the lsm
554 * structure.
555 *
556 * Return total size of the buffer pointed by buf.
557 */
558 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
559 {
560 int ret, buf_size;
561
562 buf_size = size;
563
564 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
565 if (cmd_ctx->llm == NULL) {
566 PERROR("zmalloc");
567 ret = -ENOMEM;
568 goto error;
569 }
570
571 /* Copy common data */
572 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
573 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
574
575 cmd_ctx->llm->data_size = size;
576 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
577
578 return buf_size;
579
580 error:
581 return ret;
582 }
583
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * session. Add the wakeup pipe at the end of the set.
 *
 * Returns 0 on success, -1 if a channel fd could not be added to the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	/*
	 * The session list lock is held across the whole walk; each session
	 * lock is taken only while its channels are being added.
	 */
	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			/* UST-only session: nothing to poll here. */
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				/* Session is still locked; release before bailing out. */
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
624
/*
 * Find the channel fd from 'fd' over all tracing session. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 *
 * Returns 0 or the negative error of the failing step. Note that the
 * "error" label is also the normal exit once the channel has been found
 * and handled: the session lock is still held when jumping there.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		/* This is not suppose to be -1 but this is an extra security check */
		if (session->kernel_session->consumer_fd < 0) {
			session->kernel_session->consumer_fd = consumer_data->cmd_sock;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				/* Open one stream per newly appeared CPU on this channel. */
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (session->kernel_session->consumer_fds_sent == 1 &&
						session->kernel_session->consumer != NULL) {
					ret = kernel_consumer_send_channel_stream(
							session->kernel_session->consumer_fd, channel,
							session->kernel_session);
					if (ret < 0) {
						goto error;
					}
				}
				/* Channel handled; exit through the common unlock path. */
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
688
689 /*
690 * For each tracing session, update newly registered apps.
691 */
692 static void update_ust_app(int app_sock)
693 {
694 struct ltt_session *sess, *stmp;
695
696 session_lock_list();
697
698 /* For all tracing session(s) */
699 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
700 session_lock(sess);
701 if (sess->ust_session) {
702 ust_app_global_update(sess->ust_session, app_sock);
703 }
704 session_unlock(sess);
705 }
706
707 session_unlock_list();
708 }
709
/*
 * This thread manage event coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 *
 * It polls the kernel pipe (written to when new channels appear) plus every
 * kernel channel fd; a readable channel fd means the kernel signaled a new
 * per-CPU stream to pick up.
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("Thread manage kernel started");

	health_code_update(&health_thread_kernel);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		health_code_update(&health_thread_kernel);

		if (update_poll_flag == 1) {
			/*
			 * Reset number of fd in the poll set. Always 2 since there is the thread
			 * quit pipe and the kernel pipe.
			 */
			events.nb_fd = 2;

			/* Re-add every known kernel channel fd to the set. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Thread kernel polling on %d fds", nb_fd);

		/* Zeroed the poll events */
		lttng_poll_reset(&events);

		/* Poll infinite value of time */
	restart:
		health_poll_update(&health_thread_kernel);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_kernel);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_kernel);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto error;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				/*
				 * Drain the wake-up byte; the value is irrelevant.
				 * NOTE(review): read() return value is unchecked here.
				 */
				ret = read(kernel_poll_pipe[0], &tmp, 1);
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					/* Poll set may have changed; restart the outer loop. */
					break;
					/*
					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
					 * and unregister kernel stream at this point.
					 */
				}
			}
		}
	}

error:
	lttng_poll_clean(&events);
error_poll_create:
	health_reset(&health_thread_kernel);
	DBG("Kernel thread dying");
	return NULL;
}
826
/*
 * This thread manage the consumer error sent back to the session daemon.
 *
 * Two phases:
 *   1) Wait on the consumer error socket for the initial
 *      CONSUMERD_COMMAND_SOCK_READY handshake, then connect the command
 *      socket and post the semaphore to unblock the spawner.
 *   2) Keep watching the accepted socket; any message or hangup is treated
 *      as a consumer error and the consumerd state is flipped to ERROR.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_code_update(&consumer_data->health);

	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	nb_fd = LTTNG_POLL_GETNB(&events);

	health_code_update(&consumer_data->health);

	/* Inifinite blocking call, waiting for transmission */
restart:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			goto error;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	/* Only reached when the err_sock has a pending connection. */
	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	if (code == CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* Post even on failure so the spawner does not block forever. */
			sem_post(&consumer_data->sem);
			PERROR("consumer connect");
			goto error;
		}
		/* Signal condition to tell that the kconsumerd is ready */
		sem_post(&consumer_data->sem);
		DBG("consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connexion */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Phase 2: monitor the accepted error socket for consumer errors. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Update number of fd */
	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Inifinite blocking call, waiting for transmission */
restart_poll:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			goto error;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	health_code_update(&consumer_data->health);

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);
error_poll:
error_listen:
	health_reset(&consumer_data->health);
	DBG("consumer thread cleanup completed");

	return NULL;
}
1045
/*
 * This thread manage application communication.
 *
 * It waits on the apps command pipe for new UST application registrations
 * (pushed by the dispatch thread), registers each app, validates its UST
 * version, pushes the current global tracing state to it, then keeps the
 * app socket in the poll set only to detect socket close and unregister.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_code_update(&health_thread_app_reg);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&health_thread_app_reg);

	while (1) {
		/* Zeroed the events structure */
		lttng_poll_reset(&events);

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Apps thread polling on %d fds", nb_fd);

		/* Inifinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_reg);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_reg);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_app_reg);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto error;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					/* Empty pipe */
					/* Short reads are treated as fatal: a full command is expected. */
					ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update(&health_thread_app_reg);

					/* Register applicaton to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						goto error;
					} else if (ret < 0) {
						/* Non-fatal registration failure: drop this app only. */
						break;
					}

					health_code_update(&health_thread_app_reg);

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					health_code_update(&health_thread_app_reg);

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We just need here to monitor the close of the UST
						 * socket and poll set monitor those by default.
						 * Listen on POLLIN (even if we never expect any
						 * data) to ensure that hangup wakes us.
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);
						if (ret < 0) {
							goto error;
						}

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}

					health_code_update(&health_thread_app_reg);

					/* One command per wake-up; re-poll for the next one. */
					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update(&health_thread_app_reg);
		}
	}

error:
	lttng_poll_clean(&events);
error_poll_create:
	health_reset(&health_thread_app_reg);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
1209
1210 /*
1211 * Dispatch request from the registration threads to the application
1212 * communication thread.
1213 */
1214 static void *thread_dispatch_ust_registration(void *data)
1215 {
1216 int ret;
1217 struct cds_wfq_node *node;
1218 struct ust_command *ust_cmd = NULL;
1219
1220 DBG("[thread] Dispatch UST command started");
1221
1222 while (!dispatch_thread_exit) {
1223 /* Atomically prepare the queue futex */
1224 futex_nto1_prepare(&ust_cmd_queue.futex);
1225
1226 do {
1227 /* Dequeue command for registration */
1228 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1229 if (node == NULL) {
1230 DBG("Woken up but nothing in the UST command queue");
1231 /* Continue thread execution */
1232 break;
1233 }
1234
1235 ust_cmd = caa_container_of(node, struct ust_command, node);
1236
1237 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1238 " gid:%d sock:%d name:%s (version %d.%d)",
1239 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1240 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1241 ust_cmd->sock, ust_cmd->reg_msg.name,
1242 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1243 /*
1244 * Inform apps thread of the new application registration. This
1245 * call is blocking so we can be assured that the data will be read
1246 * at some point in time or wait to the end of the world :)
1247 */
1248 ret = write(apps_cmd_pipe[1], ust_cmd,
1249 sizeof(struct ust_command));
1250 if (ret < 0) {
1251 PERROR("write apps cmd pipe");
1252 if (errno == EBADF) {
1253 /*
1254 * We can't inform the application thread to process
1255 * registration. We will exit or else application
1256 * registration will not occur and tracing will never
1257 * start.
1258 */
1259 goto error;
1260 }
1261 }
1262 free(ust_cmd);
1263 } while (node != NULL);
1264
1265 /* Futex wait on queue. Blocking call on futex() */
1266 futex_nto1_wait(&ust_cmd_queue.futex);
1267 }
1268
1269 error:
1270 DBG("Dispatch thread dying");
1271 return NULL;
1272 }
1273
/*
 * This thread manage application registration.
 *
 * Listens on the apps unix socket, accepts connecting applications, receives
 * their registration message and enqueues a ust_command on the global
 * ust_cmd_queue for the dispatch thread. Returns NULL on thread exit.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Get allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		/* Non-fatal: keep serving registrations that do come in. */
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problem for already\n"
			"running applications that wishes to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/*
		 * NOTE(review): the fd count is snapshotted before poll_wait;
		 * presumably fine since this set never grows past the initial
		 * two fds — confirm against LTTNG_POLL_GETNB semantics.
		 */
		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Inifinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto error;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					/* Reserve an fd slot before keeping the app socket. */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					/*
					 * The ret < 0 test runs first, so the signed/unsigned
					 * comparison against sizeof only sees ret >= 0.
					 */
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						/* Undo the allocation, socket and fd reservation. */
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}

					/* Ownership of the socket moves into the command. */
					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	/* A socket accepted but not yet handed off must be released here. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
	DBG("UST Registration thread cleanup complete");

	return NULL;
}
1451
1452 /*
1453 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1454 * exec or it will fails.
1455 */
1456 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1457 {
1458 int ret;
1459 struct timespec timeout;
1460
1461 timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
1462 timeout.tv_nsec = 0;
1463
1464 /* Setup semaphore */
1465 ret = sem_init(&consumer_data->sem, 0, 0);
1466 if (ret < 0) {
1467 PERROR("sem_init consumer semaphore");
1468 goto error;
1469 }
1470
1471 ret = pthread_create(&consumer_data->thread, NULL,
1472 thread_manage_consumer, consumer_data);
1473 if (ret != 0) {
1474 PERROR("pthread_create consumer");
1475 ret = -1;
1476 goto error;
1477 }
1478
1479 /* Get time for sem_timedwait absolute timeout */
1480 ret = clock_gettime(CLOCK_REALTIME, &timeout);
1481 if (ret < 0) {
1482 PERROR("clock_gettime spawn consumer");
1483 /* Infinite wait for the kconsumerd thread to be ready */
1484 ret = sem_wait(&consumer_data->sem);
1485 } else {
1486 /* Normal timeout if the gettime was successful */
1487 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1488 ret = sem_timedwait(&consumer_data->sem, &timeout);
1489 }
1490
1491 if (ret < 0) {
1492 if (errno == ETIMEDOUT) {
1493 /*
1494 * Call has timed out so we kill the kconsumerd_thread and return
1495 * an error.
1496 */
1497 ERR("The consumer thread was never ready. Killing it");
1498 ret = pthread_cancel(consumer_data->thread);
1499 if (ret < 0) {
1500 PERROR("pthread_cancel consumer thread");
1501 }
1502 } else {
1503 PERROR("semaphore wait failed consumer thread");
1504 }
1505 goto error;
1506 }
1507
1508 pthread_mutex_lock(&consumer_data->pid_mutex);
1509 if (consumer_data->pid == 0) {
1510 ERR("Kconsumerd did not start");
1511 pthread_mutex_unlock(&consumer_data->pid_mutex);
1512 goto error;
1513 }
1514 pthread_mutex_unlock(&consumer_data->pid_mutex);
1515
1516 return 0;
1517
1518 error:
1519 return ret;
1520 }
1521
1522 /*
1523 * Join consumer thread
1524 */
1525 static int join_consumer_thread(struct consumer_data *consumer_data)
1526 {
1527 void *status;
1528 int ret;
1529
1530 if (consumer_data->pid != 0) {
1531 ret = kill(consumer_data->pid, SIGTERM);
1532 if (ret) {
1533 ERR("Error killing consumer daemon");
1534 return ret;
1535 }
1536 return pthread_join(consumer_data->thread, &status);
1537 } else {
1538 return 0;
1539 }
1540 }
1541
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		/* Forward the sessiond verbosity setting to the consumerd. */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one,
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3(" 1) %s", consumerd64_bin);
			DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3(" 3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				/* No binary found: fall through to the exit below. */
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			/* execl only returns on failure. */
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			/* Prepend the 64-bit consumer libdir to LD_LIBRARY_PATH. */
			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					/*
					 * NOTE(review): this goto runs in the forked child;
					 * returning from spawn_consumerd here leaves a live
					 * duplicate of the sessiond process — confirm intent.
					 */
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				/* putenv keeps tmpnew itself in the environment. */
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			/*
			 * Only reached if execl failed; the process exits right
			 * after, so freeing the putenv'd string is harmless here.
			 */
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			/* Same dance as above, for the 32-bit consumer libdir. */
			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		/*
		 * NOTE(review): errno may also be non-zero from an earlier failed
		 * stat() in the kernel case, not only from execl — verify.
		 */
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		/* Parent: report the child's pid to the caller. */
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
1703
1704 /*
1705 * Spawn the consumerd daemon and session daemon thread.
1706 */
1707 static int start_consumerd(struct consumer_data *consumer_data)
1708 {
1709 int ret;
1710
1711 pthread_mutex_lock(&consumer_data->pid_mutex);
1712 if (consumer_data->pid != 0) {
1713 pthread_mutex_unlock(&consumer_data->pid_mutex);
1714 goto end;
1715 }
1716
1717 ret = spawn_consumerd(consumer_data);
1718 if (ret < 0) {
1719 ERR("Spawning consumerd failed");
1720 pthread_mutex_unlock(&consumer_data->pid_mutex);
1721 goto error;
1722 }
1723
1724 /* Setting up the consumer_data pid */
1725 consumer_data->pid = ret;
1726 DBG2("Consumer pid %d", consumer_data->pid);
1727 pthread_mutex_unlock(&consumer_data->pid_mutex);
1728
1729 DBG2("Spawning consumer control thread");
1730 ret = spawn_consumer_thread(consumer_data);
1731 if (ret < 0) {
1732 ERR("Fatal error spawning consumer control thread");
1733 goto error;
1734 }
1735
1736 end:
1737 return 0;
1738
1739 error:
1740 return ret;
1741 }
1742
1743 /*
1744 * Compute health status of each consumer.
1745 */
1746 static int check_consumer_health(void)
1747 {
1748 int ret;
1749
1750 ret =
1751 health_check_state(&kconsumer_data.health) &
1752 health_check_state(&ustconsumer32_data.health) &
1753 health_check_state(&ustconsumer64_data.health);
1754
1755 DBG3("Health consumer check %d", ret);
1756
1757 return ret;
1758 }
1759
/*
 * Check version of the lttng-modules.
 *
 * Thin wrapper delegating to kernel_validate_version() on the globally opened
 * kernel tracer fd.
 */
static int validate_lttng_modules_version(void)
{
	return kernel_validate_version(kernel_tracer_fd);
}
1767
/*
 * Setup necessary data for kernel tracer action.
 *
 * Loads the lttng control modules, opens the lttng proc/debugfs file into the
 * global kernel_tracer_fd, validates the module version and loads the data
 * modules. Returns 0 on success or an LTTCOMM error code; on any failure the
 * global kernel_tracer_fd is left at -1.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = validate_lttng_modules_version();
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

	/* Version mismatch: unload control modules and close the tracer fd. */
error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTCOMM_KERN_VERSION;

	/* Data modules failed: close the fd, then fall through to unload. */
error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	/* Without root, the likely cause is missing privileges, not modules. */
	if (!is_root) {
		return LTTCOMM_NEED_ROOT_SESSIOND;
	} else {
		return LTTCOMM_KERN_NA;
	}
}
1830
1831 /*
1832 * Init tracing by creating trace directory and sending fds kernel consumer.
1833 */
1834 static int init_kernel_tracing(struct ltt_kernel_session *session)
1835 {
1836 int ret = 0;
1837
1838 if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
1839 /*
1840 * Assign default kernel consumer socket if no consumer assigned to the
1841 * kernel session. At this point, it's NOT supposed to be -1 but this is
1842 * an extra security check.
1843 */
1844 if (session->consumer_fd < 0) {
1845 session->consumer_fd = kconsumer_data.cmd_sock;
1846 }
1847
1848 ret = kernel_consumer_send_session(session->consumer_fd, session);
1849 if (ret < 0) {
1850 ret = LTTCOMM_KERN_CONSUMER_FAIL;
1851 goto error;
1852 }
1853 }
1854
1855 error:
1856 return ret;
1857 }
1858
1859 /*
1860 * Create a socket to the relayd using the URI.
1861 *
1862 * On success, the relayd_sock pointer is set to the created socket.
1863 * Else, it is untouched and an lttcomm error code is returned.
1864 */
1865 static int create_connect_relayd(struct consumer_output *output,
1866 const char *session_name, struct lttng_uri *uri,
1867 struct lttcomm_sock **relayd_sock)
1868 {
1869 int ret;
1870 struct lttcomm_sock *sock;
1871
1872 /* Create socket object from URI */
1873 sock = lttcomm_alloc_sock_from_uri(uri);
1874 if (sock == NULL) {
1875 ret = LTTCOMM_FATAL;
1876 goto error;
1877 }
1878
1879 ret = lttcomm_create_sock(sock);
1880 if (ret < 0) {
1881 ret = LTTCOMM_FATAL;
1882 goto error;
1883 }
1884
1885 /* Connect to relayd so we can proceed with a session creation. */
1886 ret = relayd_connect(sock);
1887 if (ret < 0) {
1888 ERR("Unable to reach lttng-relayd");
1889 ret = LTTCOMM_RELAYD_SESSION_FAIL;
1890 goto free_sock;
1891 }
1892
1893 /* Create socket for control stream. */
1894 if (uri->stype == LTTNG_STREAM_CONTROL) {
1895 DBG3("Creating relayd stream socket from URI");
1896
1897 /* Check relayd version */
1898 ret = relayd_version_check(sock, LTTNG_UST_COMM_MAJOR, 0);
1899 if (ret < 0) {
1900 ret = LTTCOMM_RELAYD_VERSION_FAIL;
1901 goto close_sock;
1902 }
1903 } else if (uri->stype == LTTNG_STREAM_DATA) {
1904 DBG3("Creating relayd data socket from URI");
1905 } else {
1906 /* Command is not valid */
1907 ERR("Relayd invalid stream type: %d", uri->stype);
1908 ret = LTTCOMM_INVALID;
1909 goto close_sock;
1910 }
1911
1912 *relayd_sock = sock;
1913
1914 return LTTCOMM_OK;
1915
1916 close_sock:
1917 if (sock) {
1918 (void) relayd_close(sock);
1919 }
1920 free_sock:
1921 if (sock) {
1922 lttcomm_destroy_sock(sock);
1923 }
1924 error:
1925 return ret;
1926 }
1927
/*
 * Connect to the relayd using URI and send the socket to the right consumer.
 *
 * The socket handed to the consumer is dup'd on the consumer side, so it is
 * always closed and destroyed locally before returning. Returns an LTTCOMM
 * code.
 */
static int send_socket_relayd_consumer(int domain, struct ltt_session *session,
		struct lttng_uri *relayd_uri, struct consumer_output *consumer,
		int consumer_fd)
{
	int ret;
	struct lttcomm_sock *sock = NULL;

	/* Set the network sequence index if not set. */
	if (consumer->net_seq_index == -1) {
		/*
		 * Increment net_seq_idx because we are about to transfer the
		 * new relayd socket to the consumer.
		 */
		uatomic_inc(&relayd_net_seq_idx);
		/* Assign unique key so the consumer can match streams */
		consumer->net_seq_index = uatomic_read(&relayd_net_seq_idx);
	}

	/* Connect to relayd and make version check if uri is the control. */
	ret = create_connect_relayd(consumer, session->name, relayd_uri, &sock);
	if (ret != LTTCOMM_OK) {
		/* sock is still NULL here, so the cleanup below is a no-op. */
		goto close_sock;
	}

	/* If the control socket is connected, network session is ready */
	if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
		session->net_handle = 1;
	}

	/* Send relayd socket to consumer. */
	ret = consumer_send_relayd_socket(consumer_fd, sock,
			consumer, relayd_uri->stype);
	if (ret < 0) {
		ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
		goto close_sock;
	}

	ret = LTTCOMM_OK;

	/*
	 * Close socket which was dup on the consumer side. The session daemon does
	 * NOT keep track of the relayd socket(s) once transfer to the consumer.
	 */

close_sock:
	if (sock) {
		(void) relayd_close(sock);
		lttcomm_destroy_sock(sock);
	}

	return ret;
}
1983
1984 /*
1985 * Send both relayd sockets to a specific consumer and domain. This is a
1986 * helper function to facilitate sending the information to the consumer for a
1987 * session.
1988 */
1989 static int send_sockets_relayd_consumer(int domain,
1990 struct ltt_session *session, struct consumer_output *consumer, int fd)
1991 {
1992 int ret;
1993
1994 /* Sending control relayd socket. */
1995 ret = send_socket_relayd_consumer(domain, session,
1996 &consumer->dst.net.control, consumer, fd);
1997 if (ret != LTTCOMM_OK) {
1998 goto error;
1999 }
2000
2001 /* Sending data relayd socket. */
2002 ret = send_socket_relayd_consumer(domain, session,
2003 &consumer->dst.net.data, consumer, fd);
2004 if (ret != LTTCOMM_OK) {
2005 goto error;
2006 }
2007
2008 error:
2009 return ret;
2010 }
2011
2012 /*
2013 * Setup relayd connections for a tracing session. First creates the socket to
2014 * the relayd and send them to the right domain consumer. Consumer type MUST be
2015 * network.
2016 */
2017 static int setup_relayd(struct ltt_session *session)
2018 {
2019 int ret = LTTCOMM_OK;
2020 struct ltt_ust_session *usess;
2021 struct ltt_kernel_session *ksess;
2022
2023 assert(session);
2024
2025 usess = session->ust_session;
2026 ksess = session->kernel_session;
2027
2028 DBG2("Setting relayd for session %s", session->name);
2029
2030 if (usess && usess->consumer->sock == -1 &&
2031 usess->consumer->type == CONSUMER_DST_NET &&
2032 usess->consumer->enabled) {
2033 /* Setup relayd for 64 bits consumer */
2034 if (ust_consumerd64_fd >= 0) {
2035 send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
2036 usess->consumer, ust_consumerd64_fd);
2037 if (ret != LTTCOMM_OK) {
2038 goto error;
2039 }
2040 }
2041
2042 /* Setup relayd for 32 bits consumer */
2043 if (ust_consumerd32_fd >= 0) {
2044 send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
2045 usess->consumer, ust_consumerd32_fd);
2046 if (ret != LTTCOMM_OK) {
2047 goto error;
2048 }
2049 }
2050 } else if (ksess && ksess->consumer->sock == -1 &&
2051 ksess->consumer->type == CONSUMER_DST_NET &&
2052 ksess->consumer->enabled) {
2053 send_sockets_relayd_consumer(LTTNG_DOMAIN_KERNEL, session,
2054 ksess->consumer, ksess->consumer_fd);
2055 if (ret != LTTCOMM_OK) {
2056 goto error;
2057 }
2058 }
2059
2060 error:
2061 return ret;
2062 }
2063
2064 /*
2065 * Copy consumer output from the tracing session to the domain session. The
2066 * function also applies the right modification on a per domain basis for the
2067 * trace files destination directory.
2068 */
2069 static int copy_session_consumer(int domain, struct ltt_session *session)
2070 {
2071 int ret;
2072 const char *dir_name;
2073 struct consumer_output *consumer;
2074
2075 switch (domain) {
2076 case LTTNG_DOMAIN_KERNEL:
2077 DBG3("Copying tracing session consumer output in kernel session");
2078 session->kernel_session->consumer =
2079 consumer_copy_output(session->consumer);
2080 /* Ease our life a bit for the next part */
2081 consumer = session->kernel_session->consumer;
2082 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2083 break;
2084 case LTTNG_DOMAIN_UST:
2085 DBG3("Copying tracing session consumer output in UST session");
2086 session->ust_session->consumer =
2087 consumer_copy_output(session->consumer);
2088 /* Ease our life a bit for the next part */
2089 consumer = session->ust_session->consumer;
2090 dir_name = DEFAULT_UST_TRACE_DIR;
2091 break;
2092 default:
2093 ret = LTTCOMM_UNKNOWN_DOMAIN;
2094 goto error;
2095 }
2096
2097 /* Append correct directory to subdir */
2098 strncat(consumer->subdir, dir_name, sizeof(consumer->subdir));
2099 DBG3("Copy session consumer subdir %s", consumer->subdir);
2100
2101 /* Add default trace directory name */
2102 if (consumer->type == CONSUMER_DST_LOCAL) {
2103 strncat(consumer->dst.trace_path, dir_name,
2104 sizeof(consumer->dst.trace_path));
2105 }
2106
2107 ret = LTTCOMM_OK;
2108
2109 error:
2110 return ret;
2111 }
2112
/*
 * Create an UST session and add it to the session ust list.
 *
 * Builds the ltt_ust_session object, creates the local trace directory when
 * the output is local, and copies the session consumer output into the new
 * UST session. Returns LTTCOMM_OK or an LTTCOMM error code.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(session->consumer);

	/* Only the plain UST domain is supported here. */
	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTCOMM_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->path, session->id, domain);
	if (lus == NULL) {
		ret = LTTCOMM_UST_SESS_FAIL;
		goto error;
	}

	if (session->consumer->type == CONSUMER_DST_LOCAL) {
		/* Create the trace directory with the session's credentials. */
		ret = run_as_mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
				session->uid, session->gid);
		if (ret < 0) {
			/* An already-existing directory is not an error. */
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				ret = LTTCOMM_UST_SESS_FAIL;
				goto error;
			}
		}
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTCOMM_OK) {
		goto error;
	}

	return LTTCOMM_OK;

error:
	/*
	 * NOTE(review): a plain free() here may leak members allocated inside
	 * trace_ust_create_session (e.g. channel hash tables) — verify whether
	 * a dedicated destroy function should be used instead.
	 */
	free(lus);
	session->ust_session = NULL;
	return ret;
}
2171
/*
 * Create a kernel tracer session then create the default channel.
 *
 * Creates the kernel session object, wires the default kernel consumer
 * socket, copies the session consumer output and creates the local trace
 * directory when needed. Returns LTTCOMM_OK or an error code.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTCOMM_KERN_SESS_FAIL;
		goto error;
	}

	/* Set kernel consumer socket fd */
	if (kconsumer_data.cmd_sock >= 0) {
		session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
	}

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTCOMM_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->consumer->type == CONSUMER_DST_LOCAL) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			/* An already-existing directory is not an error. */
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;

	return LTTCOMM_OK;

error:
	/*
	 * NOTE(review): assumes trace_kernel_destroy_session tolerates a
	 * partially initialized (or NULL) kernel_session when the very first
	 * step failed — confirm.
	 */
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
2221
2222 /*
2223 * Check if the UID or GID match the session. Root user has access to all
2224 * sessions.
2225 */
2226 static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
2227 {
2228 if (uid != session->uid && gid != session->gid && uid != 0) {
2229 return 0;
2230 } else {
2231 return 1;
2232 }
2233 }
2234
2235 /*
2236 * Count number of session permitted by uid/gid.
2237 */
2238 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2239 {
2240 unsigned int i = 0;
2241 struct ltt_session *session;
2242
2243 DBG("Counting number of available session for UID %d GID %d",
2244 uid, gid);
2245 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2246 /*
2247 * Only list the sessions the user can control.
2248 */
2249 if (!session_access_ok(session, uid, gid)) {
2250 continue;
2251 }
2252 i++;
2253 }
2254 return i;
2255 }
2256
2257 /*
2258 * Using the session list, filled a lttng_session array to send back to the
2259 * client for session listing.
2260 *
2261 * The session list lock MUST be acquired before calling this function. Use
2262 * session_lock_list() and session_unlock_list().
2263 */
2264 static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
2265 gid_t gid)
2266 {
2267 unsigned int i = 0;
2268 struct ltt_session *session;
2269
2270 DBG("Getting all available session for UID %d GID %d",
2271 uid, gid);
2272 /*
2273 * Iterate over session list and append data after the control struct in
2274 * the buffer.
2275 */
2276 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2277 /*
2278 * Only list the sessions the user can control.
2279 */
2280 if (!session_access_ok(session, uid, gid)) {
2281 continue;
2282 }
2283 strncpy(sessions[i].path, session->path, PATH_MAX);
2284 sessions[i].path[PATH_MAX - 1] = '\0';
2285 strncpy(sessions[i].name, session->name, NAME_MAX);
2286 sessions[i].name[NAME_MAX - 1] = '\0';
2287 sessions[i].enabled = session->enabled;
2288 i++;
2289 }
2290 }
2291
/*
 * Fill lttng_channel array of all channels.
 *
 * The channels array must be pre-sized by the caller for the given domain.
 */
static void list_lttng_channels(int domain, struct ltt_session *session,
		struct lttng_channel *channels)
{
	int i = 0;
	struct ltt_kernel_channel *kchan;

	DBG("Listing channels for session %s", session->name);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		/* Kernel channels */
		if (session->kernel_session != NULL) {
			cds_list_for_each_entry(kchan,
					&session->kernel_session->channel_list.head, list) {
				/* Copy lttng_channel struct to array */
				memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
				channels[i].enabled = kchan->enabled;
				i++;
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_ht_iter iter;
		struct ltt_ust_channel *uchan;

		/*
		 * NOTE(review): unlike the kernel branch above, session->ust_session
		 * is dereferenced without a NULL check here — presumably callers
		 * only pass the UST domain when a UST session exists; confirm.
		 */
		cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
				&iter.iter, uchan, node.node) {
			/* Translate the UST channel attributes to the public struct. */
			strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
			channels[i].attr.overwrite = uchan->attr.overwrite;
			channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
			channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
			channels[i].attr.switch_timer_interval =
				uchan->attr.switch_timer_interval;
			channels[i].attr.read_timer_interval =
				uchan->attr.read_timer_interval;
			channels[i].enabled = uchan->enabled;
			switch (uchan->attr.output) {
			case LTTNG_UST_MMAP:
			default:
				/* Only mmap output is exposed for UST channels. */
				channels[i].attr.output = LTTNG_EVENT_MMAP;
				break;
			}
			i++;
		}
		break;
	}
	default:
		/* Unknown domain: leave the array untouched. */
		break;
	}
}
2346
/*
 * Create a list of ust global domain events.
 *
 * Looks up the channel by name in the UST global domain and allocates an
 * lttng_event array (returned via *events, owned by the caller) describing
 * every event of that channel. Returns the number of events, 0 when the
 * channel has none, or a negative LTTCOMM error code.
 */
static int list_lttng_ust_global_events(char *channel_name,
		struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ltt_ust_channel *uchan;
	struct ltt_ust_event *uevent;
	struct lttng_event *tmp;

	DBG("Listing UST global events for channel %s", channel_name);

	/* Hash table traversal requires the RCU read-side lock. */
	rcu_read_lock();

	lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		ret = -LTTCOMM_UST_CHAN_NOT_FOUND;
		goto error;
	}

	uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);

	nb_event += lttng_ht_get_count(uchan->events);

	if (nb_event == 0) {
		/* No events: report zero without allocating. */
		ret = nb_event;
		goto error;
	}

	DBG3("Listing UST global %d events", nb_event);

	/* zmalloc zero-fills, so fields not set below stay at 0. */
	tmp = zmalloc(nb_event * sizeof(struct lttng_event));
	if (tmp == NULL) {
		ret = -LTTCOMM_FATAL;
		goto error;
	}

	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
		tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		tmp[i].enabled = uevent->enabled;
		/* Map the UST instrumentation type to the public event type. */
		switch (uevent->attr.instrumentation) {
		case LTTNG_UST_TRACEPOINT:
			tmp[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_UST_PROBE:
			tmp[i].type = LTTNG_EVENT_PROBE;
			break;
		case LTTNG_UST_FUNCTION:
			tmp[i].type = LTTNG_EVENT_FUNCTION;
			break;
		}
		tmp[i].loglevel = uevent->attr.loglevel;
		switch (uevent->attr.loglevel_type) {
		case LTTNG_UST_LOGLEVEL_ALL:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
			break;
		case LTTNG_UST_LOGLEVEL_RANGE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
			break;
		case LTTNG_UST_LOGLEVEL_SINGLE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
			break;
		}
		/* Only advertise that a filter is attached, not its content. */
		if (uevent->filter) {
			tmp[i].filter = 1;
		}
		i++;
	}

	ret = nb_event;
	*events = tmp;

error:
	rcu_read_unlock();
	return ret;
}
2429
2430 /*
2431 * Fill lttng_event array of all kernel events in the channel.
2432 */
2433 static int list_lttng_kernel_events(char *channel_name,
2434 struct ltt_kernel_session *kernel_session, struct lttng_event **events)
2435 {
2436 int i = 0, ret;
2437 unsigned int nb_event;
2438 struct ltt_kernel_event *event;
2439 struct ltt_kernel_channel *kchan;
2440
2441 kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
2442 if (kchan == NULL) {
2443 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2444 goto error;
2445 }
2446
2447 nb_event = kchan->event_count;
2448
2449 DBG("Listing events for channel %s", kchan->channel->name);
2450
2451 if (nb_event == 0) {
2452 ret = nb_event;
2453 goto error;
2454 }
2455
2456 *events = zmalloc(nb_event * sizeof(struct lttng_event));
2457 if (*events == NULL) {
2458 ret = LTTCOMM_FATAL;
2459 goto error;
2460 }
2461
2462 /* Kernel channels */
2463 cds_list_for_each_entry(event, &kchan->events_list.head , list) {
2464 strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
2465 (*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
2466 (*events)[i].enabled = event->enabled;
2467 switch (event->event->instrumentation) {
2468 case LTTNG_KERNEL_TRACEPOINT:
2469 (*events)[i].type = LTTNG_EVENT_TRACEPOINT;
2470 break;
2471 case LTTNG_KERNEL_KPROBE:
2472 case LTTNG_KERNEL_KRETPROBE:
2473 (*events)[i].type = LTTNG_EVENT_PROBE;
2474 memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
2475 sizeof(struct lttng_kernel_kprobe));
2476 break;
2477 case LTTNG_KERNEL_FUNCTION:
2478 (*events)[i].type = LTTNG_EVENT_FUNCTION;
2479 memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
2480 sizeof(struct lttng_kernel_function));
2481 break;
2482 case LTTNG_KERNEL_NOOP:
2483 (*events)[i].type = LTTNG_EVENT_NOOP;
2484 break;
2485 case LTTNG_KERNEL_SYSCALL:
2486 (*events)[i].type = LTTNG_EVENT_SYSCALL;
2487 break;
2488 case LTTNG_KERNEL_ALL:
2489 assert(0);
2490 break;
2491 }
2492 i++;
2493 }
2494
2495 return nb_event;
2496
2497 error:
2498 return ret;
2499 }
2500
2501 /*
2502 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
2503 */
2504 static int cmd_disable_channel(struct ltt_session *session,
2505 int domain, char *channel_name)
2506 {
2507 int ret;
2508 struct ltt_ust_session *usess;
2509
2510 usess = session->ust_session;
2511
2512 switch (domain) {
2513 case LTTNG_DOMAIN_KERNEL:
2514 {
2515 ret = channel_kernel_disable(session->kernel_session,
2516 channel_name);
2517 if (ret != LTTCOMM_OK) {
2518 goto error;
2519 }
2520
2521 kernel_wait_quiescent(kernel_tracer_fd);
2522 break;
2523 }
2524 case LTTNG_DOMAIN_UST:
2525 {
2526 struct ltt_ust_channel *uchan;
2527 struct lttng_ht *chan_ht;
2528
2529 chan_ht = usess->domain_global.channels;
2530
2531 uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
2532 if (uchan == NULL) {
2533 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2534 goto error;
2535 }
2536
2537 ret = channel_ust_disable(usess, domain, uchan);
2538 if (ret != LTTCOMM_OK) {
2539 goto error;
2540 }
2541 break;
2542 }
2543 #if 0
2544 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2545 case LTTNG_DOMAIN_UST_EXEC_NAME:
2546 case LTTNG_DOMAIN_UST_PID:
2547 #endif
2548 default:
2549 ret = LTTCOMM_UNKNOWN_DOMAIN;
2550 goto error;
2551 }
2552
2553 ret = LTTCOMM_OK;
2554
2555 error:
2556 return ret;
2557 }
2558
2559 /*
2560 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
2561 */
2562 static int cmd_enable_channel(struct ltt_session *session,
2563 int domain, struct lttng_channel *attr)
2564 {
2565 int ret;
2566 struct ltt_ust_session *usess = session->ust_session;
2567 struct lttng_ht *chan_ht;
2568
2569 DBG("Enabling channel %s for session %s", attr->name, session->name);
2570
2571 switch (domain) {
2572 case LTTNG_DOMAIN_KERNEL:
2573 {
2574 struct ltt_kernel_channel *kchan;
2575
2576 kchan = trace_kernel_get_channel_by_name(attr->name,
2577 session->kernel_session);
2578 if (kchan == NULL) {
2579 ret = channel_kernel_create(session->kernel_session,
2580 attr, kernel_poll_pipe[1]);
2581 } else {
2582 ret = channel_kernel_enable(session->kernel_session, kchan);
2583 }
2584
2585 if (ret != LTTCOMM_OK) {
2586 goto error;
2587 }
2588
2589 kernel_wait_quiescent(kernel_tracer_fd);
2590 break;
2591 }
2592 case LTTNG_DOMAIN_UST:
2593 {
2594 struct ltt_ust_channel *uchan;
2595
2596 chan_ht = usess->domain_global.channels;
2597
2598 uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
2599 if (uchan == NULL) {
2600 ret = channel_ust_create(usess, domain, attr);
2601 } else {
2602 ret = channel_ust_enable(usess, domain, uchan);
2603 }
2604 break;
2605 }
2606 #if 0
2607 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2608 case LTTNG_DOMAIN_UST_EXEC_NAME:
2609 case LTTNG_DOMAIN_UST_PID:
2610 #endif
2611 default:
2612 ret = LTTCOMM_UNKNOWN_DOMAIN;
2613 goto error;
2614 }
2615
2616 error:
2617 return ret;
2618 }
2619
2620 /*
2621 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2622 */
2623 static int cmd_disable_event(struct ltt_session *session, int domain,
2624 char *channel_name, char *event_name)
2625 {
2626 int ret;
2627
2628 switch (domain) {
2629 case LTTNG_DOMAIN_KERNEL:
2630 {
2631 struct ltt_kernel_channel *kchan;
2632 struct ltt_kernel_session *ksess;
2633
2634 ksess = session->kernel_session;
2635
2636 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
2637 if (kchan == NULL) {
2638 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2639 goto error;
2640 }
2641
2642 ret = event_kernel_disable_tracepoint(ksess, kchan, event_name);
2643 if (ret != LTTCOMM_OK) {
2644 goto error;
2645 }
2646
2647 kernel_wait_quiescent(kernel_tracer_fd);
2648 break;
2649 }
2650 case LTTNG_DOMAIN_UST:
2651 {
2652 struct ltt_ust_channel *uchan;
2653 struct ltt_ust_session *usess;
2654
2655 usess = session->ust_session;
2656
2657 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2658 channel_name);
2659 if (uchan == NULL) {
2660 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2661 goto error;
2662 }
2663
2664 ret = event_ust_disable_tracepoint(usess, domain, uchan, event_name);
2665 if (ret != LTTCOMM_OK) {
2666 goto error;
2667 }
2668
2669 DBG3("Disable UST event %s in channel %s completed", event_name,
2670 channel_name);
2671 break;
2672 }
2673 #if 0
2674 case LTTNG_DOMAIN_UST_EXEC_NAME:
2675 case LTTNG_DOMAIN_UST_PID:
2676 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2677 #endif
2678 default:
2679 ret = LTTCOMM_UND;
2680 goto error;
2681 }
2682
2683 ret = LTTCOMM_OK;
2684
2685 error:
2686 return ret;
2687 }
2688
/*
 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
 *
 * Disable every event of the channel named channel_name in the requested
 * domain of the session.  Return LTTCOMM_OK on success or an LTTCOMM
 * error code.
 */
static int cmd_disable_event_all(struct ltt_session *session, int domain,
		char *channel_name)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;
		struct ltt_kernel_channel *kchan;

		ksess = session->kernel_session;

		kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
		if (kchan == NULL) {
			ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
			goto error;
		}

		ret = event_kernel_disable_all(ksess, kchan);
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		/* Wait for the kernel tracer to reach a quiescent state. */
		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;
		struct ltt_ust_channel *uchan;

		usess = session->ust_session;

		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			ret = LTTCOMM_UST_CHAN_NOT_FOUND;
			goto error;
		}

		ret = event_ust_disable_all_tracepoints(usess, domain, uchan);
		/*
		 * NOTE(review): sibling paths test against LTTCOMM_OK here;
		 * confirm event_ust_disable_all_tracepoints() returns 0 (and
		 * not LTTCOMM_OK) on success before normalizing this check.
		 */
		if (ret != 0) {
			goto error;
		}

		DBG3("Disable all UST events in channel %s completed", channel_name);

		break;
	}
#if 0
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
#endif
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
2757
2758 /*
2759 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2760 */
2761 static int cmd_add_context(struct ltt_session *session, int domain,
2762 char *channel_name, char *event_name, struct lttng_event_context *ctx)
2763 {
2764 int ret;
2765
2766 switch (domain) {
2767 case LTTNG_DOMAIN_KERNEL:
2768 /* Add kernel context to kernel tracer */
2769 ret = context_kernel_add(session->kernel_session, ctx,
2770 event_name, channel_name);
2771 if (ret != LTTCOMM_OK) {
2772 goto error;
2773 }
2774 break;
2775 case LTTNG_DOMAIN_UST:
2776 {
2777 struct ltt_ust_session *usess = session->ust_session;
2778
2779 ret = context_ust_add(usess, domain, ctx, event_name, channel_name);
2780 if (ret != LTTCOMM_OK) {
2781 goto error;
2782 }
2783 break;
2784 }
2785 #if 0
2786 case LTTNG_DOMAIN_UST_EXEC_NAME:
2787 case LTTNG_DOMAIN_UST_PID:
2788 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2789 #endif
2790 default:
2791 ret = LTTCOMM_UND;
2792 goto error;
2793 }
2794
2795 ret = LTTCOMM_OK;
2796
2797 error:
2798 return ret;
2799 }
2800
2801 /*
2802 * Command LTTNG_SET_FILTER processed by the client thread.
2803 */
2804 static int cmd_set_filter(struct ltt_session *session, int domain,
2805 char *channel_name, char *event_name,
2806 struct lttng_filter_bytecode *bytecode)
2807 {
2808 int ret;
2809
2810 switch (domain) {
2811 case LTTNG_DOMAIN_KERNEL:
2812 ret = LTTCOMM_FATAL;
2813 break;
2814 case LTTNG_DOMAIN_UST:
2815 {
2816 struct ltt_ust_session *usess = session->ust_session;
2817
2818 ret = filter_ust_set(usess, domain, bytecode, event_name, channel_name);
2819 if (ret != LTTCOMM_OK) {
2820 goto error;
2821 }
2822 break;
2823 }
2824 #if 0
2825 case LTTNG_DOMAIN_UST_EXEC_NAME:
2826 case LTTNG_DOMAIN_UST_PID:
2827 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2828 #endif
2829 default:
2830 ret = LTTCOMM_UND;
2831 goto error;
2832 }
2833
2834 ret = LTTCOMM_OK;
2835
2836 error:
2837 return ret;
2838
2839 }
2840
2841 /*
2842 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2843 */
2844 static int cmd_enable_event(struct ltt_session *session, int domain,
2845 char *channel_name, struct lttng_event *event)
2846 {
2847 int ret;
2848 struct lttng_channel *attr;
2849 struct ltt_ust_session *usess = session->ust_session;
2850
2851 switch (domain) {
2852 case LTTNG_DOMAIN_KERNEL:
2853 {
2854 struct ltt_kernel_channel *kchan;
2855
2856 kchan = trace_kernel_get_channel_by_name(channel_name,
2857 session->kernel_session);
2858 if (kchan == NULL) {
2859 attr = channel_new_default_attr(domain);
2860 if (attr == NULL) {
2861 ret = LTTCOMM_FATAL;
2862 goto error;
2863 }
2864 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2865
2866 /* This call will notify the kernel thread */
2867 ret = channel_kernel_create(session->kernel_session,
2868 attr, kernel_poll_pipe[1]);
2869 if (ret != LTTCOMM_OK) {
2870 free(attr);
2871 goto error;
2872 }
2873 free(attr);
2874 }
2875
2876 /* Get the newly created kernel channel pointer */
2877 kchan = trace_kernel_get_channel_by_name(channel_name,
2878 session->kernel_session);
2879 if (kchan == NULL) {
2880 /* This sould not happen... */
2881 ret = LTTCOMM_FATAL;
2882 goto error;
2883 }
2884
2885 ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
2886 event);
2887 if (ret != LTTCOMM_OK) {
2888 goto error;
2889 }
2890
2891 kernel_wait_quiescent(kernel_tracer_fd);
2892 break;
2893 }
2894 case LTTNG_DOMAIN_UST:
2895 {
2896 struct lttng_channel *attr;
2897 struct ltt_ust_channel *uchan;
2898
2899 /* Get channel from global UST domain */
2900 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2901 channel_name);
2902 if (uchan == NULL) {
2903 /* Create default channel */
2904 attr = channel_new_default_attr(domain);
2905 if (attr == NULL) {
2906 ret = LTTCOMM_FATAL;
2907 goto error;
2908 }
2909 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2910 attr->name[NAME_MAX - 1] = '\0';
2911
2912 ret = channel_ust_create(usess, domain, attr);
2913 if (ret != LTTCOMM_OK) {
2914 free(attr);
2915 goto error;
2916 }
2917 free(attr);
2918
2919 /* Get the newly created channel reference back */
2920 uchan = trace_ust_find_channel_by_name(
2921 usess->domain_global.channels, channel_name);
2922 if (uchan == NULL) {
2923 /* Something is really wrong */
2924 ret = LTTCOMM_FATAL;
2925 goto error;
2926 }
2927 }
2928
2929 /* At this point, the session and channel exist on the tracer */
2930 ret = event_ust_enable_tracepoint(usess, domain, uchan, event);
2931 if (ret != LTTCOMM_OK) {
2932 goto error;
2933 }
2934 break;
2935 }
2936 #if 0
2937 case LTTNG_DOMAIN_UST_EXEC_NAME:
2938 case LTTNG_DOMAIN_UST_PID:
2939 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2940 #endif
2941 default:
2942 ret = LTTCOMM_UND;
2943 goto error;
2944 }
2945
2946 ret = LTTCOMM_OK;
2947
2948 error:
2949 return ret;
2950 }
2951
/*
 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
 *
 * Enable every event of the given type (tracepoints, syscalls or both)
 * in the channel named channel_name, creating the channel with default
 * attributes first if needed.  Return LTTCOMM_OK on success or an
 * LTTCOMM error code.
 */
static int cmd_enable_event_all(struct ltt_session *session, int domain,
		char *channel_name, int event_type)
{
	int ret;
	struct ltt_kernel_channel *kchan;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			/* This call will notify the kernel thread */
			ret = channel_kernel_create(session->kernel_session, NULL,
					kernel_poll_pipe[1]);
			if (ret != LTTCOMM_OK) {
				goto error;
			}

			/* Get the newly created kernel channel pointer */
			kchan = trace_kernel_get_channel_by_name(channel_name,
					session->kernel_session);
			if (kchan == NULL) {
				/* This should not happen... */
				ret = LTTCOMM_FATAL;
				goto error;
			}

		}

		/* Dispatch on which class of kernel events to enable. */
		switch (event_type) {
		case LTTNG_EVENT_SYSCALL:
			ret = event_kernel_enable_all_syscalls(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_TRACEPOINT:
			/*
			 * This call enables all LTTNG_KERNEL_TRACEPOINTS and
			 * events already registered to the channel.
			 */
			ret = event_kernel_enable_all_tracepoints(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_ALL:
			/* Enable syscalls and tracepoints */
			ret = event_kernel_enable_all(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		default:
			ret = LTTCOMM_KERN_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value */
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		/* Wait for the kernel tracer to reach a quiescent state. */
		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_channel *attr;
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess = session->ust_session;

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(domain);
			if (attr == NULL) {
				ret = LTTCOMM_FATAL;
				goto error;
			}
			snprintf(attr->name, NAME_MAX, "%s", channel_name);
			attr->name[NAME_MAX - 1] = '\0';

			/* Use the internal command enable channel */
			ret = channel_ust_create(usess, domain, attr);
			if (ret != LTTCOMM_OK) {
				free(attr);
				goto error;
			}
			free(attr);

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
			if (uchan == NULL) {
				/* Something is really wrong */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */

		switch (event_type) {
		case LTTNG_EVENT_ALL:
		case LTTNG_EVENT_TRACEPOINT:
			ret = event_ust_enable_all_tracepoints(usess, domain, uchan);
			if (ret != LTTCOMM_OK) {
				goto error;
			}
			break;
		default:
			/* Syscall enabling is kernel-only; anything else fails. */
			ret = LTTCOMM_UST_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value */
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		break;
	}
#if 0
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
#endif
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
3088
3089 /*
3090 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
3091 */
3092 static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
3093 {
3094 int ret;
3095 ssize_t nb_events = 0;
3096
3097 switch (domain) {
3098 case LTTNG_DOMAIN_KERNEL:
3099 nb_events = kernel_list_events(kernel_tracer_fd, events);
3100 if (nb_events < 0) {
3101 ret = LTTCOMM_KERN_LIST_FAIL;
3102 goto error;
3103 }
3104 break;
3105 case LTTNG_DOMAIN_UST:
3106 nb_events = ust_app_list_events(events);
3107 if (nb_events < 0) {
3108 ret = LTTCOMM_UST_LIST_FAIL;
3109 goto error;
3110 }
3111 break;
3112 default:
3113 ret = LTTCOMM_UND;
3114 goto error;
3115 }
3116
3117 return nb_events;
3118
3119 error:
3120 /* Return negative value to differentiate return code */
3121 return -ret;
3122 }
3123
3124 /*
3125 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
3126 */
3127 static ssize_t cmd_list_tracepoint_fields(int domain,
3128 struct lttng_event_field **fields)
3129 {
3130 int ret;
3131 ssize_t nb_fields = 0;
3132
3133 switch (domain) {
3134 case LTTNG_DOMAIN_UST:
3135 nb_fields = ust_app_list_event_fields(fields);
3136 if (nb_fields < 0) {
3137 ret = LTTCOMM_UST_LIST_FAIL;
3138 goto error;
3139 }
3140 break;
3141 case LTTNG_DOMAIN_KERNEL:
3142 default: /* fall-through */
3143 ret = LTTCOMM_UND;
3144 goto error;
3145 }
3146
3147 return nb_fields;
3148
3149 error:
3150 /* Return negative value to differentiate return code */
3151 return -ret;
3152 }
3153
/*
 * Command LTTNG_START_TRACE processed by the client thread.
 *
 * Start tracing for the whole session: set up the relayd connection,
 * open kernel metadata and channel streams, hand the fds to the kernel
 * consumer, start the kernel session and flag the UST session so
 * applications start tracing.  Return LTTCOMM_OK on success or an
 * LTTCOMM error code.
 */
static int cmd_start_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;
	struct ltt_kernel_channel *kchan;

	/* Ease our life a bit ;) */
	ksession = session->kernel_session;
	usess = session->ust_session;

	if (session->enabled) {
		/* Already started. */
		ret = LTTCOMM_TRACE_ALREADY_STARTED;
		goto error;
	}

	/*
	 * NOTE(review): enabled is set before the steps below; if any of them
	 * fails the session stays flagged as started and a retry will report
	 * LTTCOMM_TRACE_ALREADY_STARTED -- confirm this is intended.
	 */
	session->enabled = 1;

	ret = setup_relayd(session);
	if (ret != LTTCOMM_OK) {
		ERR("Error setting up relayd for session %s", session->name);
		goto error;
	}

	/* Kernel tracing */
	if (ksession != NULL) {
		/* Open kernel metadata */
		if (ksession->metadata == NULL) {
			ret = kernel_open_metadata(ksession,
					ksession->consumer->dst.trace_path);
			if (ret < 0) {
				ret = LTTCOMM_KERN_META_FAIL;
				goto error;
			}
		}

		/* Open kernel metadata stream */
		if (ksession->metadata_stream_fd < 0) {
			ret = kernel_open_metadata_stream(ksession);
			if (ret < 0) {
				ERR("Kernel create metadata stream failed");
				ret = LTTCOMM_KERN_STREAM_FAIL;
				goto error;
			}
		}

		/* For each channel */
		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			/* Only open streams for channels that have none yet. */
			if (kchan->stream_count == 0) {
				ret = kernel_open_channel_stream(kchan);
				if (ret < 0) {
					ret = LTTCOMM_KERN_STREAM_FAIL;
					goto error;
				}
				/* Update the stream global counter */
				ksession->stream_count_global += ret;
			}
		}

		/* Setup kernel consumer socket and send fds to it */
		ret = init_kernel_tracing(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* This start the kernel tracing */
		ret = kernel_start_session(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* Quiescent wait after starting trace */
		kernel_wait_quiescent(kernel_tracer_fd);
	}

	/* Flag session that trace should start automatically */
	if (usess) {
		usess->start_trace = 1;

		ret = ust_app_start_trace_all(usess);
		if (ret < 0) {
			ret = LTTCOMM_UST_START_FAIL;
			goto error;
		}
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
3251
3252 /*
3253 * Command LTTNG_STOP_TRACE processed by the client thread.
3254 */
3255 static int cmd_stop_trace(struct ltt_session *session)
3256 {
3257 int ret;
3258 struct ltt_kernel_channel *kchan;
3259 struct ltt_kernel_session *ksession;
3260 struct ltt_ust_session *usess;
3261
3262 /* Short cut */
3263 ksession = session->kernel_session;
3264 usess = session->ust_session;
3265
3266 if (!session->enabled) {
3267 ret = LTTCOMM_TRACE_ALREADY_STOPPED;
3268 goto error;
3269 }
3270
3271 session->enabled = 0;
3272
3273 /* Kernel tracer */
3274 if (ksession != NULL) {
3275 DBG("Stop kernel tracing");
3276
3277 /* Flush metadata if exist */
3278 if (ksession->metadata_stream_fd >= 0) {