Implement filter bytecode support in lttng-sessiond, and parse filter string
lttng-tools.git: src/bin/lttng-sessiond/main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <semaphore.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/poll.h>
42 #include <common/compat/socket.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/futex.h>
46 #include <common/relayd/relayd.h>
47
48 #include "lttng-sessiond.h"
49 #include "channel.h"
50 #include "consumer.h"
51 #include "context.h"
52 #include "event.h"
53 #include "kernel.h"
54 #include "kernel-consumer.h"
55 #include "modprobe.h"
56 #include "shm.h"
57 #include "ust-ctl.h"
58 #include "ust-consumer.h"
59 #include "utils.h"
60 #include "fd-limit.h"
61 #include "filter.h"
62
63 #define CONSUMERD_FILE "lttng-consumerd"
64
65 /* Const values */
66 const char default_home_dir[] = DEFAULT_HOME_DIR;
67 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
68 const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
69 const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
70
71 const char *progname;
72 const char *opt_tracing_group;
73 static int opt_sig_parent;
74 static int opt_verbose_consumer;
75 static int opt_daemon;
76 static int opt_no_kernel;
77 static int is_root; /* Set to 1 if the daemon is running as root */
78 static pid_t ppid; /* Parent PID for --sig-parent option */
79 static char *rundir;
80
81 /* Consumer daemon specific control data */
82 static struct consumer_data kconsumer_data = {
83 .type = LTTNG_CONSUMER_KERNEL,
84 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
85 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
86 .err_sock = -1,
87 .cmd_sock = -1,
88 };
89 static struct consumer_data ustconsumer64_data = {
90 .type = LTTNG_CONSUMER64_UST,
91 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
92 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
93 .err_sock = -1,
94 .cmd_sock = -1,
95 };
96 static struct consumer_data ustconsumer32_data = {
97 .type = LTTNG_CONSUMER32_UST,
98 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
99 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
100 .err_sock = -1,
101 .cmd_sock = -1,
102 };
103
104 static int dispatch_thread_exit;
105
106 /* Global application Unix socket path */
107 static char apps_unix_sock_path[PATH_MAX];
108 /* Global client Unix socket path */
109 static char client_unix_sock_path[PATH_MAX];
110 /* global wait shm path for UST */
111 static char wait_shm_path[PATH_MAX];
112
113 /* Sockets and FDs */
114 static int client_sock = -1;
115 static int apps_sock = -1;
116 static int kernel_tracer_fd = -1;
117 static int kernel_poll_pipe[2] = { -1, -1 };
118
119 /*
120 * Quit pipe for all threads. This permits a single cancellation point
121 * for all threads when receiving an event on the pipe.
122 */
123 static int thread_quit_pipe[2] = { -1, -1 };
124
125 /*
126 * This pipe is used to inform the thread managing application communication
127 * that a command is queued and ready to be processed.
128 */
129 static int apps_cmd_pipe[2] = { -1, -1 };
130
131 /* Pthread, Mutexes and Semaphores */
132 static pthread_t apps_thread;
133 static pthread_t reg_apps_thread;
134 static pthread_t client_thread;
135 static pthread_t kernel_thread;
136 static pthread_t dispatch_thread;
137
138 /*
139  * UST registration command queue. This queue is tied to a futex and uses an
140  * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h.
141 *
142 * The thread_manage_apps and thread_dispatch_ust_registration interact with
143 * this queue and the wait/wake scheme.
144 */
145 static struct ust_cmd_queue ust_cmd_queue;
146
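/*
 * Informal sketch of how the queue above is driven elsewhere in this file
 * (see thread_registration_apps and thread_dispatch_ust_registration); the
 * names are real, the surrounding loops are simplified:
 *
 *   Producer side (N wakers, registration thread):
 *     cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *     futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   Consumer side (1 waiter, dispatch thread):
 *     futex_nto1_prepare(&ust_cmd_queue.futex);
 *     while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue)) != NULL) {
 *             ... forward the command through apps_cmd_pipe ...
 *     }
 *     futex_nto1_wait(&ust_cmd_queue.futex);
 */
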
147 /*
148 * Pointer initialized before thread creation.
149 *
150 * This points to the tracing session list containing the session count and a
151 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
152 * MUST NOT be taken if you call a public function in session.c.
153 *
154 * The lock is nested inside the structure: session_list_ptr->lock. Please use
155 * session_lock_list and session_unlock_list for lock acquisition.
156 */
157 static struct ltt_session_list *session_list_ptr;
158
159 int ust_consumerd64_fd = -1;
160 int ust_consumerd32_fd = -1;
161
162 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
163 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
164 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
165 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
166
167 /*
168 * Consumer daemon state which is changed when spawning it, killing it or in
169 * case of a fatal error.
170 */
171 enum consumerd_state {
172 CONSUMER_STARTED = 1,
173 CONSUMER_STOPPED = 2,
174 CONSUMER_ERROR = 3,
175 };
176
177 /*
178 * This consumer daemon state is used to validate if a client command will be
179 * able to reach the consumer. If not, the client is informed. For instance,
180 * doing a "lttng start" when the consumer state is set to ERROR will return an
181 * error to the client.
182 *
183 * The following example shows a possible race condition of this scheme:
184 *
185 * consumer thread error happens
186 * client cmd arrives
187 * client cmd checks state -> still OK
188 * consumer thread exit, sets error
189 * client cmd try to talk to consumer
190 * ...
191 *
192 * However, since the consumer is a different daemon, we have no way of making
193 * sure the command will reach it safely even with this state flag. This is why
194 * we consider that up to the state validation during command processing, the
195 * command is safe. After that, we can not guarantee the correctness of the
196 * client request vis-a-vis the consumer.
197 */
198 static enum consumerd_state ust_consumerd_state;
199 static enum consumerd_state kernel_consumerd_state;
200
201 /*
202  * Used to keep a unique index for each relayd socket created; this value is
203  * associated with streams on the consumer side so the consumer can match the
204  * right relayd to send to.
205 *
206 * This value should be incremented atomically for safety purposes and future
207 * possible concurrent access.
208 */
209 static unsigned int relayd_net_seq_idx;
210
211 static
212 void setup_consumerd_path(void)
213 {
214 const char *bin, *libdir;
215
216 /*
217 * Allow INSTALL_BIN_PATH to be used as a target path for the
218 * native architecture size consumer if CONFIG_CONSUMER*_PATH
219 * has not been defined.
220 */
221 #if (CAA_BITS_PER_LONG == 32)
222 if (!consumerd32_bin[0]) {
223 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
224 }
225 if (!consumerd32_libdir[0]) {
226 consumerd32_libdir = INSTALL_LIB_PATH;
227 }
228 #elif (CAA_BITS_PER_LONG == 64)
229 if (!consumerd64_bin[0]) {
230 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
231 }
232 if (!consumerd64_libdir[0]) {
233 consumerd64_libdir = INSTALL_LIB_PATH;
234 }
235 #else
236 #error "Unknown bitness"
237 #endif
238
239 /*
240 * runtime env. var. overrides the build default.
241 */
242 bin = getenv("LTTNG_CONSUMERD32_BIN");
243 if (bin) {
244 consumerd32_bin = bin;
245 }
246 bin = getenv("LTTNG_CONSUMERD64_BIN");
247 if (bin) {
248 consumerd64_bin = bin;
249 }
250 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
251 if (libdir) {
252 consumerd32_libdir = libdir;
253 }
254 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
255 if (libdir) {
256 consumerd64_libdir = libdir;
257 }
258 }
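
/*
 * Example of overriding the build-time consumer paths at start-up, matching
 * the getenv() calls above (the /opt paths are purely hypothetical):
 *
 *   LTTNG_CONSUMERD32_BIN=/opt/lttng32/bin/lttng-consumerd \
 *   LTTNG_CONSUMERD32_LIBDIR=/opt/lttng32/lib \
 *   lttng-sessiond
 */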
259
260 /*
261 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
262 */
263 static int create_thread_poll_set(struct lttng_poll_event *events,
264 unsigned int size)
265 {
266 int ret;
267
268 if (events == NULL || size == 0) {
269 ret = -1;
270 goto error;
271 }
272
273 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
274 if (ret < 0) {
275 goto error;
276 }
277
278 /* Add quit pipe */
279 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
280 if (ret < 0) {
281 goto error;
282 }
283
284 return 0;
285
286 error:
287 return ret;
288 }
289
290 /*
291 * Check if the thread quit pipe was triggered.
292 *
293  * Return 1 if it was triggered, else 0.
294 */
295 static int check_thread_quit_pipe(int fd, uint32_t events)
296 {
297 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
298 return 1;
299 }
300
301 return 0;
302 }
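
/*
 * The two helpers above are combined by every thread in this file into the
 * same polling loop shape. A minimal sketch, where my_fd stands in for
 * whatever descriptor the thread actually watches (error handling and
 * labels trimmed, so this is illustrative rather than complete):
 *
 *   struct lttng_poll_event events;
 *   int i, ret, pollfd;
 *   uint32_t revents, nb_fd;
 *
 *   ret = create_thread_poll_set(&events, 2);
 *   ret = lttng_poll_add(&events, my_fd, LPOLLIN | LPOLLRDHUP);
 *   for (;;) {
 *           ret = lttng_poll_wait(&events, -1);
 *           if (ret < 0 && errno == EINTR) {
 *                   continue;
 *           }
 *           nb_fd = LTTNG_POLL_GETNB(&events);
 *           for (i = 0; i < nb_fd; i++) {
 *                   revents = LTTNG_POLL_GETEV(&events, i);
 *                   pollfd = LTTNG_POLL_GETFD(&events, i);
 *                   if (check_thread_quit_pipe(pollfd, revents)) {
 *                           goto exit;
 *                   }
 *                   ... handle events on my_fd ...
 *           }
 *   }
 */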
303
304 /*
305 * Return group ID of the tracing group or -1 if not found.
306 */
307 static gid_t allowed_group(void)
308 {
309 struct group *grp;
310
311 if (opt_tracing_group) {
312 grp = getgrnam(opt_tracing_group);
313 } else {
314 grp = getgrnam(default_tracing_group);
315 }
316 if (!grp) {
317 return -1;
318 } else {
319 return grp->gr_gid;
320 }
321 }
322
323 /*
324 * Init thread quit pipe.
325 *
326 * Return -1 on error or 0 if all pipes are created.
327 */
328 static int init_thread_quit_pipe(void)
329 {
330 int ret, i;
331
332 ret = pipe(thread_quit_pipe);
333 if (ret < 0) {
334 PERROR("thread quit pipe");
335 goto error;
336 }
337
338 for (i = 0; i < 2; i++) {
339 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
340 if (ret < 0) {
341 PERROR("fcntl");
342 goto error;
343 }
344 }
345
346 error:
347 return ret;
348 }
349
350 /*
351  * Complete teardown of a kernel session. This frees all data structures related
352  * to the kernel session and updates the counter.
353 */
354 static void teardown_kernel_session(struct ltt_session *session)
355 {
356 if (!session->kernel_session) {
357 DBG3("No kernel session when tearing down session");
358 return;
359 }
360
361 DBG("Tearing down kernel session");
362
363 /*
364 * If a custom kernel consumer was registered, close the socket before
365 * tearing down the complete kernel session structure
366 */
367 if (kconsumer_data.cmd_sock >= 0 &&
368 session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
369 lttcomm_close_unix_sock(session->kernel_session->consumer_fd);
370 }
371
372 trace_kernel_destroy_session(session->kernel_session);
373 }
374
375 /*
376  * Complete teardown of all UST sessions. This will free everything on its path
377  * and destroy the core essence of all UST sessions :)
378 */
379 static void teardown_ust_session(struct ltt_session *session)
380 {
381 int ret;
382
383 if (!session->ust_session) {
384 DBG3("No UST session when tearing down session");
385 return;
386 }
387
388 DBG("Tearing down UST session(s)");
389
390 ret = ust_app_destroy_trace_all(session->ust_session);
391 if (ret) {
392 ERR("Error in ust_app_destroy_trace_all");
393 }
394
395 trace_ust_destroy_session(session->ust_session);
396 }
397
398 /*
399 * Stop all threads by closing the thread quit pipe.
400 */
401 static void stop_threads(void)
402 {
403 int ret;
404
405 /* Stopping all threads */
406 DBG("Terminating all threads");
407 ret = notify_thread_pipe(thread_quit_pipe[1]);
408 if (ret < 0) {
409 ERR("write error on thread quit pipe");
410 }
411
412 /* Dispatch thread */
413 dispatch_thread_exit = 1;
414 futex_nto1_wake(&ust_cmd_queue.futex);
415 }
416
417 /*
418 * Cleanup the daemon
419 */
420 static void cleanup(void)
421 {
422 int ret;
423 char *cmd;
424 struct ltt_session *sess, *stmp;
425
426 DBG("Cleaning up");
427
428 DBG("Removing %s directory", rundir);
429 ret = asprintf(&cmd, "rm -rf %s", rundir);
430 if (ret < 0) {
431 ERR("asprintf failed. Something is really wrong!");
432 }
433
434 /* Remove lttng run directory */
435 ret = system(cmd);
436 if (ret < 0) {
437 ERR("Unable to clean %s", rundir);
438 }
439 free(cmd);
440
441 DBG("Cleaning up all sessions");
442
443 /* Destroy session list mutex */
444 if (session_list_ptr != NULL) {
445 pthread_mutex_destroy(&session_list_ptr->lock);
446
447 /* Cleanup ALL session */
448 cds_list_for_each_entry_safe(sess, stmp,
449 &session_list_ptr->head, list) {
450 teardown_kernel_session(sess);
451 teardown_ust_session(sess);
452 free(sess);
453 }
454 }
455
456 DBG("Closing all UST sockets");
457 ust_app_clean_list();
458
459 pthread_mutex_destroy(&kconsumer_data.pid_mutex);
460
461 if (is_root && !opt_no_kernel) {
462 DBG2("Closing kernel fd");
463 if (kernel_tracer_fd >= 0) {
464 ret = close(kernel_tracer_fd);
465 if (ret) {
466 PERROR("close");
467 }
468 }
469 DBG("Unloading kernel modules");
470 modprobe_remove_lttng_all();
471 }
472 utils_close_pipe(kernel_poll_pipe);
473 utils_close_pipe(thread_quit_pipe);
474 utils_close_pipe(apps_cmd_pipe);
475
476 /* <fun> */
477 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
478 "Matthew, BEET driven development works!%c[%dm",
479 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
480 /* </fun> */
481 }
482
483 /*
484 * Send data on a unix socket using the liblttsessiondcomm API.
485 *
486 * Return lttcomm error code.
487 */
488 static int send_unix_sock(int sock, void *buf, size_t len)
489 {
490 /* Check valid length */
491 if (len <= 0) {
492 return -1;
493 }
494
495 return lttcomm_send_unix_sock(sock, buf, len);
496 }
497
498 /*
499 * Free memory of a command context structure.
500 */
501 static void clean_command_ctx(struct command_ctx **cmd_ctx)
502 {
503 DBG("Clean command context structure");
504 if (*cmd_ctx) {
505 if ((*cmd_ctx)->llm) {
506 free((*cmd_ctx)->llm);
507 }
508 if ((*cmd_ctx)->lsm) {
509 free((*cmd_ctx)->lsm);
510 }
511 free(*cmd_ctx);
512 *cmd_ctx = NULL;
513 }
514 }
515
516 /*
517 * Notify UST applications using the shm mmap futex.
518 */
519 static int notify_ust_apps(int active)
520 {
521 char *wait_shm_mmap;
522
523 DBG("Notifying applications of session daemon state: %d", active);
524
525 /* See shm.c for this call implying mmap, shm and futex calls */
526 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
527 if (wait_shm_mmap == NULL) {
528 goto error;
529 }
530
531 /* Wake waiting process */
532 futex_wait_update((int32_t *) wait_shm_mmap, active);
533
534 /* Apps notified successfully */
535 return 0;
536
537 error:
538 return -1;
539 }
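
/*
 * Note on the scheme above (informal): the wait shm is a small shared
 * memory object whose first 32-bit word is used as a futex. Instrumented
 * applications (on the lttng-ust side, not in this file) map the same
 * object and block on that word; futex_wait_update() stores 'active' and
 * wakes them so they try to register. This is why the registration thread
 * calls notify_ust_apps(1) once its listening socket is ready, and
 * notify_ust_apps(0) when it tears down.
 */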
540
541 /*
542 * Setup the outgoing data buffer for the response (llm) by allocating the
543 * right amount of memory and copying the original information from the lsm
544 * structure.
545 *
546  * Return the size of the allocated payload buffer, or a negative value on error.
547 */
548 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
549 {
550 int ret, buf_size;
551
552 buf_size = size;
553
554 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
555 if (cmd_ctx->llm == NULL) {
556 PERROR("zmalloc");
557 ret = -ENOMEM;
558 goto error;
559 }
560
561 /* Copy common data */
562 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
563 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
564
565 cmd_ctx->llm->data_size = size;
566 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
567
568 return buf_size;
569
570 error:
571 return ret;
572 }
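
/*
 * Hedged usage sketch for setup_lttng_msg() (callers appear later in this
 * file; payload, nb_chan and setup_error are illustrative names): the header
 * and payload are allocated contiguously, so the reply data is copied right
 * after the fixed-size lttcomm_lttng_msg header.
 *
 *   ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_channel) * nb_chan);
 *   if (ret < 0) {
 *           goto setup_error;
 *   }
 *   payload = ((char *) cmd_ctx->llm) + sizeof(struct lttcomm_lttng_msg);
 */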
573
574 /*
575  * Update the kernel poll set with all channel fds available over all tracing
576  * sessions. Add the wakeup pipe at the end of the set.
577 */
578 static int update_kernel_poll(struct lttng_poll_event *events)
579 {
580 int ret;
581 struct ltt_session *session;
582 struct ltt_kernel_channel *channel;
583
584 DBG("Updating kernel poll set");
585
586 session_lock_list();
587 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
588 session_lock(session);
589 if (session->kernel_session == NULL) {
590 session_unlock(session);
591 continue;
592 }
593
594 cds_list_for_each_entry(channel,
595 &session->kernel_session->channel_list.head, list) {
596 /* Add channel fd to the kernel poll set */
597 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
598 if (ret < 0) {
599 session_unlock(session);
600 goto error;
601 }
602 DBG("Channel fd %d added to kernel set", channel->fd);
603 }
604 session_unlock(session);
605 }
606 session_unlock_list();
607
608 return 0;
609
610 error:
611 session_unlock_list();
612 return -1;
613 }
614
615 /*
616  * Find the channel fd from 'fd' over all tracing sessions. When found, check
617 * for new channel stream and send those stream fds to the kernel consumer.
618 *
619 * Useful for CPU hotplug feature.
620 */
621 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
622 {
623 int ret = 0;
624 struct ltt_session *session;
625 struct ltt_kernel_channel *channel;
626
627 DBG("Updating kernel streams for channel fd %d", fd);
628
629 session_lock_list();
630 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
631 session_lock(session);
632 if (session->kernel_session == NULL) {
633 session_unlock(session);
634 continue;
635 }
636
637 /* This is not supposed to be -1, but this is an extra safety check */
638 if (session->kernel_session->consumer_fd < 0) {
639 session->kernel_session->consumer_fd = consumer_data->cmd_sock;
640 }
641
642 cds_list_for_each_entry(channel,
643 &session->kernel_session->channel_list.head, list) {
644 if (channel->fd == fd) {
645 DBG("Channel found, updating kernel streams");
646 ret = kernel_open_channel_stream(channel);
647 if (ret < 0) {
648 goto error;
649 }
650
651 /*
652 * Have we already sent fds to the consumer? If yes, it means
653 * that tracing is started so it is safe to send our updated
654 * stream fds.
655 */
656 if (session->kernel_session->consumer_fds_sent == 1 &&
657 session->kernel_session->consumer != NULL) {
658 ret = kernel_consumer_send_channel_stream(
659 session->kernel_session->consumer_fd, channel,
660 session->kernel_session);
661 if (ret < 0) {
662 goto error;
663 }
664 }
665 goto error;
666 }
667 }
668 session_unlock(session);
669 }
670 session_unlock_list();
671 return ret;
672
673 error:
674 session_unlock(session);
675 session_unlock_list();
676 return ret;
677 }
678
679 /*
680 * For each tracing session, update newly registered apps.
681 */
682 static void update_ust_app(int app_sock)
683 {
684 struct ltt_session *sess, *stmp;
685
686 session_lock_list();
687
688 /* For all tracing session(s) */
689 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
690 session_lock(sess);
691 if (sess->ust_session) {
692 ust_app_global_update(sess->ust_session, app_sock);
693 }
694 session_unlock(sess);
695 }
696
697 session_unlock_list();
698 }
699
700 /*
701  * This thread manages events coming from the kernel.
702 *
703 * Features supported in this thread:
704 * -) CPU Hotplug
705 */
706 static void *thread_manage_kernel(void *data)
707 {
708 int ret, i, pollfd, update_poll_flag = 1;
709 uint32_t revents, nb_fd;
710 char tmp;
711 struct lttng_poll_event events;
712
713 DBG("Thread manage kernel started");
714
715 ret = create_thread_poll_set(&events, 2);
716 if (ret < 0) {
717 goto error_poll_create;
718 }
719
720 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
721 if (ret < 0) {
722 goto error;
723 }
724
725 while (1) {
726 if (update_poll_flag == 1) {
727 /*
728 * Reset number of fd in the poll set. Always 2 since there is the thread
729 * quit pipe and the kernel pipe.
730 */
731 events.nb_fd = 2;
732
733 ret = update_kernel_poll(&events);
734 if (ret < 0) {
735 goto error;
736 }
737 update_poll_flag = 0;
738 }
739
740 nb_fd = LTTNG_POLL_GETNB(&events);
741
742 DBG("Thread kernel polling on %d fds", nb_fd);
743
744 /* Zero out the poll events */
745 lttng_poll_reset(&events);
746
747 /* Poll infinite value of time */
748 restart:
749 ret = lttng_poll_wait(&events, -1);
750 if (ret < 0) {
751 /*
752 * Restart interrupted system call.
753 */
754 if (errno == EINTR) {
755 goto restart;
756 }
757 goto error;
758 } else if (ret == 0) {
759 /* Should not happen since timeout is infinite */
760 ERR("Return value of poll is 0 with an infinite timeout.\n"
761 "This should not have happened! Continuing...");
762 continue;
763 }
764
765 for (i = 0; i < nb_fd; i++) {
766 /* Fetch once the poll data */
767 revents = LTTNG_POLL_GETEV(&events, i);
768 pollfd = LTTNG_POLL_GETFD(&events, i);
769
770 /* Thread quit pipe has been closed. Killing thread. */
771 ret = check_thread_quit_pipe(pollfd, revents);
772 if (ret) {
773 goto error;
774 }
775
776 /* Check for data on kernel pipe */
777 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
778 ret = read(kernel_poll_pipe[0], &tmp, 1);
779 update_poll_flag = 1;
780 continue;
781 } else {
782 /*
783 * New CPU detected by the kernel. Adding kernel stream to
784 * kernel session and updating the kernel consumer
785 */
786 if (revents & LPOLLIN) {
787 ret = update_kernel_stream(&kconsumer_data, pollfd);
788 if (ret < 0) {
789 continue;
790 }
791 break;
792 /*
793 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
794 * and unregister kernel stream at this point.
795 */
796 }
797 }
798 }
799 }
800
801 error:
802 lttng_poll_clean(&events);
803 error_poll_create:
804 DBG("Kernel thread dying");
805 return NULL;
806 }
807
808 /*
809  * This thread manages consumer errors sent back to the session daemon.
810 */
811 static void *thread_manage_consumer(void *data)
812 {
813 int sock = -1, i, ret, pollfd;
814 uint32_t revents, nb_fd;
815 enum lttcomm_return_code code;
816 struct lttng_poll_event events;
817 struct consumer_data *consumer_data = data;
818
819 DBG("[thread] Manage consumer started");
820
821 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
822 if (ret < 0) {
823 goto error_listen;
824 }
825
826 /*
827 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
828 * Nothing more will be added to this poll set.
829 */
830 ret = create_thread_poll_set(&events, 2);
831 if (ret < 0) {
832 goto error_poll;
833 }
834
835 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
836 if (ret < 0) {
837 goto error;
838 }
839
840 nb_fd = LTTNG_POLL_GETNB(&events);
841
842 /* Infinite blocking call, waiting for transmission */
843 restart:
844 ret = lttng_poll_wait(&events, -1);
845 if (ret < 0) {
846 /*
847 * Restart interrupted system call.
848 */
849 if (errno == EINTR) {
850 goto restart;
851 }
852 goto error;
853 }
854
855 for (i = 0; i < nb_fd; i++) {
856 /* Fetch once the poll data */
857 revents = LTTNG_POLL_GETEV(&events, i);
858 pollfd = LTTNG_POLL_GETFD(&events, i);
859
860 /* Thread quit pipe has been closed. Killing thread. */
861 ret = check_thread_quit_pipe(pollfd, revents);
862 if (ret) {
863 goto error;
864 }
865
866 /* Event on the registration socket */
867 if (pollfd == consumer_data->err_sock) {
868 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
869 ERR("consumer err socket poll error");
870 goto error;
871 }
872 }
873 }
874
875 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
876 if (sock < 0) {
877 goto error;
878 }
879
880 DBG2("Receiving code from consumer err_sock");
881
882 /* Getting status code from kconsumerd */
883 ret = lttcomm_recv_unix_sock(sock, &code,
884 sizeof(enum lttcomm_return_code));
885 if (ret <= 0) {
886 goto error;
887 }
888
889 if (code == CONSUMERD_COMMAND_SOCK_READY) {
890 consumer_data->cmd_sock =
891 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
892 if (consumer_data->cmd_sock < 0) {
893 sem_post(&consumer_data->sem);
894 PERROR("consumer connect");
895 goto error;
896 }
897 /* Signal condition to tell that the kconsumerd is ready */
898 sem_post(&consumer_data->sem);
899 DBG("consumer command socket ready");
900 } else {
901 ERR("consumer error when waiting for SOCK_READY : %s",
902 lttcomm_get_readable_code(-code));
903 goto error;
904 }
905
906 /* Remove the kconsumerd error sock since we've established a connection */
907 ret = lttng_poll_del(&events, consumer_data->err_sock);
908 if (ret < 0) {
909 goto error;
910 }
911
912 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
913 if (ret < 0) {
914 goto error;
915 }
916
917 /* Update number of fd */
918 nb_fd = LTTNG_POLL_GETNB(&events);
919
920 /* Infinite blocking call, waiting for transmission */
921 restart_poll:
922 ret = lttng_poll_wait(&events, -1);
923 if (ret < 0) {
924 /*
925 * Restart interrupted system call.
926 */
927 if (errno == EINTR) {
928 goto restart_poll;
929 }
930 goto error;
931 }
932
933 for (i = 0; i < nb_fd; i++) {
934 /* Fetch once the poll data */
935 revents = LTTNG_POLL_GETEV(&events, i);
936 pollfd = LTTNG_POLL_GETFD(&events, i);
937
938 /* Thread quit pipe has been closed. Killing thread. */
939 ret = check_thread_quit_pipe(pollfd, revents);
940 if (ret) {
941 goto error;
942 }
943
944 /* Event on the kconsumerd socket */
945 if (pollfd == sock) {
946 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
947 ERR("consumer err socket second poll error");
948 goto error;
949 }
950 }
951 }
952
953 /* Wait for any kconsumerd error */
954 ret = lttcomm_recv_unix_sock(sock, &code,
955 sizeof(enum lttcomm_return_code));
956 if (ret <= 0) {
957 ERR("consumer closed the command socket");
958 goto error;
959 }
960
961 ERR("consumer return code : %s", lttcomm_get_readable_code(-code));
962
963 error:
964 /* Immediately set the consumerd state to stopped */
965 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
966 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
967 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
968 consumer_data->type == LTTNG_CONSUMER32_UST) {
969 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
970 } else {
971 /* Code flow error... */
972 assert(0);
973 }
974
975 if (consumer_data->err_sock >= 0) {
976 ret = close(consumer_data->err_sock);
977 if (ret) {
978 PERROR("close");
979 }
980 }
981 if (consumer_data->cmd_sock >= 0) {
982 ret = close(consumer_data->cmd_sock);
983 if (ret) {
984 PERROR("close");
985 }
986 }
987 if (sock >= 0) {
988 ret = close(sock);
989 if (ret) {
990 PERROR("close");
991 }
992 }
993
994 unlink(consumer_data->err_unix_sock_path);
995 unlink(consumer_data->cmd_unix_sock_path);
996 consumer_data->pid = 0;
997
998 lttng_poll_clean(&events);
999 error_poll:
1000 error_listen:
1001 DBG("consumer thread cleanup completed");
1002
1003 return NULL;
1004 }
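
/*
 * Summary of the handshake implemented above, for reference:
 *
 *   1) The session daemon listens on consumer_data->err_sock and polls it
 *      together with the thread quit pipe.
 *   2) The freshly exec'd consumerd connects to that error socket and sends
 *      CONSUMERD_COMMAND_SOCK_READY.
 *   3) The session daemon then connects consumer_data->cmd_sock to the
 *      consumer command socket path and posts consumer_data->sem, which
 *      unblocks spawn_consumer_thread().
 *   4) From then on, the accepted error socket is only polled to detect
 *      consumerd death or errors; any further lttcomm_return_code received
 *      here is fatal and flips the matching *_consumerd_state to
 *      CONSUMER_ERROR.
 */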
1005
1006 /*
1007  * This thread manages application communication.
1008 */
1009 static void *thread_manage_apps(void *data)
1010 {
1011 int i, ret, pollfd;
1012 uint32_t revents, nb_fd;
1013 struct ust_command ust_cmd;
1014 struct lttng_poll_event events;
1015
1016 DBG("[thread] Manage application started");
1017
1018 rcu_register_thread();
1019 rcu_thread_online();
1020
1021 ret = create_thread_poll_set(&events, 2);
1022 if (ret < 0) {
1023 goto error_poll_create;
1024 }
1025
1026 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1027 if (ret < 0) {
1028 goto error;
1029 }
1030
1031 while (1) {
1032 /* Zero out the events structure */
1033 lttng_poll_reset(&events);
1034
1035 nb_fd = LTTNG_POLL_GETNB(&events);
1036
1037 DBG("Apps thread polling on %d fds", nb_fd);
1038
1039 /* Infinite blocking call, waiting for transmission */
1040 restart:
1041 ret = lttng_poll_wait(&events, -1);
1042 if (ret < 0) {
1043 /*
1044 * Restart interrupted system call.
1045 */
1046 if (errno == EINTR) {
1047 goto restart;
1048 }
1049 goto error;
1050 }
1051
1052 for (i = 0; i < nb_fd; i++) {
1053 /* Fetch once the poll data */
1054 revents = LTTNG_POLL_GETEV(&events, i);
1055 pollfd = LTTNG_POLL_GETFD(&events, i);
1056
1057 /* Thread quit pipe has been closed. Killing thread. */
1058 ret = check_thread_quit_pipe(pollfd, revents);
1059 if (ret) {
1060 goto error;
1061 }
1062
1063 /* Inspect the apps cmd pipe */
1064 if (pollfd == apps_cmd_pipe[0]) {
1065 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1066 ERR("Apps command pipe error");
1067 goto error;
1068 } else if (revents & LPOLLIN) {
1069 /* Empty pipe */
1070 ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
1071 if (ret < 0 || ret < sizeof(ust_cmd)) {
1072 PERROR("read apps cmd pipe");
1073 goto error;
1074 }
1075
1076 /* Register the application to the session daemon */
1077 ret = ust_app_register(&ust_cmd.reg_msg,
1078 ust_cmd.sock);
1079 if (ret == -ENOMEM) {
1080 goto error;
1081 } else if (ret < 0) {
1082 break;
1083 }
1084
1085 /*
1086 * Validate UST version compatibility.
1087 */
1088 ret = ust_app_validate_version(ust_cmd.sock);
1089 if (ret >= 0) {
1090 /*
1091 * Add channel(s) and event(s) to newly registered apps
1092 * from lttng global UST domain.
1093 */
1094 update_ust_app(ust_cmd.sock);
1095 }
1096
1097 ret = ust_app_register_done(ust_cmd.sock);
1098 if (ret < 0) {
1099 /*
1100 * If the registration is not possible, we simply
1101 * unregister the apps and continue
1102 */
1103 ust_app_unregister(ust_cmd.sock);
1104 } else {
1105 /*
1106 * Here we only need to monitor the close of the UST
1107 * socket; the poll set reports hangups by default.
1108 * Listen on POLLIN (even if we never expect any
1109 * data) to ensure that a hangup wakes us.
1110 */
1111 ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);
1112 if (ret < 0) {
1113 goto error;
1114 }
1115
1116 DBG("Apps with sock %d added to poll set",
1117 ust_cmd.sock);
1118 }
1119
1120 break;
1121 }
1122 } else {
1123 /*
1124 * At this point, we know that a registered application
1125 * triggered the event at poll_wait.
1126 */
1127 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1128 /* Removing from the poll set */
1129 ret = lttng_poll_del(&events, pollfd);
1130 if (ret < 0) {
1131 goto error;
1132 }
1133
1134 /* Socket closed on remote end. */
1135 ust_app_unregister(pollfd);
1136 break;
1137 }
1138 }
1139 }
1140 }
1141
1142 error:
1143 lttng_poll_clean(&events);
1144 error_poll_create:
1145 DBG("Application communication apps thread cleanup complete");
1146 rcu_thread_offline();
1147 rcu_unregister_thread();
1148 return NULL;
1149 }
1150
1151 /*
1152  * Dispatch requests from the registration thread to the application
1153  * communication thread.
1154 */
1155 static void *thread_dispatch_ust_registration(void *data)
1156 {
1157 int ret;
1158 struct cds_wfq_node *node;
1159 struct ust_command *ust_cmd = NULL;
1160
1161 DBG("[thread] Dispatch UST command started");
1162
1163 while (!dispatch_thread_exit) {
1164 /* Atomically prepare the queue futex */
1165 futex_nto1_prepare(&ust_cmd_queue.futex);
1166
1167 do {
1168 /* Dequeue command for registration */
1169 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1170 if (node == NULL) {
1171 DBG("Woken up but nothing in the UST command queue");
1172 /* Continue thread execution */
1173 break;
1174 }
1175
1176 ust_cmd = caa_container_of(node, struct ust_command, node);
1177
1178 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1179 " gid:%d sock:%d name:%s (version %d.%d)",
1180 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1181 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1182 ust_cmd->sock, ust_cmd->reg_msg.name,
1183 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1184 /*
1185 * Inform the apps thread of the new application registration. This
1186 * call is blocking, so we can be assured that the data will be read
1187 * at some point in time, or we will wait until the end of the world :)
1188 */
1189 ret = write(apps_cmd_pipe[1], ust_cmd,
1190 sizeof(struct ust_command));
1191 if (ret < 0) {
1192 PERROR("write apps cmd pipe");
1193 if (errno == EBADF) {
1194 /*
1195 * We can't inform the application thread to process
1196 * registration. We will exit or else application
1197 * registration will not occur and tracing will never
1198 * start.
1199 */
1200 goto error;
1201 }
1202 }
1203 free(ust_cmd);
1204 } while (node != NULL);
1205
1206 /* Futex wait on queue. Blocking call on futex() */
1207 futex_nto1_wait(&ust_cmd_queue.futex);
1208 }
1209
1210 error:
1211 DBG("Dispatch thread dying");
1212 return NULL;
1213 }
1214
1215 /*
1216  * This thread manages application registration.
1217 */
1218 static void *thread_registration_apps(void *data)
1219 {
1220 int sock = -1, i, ret, pollfd;
1221 uint32_t revents, nb_fd;
1222 struct lttng_poll_event events;
1223 /*
1224  * Gets allocated in this thread, enqueued to a global queue, dequeued and
1225 * freed in the manage apps thread.
1226 */
1227 struct ust_command *ust_cmd = NULL;
1228
1229 DBG("[thread] Manage application registration started");
1230
1231 ret = lttcomm_listen_unix_sock(apps_sock);
1232 if (ret < 0) {
1233 goto error_listen;
1234 }
1235
1236 /*
1237 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1238 * more will be added to this poll set.
1239 */
1240 ret = create_thread_poll_set(&events, 2);
1241 if (ret < 0) {
1242 goto error_create_poll;
1243 }
1244
1245 /* Add the application registration socket */
1246 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1247 if (ret < 0) {
1248 goto error_poll_add;
1249 }
1250
1251 /* Notify all applications to register */
1252 ret = notify_ust_apps(1);
1253 if (ret < 0) {
1254 ERR("Failed to notify applications or create the wait shared memory.\n"
1255 "Execution continues but there might be problem for already\n"
1256 "running applications that wishes to register.");
1257 }
1258
1259 while (1) {
1260 DBG("Accepting application registration");
1261
1262 nb_fd = LTTNG_POLL_GETNB(&events);
1263
1264 /* Infinite blocking call, waiting for transmission */
1265 restart:
1266 ret = lttng_poll_wait(&events, -1);
1267 if (ret < 0) {
1268 /*
1269 * Restart interrupted system call.
1270 */
1271 if (errno == EINTR) {
1272 goto restart;
1273 }
1274 goto error;
1275 }
1276
1277 for (i = 0; i < nb_fd; i++) {
1278 /* Fetch once the poll data */
1279 revents = LTTNG_POLL_GETEV(&events, i);
1280 pollfd = LTTNG_POLL_GETFD(&events, i);
1281
1282 /* Thread quit pipe has been closed. Killing thread. */
1283 ret = check_thread_quit_pipe(pollfd, revents);
1284 if (ret) {
1285 goto error;
1286 }
1287
1288 /* Event on the registration socket */
1289 if (pollfd == apps_sock) {
1290 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1291 ERR("Register apps socket poll error");
1292 goto error;
1293 } else if (revents & LPOLLIN) {
1294 sock = lttcomm_accept_unix_sock(apps_sock);
1295 if (sock < 0) {
1296 goto error;
1297 }
1298
1299 /* Create UST registration command for enqueuing */
1300 ust_cmd = zmalloc(sizeof(struct ust_command));
1301 if (ust_cmd == NULL) {
1302 PERROR("ust command zmalloc");
1303 goto error;
1304 }
1305
1306 /*
1307 * Using message-based transmissions to ensure we don't
1308 * have to deal with partially received messages.
1309 */
1310 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1311 if (ret < 0) {
1312 ERR("Exhausted file descriptors allowed for applications.");
1313 free(ust_cmd);
1314 ret = close(sock);
1315 if (ret) {
1316 PERROR("close");
1317 }
1318 sock = -1;
1319 continue;
1320 }
1321 ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
1322 sizeof(struct ust_register_msg));
1323 if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
1324 if (ret < 0) {
1325 PERROR("lttcomm_recv_unix_sock register apps");
1326 } else {
1327 ERR("Wrong size received on apps register");
1328 }
1329 free(ust_cmd);
1330 ret = close(sock);
1331 if (ret) {
1332 PERROR("close");
1333 }
1334 lttng_fd_put(LTTNG_FD_APPS, 1);
1335 sock = -1;
1336 continue;
1337 }
1338
1339 ust_cmd->sock = sock;
1340 sock = -1;
1341
1342 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1343 " gid:%d sock:%d name:%s (version %d.%d)",
1344 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1345 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1346 ust_cmd->sock, ust_cmd->reg_msg.name,
1347 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1348
1349 /*
1350 * Lock-free enqueue of the registration request. The red pill
1351 * has been taken! This app will be part of the *system*.
1352 */
1353 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1354
1355 /*
1356 * Wake the registration queue futex. Implicit memory
1357 * barrier with the exchange in cds_wfq_enqueue.
1358 */
1359 futex_nto1_wake(&ust_cmd_queue.futex);
1360 }
1361 }
1362 }
1363 }
1364
1365 error:
1366 /* Notify that the registration thread is gone */
1367 notify_ust_apps(0);
1368
1369 if (apps_sock >= 0) {
1370 ret = close(apps_sock);
1371 if (ret) {
1372 PERROR("close");
1373 }
1374 }
1375 if (sock >= 0) {
1376 ret = close(sock);
1377 if (ret) {
1378 PERROR("close");
1379 }
1380 lttng_fd_put(LTTNG_FD_APPS, 1);
1381 }
1382 unlink(apps_unix_sock_path);
1383
1384 error_poll_add:
1385 lttng_poll_clean(&events);
1386 error_listen:
1387 error_create_poll:
1388 DBG("UST Registration thread cleanup complete");
1389
1390 return NULL;
1391 }
1392
1393 /*
1394 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1395  * exec or it will fail.
1396 */
1397 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1398 {
1399 int ret;
1400 struct timespec timeout;
1401
1402 timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
1403 timeout.tv_nsec = 0;
1404
1405 /* Setup semaphore */
1406 ret = sem_init(&consumer_data->sem, 0, 0);
1407 if (ret < 0) {
1408 PERROR("sem_init consumer semaphore");
1409 goto error;
1410 }
1411
1412 ret = pthread_create(&consumer_data->thread, NULL,
1413 thread_manage_consumer, consumer_data);
1414 if (ret != 0) {
1415 PERROR("pthread_create consumer");
1416 ret = -1;
1417 goto error;
1418 }
1419
1420 /* Get time for sem_timedwait absolute timeout */
1421 ret = clock_gettime(CLOCK_REALTIME, &timeout);
1422 if (ret < 0) {
1423 PERROR("clock_gettime spawn consumer");
1424 /* Infinite wait for the kconsumerd thread to be ready */
1425 ret = sem_wait(&consumer_data->sem);
1426 } else {
1427 /* Normal timeout if the gettime was successful */
1428 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1429 ret = sem_timedwait(&consumer_data->sem, &timeout);
1430 }
1431
1432 if (ret < 0) {
1433 if (errno == ETIMEDOUT) {
1434 /*
1435 * Call has timed out so we kill the kconsumerd_thread and return
1436 * an error.
1437 */
1438 ERR("The consumer thread was never ready. Killing it");
1439 ret = pthread_cancel(consumer_data->thread);
1440 if (ret < 0) {
1441 PERROR("pthread_cancel consumer thread");
1442 }
1443 } else {
1444 PERROR("semaphore wait failed consumer thread");
1445 }
1446 goto error;
1447 }
1448
1449 pthread_mutex_lock(&consumer_data->pid_mutex);
1450 if (consumer_data->pid == 0) {
1451 ERR("Kconsumerd did not start");
1452 pthread_mutex_unlock(&consumer_data->pid_mutex);
1453 goto error;
1454 }
1455 pthread_mutex_unlock(&consumer_data->pid_mutex);
1456
1457 return 0;
1458
1459 error:
1460 return ret;
1461 }
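
/*
 * Note on the timeout logic above (informal): sem_timedwait() takes an
 * ABSOLUTE deadline based on CLOCK_REALTIME, hence the clock_gettime()
 * followed by adding DEFAULT_SEM_WAIT_TIMEOUT. A condensed sketch:
 *
 *   struct timespec ts;
 *
 *   if (clock_gettime(CLOCK_REALTIME, &ts) == 0) {
 *           ts.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
 *           ret = sem_timedwait(&consumer_data->sem, &ts);
 *   } else {
 *           ret = sem_wait(&consumer_data->sem);
 *   }
 *
 * On expiry, sem_timedwait() fails with errno set to ETIMEDOUT, which is
 * what triggers the pthread_cancel() path above.
 */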
1462
1463 /*
1464 * Join consumer thread
1465 */
1466 static int join_consumer_thread(struct consumer_data *consumer_data)
1467 {
1468 void *status;
1469 int ret;
1470
1471 if (consumer_data->pid != 0) {
1472 ret = kill(consumer_data->pid, SIGTERM);
1473 if (ret) {
1474 ERR("Error killing consumer daemon");
1475 return ret;
1476 }
1477 return pthread_join(consumer_data->thread, &status);
1478 } else {
1479 return 0;
1480 }
1481 }
1482
1483 /*
1484 * Fork and exec a consumer daemon (consumerd).
1485 *
1486 * Return pid if successful else -1.
1487 */
1488 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
1489 {
1490 int ret;
1491 pid_t pid;
1492 const char *consumer_to_use;
1493 const char *verbosity;
1494 struct stat st;
1495
1496 DBG("Spawning consumerd");
1497
1498 pid = fork();
1499 if (pid == 0) {
1500 /*
1501 * Exec consumerd.
1502 */
1503 if (opt_verbose_consumer) {
1504 verbosity = "--verbose";
1505 } else {
1506 verbosity = "--quiet";
1507 }
1508 switch (consumer_data->type) {
1509 case LTTNG_CONSUMER_KERNEL:
1510 /*
1511 * Find out which consumerd to execute. We will first try the
1512 * 64-bit path, then the sessiond's installation directory, and
1513  * fall back on the 32-bit one.
1514 */
1515 DBG3("Looking for a kernel consumer at these locations:");
1516 DBG3(" 1) %s", consumerd64_bin);
1517 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
1518 DBG3(" 3) %s", consumerd32_bin);
1519 if (stat(consumerd64_bin, &st) == 0) {
1520 DBG3("Found location #1");
1521 consumer_to_use = consumerd64_bin;
1522 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
1523 DBG3("Found location #2");
1524 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
1525 } else if (stat(consumerd32_bin, &st) == 0) {
1526 DBG3("Found location #3");
1527 consumer_to_use = consumerd32_bin;
1528 } else {
1529 DBG("Could not find any valid consumerd executable");
1530 break;
1531 }
1532 DBG("Using kernel consumer at: %s", consumer_to_use);
1533 execl(consumer_to_use,
1534 "lttng-consumerd", verbosity, "-k",
1535 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1536 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1537 NULL);
1538 break;
1539 case LTTNG_CONSUMER64_UST:
1540 {
1541 char *tmpnew = NULL;
1542
1543 if (consumerd64_libdir[0] != '\0') {
1544 char *tmp;
1545 size_t tmplen;
1546
1547 tmp = getenv("LD_LIBRARY_PATH");
1548 if (!tmp) {
1549 tmp = "";
1550 }
1551 tmplen = strlen("LD_LIBRARY_PATH=")
1552 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
1553 tmpnew = zmalloc(tmplen + 1 /* \0 */);
1554 if (!tmpnew) {
1555 ret = -ENOMEM;
1556 goto error;
1557 }
1558 strcpy(tmpnew, "LD_LIBRARY_PATH=");
1559 strcat(tmpnew, consumerd64_libdir);
1560 if (tmp[0] != '\0') {
1561 strcat(tmpnew, ":");
1562 strcat(tmpnew, tmp);
1563 }
1564 ret = putenv(tmpnew);
1565 if (ret) {
1566 ret = -errno;
1567 goto error;
1568 }
1569 }
1570 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
1571 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
1572 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1573 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1574 NULL);
1575 if (consumerd64_libdir[0] != '\0') {
1576 free(tmpnew);
1577 }
1578 if (ret) {
1579 goto error;
1580 }
1581 break;
1582 }
1583 case LTTNG_CONSUMER32_UST:
1584 {
1585 char *tmpnew = NULL;
1586
1587 if (consumerd32_libdir[0] != '\0') {
1588 char *tmp;
1589 size_t tmplen;
1590
1591 tmp = getenv("LD_LIBRARY_PATH");
1592 if (!tmp) {
1593 tmp = "";
1594 }
1595 tmplen = strlen("LD_LIBRARY_PATH=")
1596 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
1597 tmpnew = zmalloc(tmplen + 1 /* \0 */);
1598 if (!tmpnew) {
1599 ret = -ENOMEM;
1600 goto error;
1601 }
1602 strcpy(tmpnew, "LD_LIBRARY_PATH=");
1603 strcat(tmpnew, consumerd32_libdir);
1604 if (tmp[0] != '\0') {
1605 strcat(tmpnew, ":");
1606 strcat(tmpnew, tmp);
1607 }
1608 ret = putenv(tmpnew);
1609 if (ret) {
1610 ret = -errno;
1611 goto error;
1612 }
1613 }
1614 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
1615 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
1616 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1617 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1618 NULL);
1619 if (consumerd32_libdir[0] != '\0') {
1620 free(tmpnew);
1621 }
1622 if (ret) {
1623 goto error;
1624 }
1625 break;
1626 }
1627 default:
1628 PERROR("unknown consumer type");
1629 exit(EXIT_FAILURE);
1630 }
1631 if (errno != 0) {
1632 PERROR("kernel start consumer exec");
1633 }
1634 exit(EXIT_FAILURE);
1635 } else if (pid > 0) {
1636 ret = pid;
1637 } else {
1638 PERROR("start consumer fork");
1639 ret = -errno;
1640 }
1641 error:
1642 return ret;
1643 }
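
/*
 * The LD_LIBRARY_PATH handling above sizes the buffer exactly, then fills
 * it with strcpy()/strcat() before calling putenv(). A minimal alternative
 * sketch using asprintf() (build_ld_path is a hypothetical helper, not part
 * of this file):
 *
 *   static char *build_ld_path(const char *libdir)
 *   {
 *           const char *old = getenv("LD_LIBRARY_PATH");
 *           char *env;
 *
 *           if (!old) {
 *                   old = "";
 *           }
 *           if (asprintf(&env, "LD_LIBRARY_PATH=%s%s%s", libdir,
 *                           old[0] != '\0' ? ":" : "", old) < 0) {
 *                   return NULL;
 *           }
 *           return env;
 *   }
 *
 * putenv() does not copy its argument, so the string must stay allocated
 * until the exec; the code above accordingly frees tmpnew only on the
 * execl() failure path.
 */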
1644
1645 /*
1646  * Spawn the consumerd daemon and the session daemon's consumer management thread.
1647 */
1648 static int start_consumerd(struct consumer_data *consumer_data)
1649 {
1650 int ret;
1651
1652 pthread_mutex_lock(&consumer_data->pid_mutex);
1653 if (consumer_data->pid != 0) {
1654 pthread_mutex_unlock(&consumer_data->pid_mutex);
1655 goto end;
1656 }
1657
1658 ret = spawn_consumerd(consumer_data);
1659 if (ret < 0) {
1660 ERR("Spawning consumerd failed");
1661 pthread_mutex_unlock(&consumer_data->pid_mutex);
1662 goto error;
1663 }
1664
1665 /* Setting up the consumer_data pid */
1666 consumer_data->pid = ret;
1667 DBG2("Consumer pid %d", consumer_data->pid);
1668 pthread_mutex_unlock(&consumer_data->pid_mutex);
1669
1670 DBG2("Spawning consumer control thread");
1671 ret = spawn_consumer_thread(consumer_data);
1672 if (ret < 0) {
1673 ERR("Fatal error spawning consumer control thread");
1674 goto error;
1675 }
1676
1677 end:
1678 return 0;
1679
1680 error:
1681 return ret;
1682 }
1683
1684 /*
1685 * Check version of the lttng-modules.
1686 */
1687 static int validate_lttng_modules_version(void)
1688 {
1689 return kernel_validate_version(kernel_tracer_fd);
1690 }
1691
1692 /*
1693 * Setup necessary data for kernel tracer action.
1694 */
1695 static int init_kernel_tracer(void)
1696 {
1697 int ret;
1698
1699 /* Modprobe lttng kernel modules */
1700 ret = modprobe_lttng_control();
1701 if (ret < 0) {
1702 goto error;
1703 }
1704
1705 /* Open debugfs lttng */
1706 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
1707 if (kernel_tracer_fd < 0) {
1708 DBG("Failed to open %s", module_proc_lttng);
1709 ret = -1;
1710 goto error_open;
1711 }
1712
1713 /* Validate kernel version */
1714 ret = validate_lttng_modules_version();
1715 if (ret < 0) {
1716 goto error_version;
1717 }
1718
1719 ret = modprobe_lttng_data();
1720 if (ret < 0) {
1721 goto error_modules;
1722 }
1723
1724 DBG("Kernel tracer fd %d", kernel_tracer_fd);
1725 return 0;
1726
1727 error_version:
1728 modprobe_remove_lttng_control();
1729 ret = close(kernel_tracer_fd);
1730 if (ret) {
1731 PERROR("close");
1732 }
1733 kernel_tracer_fd = -1;
1734 return LTTCOMM_KERN_VERSION;
1735
1736 error_modules:
1737 ret = close(kernel_tracer_fd);
1738 if (ret) {
1739 PERROR("close");
1740 }
1741
1742 error_open:
1743 modprobe_remove_lttng_control();
1744
1745 error:
1746 WARN("No kernel tracer available");
1747 kernel_tracer_fd = -1;
1748 if (!is_root) {
1749 return LTTCOMM_NEED_ROOT_SESSIOND;
1750 } else {
1751 return LTTCOMM_KERN_NA;
1752 }
1753 }
1754
1755 /*
1756  * Init tracing by creating the trace directory and sending fds to the kernel consumer.
1757 */
1758 static int init_kernel_tracing(struct ltt_kernel_session *session)
1759 {
1760 int ret = 0;
1761
1762 if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
1763 /*
1764 * Assign default kernel consumer socket if no consumer assigned to the
1765 * kernel session. At this point, it's NOT supposed to be -1 but this is
1766 * an extra security check.
1767 */
1768 if (session->consumer_fd < 0) {
1769 session->consumer_fd = kconsumer_data.cmd_sock;
1770 }
1771
1772 ret = kernel_consumer_send_session(session->consumer_fd, session);
1773 if (ret < 0) {
1774 ret = LTTCOMM_KERN_CONSUMER_FAIL;
1775 goto error;
1776 }
1777 }
1778
1779 error:
1780 return ret;
1781 }
1782
1783 /*
1784 * Create a socket to the relayd using the URI.
1785 *
1786 * On success, the relayd_sock pointer is set to the created socket.
1787 * Else, it is untouched and an lttcomm error code is returned.
1788 */
1789 static int create_connect_relayd(struct consumer_output *output,
1790 const char *session_name, struct lttng_uri *uri,
1791 struct lttcomm_sock **relayd_sock)
1792 {
1793 int ret;
1794 struct lttcomm_sock *sock;
1795
1796 /* Create socket object from URI */
1797 sock = lttcomm_alloc_sock_from_uri(uri);
1798 if (sock == NULL) {
1799 ret = LTTCOMM_FATAL;
1800 goto error;
1801 }
1802
1803 ret = lttcomm_create_sock(sock);
1804 if (ret < 0) {
1805 ret = LTTCOMM_FATAL;
1806 goto error;
1807 }
1808
1809 /* Connect to relayd so we can proceed with a session creation. */
1810 ret = relayd_connect(sock);
1811 if (ret < 0) {
1812 ERR("Unable to reach lttng-relayd");
1813 ret = LTTCOMM_RELAYD_SESSION_FAIL;
1814 goto free_sock;
1815 }
1816
1817 /* Create socket for control stream. */
1818 if (uri->stype == LTTNG_STREAM_CONTROL) {
1819 DBG3("Creating relayd stream socket from URI");
1820
1821 /* Check relayd version */
1822 ret = relayd_version_check(sock, LTTNG_UST_COMM_MAJOR, 0);
1823 if (ret < 0) {
1824 ret = LTTCOMM_RELAYD_VERSION_FAIL;
1825 goto close_sock;
1826 }
1827 } else if (uri->stype == LTTNG_STREAM_DATA) {
1828 DBG3("Creating relayd data socket from URI");
1829 } else {
1830 /* Command is not valid */
1831 ERR("Relayd invalid stream type: %d", uri->stype);
1832 ret = LTTCOMM_INVALID;
1833 goto close_sock;
1834 }
1835
1836 *relayd_sock = sock;
1837
1838 return LTTCOMM_OK;
1839
1840 close_sock:
1841 if (sock) {
1842 (void) relayd_close(sock);
1843 }
1844 free_sock:
1845 if (sock) {
1846 lttcomm_destroy_sock(sock);
1847 }
1848 error:
1849 return ret;
1850 }
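
/*
 * Design note on create_connect_relayd(): both the control and the data
 * relayd sockets are created through this same path, but the protocol
 * version check (relayd_version_check) is only performed for
 * LTTNG_STREAM_CONTROL, since the control socket is always set up first
 * for a given relayd (see send_sockets_relayd_consumer below); the data
 * socket simply reuses the already validated peer.
 */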
1851
1852 /*
1853 * Connect to the relayd using URI and send the socket to the right consumer.
1854 */
1855 static int send_socket_relayd_consumer(int domain, struct ltt_session *session,
1856 struct lttng_uri *relayd_uri, struct consumer_output *consumer,
1857 int consumer_fd)
1858 {
1859 int ret;
1860 struct lttcomm_sock *sock = NULL;
1861
1862 /* Set the network sequence index if not set. */
1863 if (consumer->net_seq_index == -1) {
1864 /*
1865 * Increment net_seq_idx because we are about to transfer the
1866 * new relayd socket to the consumer.
1867 */
1868 uatomic_inc(&relayd_net_seq_idx);
1869 /* Assign unique key so the consumer can match streams */
1870 consumer->net_seq_index = uatomic_read(&relayd_net_seq_idx);
1871 }
1872
1873 /* Connect to relayd and make version check if uri is the control. */
1874 ret = create_connect_relayd(consumer, session->name, relayd_uri, &sock);
1875 if (ret != LTTCOMM_OK) {
1876 goto close_sock;
1877 }
1878
1879 /* If the control socket is connected, network session is ready */
1880 if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
1881 session->net_handle = 1;
1882 }
1883
1884 switch (domain) {
1885 case LTTNG_DOMAIN_KERNEL:
1886 /* Send relayd socket to consumer. */
1887 ret = kernel_consumer_send_relayd_socket(consumer_fd, sock,
1888 consumer, relayd_uri->stype);
1889 if (ret < 0) {
1890 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
1891 goto close_sock;
1892 }
1893 break;
1894 case LTTNG_DOMAIN_UST:
1895 /* Send relayd socket to consumer. */
1896 ret = ust_consumer_send_relayd_socket(consumer_fd, sock,
1897 consumer, relayd_uri->stype);
1898 if (ret < 0) {
1899 ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
1900 goto close_sock;
1901 }
1902 break;
1903 }
1904
1905 ret = LTTCOMM_OK;
1906
1907 /*
1908  * Close the socket, which was dup'd on the consumer side. The session daemon
1909  * does NOT keep track of the relayd socket(s) once transferred to the consumer.
1910 */
1911
1912 close_sock:
1913 if (sock) {
1914 (void) relayd_close(sock);
1915 lttcomm_destroy_sock(sock);
1916 }
1917
1918 return ret;
1919 }
1920
1921 /*
1922 * Send both relayd sockets to a specific consumer and domain. This is a
1923 * helper function to facilitate sending the information to the consumer for a
1924 * session.
1925 */
1926 static int send_sockets_relayd_consumer(int domain,
1927 struct ltt_session *session, struct consumer_output *consumer, int fd)
1928 {
1929 int ret;
1930
1931 /* Sending control relayd socket. */
1932 ret = send_socket_relayd_consumer(domain, session,
1933 &consumer->dst.net.control, consumer, fd);
1934 if (ret != LTTCOMM_OK) {
1935 goto error;
1936 }
1937
1938 /* Sending data relayd socket. */
1939 ret = send_socket_relayd_consumer(domain, session,
1940 &consumer->dst.net.data, consumer, fd);
1941 if (ret != LTTCOMM_OK) {
1942 goto error;
1943 }
1944
1945 error:
1946 return ret;
1947 }
1948
1949 /*
1950  * Setup relayd connections for a tracing session. First creates the sockets to
1951  * the relayd and sends them to the right domain consumer. Consumer type MUST be
1952 * network.
1953 */
1954 static int setup_relayd(struct ltt_session *session)
1955 {
1956 int ret = LTTCOMM_OK;
1957 struct ltt_ust_session *usess;
1958 struct ltt_kernel_session *ksess;
1959
1960 assert(session);
1961
1962 usess = session->ust_session;
1963 ksess = session->kernel_session;
1964
1965 DBG2("Setting relayd for session %s", session->name);
1966
1967 if (usess && usess->consumer->sock == -1 &&
1968 usess->consumer->type == CONSUMER_DST_NET &&
1969 usess->consumer->enabled) {
1970 /* Setup relayd for 64 bits consumer */
1971 if (ust_consumerd64_fd >= 0) {
1972 ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
1973 usess->consumer, ust_consumerd64_fd);
1974 if (ret != LTTCOMM_OK) {
1975 goto error;
1976 }
1977 }
1978
1979 /* Setup relayd for 32 bits consumer */
1980 if (ust_consumerd32_fd >= 0) {
1981 ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
1982 usess->consumer, ust_consumerd32_fd);
1983 if (ret != LTTCOMM_OK) {
1984 goto error;
1985 }
1986 }
1987 } else if (ksess && ksess->consumer->sock == -1 &&
1988 ksess->consumer->type == CONSUMER_DST_NET &&
1989 ksess->consumer->enabled) {
1990 ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_KERNEL, session,
1991 ksess->consumer, ksess->consumer_fd);
1992 if (ret != LTTCOMM_OK) {
1993 goto error;
1994 }
1995 }
1996
1997 error:
1998 return ret;
1999 }
2000
2001 /*
2002 * Copy consumer output from the tracing session to the domain session. The
2003  * function also applies the right modifications on a per-domain basis to the
2004  * trace files' destination directory.
2005 */
2006 static int copy_session_consumer(int domain, struct ltt_session *session)
2007 {
2008 int ret;
2009 const char *dir_name;
2010 struct consumer_output *consumer;
2011
2012 switch (domain) {
2013 case LTTNG_DOMAIN_KERNEL:
2014 DBG3("Copying tracing session consumer output in kernel session");
2015 session->kernel_session->consumer =
2016 consumer_copy_output(session->consumer);
2017 /* Ease our life a bit for the next part */
2018 consumer = session->kernel_session->consumer;
2019 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2020 break;
2021 case LTTNG_DOMAIN_UST:
2022 DBG3("Copying tracing session consumer output in UST session");
2023 session->ust_session->consumer =
2024 consumer_copy_output(session->consumer);
2025 /* Ease our life a bit for the next part */
2026 consumer = session->ust_session->consumer;
2027 dir_name = DEFAULT_UST_TRACE_DIR;
2028 break;
2029 default:
2030 ret = LTTCOMM_UNKNOWN_DOMAIN;
2031 goto error;
2032 }
2033
2034 /* Append correct directory to subdir */
2035 strncat(consumer->subdir, dir_name, sizeof(consumer->subdir));
2036 DBG3("Copy session consumer subdir %s", consumer->subdir);
2037
2038 /* Add default trace directory name */
2039 if (consumer->type == CONSUMER_DST_LOCAL) {
2040 strncat(consumer->dst.trace_path, dir_name,
2041 sizeof(consumer->dst.trace_path));
2042 }
2043
2044 ret = LTTCOMM_OK;
2045
2046 error:
2047 return ret;
2048 }
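
/*
 * Note on the strncat() calls above: the third argument of strncat() bounds
 * the number of bytes APPENDED, not the total size of the destination, so
 * passing sizeof(dst) is only safe while the destination still has enough
 * room. A defensive form (sketch only, not a behaviour change) would be:
 *
 *   strncat(consumer->subdir, dir_name,
 *           sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
 */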
2049
2050 /*
2051 * Create an UST session and add it to the session ust list.
2052 */
2053 static int create_ust_session(struct ltt_session *session,
2054 struct lttng_domain *domain)
2055 {
2056 int ret;
2057 struct ltt_ust_session *lus = NULL;
2058
2059 assert(session);
2060 assert(session->consumer);
2061
2062 switch (domain->type) {
2063 case LTTNG_DOMAIN_UST:
2064 break;
2065 default:
2066 ERR("Unknown UST domain on create session %d", domain->type);
2067 ret = LTTCOMM_UNKNOWN_DOMAIN;
2068 goto error;
2069 }
2070
2071 DBG("Creating UST session");
2072
2073 lus = trace_ust_create_session(session->path, session->id, domain);
2074 if (lus == NULL) {
2075 ret = LTTCOMM_UST_SESS_FAIL;
2076 goto error;
2077 }
2078
2079 if (session->consumer->type == CONSUMER_DST_LOCAL) {
2080 ret = run_as_mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
2081 session->uid, session->gid);
2082 if (ret < 0) {
2083 if (ret != -EEXIST) {
2084 ERR("Trace directory creation error");
2085 ret = LTTCOMM_UST_SESS_FAIL;
2086 goto error;
2087 }
2088 }
2089 }
2090
2091 lus->uid = session->uid;
2092 lus->gid = session->gid;
2093 session->ust_session = lus;
2094
2095 /* Copy session output to the newly created UST session */
2096 ret = copy_session_consumer(domain->type, session);
2097 if (ret != LTTCOMM_OK) {
2098 goto error;
2099 }
2100
2101 return LTTCOMM_OK;
2102
2103 error:
2104 free(lus);
2105 session->ust_session = NULL;
2106 return ret;
2107 }
2108
2109 /*
2110 * Create a kernel tracer session then create the default channel.
2111 */
2112 static int create_kernel_session(struct ltt_session *session)
2113 {
2114 int ret;
2115
2116 DBG("Creating kernel session");
2117
2118 ret = kernel_create_session(session, kernel_tracer_fd);
2119 if (ret < 0) {
2120 ret = LTTCOMM_KERN_SESS_FAIL;
2121 goto error;
2122 }
2123
2124 /* Set kernel consumer socket fd */
2125 if (kconsumer_data.cmd_sock >= 0) {
2126 session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
2127 }
2128
2129 /* Copy session output to the newly created Kernel session */
2130 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2131 if (ret != LTTCOMM_OK) {
2132 goto error;
2133 }
2134
2135 /* Create directory(ies) on local filesystem. */
2136 if (session->consumer->type == CONSUMER_DST_LOCAL) {
2137 ret = run_as_mkdir_recursive(
2138 session->kernel_session->consumer->dst.trace_path,
2139 S_IRWXU | S_IRWXG, session->uid, session->gid);
2140 if (ret < 0) {
2141 if (ret != -EEXIST) {
2142 ERR("Trace directory creation error");
2143 goto error;
2144 }
2145 }
2146 }
2147
2148 session->kernel_session->uid = session->uid;
2149 session->kernel_session->gid = session->gid;
2150
2151 return LTTCOMM_OK;
2152
2153 error:
2154 trace_kernel_destroy_session(session->kernel_session);
2155 session->kernel_session = NULL;
2156 return ret;
2157 }
2158
2159 /*
2160  * Check whether the UID or GID matches the session. The root user has access
2161  * to all sessions.
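 * Return 1 if access is granted, 0 otherwise.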
2162 */
2163 static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
2164 {
2165 if (uid != session->uid && gid != session->gid && uid != 0) {
2166 return 0;
2167 } else {
2168 return 1;
2169 }
2170 }
2171
2172 /*
2173  * Count the number of sessions permitted by uid/gid.
2174 */
2175 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2176 {
2177 unsigned int i = 0;
2178 struct ltt_session *session;
2179
2180 DBG("Counting number of available session for UID %d GID %d",
2181 uid, gid);
2182 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2183 /*
2184 * Only list the sessions the user can control.
2185 */
2186 if (!session_access_ok(session, uid, gid)) {
2187 continue;
2188 }
2189 i++;
2190 }
2191 return i;
2192 }
2193
2194 /*
2195  * Using the session list, fill a lttng_session array to send back to the
2196 * client for session listing.
2197 *
2198 * The session list lock MUST be acquired before calling this function. Use
2199 * session_lock_list() and session_unlock_list().
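 * The sessions array must be large enough to hold one entry per session the
 * caller can access; see lttng_sessions_count().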
2200 */
2201 static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
2202 gid_t gid)
2203 {
2204 unsigned int i = 0;
2205 struct ltt_session *session;
2206
2207 DBG("Getting all available session for UID %d GID %d",
2208 uid, gid);
2209 /*
2210 * Iterate over session list and append data after the control struct in
2211 * the buffer.
2212 */
2213 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2214 /*
2215 * Only list the sessions the user can control.
2216 */
2217 if (!session_access_ok(session, uid, gid)) {
2218 continue;
2219 }
2220 strncpy(sessions[i].path, session->path, PATH_MAX);
2221 sessions[i].path[PATH_MAX - 1] = '\0';
2222 strncpy(sessions[i].name, session->name, NAME_MAX);
2223 sessions[i].name[NAME_MAX - 1] = '\0';
2224 sessions[i].enabled = session->enabled;
2225 i++;
2226 }
2227 }
2228
2229 /*
2230  * Fill the lttng_channel array with all the channels of the given domain.
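 * The channels array must be pre-allocated by the caller.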
2231 */
2232 static void list_lttng_channels(int domain, struct ltt_session *session,
2233 struct lttng_channel *channels)
2234 {
2235 int i = 0;
2236 struct ltt_kernel_channel *kchan;
2237
2238 DBG("Listing channels for session %s", session->name);
2239
2240 switch (domain) {
2241 case LTTNG_DOMAIN_KERNEL:
2242 /* Kernel channels */
2243 if (session->kernel_session != NULL) {
2244 cds_list_for_each_entry(kchan,
2245 &session->kernel_session->channel_list.head, list) {
2246 /* Copy lttng_channel struct to array */
2247 memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
2248 channels[i].enabled = kchan->enabled;
2249 i++;
2250 }
2251 }
2252 break;
2253 case LTTNG_DOMAIN_UST:
2254 {
2255 struct lttng_ht_iter iter;
2256 struct ltt_ust_channel *uchan;
2257
2258 cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
2259 &iter.iter, uchan, node.node) {
2260 strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
2261 channels[i].attr.overwrite = uchan->attr.overwrite;
2262 channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
2263 channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
2264 channels[i].attr.switch_timer_interval =
2265 uchan->attr.switch_timer_interval;
2266 channels[i].attr.read_timer_interval =
2267 uchan->attr.read_timer_interval;
2268 channels[i].enabled = uchan->enabled;
2269 switch (uchan->attr.output) {
2270 case LTTNG_UST_MMAP:
2271 default:
2272 channels[i].attr.output = LTTNG_EVENT_MMAP;
2273 break;
2274 }
2275 i++;
2276 }
2277 break;
2278 }
2279 default:
2280 break;
2281 }
2282 }
2283
2284 /*
2285 * Create a list of ust global domain events.
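 * Return the number of events found, or a negative LTTCOMM error code. When at
 * least one event is returned, *events is allocated and must be freed by the
 * caller.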
2286 */
2287 static int list_lttng_ust_global_events(char *channel_name,
2288 struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
2289 {
2290 int i = 0, ret = 0;
2291 unsigned int nb_event = 0;
2292 struct lttng_ht_iter iter;
2293 struct lttng_ht_node_str *node;
2294 struct ltt_ust_channel *uchan;
2295 struct ltt_ust_event *uevent;
2296 struct lttng_event *tmp;
2297
2298 DBG("Listing UST global events for channel %s", channel_name);
2299
2300 rcu_read_lock();
2301
2302 lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);
2303 node = lttng_ht_iter_get_node_str(&iter);
2304 if (node == NULL) {
2305 ret = -LTTCOMM_UST_CHAN_NOT_FOUND;
2306 goto error;
2307 }
2308
2309 uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);
2310
2311 nb_event += lttng_ht_get_count(uchan->events);
2312
2313 if (nb_event == 0) {
2314 ret = nb_event;
2315 goto error;
2316 }
2317
2318 DBG3("Listing UST global %d events", nb_event);
2319
2320 tmp = zmalloc(nb_event * sizeof(struct lttng_event));
2321 if (tmp == NULL) {
2322 ret = -LTTCOMM_FATAL;
2323 goto error;
2324 }
2325
2326 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
2327 strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
2328 tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
2329 tmp[i].enabled = uevent->enabled;
2330 switch (uevent->attr.instrumentation) {
2331 case LTTNG_UST_TRACEPOINT:
2332 tmp[i].type = LTTNG_EVENT_TRACEPOINT;
2333 break;
2334 case LTTNG_UST_PROBE:
2335 tmp[i].type = LTTNG_EVENT_PROBE;
2336 break;
2337 case LTTNG_UST_FUNCTION:
2338 tmp[i].type = LTTNG_EVENT_FUNCTION;
2339 break;
2340 }
2341 tmp[i].loglevel = uevent->attr.loglevel;
2342 switch (uevent->attr.loglevel_type) {
2343 case LTTNG_UST_LOGLEVEL_ALL:
2344 tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
2345 break;
2346 case LTTNG_UST_LOGLEVEL_RANGE:
2347 tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
2348 break;
2349 case LTTNG_UST_LOGLEVEL_SINGLE:
2350 tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
2351 break;
2352 }
2353 i++;
2354 }
2355
2356 ret = nb_event;
2357 *events = tmp;
2358
2359 error:
2360 rcu_read_unlock();
2361 return ret;
2362 }
2363
2364 /*
2365 * Fill lttng_event array of all kernel events in the channel.
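 * Return the number of events on success or an LTTCOMM error code on failure.
 * When at least one event is returned, *events is allocated and must be freed
 * by the caller.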
2366 */
2367 static int list_lttng_kernel_events(char *channel_name,
2368 struct ltt_kernel_session *kernel_session, struct lttng_event **events)
2369 {
2370 int i = 0, ret;
2371 unsigned int nb_event;
2372 struct ltt_kernel_event *event;
2373 struct ltt_kernel_channel *kchan;
2374
2375 kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
2376 if (kchan == NULL) {
2377 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2378 goto error;
2379 }
2380
2381 nb_event = kchan->event_count;
2382
2383 DBG("Listing events for channel %s", kchan->channel->name);
2384
2385 if (nb_event == 0) {
2386 ret = nb_event;
2387 goto error;
2388 }
2389
2390 *events = zmalloc(nb_event * sizeof(struct lttng_event));
2391 if (*events == NULL) {
2392 ret = LTTCOMM_FATAL;
2393 goto error;
2394 }
2395
2396 /* Kernel channels */
2397 cds_list_for_each_entry(event, &kchan->events_list.head , list) {
2398 strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
2399 (*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
2400 (*events)[i].enabled = event->enabled;
2401 switch (event->event->instrumentation) {
2402 case LTTNG_KERNEL_TRACEPOINT:
2403 (*events)[i].type = LTTNG_EVENT_TRACEPOINT;
2404 break;
2405 case LTTNG_KERNEL_KPROBE:
2406 case LTTNG_KERNEL_KRETPROBE:
2407 (*events)[i].type = LTTNG_EVENT_PROBE;
2408 memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
2409 sizeof(struct lttng_kernel_kprobe));
2410 break;
2411 case LTTNG_KERNEL_FUNCTION:
2412 (*events)[i].type = LTTNG_EVENT_FUNCTION;
2413 memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
2414 sizeof(struct lttng_kernel_function));
2415 break;
2416 case LTTNG_KERNEL_NOOP:
2417 (*events)[i].type = LTTNG_EVENT_NOOP;
2418 break;
2419 case LTTNG_KERNEL_SYSCALL:
2420 (*events)[i].type = LTTNG_EVENT_SYSCALL;
2421 break;
2422 case LTTNG_KERNEL_ALL:
2423 assert(0);
2424 break;
2425 }
2426 i++;
2427 }
2428
2429 return nb_event;
2430
2431 error:
2432 return ret;
2433 }
2434
2435 /*
2436 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
2437 */
2438 static int cmd_disable_channel(struct ltt_session *session,
2439 int domain, char *channel_name)
2440 {
2441 int ret;
2442 struct ltt_ust_session *usess;
2443
2444 usess = session->ust_session;
2445
2446 switch (domain) {
2447 case LTTNG_DOMAIN_KERNEL:
2448 {
2449 ret = channel_kernel_disable(session->kernel_session,
2450 channel_name);
2451 if (ret != LTTCOMM_OK) {
2452 goto error;
2453 }
2454
2455 kernel_wait_quiescent(kernel_tracer_fd);
2456 break;
2457 }
2458 case LTTNG_DOMAIN_UST:
2459 {
2460 struct ltt_ust_channel *uchan;
2461 struct lttng_ht *chan_ht;
2462
2463 chan_ht = usess->domain_global.channels;
2464
2465 uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
2466 if (uchan == NULL) {
2467 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2468 goto error;
2469 }
2470
2471 ret = channel_ust_disable(usess, domain, uchan);
2472 if (ret != LTTCOMM_OK) {
2473 goto error;
2474 }
2475 break;
2476 }
2477 #if 0
2478 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2479 case LTTNG_DOMAIN_UST_EXEC_NAME:
2480 case LTTNG_DOMAIN_UST_PID:
2481 #endif
2482 default:
2483 ret = LTTCOMM_UNKNOWN_DOMAIN;
2484 goto error;
2485 }
2486
2487 ret = LTTCOMM_OK;
2488
2489 error:
2490 return ret;
2491 }
2492
2493 /*
2494 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
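 * The channel is created if it does not exist yet; otherwise it is simply
 * re-enabled.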
2495 */
2496 static int cmd_enable_channel(struct ltt_session *session,
2497 int domain, struct lttng_channel *attr)
2498 {
2499 int ret;
2500 struct ltt_ust_session *usess = session->ust_session;
2501 struct lttng_ht *chan_ht;
2502
2503 DBG("Enabling channel %s for session %s", attr->name, session->name);
2504
2505 switch (domain) {
2506 case LTTNG_DOMAIN_KERNEL:
2507 {
2508 struct ltt_kernel_channel *kchan;
2509
2510 kchan = trace_kernel_get_channel_by_name(attr->name,
2511 session->kernel_session);
2512 if (kchan == NULL) {
2513 ret = channel_kernel_create(session->kernel_session,
2514 attr, kernel_poll_pipe[1]);
2515 } else {
2516 ret = channel_kernel_enable(session->kernel_session, kchan);
2517 }
2518
2519 if (ret != LTTCOMM_OK) {
2520 goto error;
2521 }
2522
2523 kernel_wait_quiescent(kernel_tracer_fd);
2524 break;
2525 }
2526 case LTTNG_DOMAIN_UST:
2527 {
2528 struct ltt_ust_channel *uchan;
2529
2530 chan_ht = usess->domain_global.channels;
2531
2532 uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
2533 if (uchan == NULL) {
2534 ret = channel_ust_create(usess, domain, attr);
2535 } else {
2536 ret = channel_ust_enable(usess, domain, uchan);
2537 }
2538 break;
2539 }
2540 #if 0
2541 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2542 case LTTNG_DOMAIN_UST_EXEC_NAME:
2543 case LTTNG_DOMAIN_UST_PID:
2544 #endif
2545 default:
2546 ret = LTTCOMM_UNKNOWN_DOMAIN;
2547 goto error;
2548 }
2549
2550 error:
2551 return ret;
2552 }
2553
2554 /*
2555 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2556 */
2557 static int cmd_disable_event(struct ltt_session *session, int domain,
2558 char *channel_name, char *event_name)
2559 {
2560 int ret;
2561
2562 switch (domain) {
2563 case LTTNG_DOMAIN_KERNEL:
2564 {
2565 struct ltt_kernel_channel *kchan;
2566 struct ltt_kernel_session *ksess;
2567
2568 ksess = session->kernel_session;
2569
2570 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
2571 if (kchan == NULL) {
2572 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2573 goto error;
2574 }
2575
2576 ret = event_kernel_disable_tracepoint(ksess, kchan, event_name);
2577 if (ret != LTTCOMM_OK) {
2578 goto error;
2579 }
2580
2581 kernel_wait_quiescent(kernel_tracer_fd);
2582 break;
2583 }
2584 case LTTNG_DOMAIN_UST:
2585 {
2586 struct ltt_ust_channel *uchan;
2587 struct ltt_ust_session *usess;
2588
2589 usess = session->ust_session;
2590
2591 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2592 channel_name);
2593 if (uchan == NULL) {
2594 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2595 goto error;
2596 }
2597
2598 ret = event_ust_disable_tracepoint(usess, domain, uchan, event_name);
2599 if (ret != LTTCOMM_OK) {
2600 goto error;
2601 }
2602
2603 DBG3("Disable UST event %s in channel %s completed", event_name,
2604 channel_name);
2605 break;
2606 }
2607 #if 0
2608 case LTTNG_DOMAIN_UST_EXEC_NAME:
2609 case LTTNG_DOMAIN_UST_PID:
2610 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2611 #endif
2612 default:
2613 ret = LTTCOMM_UND;
2614 goto error;
2615 }
2616
2617 ret = LTTCOMM_OK;
2618
2619 error:
2620 return ret;
2621 }
2622
2623 /*
2624 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
2625 */
2626 static int cmd_disable_event_all(struct ltt_session *session, int domain,
2627 char *channel_name)
2628 {
2629 int ret;
2630
2631 switch (domain) {
2632 case LTTNG_DOMAIN_KERNEL:
2633 {
2634 struct ltt_kernel_session *ksess;
2635 struct ltt_kernel_channel *kchan;
2636
2637 ksess = session->kernel_session;
2638
2639 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
2640 if (kchan == NULL) {
2641 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2642 goto error;
2643 }
2644
2645 ret = event_kernel_disable_all(ksess, kchan);
2646 if (ret != LTTCOMM_OK) {
2647 goto error;
2648 }
2649
2650 kernel_wait_quiescent(kernel_tracer_fd);
2651 break;
2652 }
2653 case LTTNG_DOMAIN_UST:
2654 {
2655 struct ltt_ust_session *usess;
2656 struct ltt_ust_channel *uchan;
2657
2658 usess = session->ust_session;
2659
2660 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2661 channel_name);
2662 if (uchan == NULL) {
2663 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2664 goto error;
2665 }
2666
2667 ret = event_ust_disable_all_tracepoints(usess, domain, uchan);
2668 if (ret != 0) {
2669 goto error;
2670 }
2671
2672 DBG3("Disable all UST events in channel %s completed", channel_name);
2673
2674 break;
2675 }
2676 #if 0
2677 case LTTNG_DOMAIN_UST_EXEC_NAME:
2678 case LTTNG_DOMAIN_UST_PID:
2679 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2680 #endif
2681 default:
2682 ret = LTTCOMM_UND;
2683 goto error;
2684 }
2685
2686 ret = LTTCOMM_OK;
2687
2688 error:
2689 return ret;
2690 }
2691
2692 /*
2693 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2694 */
2695 static int cmd_add_context(struct ltt_session *session, int domain,
2696 char *channel_name, char *event_name, struct lttng_event_context *ctx)
2697 {
2698 int ret;
2699
2700 switch (domain) {
2701 case LTTNG_DOMAIN_KERNEL:
2702 /* Add kernel context to kernel tracer */
2703 ret = context_kernel_add(session->kernel_session, ctx,
2704 event_name, channel_name);
2705 if (ret != LTTCOMM_OK) {
2706 goto error;
2707 }
2708 break;
2709 case LTTNG_DOMAIN_UST:
2710 {
2711 struct ltt_ust_session *usess = session->ust_session;
2712
2713 ret = context_ust_add(usess, domain, ctx, event_name, channel_name);
2714 if (ret != LTTCOMM_OK) {
2715 goto error;
2716 }
2717 break;
2718 }
2719 #if 0
2720 case LTTNG_DOMAIN_UST_EXEC_NAME:
2721 case LTTNG_DOMAIN_UST_PID:
2722 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2723 #endif
2724 default:
2725 ret = LTTCOMM_UND;
2726 goto error;
2727 }
2728
2729 ret = LTTCOMM_OK;
2730
2731 error:
2732 return ret;
2733 }
2734
2735 /*
2736 * Command LTTNG_SET_FILTER processed by the client thread.
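 * Filter bytecode is only supported for the UST domain at the moment; a kernel
 * domain request fails with LTTCOMM_FATAL.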
2737 */
2738 static int cmd_set_filter(struct ltt_session *session, int domain,
2739 char *channel_name, char *event_name,
2740 struct lttng_filter_bytecode *bytecode)
2741 {
2742 int ret;
2743
2744 switch (domain) {
2745 case LTTNG_DOMAIN_KERNEL:
2746 ret = LTTCOMM_FATAL;
2747 		goto error;
2748 case LTTNG_DOMAIN_UST:
2749 {
2750 struct ltt_ust_session *usess = session->ust_session;
2751
2752 ret = filter_ust_set(usess, domain, bytecode, event_name, channel_name);
2753 if (ret != LTTCOMM_OK) {
2754 goto error;
2755 }
2756 break;
2757 }
2758 #if 0
2759 case LTTNG_DOMAIN_UST_EXEC_NAME:
2760 case LTTNG_DOMAIN_UST_PID:
2761 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2762 #endif
2763 default:
2764 ret = LTTCOMM_UND;
2765 goto error;
2766 }
2767
2768 ret = LTTCOMM_OK;
2769
2770 error:
2771 return ret;
2772
2773 }
2774
2775 /*
2776 * Command LTTNG_ENABLE_EVENT processed by the client thread.
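 * If the target channel does not exist, it is created with default attributes
 * before the event is enabled.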
2777 */
2778 static int cmd_enable_event(struct ltt_session *session, int domain,
2779 char *channel_name, struct lttng_event *event)
2780 {
2781 int ret;
2782 struct lttng_channel *attr;
2783 struct ltt_ust_session *usess = session->ust_session;
2784
2785 switch (domain) {
2786 case LTTNG_DOMAIN_KERNEL:
2787 {
2788 struct ltt_kernel_channel *kchan;
2789
2790 kchan = trace_kernel_get_channel_by_name(channel_name,
2791 session->kernel_session);
2792 if (kchan == NULL) {
2793 attr = channel_new_default_attr(domain);
2794 if (attr == NULL) {
2795 ret = LTTCOMM_FATAL;
2796 goto error;
2797 }
2798 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2799
2800 /* This call will notify the kernel thread */
2801 ret = channel_kernel_create(session->kernel_session,
2802 attr, kernel_poll_pipe[1]);
2803 if (ret != LTTCOMM_OK) {
2804 free(attr);
2805 goto error;
2806 }
2807 free(attr);
2808 }
2809
2810 /* Get the newly created kernel channel pointer */
2811 kchan = trace_kernel_get_channel_by_name(channel_name,
2812 session->kernel_session);
2813 if (kchan == NULL) {
2814 			/* This should not happen... */
2815 ret = LTTCOMM_FATAL;
2816 goto error;
2817 }
2818
2819 ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
2820 event);
2821 if (ret != LTTCOMM_OK) {
2822 goto error;
2823 }
2824
2825 kernel_wait_quiescent(kernel_tracer_fd);
2826 break;
2827 }
2828 case LTTNG_DOMAIN_UST:
2829 {
2830 struct lttng_channel *attr;
2831 struct ltt_ust_channel *uchan;
2832
2833 /* Get channel from global UST domain */
2834 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2835 channel_name);
2836 if (uchan == NULL) {
2837 /* Create default channel */
2838 attr = channel_new_default_attr(domain);
2839 if (attr == NULL) {
2840 ret = LTTCOMM_FATAL;
2841 goto error;
2842 }
2843 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2844 attr->name[NAME_MAX - 1] = '\0';
2845
2846 ret = channel_ust_create(usess, domain, attr);
2847 if (ret != LTTCOMM_OK) {
2848 free(attr);
2849 goto error;
2850 }
2851 free(attr);
2852
2853 /* Get the newly created channel reference back */
2854 uchan = trace_ust_find_channel_by_name(
2855 usess->domain_global.channels, channel_name);
2856 if (uchan == NULL) {
2857 /* Something is really wrong */
2858 ret = LTTCOMM_FATAL;
2859 goto error;
2860 }
2861 }
2862
2863 /* At this point, the session and channel exist on the tracer */
2864 ret = event_ust_enable_tracepoint(usess, domain, uchan, event);
2865 if (ret != LTTCOMM_OK) {
2866 goto error;
2867 }
2868 break;
2869 }
2870 #if 0
2871 case LTTNG_DOMAIN_UST_EXEC_NAME:
2872 case LTTNG_DOMAIN_UST_PID:
2873 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2874 #endif
2875 default:
2876 ret = LTTCOMM_UND;
2877 goto error;
2878 }
2879
2880 ret = LTTCOMM_OK;
2881
2882 error:
2883 return ret;
2884 }
2885
2886 /*
2887 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
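 * As with cmd_enable_event(), the target channel is created with default
 * attributes if it does not exist yet.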
2888 */
2889 static int cmd_enable_event_all(struct ltt_session *session, int domain,
2890 char *channel_name, int event_type)
2891 {
2892 int ret;
2893 struct ltt_kernel_channel *kchan;
2894
2895 switch (domain) {
2896 case LTTNG_DOMAIN_KERNEL:
2897 kchan = trace_kernel_get_channel_by_name(channel_name,
2898 session->kernel_session);
2899 if (kchan == NULL) {
2900 /* This call will notify the kernel thread */
2901 ret = channel_kernel_create(session->kernel_session, NULL,
2902 kernel_poll_pipe[1]);
2903 if (ret != LTTCOMM_OK) {
2904 goto error;
2905 }
2906
2907 /* Get the newly created kernel channel pointer */
2908 kchan = trace_kernel_get_channel_by_name(channel_name,
2909 session->kernel_session);
2910 if (kchan == NULL) {
2911 			/* This should not happen... */
2912 ret = LTTCOMM_FATAL;
2913 goto error;
2914 }
2915
2916 }
2917
2918 switch (event_type) {
2919 case LTTNG_EVENT_SYSCALL:
2920 ret = event_kernel_enable_all_syscalls(session->kernel_session,
2921 kchan, kernel_tracer_fd);
2922 break;
2923 case LTTNG_EVENT_TRACEPOINT:
2924 /*
2925 			 * This call enables all LTTNG_KERNEL_TRACEPOINT events as well as
2926 			 * the events already registered to the channel.
2927 */
2928 ret = event_kernel_enable_all_tracepoints(session->kernel_session,
2929 kchan, kernel_tracer_fd);
2930 break;
2931 case LTTNG_EVENT_ALL:
2932 /* Enable syscalls and tracepoints */
2933 ret = event_kernel_enable_all(session->kernel_session,
2934 kchan, kernel_tracer_fd);
2935 break;
2936 default:
2937 ret = LTTCOMM_KERN_ENABLE_FAIL;
2938 goto error;
2939 }
2940
2941 /* Manage return value */
2942 if (ret != LTTCOMM_OK) {
2943 goto error;
2944 }
2945
2946 kernel_wait_quiescent(kernel_tracer_fd);
2947 break;
2948 case LTTNG_DOMAIN_UST:
2949 {
2950 struct lttng_channel *attr;
2951 struct ltt_ust_channel *uchan;
2952 struct ltt_ust_session *usess = session->ust_session;
2953
2954 /* Get channel from global UST domain */
2955 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2956 channel_name);
2957 if (uchan == NULL) {
2958 /* Create default channel */
2959 attr = channel_new_default_attr(domain);
2960 if (attr == NULL) {
2961 ret = LTTCOMM_FATAL;
2962 goto error;
2963 }
2964 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2965 attr->name[NAME_MAX - 1] = '\0';
2966
2967 /* Use the internal command enable channel */
2968 ret = channel_ust_create(usess, domain, attr);
2969 if (ret != LTTCOMM_OK) {
2970 free(attr);
2971 goto error;
2972 }
2973 free(attr);
2974
2975 /* Get the newly created channel reference back */
2976 uchan = trace_ust_find_channel_by_name(
2977 usess->domain_global.channels, channel_name);
2978 if (uchan == NULL) {
2979 /* Something is really wrong */
2980 ret = LTTCOMM_FATAL;
2981 goto error;
2982 }
2983 }
2984
2985 /* At this point, the session and channel exist on the tracer */
2986
2987 switch (event_type) {
2988 case LTTNG_EVENT_ALL:
2989 case LTTNG_EVENT_TRACEPOINT:
2990 ret = event_ust_enable_all_tracepoints(usess, domain, uchan);
2991 if (ret != LTTCOMM_OK) {
2992 goto error;
2993 }
2994 break;
2995 default:
2996 ret = LTTCOMM_UST_ENABLE_FAIL;
2997 goto error;
2998 }
2999
3000 /* Manage return value */
3001 if (ret != LTTCOMM_OK) {
3002 goto error;
3003 }
3004
3005 break;
3006 }
3007 #if 0
3008 case LTTNG_DOMAIN_UST_EXEC_NAME:
3009 case LTTNG_DOMAIN_UST_PID:
3010 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
3011 #endif
3012 default:
3013 ret = LTTCOMM_UND;
3014 goto error;
3015 }
3016
3017 ret = LTTCOMM_OK;
3018
3019 error:
3020 return ret;
3021 }
3022
3023 /*
3024 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
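 * Return the number of tracepoints found or a negative LTTCOMM error code.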
3025 */
3026 static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
3027 {
3028 int ret;
3029 ssize_t nb_events = 0;
3030
3031 switch (domain) {
3032 case LTTNG_DOMAIN_KERNEL:
3033 nb_events = kernel_list_events(kernel_tracer_fd, events);
3034 if (nb_events < 0) {
3035 ret = LTTCOMM_KERN_LIST_FAIL;
3036 goto error;
3037 }
3038 break;
3039 case LTTNG_DOMAIN_UST:
3040 nb_events = ust_app_list_events(events);
3041 if (nb_events < 0) {
3042 ret = LTTCOMM_UST_LIST_FAIL;
3043 goto error;
3044 }
3045 break;
3046 default:
3047 ret = LTTCOMM_UND;
3048 goto error;
3049 }
3050
3051 return nb_events;
3052
3053 error:
3054 /* Return negative value to differentiate return code */
3055 return -ret;
3056 }
3057
3058 /*
3059 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
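 * Return the number of event fields found or a negative LTTCOMM error code.
 * Only the UST domain is supported.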
3060 */
3061 static ssize_t cmd_list_tracepoint_fields(int domain,
3062 struct lttng_event_field **fields)
3063 {
3064 int ret;
3065 ssize_t nb_fields = 0;
3066
3067 switch (domain) {
3068 case LTTNG_DOMAIN_UST:
3069 nb_fields = ust_app_list_event_fields(fields);
3070 if (nb_fields < 0) {
3071 ret = LTTCOMM_UST_LIST_FAIL;
3072 goto error;
3073 }
3074 break;
3075 	case LTTNG_DOMAIN_KERNEL: /* fall-through */
3076 	default:
3077 ret = LTTCOMM_UND;
3078 goto error;
3079 }
3080
3081 return nb_fields;
3082
3083 error:
3084 /* Return negative value to differentiate return code */
3085 return -ret;
3086 }
3087
3088 /*
3089 * Command LTTNG_START_TRACE processed by the client thread.
3090 */
3091 static int cmd_start_trace(struct ltt_session *session)
3092 {
3093 int ret;
3094 struct ltt_kernel_session *ksession;
3095 struct ltt_ust_session *usess;
3096 struct ltt_kernel_channel *kchan;
3097
3098 /* Ease our life a bit ;) */
3099 ksession = session->kernel_session;
3100 usess = session->ust_session;
3101
3102 if (session->enabled) {
3103 /* Already started. */
3104 ret = LTTCOMM_TRACE_ALREADY_STARTED;
3105 goto error;
3106 }
3107
3108 session->enabled = 1;
3109
3110 ret = setup_relayd(session);
3111 if (ret != LTTCOMM_OK) {
3112 ERR("Error setting up relayd for session %s", session->name);
3113 goto error;
3114 }
3115
3116 /* Kernel tracing */
3117 if (ksession != NULL) {
3118 /* Open kernel metadata */
3119 if (ksession->metadata == NULL) {
3120 ret = kernel_open_metadata(ksession,
3121 ksession->consumer->dst.trace_path);
3122 if (ret < 0) {
3123 ret = LTTCOMM_KERN_META_FAIL;
3124 goto error;
3125 }
3126 }
3127
3128 /* Open kernel metadata stream */
3129 if (ksession->metadata_stream_fd < 0) {
3130 ret = kernel_open_metadata_stream(ksession);
3131 if (ret < 0) {
3132 ERR("Kernel create metadata stream failed");
3133 ret = LTTCOMM_KERN_STREAM_FAIL;
3134 goto error;
3135 }
3136 }
3137
3138 /* For each channel */
3139 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
3140 if (kchan->stream_count == 0) {
3141 ret = kernel_open_channel_stream(kchan);
3142 if (ret < 0) {
3143 ret = LTTCOMM_KERN_STREAM_FAIL;
3144 goto error;
3145 }
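			/* On success, ret is the number of streams just opened for this channel. */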
3146 /* Update the stream global counter */
3147 ksession->stream_count_global += ret;
3148 }
3149 }
3150
3151 /* Setup kernel consumer socket and send fds to it */
3152 ret = init_kernel_tracing(ksession);
3153 if (ret < 0) {
3154 ret = LTTCOMM_KERN_START_FAIL;
3155 goto error;
3156 }
3157
3158 		/* This starts the kernel tracing. */
3159 ret = kernel_start_session(ksession);
3160 if (ret < 0) {
3161 ret = LTTCOMM_KERN_START_FAIL;
3162 goto error;
3163 }
3164
3165 /* Quiescent wait after starting trace */
3166 kernel_wait_quiescent(kernel_tracer_fd);
3167 }
3168
3169 	/* Flag the UST session so tracing starts automatically, then start tracing in all registered applications. */
3170 if (usess) {
3171 usess->start_trace = 1;
3172
3173 ret = ust_app_start_trace_all(usess);
3174 if (ret < 0) {
3175 ret = LTTCOMM_UST_START_FAIL;
3176 goto error;
3177 }
3178 }
3179
3180 ret = LTTCOMM_OK;
3181
3182 error:
3183 return ret;
3184 }
3185
3186 /*
3187 * Command LTTNG_STOP_TRACE processed by the client thread.
3188 */
3189 static int cmd_stop_trace(struct ltt_session *session)
3190 {
3191 int ret;
3192 struct ltt_kernel_channel *kchan;
3193 struct ltt_kernel_session *ksession;
3194 struct ltt_ust_session *usess;
3195
3196 /* Short cut */
3197 ksession = session->kernel_session;
3198 usess = session->ust_session;
3199
3200 if (!session->enabled) {
3201 ret = LTTCOMM_TRACE_ALREADY_STOPPED;
3202 goto error;
3203 }
3204
3205 session->enabled = 0;
3206
3207 /* Kernel tracer */
3208 if (ksession != NULL) {
3209 DBG("Stop kernel tracing");
3210
3211 		/* Flush metadata if it exists */
3212 if (ksession->metadata_stream_fd >= 0) {
3213 ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
3214 if (ret < 0) {
3215 ERR("Kernel metadata flush failed");
3216 }
3217 }
3218
3219 /* Flush all buffers before stopping */
3220 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
3221 ret = kernel_flush_buffer(kchan);
3222 if (ret < 0) {
3223 ERR("Kernel flush buffer error");
3224 }
3225 }
3226
3227 ret = kernel_stop_session(ksession);
3228 if (ret < 0) {
3229 ret = LTTCOMM_KERN_STOP_FAIL;
3230 goto error;
3231 }
3232
3233 kernel_wait_quiescent(kernel_tracer_fd);
3234 }
3235
3236 if (usess) {
3237 usess->start_trace = 0;
3238
3239 ret = ust_app_stop_trace_all(usess);
3240 if (ret < 0) {
3241 ret = LTTCOMM_UST_STOP_FAIL;
3242 goto error;
3243 }
3244 }
3245
3246 ret = LTTCOMM_OK;
3247
3248 error:
3249 return ret;
3250 }
3251
3252 /*
3253 * Command LTTNG_CREATE_SESSION_URI processed by the client thread.
3254 */
3255 static int cmd_create_session_uri(char *name, struct lttng_uri *ctrl_uri,
3256 struct lttng_uri *data_uri, unsigned int enable_consumer,
3257 lttng_sock_cred *creds)
3258 {
3259 int ret;
3260 char *path = NULL;
3261 struct ltt_session *session;
3262 struct consumer_output *consumer;
3263
3264 	/* Make sure the session does not already exist */
3265 session = session_find_by_name(name);
3266 if (session != NULL) {
3267 ret = LTTCOMM_EXIST_SESS;
3268 goto error;
3269 }
3270
3271 /* TODO: validate URIs */
3272
3273 /* Create default consumer output */
3274 consumer = consumer_create_output(CONSUMER_DST_LOCAL