/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <semaphore.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"

#define CONSUMERD_FILE "lttng-consumerd"
const char default_home_dir[] = DEFAULT_HOME_DIR;
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;

const char *opt_tracing_group;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;	/* Set to 1 if the daemon is running as root */
static pid_t ppid;	/* Parent PID for --sig-parent option */
/* Consumer daemon specific control data */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
};

static int dispatch_thread_exit;
/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
static int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };
/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration threads
 * interact with this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
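
/*
 * Illustrative sketch of the wait/wake protocol around this queue, mirroring
 * how the two threads below actually use it (see futex.c for the real
 * implementation):
 *
 *   waker (registration thread):       waiter (dispatch thread):
 *     cds_wfq_enqueue(&queue, &node);    futex_nto1_prepare(&futex);
 *     futex_nto1_wake(&futex);           node = cds_wfq_dequeue_blocking(&queue);
 *                                        ...
 *                                        futex_nto1_wait(&futex);
 */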
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
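
/*
 * Typical iteration over the session list (sketch of the pattern used
 * throughout this file):
 *
 *   session_lock_list();
 *   cds_list_for_each_entry(session, &session_list_ptr->head, list) {
 *           ...
 *   }
 *   session_unlock_list();
 */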
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd tries to talk to consumer
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
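
/*
 * Sketch of the validation described above, as a client command handler would
 * perform it (hypothetical handler pseudo-code; the variable names are the
 * ones declared here):
 *
 *   if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
 *           return an error code to the client;
 *   }
 *   forward the command to the consumer;   // past validation, assumed safe
 */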
/*
 * Used to keep a unique index for each relayd socket created where this value
 * is associated with streams on the consumer so it can match the right relayd.
 *
 * This value should be incremented atomically for safety purposes and future
 * possible concurrent access.
 */
static unsigned int relayd_net_seq_idx;
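
/*
 * Intended access pattern (this is what send_socket_relayd_consumer() below
 * does when assigning a stream matching key):
 *
 *   uatomic_inc(&relayd_net_seq_idx);
 *   consumer->net_seq_index = uatomic_read(&relayd_net_seq_idx);
 */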
/* Used for the health monitoring of the session daemon. See health.h */
struct health_state health_thread_cmd;
struct health_state health_thread_app_reg;
struct health_state health_thread_kernel;
/* Setup the paths to the consumerd binaries and library directories. */
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * Runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_thread_poll_set(struct lttng_poll_event *events, int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	struct group *grp;

	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
	if (!grp) {
		return -1;
	} else {
		return grp->gr_gid;
	}
}
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Complete teardown of a kernel session. This frees all data structures
 * related to a kernel session and updates the counters.
 */
static void teardown_kernel_session(struct ltt_session *session)
{
	if (!session->kernel_session) {
		DBG3("No kernel session when tearing down session");
		return;
	}

	DBG("Tearing down kernel session");

	/*
	 * If a custom kernel consumer was registered, close the socket before
	 * tearing down the complete kernel session structure.
	 */
	if (kconsumer_data.cmd_sock >= 0 &&
			session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
		lttcomm_close_unix_sock(session->kernel_session->consumer_fd);
	}

	trace_kernel_destroy_session(session->kernel_session);
}
/*
 * Complete teardown of all UST sessions. This will free everything on its
 * path and destroy the core essence of all ust sessions :)
 */
static void teardown_ust_session(struct ltt_session *session)
{
	int ret;

	if (!session->ust_session) {
		DBG3("No UST session when tearing down session");
		return;
	}

	DBG("Tearing down UST session(s)");

	ret = ust_app_destroy_trace_all(session->ust_session);
	if (ret) {
		ERR("Error in ust_app_destroy_trace_all");
	}

	trace_ust_destroy_session(session->ust_session);
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	dispatch_thread_exit = 1;
	futex_nto1_wake(&ust_cmd_queue.futex);
}
/*
 * Cleanup the daemon
 */
static void cleanup(void)
{
	int ret;
	char *cmd;
	struct ltt_session *sess, *stmp;

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			teardown_kernel_session(sess);
			teardown_ust_session(sess);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();

	pthread_mutex_destroy(&kconsumer_data.pid_mutex);

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	utils_close_pipe(kernel_poll_pipe);
	utils_close_pipe(thread_quit_pipe);
	utils_close_pipe(apps_cmd_pipe);

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
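
	/*
	 * Sketch of the buffer built here (header immediately followed by the
	 * command payload):
	 *
	 *   llm:  [ struct lttcomm_lttng_msg | data_size bytes of payload ]
	 *
	 * lttng_msg_size below accounts for both parts.
	 */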
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		/* This is not supposed to be -1 but this is an extra security check */
		if (session->kernel_session->consumer_fd < 0) {
			session->kernel_session->consumer_fd = consumer_data->cmd_sock;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (session->kernel_session->consumer_fds_sent == 1 &&
						session->kernel_session->consumer != NULL) {
					ret = kernel_consumer_send_channel_stream(
							session->kernel_session->consumer_fd, channel,
							session->kernel_session);
					if (ret < 0) {
						goto error;
					}
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	session_lock_list();

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}

	session_unlock_list();
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("Thread manage kernel started");

	health_code_update(&health_thread_kernel);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		health_code_update(&health_thread_kernel);

		if (update_poll_flag == 1) {
			/*
			 * Reset number of fd in the poll set. Always 2 since there is the thread
			 * quit pipe and the kernel pipe.
			 */
			events.nb_fd = 2;

			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Thread kernel polling on %d fds", nb_fd);

		/* Zero the poll events */
		lttng_poll_reset(&events);

		/* Poll infinite value of time */
	restart:
		health_poll_update(&health_thread_kernel);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_kernel);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_kernel);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				ret = read(kernel_poll_pipe[0], &tmp, 1);
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						goto error;
					}
					break;
				}

				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	health_reset(&health_thread_kernel);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_code_update(&consumer_data->health);

	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	nb_fd = LTTNG_POLL_GETNB(&events);

	health_code_update(&consumer_data->health);

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	if (code == CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			sem_post(&consumer_data->sem);
			PERROR("consumer connect");
			goto error;
		}
		/* Signal condition to tell that the kconsumerd is ready */
		sem_post(&consumer_data->sem);
		DBG("consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Update number of fd */
	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			goto exit;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	health_code_update(&consumer_data->health);

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

exit:
error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);
error_poll:
error_listen:
	health_reset(&consumer_data->health);
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_code_update(&health_thread_app_reg);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&health_thread_app_reg);

	while (1) {
		/* Zero the events structure */
		lttng_poll_reset(&events);

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Apps thread polling on %d fds", nb_fd);

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_reg);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_reg);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_app_reg);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					/* Empty pipe */
					ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update(&health_thread_app_reg);

					/* Register application to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						goto error;
					} else if (ret < 0) {
						break;
					}

					health_code_update(&health_thread_app_reg);

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					health_code_update(&health_thread_app_reg);

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We just need here to monitor the close of the UST
						 * socket and poll set monitor those by default.
						 * Listen on POLLIN (even if we never expect any
						 * data) to ensure that hangup wakes us.
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);
						if (ret < 0) {
							goto error;
						}

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}

					health_code_update(&health_thread_app_reg);

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update(&health_thread_app_reg);
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	health_reset(&health_thread_app_reg);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!dispatch_thread_exit) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			ret = write(apps_cmd_pipe[1], ust_cmd,
					sizeof(struct ust_command));
			if (ret < 0) {
				PERROR("write apps cmd pipe");
				if (errno == EBADF) {
					/*
					 * We can't inform the application thread to process
					 * registration. We will exit or else application
					 * registration will not occur and tracing will never
					 * start.
					 */
					goto error;
				}
			}
			free(ust_cmd);
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}

error:
	DBG("Dispatch thread dying");
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problems for already\n"
				"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
	DBG("UST Registration thread cleanup complete");
	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret;
	struct timespec timeout;

	timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
	timeout.tv_nsec = 0;

	/* Setup semaphore */
	ret = sem_init(&consumer_data->sem, 0, 0);
	if (ret < 0) {
		PERROR("sem_init consumer semaphore");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL,
			thread_manage_consumer, consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* Get time for sem_timedwait absolute timeout */
	ret = clock_gettime(CLOCK_REALTIME, &timeout);
	if (ret < 0) {
		PERROR("clock_gettime spawn consumer");
		/* Infinite wait for the kconsumerd thread to be ready */
		ret = sem_wait(&consumer_data->sem);
	} else {
		/* Normal timeout if the gettime was successful */
		timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
		ret = sem_timedwait(&consumer_data->sem, &timeout);
	}

	if (ret < 0) {
		if (errno == ETIMEDOUT) {
			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("The consumer thread was never ready. Killing it");
			ret = pthread_cancel(consumer_data->thread);
			if (ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("semaphore wait failed consumer thread");
		}
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Kconsumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;
	int ret;

	if (consumer_data->pid != 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}

	return pthread_join(consumer_data->thread, &status);
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}

error:
	return ret;
}
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret;

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	return ret;
}
/*
 * Compute health status of each consumer.
 */
static int check_consumer_health(void)
{
	int ret;

	ret = health_check_state(&kconsumer_data.health) &
		health_check_state(&ustconsumer32_data.health) &
		health_check_state(&ustconsumer64_data.health);
);
/*
 * Check version of the lttng-modules.
 */
static int validate_lttng_modules_version(void)
{
	return kernel_validate_version(kernel_tracer_fd);
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = validate_lttng_modules_version();
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTCOMM_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTCOMM_NEED_ROOT_SESSIOND;
	} else {
		return LTTCOMM_KERN_NA;
	}
}
/*
 * Init tracing by creating trace directory and sending fds to the kernel
 * consumer.
 */
static int init_kernel_tracing(struct ltt_kernel_session *session)
{
	int ret = 0;

	if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
		/*
		 * Assign default kernel consumer socket if no consumer assigned to the
		 * kernel session. At this point, it's NOT supposed to be -1 but this is
		 * an extra security check.
		 */
		if (session->consumer_fd < 0) {
			session->consumer_fd = kconsumer_data.cmd_sock;
		}

		ret = kernel_consumer_send_session(session->consumer_fd, session);
		if (ret < 0) {
			ret = LTTCOMM_KERN_CONSUMER_FAIL;
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Create a socket to the relayd using the URI.
 *
 * On success, the relayd_sock pointer is set to the created socket.
 * Else, it is untouched and an lttcomm error code is returned.
 */
static int create_connect_relayd(struct consumer_output *output,
		const char *session_name, struct lttng_uri *uri,
		struct lttcomm_sock **relayd_sock)
{
	int ret;
	struct lttcomm_sock *sock;

	/* Create socket object from URI */
	sock = lttcomm_alloc_sock_from_uri(uri);
	if (sock == NULL) {
		ret = LTTCOMM_FATAL;
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		ret = LTTCOMM_FATAL;
		goto free_sock;
	}

	/* Connect to relayd so we can proceed with a session creation. */
	ret = relayd_connect(sock);
	if (ret < 0) {
		ERR("Unable to reach lttng-relayd");
		ret = LTTCOMM_RELAYD_SESSION_FAIL;
		goto free_sock;
	}

	/* Create socket for control stream. */
	if (uri->stype == LTTNG_STREAM_CONTROL) {
		DBG3("Creating relayd stream socket from URI");

		/* Check relayd version */
		ret = relayd_version_check(sock, LTTNG_UST_COMM_MAJOR, 0);
		if (ret < 0) {
			ret = LTTCOMM_RELAYD_VERSION_FAIL;
			goto close_sock;
		}
	} else if (uri->stype == LTTNG_STREAM_DATA) {
		DBG3("Creating relayd data socket from URI");
	} else {
		/* Command is not valid */
		ERR("Relayd invalid stream type: %d", uri->stype);
		ret = LTTCOMM_INVALID;
		goto close_sock;
	}

	*relayd_sock = sock;

	return LTTCOMM_OK;

close_sock:
	(void) relayd_close(sock);
free_sock:
	lttcomm_destroy_sock(sock);

error:
	return ret;
}
/*
 * Connect to the relayd using URI and send the socket to the right consumer.
 */
static int send_socket_relayd_consumer(int domain, struct ltt_session *session,
		struct lttng_uri *relayd_uri, struct consumer_output *consumer,
		int consumer_fd)
{
	int ret;
	struct lttcomm_sock *sock = NULL;

	/* Set the network sequence index if not set. */
	if (consumer->net_seq_index == -1) {
		/*
		 * Increment net_seq_idx because we are about to transfer the
		 * new relayd socket to the consumer.
		 */
		uatomic_inc(&relayd_net_seq_idx);
		/* Assign unique key so the consumer can match streams */
		consumer->net_seq_index = uatomic_read(&relayd_net_seq_idx);
	}

	/* Connect to relayd and make version check if uri is the control. */
	ret = create_connect_relayd(consumer, session->name, relayd_uri, &sock);
	if (ret != LTTCOMM_OK) {
		goto close_sock;
	}

	/* If the control socket is connected, network session is ready */
	if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
		session->net_handle = 1;
	}

	/* Send relayd socket to consumer. */
	ret = consumer_send_relayd_socket(consumer_fd, sock,
			consumer, relayd_uri->stype);
	if (ret < 0) {
		ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
		goto close_sock;
	}

	ret = LTTCOMM_OK;

	/*
	 * Close socket which was dup on the consumer side. The session daemon does
	 * NOT keep track of the relayd socket(s) once transferred to the consumer.
	 */

close_sock:
	if (sock != NULL) {
		(void) relayd_close(sock);
		lttcomm_destroy_sock(sock);
	}

	return ret;
}
/*
 * Send both relayd sockets to a specific consumer and domain. This is a
 * helper function to facilitate sending the information to the consumer for a
 * session.
 */
static int send_sockets_relayd_consumer(int domain,
		struct ltt_session *session, struct consumer_output *consumer, int fd)
{
	int ret;

	/* Sending control relayd socket. */
	ret = send_socket_relayd_consumer(domain, session,
			&consumer->dst.net.control, consumer, fd);
	if (ret != LTTCOMM_OK) {
		goto error;
	}

	/* Sending data relayd socket. */
	ret = send_socket_relayd_consumer(domain, session,
			&consumer->dst.net.data, consumer, fd);
	if (ret != LTTCOMM_OK) {
		goto error;
	}

error:
	return ret;
}
/*
 * Setup relayd connections for a tracing session. First creates the socket to
 * the relayd and send them to the right domain consumer. Consumer type MUST
 * be network.
 */
static int setup_relayd(struct ltt_session *session)
{
	int ret = LTTCOMM_OK;
	struct ltt_ust_session *usess;
	struct ltt_kernel_session *ksess;

	usess = session->ust_session;
	ksess = session->kernel_session;

	DBG2("Setting relayd for session %s", session->name);

	if (usess && usess->consumer->sock == -1 &&
			usess->consumer->type == CONSUMER_DST_NET &&
			usess->consumer->enabled) {
		/* Setup relayd for 64 bits consumer */
		if (ust_consumerd64_fd >= 0) {
			ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
					usess->consumer, ust_consumerd64_fd);
			if (ret != LTTCOMM_OK) {
				goto error;
			}
		}

		/* Setup relayd for 32 bits consumer */
		if (ust_consumerd32_fd >= 0) {
			ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
					usess->consumer, ust_consumerd32_fd);
			if (ret != LTTCOMM_OK) {
				goto error;
			}
		}
	} else if (ksess && ksess->consumer->sock == -1 &&
			ksess->consumer->type == CONSUMER_DST_NET &&
			ksess->consumer->enabled) {
		ret = send_sockets_relayd_consumer(LTTNG_DOMAIN_KERNEL, session,
				ksess->consumer, ksess->consumer_fd);
		if (ret != LTTCOMM_OK) {
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTCOMM_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name, sizeof(consumer->subdir));
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	/* Add default trace directory name */
	if (consumer->type == CONSUMER_DST_LOCAL) {
		strncat(consumer->dst.trace_path, dir_name,
				sizeof(consumer->dst.trace_path));
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
/*
 * Create an UST session and add it to the session ust list.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTCOMM_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->path, session->id, domain);
	if (lus == NULL) {
		ret = LTTCOMM_UST_SESS_FAIL;
		goto error;
	}

	if (session->consumer->type == CONSUMER_DST_LOCAL) {
		ret = run_as_mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
				session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				ret = LTTCOMM_UST_SESS_FAIL;
				goto error;
			}
		}
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTCOMM_OK) {
		goto error;
	}

	return LTTCOMM_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTCOMM_KERN_SESS_FAIL;
		goto error;
	}

	/* Set kernel consumer socket fd */
	if (kconsumer_data.cmd_sock >= 0) {
		session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
	}

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTCOMM_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->consumer->type == CONSUMER_DST_LOCAL) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;

	return LTTCOMM_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Check if the UID or GID match the session. Root user has access to all
 * sessions.
 */
static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
{
	if (uid != session->uid && gid != session->gid && uid != 0) {
		return 0;
	} else {
		return 1;
	}
}
/*
 * Count number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);

	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}

	return i;
}
/*
 * Using the session list, fill an lttng_session array to send back to the
 * client for session listing.
 *
 * The session list lock MUST be acquired before calling this function. Use
 * session_lock_list() and session_unlock_list().
 */
static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
		gid_t gid)
{
	int i = 0;
	struct ltt_session *session;

	DBG("Getting all available session for UID %d GID %d",
			uid, gid);

	/*
	 * Iterate over session list and append data after the control struct in
	 * the buffer.
	 */
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}

		strncpy(sessions[i].path, session->path, PATH_MAX);
		sessions[i].path[PATH_MAX - 1] = '\0';
		strncpy(sessions[i].name, session->name, NAME_MAX);
		sessions[i].name[NAME_MAX - 1] = '\0';
		sessions[i].enabled = session->enabled;
		i++;
	}
}
/*
 * Fill lttng_channel array of all channels.
 */
static void list_lttng_channels(int domain, struct ltt_session *session,
		struct lttng_channel *channels)
{
	int i = 0;
	struct ltt_kernel_channel *kchan;

	DBG("Listing channels for session %s", session->name);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		/* Kernel channels */
		if (session->kernel_session != NULL) {
			cds_list_for_each_entry(kchan,
					&session->kernel_session->channel_list.head, list) {
				/* Copy lttng_channel struct to array */
				memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
				channels[i].enabled = kchan->enabled;
				i++;
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_ht_iter iter;
		struct ltt_ust_channel *uchan;

		cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
				&iter.iter, uchan, node.node) {
			strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
			channels[i].attr.overwrite = uchan->attr.overwrite;
			channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
			channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
			channels[i].attr.switch_timer_interval =
				uchan->attr.switch_timer_interval;
			channels[i].attr.read_timer_interval =
				uchan->attr.read_timer_interval;
			channels[i].enabled = uchan->enabled;
			switch (uchan->attr.output) {
			case LTTNG_UST_MMAP:
			default:
				channels[i].attr.output = LTTNG_EVENT_MMAP;
				break;
			}
			i++;
		}
		break;
	}
	default:
		break;
	}
}
/*
 * Create a list of ust global domain events.
 */
static int list_lttng_ust_global_events(char *channel_name,
		struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ltt_ust_channel *uchan;
	struct ltt_ust_event *uevent;
	struct lttng_event *tmp;

	DBG("Listing UST global events for channel %s", channel_name);

	lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);

	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		ret = -LTTCOMM_UST_CHAN_NOT_FOUND;
		goto error;
	}

	uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);

	nb_event += lttng_ht_get_count(uchan->events);

	if (nb_event == 0) {
		ret = nb_event;
		goto error;
	}

	DBG3("Listing UST global %d events", nb_event);

	tmp = zmalloc(nb_event * sizeof(struct lttng_event));
	if (tmp == NULL) {
		ret = -LTTCOMM_FATAL;
		goto error;
	}

	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
		tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		tmp[i].enabled = uevent->enabled;
		switch (uevent->attr.instrumentation) {
		case LTTNG_UST_TRACEPOINT:
			tmp[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_UST_PROBE:
			tmp[i].type = LTTNG_EVENT_PROBE;
			break;
		case LTTNG_UST_FUNCTION:
			tmp[i].type = LTTNG_EVENT_FUNCTION;
			break;
		}
		tmp[i].loglevel = uevent->attr.loglevel;
		switch (uevent->attr.loglevel_type) {
		case LTTNG_UST_LOGLEVEL_ALL:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
			break;
		case LTTNG_UST_LOGLEVEL_RANGE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
			break;
		case LTTNG_UST_LOGLEVEL_SINGLE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
			break;
		}
		if (uevent->filter) {
			tmp[i].filter = 1;
		}
		i++;
	}

	ret = nb_event;
	*events = tmp;

error:
	return ret;
}
/*
 * Fill lttng_event array of all kernel events in the channel.
 */
static int list_lttng_kernel_events(char *channel_name,
		struct ltt_kernel_session *kernel_session, struct lttng_event **events)
{
	int i = 0, ret;
	unsigned int nb_event;
	struct ltt_kernel_event *event;
	struct ltt_kernel_channel *kchan;

	kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
	if (kchan == NULL) {
		ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
		goto error;
	}

	nb_event = kchan->event_count;

	DBG("Listing events for channel %s", kchan->channel->name);

	if (nb_event == 0) {
		ret = nb_event;
		goto error;
	}

	*events = zmalloc(nb_event * sizeof(struct lttng_event));
	if (*events == NULL) {
		ret = LTTCOMM_FATAL;
		goto error;
	}

	/* Kernel channels */
	cds_list_for_each_entry(event, &kchan->events_list.head, list) {
		strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
		(*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		(*events)[i].enabled = event->enabled;
		switch (event->event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
			(*events)[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_KERNEL_KPROBE:
		case LTTNG_KERNEL_KRETPROBE:
			(*events)[i].type = LTTNG_EVENT_PROBE;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_FUNCTION:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
					sizeof(struct lttng_kernel_function));
			break;
		case LTTNG_KERNEL_NOOP:
			(*events)[i].type = LTTNG_EVENT_NOOP;
			break;
		case LTTNG_KERNEL_SYSCALL:
			(*events)[i].type = LTTNG_EVENT_SYSCALL;
			break;
		case LTTNG_KERNEL_ALL:
			assert(0);
			break;
		}
		i++;
	}

	ret = nb_event;

error:
	return ret;
}
2504 static int cmd_disable_channel(struct ltt_session
*session
,
2505 int domain
, char *channel_name
)
2508 struct ltt_ust_session
*usess
;
2510 usess
= session
->ust_session
;
2513 case LTTNG_DOMAIN_KERNEL
:
2515 ret
= channel_kernel_disable(session
->kernel_session
,
2517 if (ret
!= LTTCOMM_OK
) {
2521 kernel_wait_quiescent(kernel_tracer_fd
);
2524 case LTTNG_DOMAIN_UST
:
2526 struct ltt_ust_channel
*uchan
;
2527 struct lttng_ht
*chan_ht
;
2529 chan_ht
= usess
->domain_global
.channels
;
2531 uchan
= trace_ust_find_channel_by_name(chan_ht
, channel_name
);
2532 if (uchan
== NULL
) {
2533 ret
= LTTCOMM_UST_CHAN_NOT_FOUND
;
2537 ret
= channel_ust_disable(usess
, domain
, uchan
);
2538 if (ret
!= LTTCOMM_OK
) {
2544 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2545 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2546 case LTTNG_DOMAIN_UST_PID
:
2549 ret
= LTTCOMM_UNKNOWN_DOMAIN
;
/*
 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
 */
static int cmd_enable_channel(struct ltt_session *session,
		int domain, struct lttng_channel *attr)
{
	int ret;
	struct ltt_ust_session *usess = session->ust_session;
	struct lttng_ht *chan_ht;

	DBG("Enabling channel %s for session %s", attr->name, session->name);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		kchan = trace_kernel_get_channel_by_name(attr->name,
				session->kernel_session);
		if (kchan == NULL) {
			ret = channel_kernel_create(session->kernel_session,
					attr, kernel_poll_pipe[1]);
		} else {
			ret = channel_kernel_enable(session->kernel_session, kchan);
		}

		if (ret != LTTCOMM_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;

		chan_ht = usess->domain_global.channels;

		uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
		if (uchan == NULL) {
			ret = channel_ust_create(usess, domain, attr);
		} else {
			ret = channel_ust_enable(usess, domain, uchan);
		}
		break;
	}
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	default:
		ret = LTTCOMM_UNKNOWN_DOMAIN;
		goto error;
	}

error:
	return ret;
}
/*
 * Command LTTNG_DISABLE_EVENT processed by the client thread.
 */
static int cmd_disable_event(struct ltt_session *session, int domain,
		char *channel_name, char *event_name)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
		if (kchan == NULL) {
			ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
			goto error;
		}

		ret = event_kernel_disable_tracepoint(ksess, kchan, event_name);
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			ret = LTTCOMM_UST_CHAN_NOT_FOUND;
			goto error;
		}

		ret = event_ust_disable_tracepoint(usess, domain, uchan, event_name);
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		DBG3("Disable UST event %s in channel %s completed", event_name,
				channel_name);
		break;
	}
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
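/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * client-side trigger for this command. The event and channel names are
 * hypothetical.
 */
#if 0
	/* Routed by the client thread to cmd_disable_event(). */
	lttng_disable_event(handle, "sched_switch", "chan0");
#endif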
/*
 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
 */
static int cmd_disable_event_all(struct ltt_session *session, int domain,
		char *channel_name)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;
		struct ltt_kernel_channel *kchan;

		ksess = session->kernel_session;

		kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
		if (kchan == NULL) {
			ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
			goto error;
		}

		ret = event_kernel_disable_all(ksess, kchan);
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;
		struct ltt_ust_channel *uchan;

		usess = session->ust_session;

		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			ret = LTTCOMM_UST_CHAN_NOT_FOUND;
			goto error;
		}

		ret = event_ust_disable_all_tracepoints(usess, domain, uchan);
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		DBG3("Disable all UST events in channel %s completed", channel_name);
		break;
	}
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
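/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * on the client side, passing a NULL event name is assumed to map to the
 * LTTNG_DISABLE_ALL_EVENT command handled above (liblttng-ctl convention,
 * not confirmed by this excerpt).
 */
#if 0
	lttng_disable_event(handle, NULL, "chan0");
#endif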
/*
 * Command LTTNG_ADD_CONTEXT processed by the client thread.
 */
static int cmd_add_context(struct ltt_session *session, int domain,
		char *channel_name, char *event_name, struct lttng_event_context *ctx)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		/* Add kernel context to kernel tracer */
		ret = context_kernel_add(session->kernel_session, ctx,
				event_name, channel_name);
		if (ret != LTTCOMM_OK) {
			goto error;
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess = session->ust_session;

		ret = context_ust_add(usess, domain, ctx, event_name, channel_name);
		if (ret != LTTCOMM_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
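/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * building a context descriptor on the client side. NULL event/channel
 * names broaden the scope; the exact semantics belong to liblttng-ctl.
 */
#if 0
	struct lttng_event_context ctx;

	memset(&ctx, 0, sizeof(ctx));
	ctx.ctx = LTTNG_EVENT_CONTEXT_PID;

	/* Routed by the client thread to cmd_add_context(). */
	lttng_add_context(handle, &ctx, NULL, NULL);
#endif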
/*
 * Command LTTNG_SET_FILTER processed by the client thread.
 */
static int cmd_set_filter(struct ltt_session *session, int domain,
		char *channel_name, char *event_name,
		struct lttng_filter_bytecode *bytecode)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		ret = LTTCOMM_FATAL;
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess = session->ust_session;

		ret = filter_ust_set(usess, domain, bytecode, event_name, channel_name);
		if (ret != LTTCOMM_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
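/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * the kernel branch above fails unconditionally, so callers can only
 * expect filter bytecode to apply in the UST domain.
 */
#if 0
	/* Kernel filtering is not supported by this command: LTTCOMM_FATAL. */
	ret = cmd_set_filter(session, LTTNG_DOMAIN_KERNEL, "chan0", "ev0",
			bytecode);
	assert(ret == LTTCOMM_FATAL);
#endif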
/*
 * Command LTTNG_ENABLE_EVENT processed by the client thread.
 */
static int cmd_enable_event(struct ltt_session *session, int domain,
		char *channel_name, struct lttng_event *event)
{
	int ret;
	struct lttng_channel *attr;
	struct ltt_ust_session *usess = session->ust_session;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			attr = channel_new_default_attr(domain);
			if (attr == NULL) {
				ret = LTTCOMM_FATAL;
				goto error;
			}
			snprintf(attr->name, NAME_MAX, "%s", channel_name);

			/* This call will notify the kernel thread */
			ret = channel_kernel_create(session->kernel_session,
					attr, kernel_poll_pipe[1]);
			if (ret != LTTCOMM_OK) {
				free(attr);
				goto error;
			}
			/* The default attributes are no longer needed. */
			free(attr);

			/* Get the newly created kernel channel pointer */
			kchan = trace_kernel_get_channel_by_name(channel_name,
					session->kernel_session);
			if (kchan == NULL) {
				/* This should not happen... */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
				event);
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_channel *attr;
		struct ltt_ust_channel *uchan;

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(domain);
			if (attr == NULL) {
				ret = LTTCOMM_FATAL;
				goto error;
			}
			snprintf(attr->name, NAME_MAX, "%s", channel_name);
			attr->name[NAME_MAX - 1] = '\0';

			ret = channel_ust_create(usess, domain, attr);
			if (ret != LTTCOMM_OK) {
				free(attr);
				goto error;
			}
			free(attr);

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
			if (uchan == NULL) {
				/* Something is really wrong */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */
		ret = event_ust_enable_tracepoint(usess, domain, uchan, event);
		if (ret != LTTCOMM_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
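/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * client-side enable of a single tracepoint; the channel is created on
 * demand by the command above. Names are hypothetical.
 */
#if 0
	struct lttng_event ev;

	memset(&ev, 0, sizeof(ev));
	strncpy(ev.name, "sched_switch", LTTNG_SYMBOL_NAME_LEN);
	ev.name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
	ev.type = LTTNG_EVENT_TRACEPOINT;

	/* Routed by the client thread to cmd_enable_event(). */
	lttng_enable_event(handle, &ev, "chan0");
#endif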
/*
 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
 */
static int cmd_enable_event_all(struct ltt_session *session, int domain,
		char *channel_name, int event_type)
{
	int ret;
	struct ltt_kernel_channel *kchan;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			/* This call will notify the kernel thread */
			ret = channel_kernel_create(session->kernel_session, NULL,
					kernel_poll_pipe[1]);
			if (ret != LTTCOMM_OK) {
				goto error;
			}

			/* Get the newly created kernel channel pointer */
			kchan = trace_kernel_get_channel_by_name(channel_name,
					session->kernel_session);
			if (kchan == NULL) {
				/* This should not happen... */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		switch (event_type) {
		case LTTNG_EVENT_SYSCALL:
			ret = event_kernel_enable_all_syscalls(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_TRACEPOINT:
			/*
			 * This call enables all LTTNG_KERNEL_TRACEPOINTS and
			 * events already registered to the channel.
			 */
			ret = event_kernel_enable_all_tracepoints(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_ALL:
			/* Enable syscalls and tracepoints */
			ret = event_kernel_enable_all(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		default:
			ret = LTTCOMM_KERN_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value */
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_channel *attr;
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess = session->ust_session;

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(domain);
			if (attr == NULL) {
				ret = LTTCOMM_FATAL;
				goto error;
			}
			snprintf(attr->name, NAME_MAX, "%s", channel_name);
			attr->name[NAME_MAX - 1] = '\0';

			/* Use the internal command enable channel */
			ret = channel_ust_create(usess, domain, attr);
			if (ret != LTTCOMM_OK) {
				free(attr);
				goto error;
			}
			free(attr);

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
			if (uchan == NULL) {
				/* Something is really wrong */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */

		switch (event_type) {
		case LTTNG_EVENT_ALL:
		case LTTNG_EVENT_TRACEPOINT:
			ret = event_ust_enable_all_tracepoints(usess, domain, uchan);
			if (ret != LTTCOMM_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTCOMM_UST_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value */
		if (ret != LTTCOMM_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
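/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * event_type selects which enabler runs; LTTNG_EVENT_ALL covers both
 * syscalls and tracepoints in the kernel domain.
 */
#if 0
	ret = cmd_enable_event_all(session, LTTNG_DOMAIN_KERNEL, "chan0",
			LTTNG_EVENT_ALL);
#endif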
/*
 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
 */
static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
{
	int ret;
	ssize_t nb_events = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		nb_events = kernel_list_events(kernel_tracer_fd, events);
		if (nb_events < 0) {
			ret = LTTCOMM_KERN_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_UST:
		nb_events = ust_app_list_events(events);
		if (nb_events < 0) {
			ret = LTTCOMM_UST_LIST_FAIL;
			goto error;
		}
		break;
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	return nb_events;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
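/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * decoding the "negative LTTCOMM code" convention used by the listing
 * commands. lttcomm_get_readable_code() is assumed from sessiond-comm.
 */
#if 0
	struct lttng_event *events = NULL;
	ssize_t nb_events;

	nb_events = cmd_list_tracepoints(LTTNG_DOMAIN_UST, &events);
	if (nb_events < 0) {
		/* -nb_events is an LTTCOMM_* error code */
		ERR("Listing failed: %s", lttcomm_get_readable_code(-nb_events));
	}
#endif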
/*
 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
 */
static ssize_t cmd_list_tracepoint_fields(int domain,
		struct lttng_event_field **fields)
{
	int ret;
	ssize_t nb_fields = 0;

	switch (domain) {
	case LTTNG_DOMAIN_UST:
		nb_fields = ust_app_list_event_fields(fields);
		if (nb_fields < 0) {
			ret = LTTCOMM_UST_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_KERNEL:
	default:	/* fall-through */
		ret = LTTCOMM_UND;
		goto error;
	}

	return nb_fields;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
/*
 * Command LTTNG_START_TRACE processed by the client thread.
 */
static int cmd_start_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;
	struct ltt_kernel_channel *kchan;

	/* Ease our life a bit ;) */
	ksession = session->kernel_session;
	usess = session->ust_session;

	if (session->enabled) {
		/* Already started. */
		ret = LTTCOMM_TRACE_ALREADY_STARTED;
		goto error;
	}

	session->enabled = 1;

	ret = setup_relayd(session);
	if (ret != LTTCOMM_OK) {
		ERR("Error setting up relayd for session %s", session->name);
		goto error;
	}

	/* Kernel tracing */
	if (ksession != NULL) {
		/* Open kernel metadata */
		if (ksession->metadata == NULL) {
			ret = kernel_open_metadata(ksession,
					ksession->consumer->dst.trace_path);
			if (ret < 0) {
				ret = LTTCOMM_KERN_META_FAIL;
				goto error;
			}
		}

		/* Open kernel metadata stream */
		if (ksession->metadata_stream_fd < 0) {
			ret = kernel_open_metadata_stream(ksession);
			if (ret < 0) {
				ERR("Kernel create metadata stream failed");
				ret = LTTCOMM_KERN_STREAM_FAIL;
				goto error;
			}
		}

		/* For each channel */
		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			if (kchan->stream_count == 0) {
				ret = kernel_open_channel_stream(kchan);
				if (ret < 0) {
					ret = LTTCOMM_KERN_STREAM_FAIL;
					goto error;
				}
				/* Update the stream global counter */
				ksession->stream_count_global += ret;
			}
		}

		/* Setup kernel consumer socket and send fds to it */
		ret = init_kernel_tracing(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* This starts the kernel tracing */
		ret = kernel_start_session(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* Quiescent wait after starting trace */
		kernel_wait_quiescent(kernel_tracer_fd);
	}

	/* Flag session that trace should start automatically */
	if (usess != NULL) {
		usess->start_trace = 1;

		ret = ust_app_start_trace_all(usess);
		if (ret < 0) {
			ret = LTTCOMM_UST_START_FAIL;
			goto error;
		}
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
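/*
 * Illustrative, compile-guarded sketch (not part of the original source):
 * client-side start. lttng_start_tracing() sends LTTNG_START_TRACE, which
 * the client thread dispatches to cmd_start_trace() above. The session
 * name is hypothetical.
 */
#if 0
	if (lttng_start_tracing("my-session") < 0) {
		ERR("Failed to start tracing for session my-session");
	}
#endif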
/*
 * Command LTTNG_STOP_TRACE processed by the client thread.
 */
static int cmd_stop_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_channel *kchan;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	ksession = session->kernel_session;
	usess = session->ust_session;

	if (!session->enabled) {
		ret = LTTCOMM_TRACE_ALREADY_STOPPED;
		goto error;
	}

	session->enabled = 0;

	/* Kernel tracer */
	if (ksession != NULL) {
		DBG("Stop kernel tracing");

		/* Flush metadata if it exists */
		if (ksession->metadata_stream_fd >= 0) {