/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "testpoint.h"
#define CONSUMERD_FILE	"lttng-consumerd"
const char default_home_dir[] = DEFAULT_HOME_DIR;
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;

const char *opt_tracing_group;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;		/* Set to 1 if the daemon is running as root */
static pid_t ppid;		/* Parent PID for --sig-parent option */
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];
/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration threads interact
 * with this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;

/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
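
/*
 * Illustrative sketch (not compiled): how the N-wakers / 1-waiter scheme is
 * used around ust_cmd_queue. The producer side mirrors
 * thread_registration_apps() and the consumer side mirrors
 * thread_dispatch_ust_registration() further down in this file.
 */
#if 0
/* Producer (one of N wakers): enqueue a command, then wake the dispatcher. */
cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
futex_nto1_wake(&ust_cmd_queue.futex);

/* Consumer (single waiter): prepare the futex, drain the queue, then wait. */
futex_nto1_prepare(&ust_cmd_queue.futex);
do {
	node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
	/* ... dispatch the dequeued node if non-NULL ... */
} while (node != NULL);
futex_nto1_wait(&ust_cmd_queue.futex);
#endif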
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd tries to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
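
/*
 * Illustrative sketch (not compiled): the check a command handler is expected
 * to make against these state flags before talking to a consumer, mirroring
 * the LTTNG_START_TRACE/LTTNG_STOP_TRACE validation in process_client_msg()
 * below.
 */
#if 0
if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
	ret = LTTNG_ERR_NO_KERNCONSUMERD;
	goto error;
}
#endif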
/* Used for the health monitoring of the session daemon. See health.h */
struct health_state health_thread_cmd;
struct health_state health_thread_app_manage;
struct health_state health_thread_app_reg;
struct health_state health_thread_kernel;

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
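
/*
 * Usage note (illustrative, the paths are assumptions): the build defaults
 * can be overridden at runtime without recompiling, e.g.
 *
 *   LTTNG_CONSUMERD64_BIN=/opt/lttng/bin/lttng-consumerd \
 *   LTTNG_CONSUMERD64_LIBDIR=/opt/lttng/lib64 lttng-sessiond
 */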
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_thread_poll_set(struct lttng_poll_event *events, int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
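
/*
 * Illustrative sketch (not compiled): the poll-loop skeleton shared by the
 * threads below. Each thread builds its set with create_thread_poll_set(),
 * waits, then checks every returned fd against the quit pipe first.
 */
#if 0
ret = create_thread_poll_set(&events, 2);
for (;;) {
	ret = lttng_poll_wait(&events, -1);
	for (i = 0; i < nb_fd; i++) {
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);
		if (check_thread_quit_pipe(pollfd, revents)) {
			goto exit;	/* Quit pipe triggered: terminate the thread. */
		}
		/* ... handle the thread-specific fds ... */
	}
}
#endif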
/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	struct group *grp;

	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
	if (!grp) {
		return -1;
	} else {
		return grp->gr_gid;
	}
}
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
static void cleanup(void)
{
	int ret;
	char *cmd;
	struct ltt_session *sess, *stmp;

	/* First thing first, stop all threads */
	utils_close_pipe(thread_quit_pipe);

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
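
/*
 * Background note (hedged, the waiter side lives outside this file):
 * applications that start before the session daemon block on the futex word
 * stored in this shared memory; writing `active` here is what releases them
 * so they can attempt registration on apps_unix_sock_path.
 */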
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
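
/*
 * Illustrative sketch (not compiled): how a list command is expected to use
 * setup_lttng_msg(), mirroring the LTTNG_LIST_TRACEPOINTS case in
 * process_client_msg() below -- size the payload, then memcpy into
 * llm->payload.
 */
#if 0
ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
if (ret < 0) {
	goto setup_error;
}
memcpy(cmd_ctx->llm->payload, events, sizeof(struct lttng_event) * nb_events);
#endif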
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						/* Code flow error */
						assert(socket->fd >= 0);

						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							goto error;
						}
					}
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	session_lock_list();

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}

	session_unlock_list();
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so zero it before the loop.
	 */
	memset(&events, 0, sizeof(events));

	if (testpoint(thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_kernel);

	if (testpoint(thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update(&health_thread_kernel);

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = create_thread_poll_set(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll infinite value of time */
	restart:
		health_poll_update(&health_thread_kernel);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_kernel);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_kernel);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				do {
					ret = read(kernel_poll_pipe[0], &tmp, 1);
				} while (ret < 0 && errno == EINTR);
				/*
				 * Ret value is useless here, if this pipe gets any actions an
				 * update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
				}
				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);

	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;

error_poll_create:
error_testpoint:
	if (err) {
		health_error(&health_thread_kernel);
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_exit(&health_thread_kernel);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the pthread condition of the consumer data so that the thread
 * waiting on it is woken up.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
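
/*
 * Illustrative sketch (not compiled): the matching waiter side, mirroring
 * spawn_consumer_thread() below -- wait on the same condition/mutex pair
 * until consumer_thread_is_ready changes or a timeout expires.
 */
#if 0
pthread_mutex_lock(&consumer_data->cond_mutex);
while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
	ret = pthread_cond_timedwait(&consumer_data->cond,
			&consumer_data->cond_mutex, &timeout);
}
pthread_mutex_unlock(&consumer_data->cond_mutex);
#endif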
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	/*
	 * Since the consumer thread can be spawned at any moment in time, we init
	 * the health to a poll status (1, which is a valid health over time).
	 * When the thread starts, we update here the health to a "code" path being
	 * an even value so this thread, when reaching a poll wait, does not
	 * trigger an error with an even value.
	 *
	 * Here is the use case we avoid.
	 *
	 * +1: the first poll update during initialization (main())
	 * +2 * x: multiple code update once in this thread.
	 * +1: poll wait in this thread (being a good health state).
	 * == even number which after the wait period shows as a bad health.
	 *
	 * In a nutshell, the following poll update to the health state brings back
	 * the state to an even value meaning a code path.
	 */
	health_poll_update(&consumer_data->health);

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_update(&consumer_data->health);

	if (testpoint(thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update(&consumer_data->health);

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			PERROR("consumer connect");
			goto error;
		}
		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	health_code_update(&consumer_data->health);

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

exit:
	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error(&consumer_data->health);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&consumer_data->health);
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	if (testpoint(thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_app_manage);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update(&health_thread_app_manage);

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_manage);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_manage);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_app_manage);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					do {
						ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update(&health_thread_app_manage);

					/* Register application to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						goto error;
					} else if (ret < 0) {
						break;
					}

					health_code_update(&health_thread_app_manage);

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We only monitor the error events of the socket. This
						 * thread does not handle any incoming data from UST.
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock,
								LPOLLERR | LPOLLHUP | LPOLLRDHUP);
						if (ret < 0) {
							goto error;
						}

						/* Set socket timeout for both receiving and sending */
						(void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
								app_socket_timeout);
						(void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
								app_socket_timeout);

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update(&health_thread_app_manage);
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error(&health_thread_app_manage);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&health_thread_app_manage);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			if (apps_cmd_pipe[1] >= 0) {
				ret = write(apps_cmd_pipe[1], ust_cmd,
						sizeof(struct ust_command));
				if (ret < 0) {
					PERROR("write apps cmd pipe");
					if (errno == EBADF) {
						/*
						 * We can't inform the application thread to process
						 * registration. We will exit or else application
						 * registration will not occur and tracing will never
						 * start.
						 */
						goto error;
					}
				}
			} else {
				/* Application manager thread is not available. */
				ret = close(ust_cmd->sock);
				if (ret < 0) {
					PERROR("close ust_cmd sock");
				}
			}
			free(ust_cmd);
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}

error:
	DBG("Dispatch thread dying");
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	if (testpoint(thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problem for already\n"
				"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_reg);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_reg);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update(&health_thread_app_reg);

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update(&health_thread_app_reg);
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}

					health_code_update(&health_thread_app_reg);

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	if (err) {
		health_error(&health_thread_app_reg);
		ERR("Health error occurred in %s", __func__);
	}

	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	health_exit(&health_thread_app_reg);

	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	if (ret == ETIMEDOUT) {
		/*
		 * Call has timed out so we kill the kconsumerd_thread and return
		 * an error.
		 */
		ERR("Condition timed out. The consumer thread was never ready.");
		ret = pthread_cancel(consumer_data->thread);
		if (ret < 0) {
			PERROR("pthread_cancel consumer thread");
		}
	} else if (ret != 0) {
		PERROR("pthread_cond_wait failed consumer thread");
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;
	int ret;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}
	return pthread_join(consumer_data->thread, &status);
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}

error:
	return ret;
}
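
/*
 * Worked example (values are assumptions, for illustration only): with
 * consumerd64_libdir set to "/opt/lttng/lib64" and an inherited
 * LD_LIBRARY_PATH of "/usr/local/lib", the string built above and passed to
 * putenv() is "LD_LIBRARY_PATH=/opt/lttng/lib64:/usr/local/lib", so the
 * 64-bit consumerd resolves its shared libraries from the matching libdir
 * before falling back to the system paths.
 */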
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret, err;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created socket on error. */
	if (consumer_data->err_sock >= 0) {
		err = close(consumer_data->err_sock);
		if (err < 0) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
/*
 * Compute health status of each consumer. If one of them is zero (bad
 * state), we return 0.
 */
static int check_consumer_health(void)
{
	int ret;

	ret = health_check_state(&kconsumer_data.health) &&
			health_check_state(&ustconsumer32_data.health) &&
			health_check_state(&ustconsumer64_data.health);

	DBG3("Health consumer check %d", ret);

	return ret;
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;

	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
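
/*
 * Usage note (grounded in process_client_msg() below): this init is lazy --
 * kernel_tracer_fd stays at -1 until the first client command that targets
 * LTTNG_DOMAIN_KERNEL, at which point init_kernel_tracer() is retried, so the
 * lttng modules only need to be loadable when kernel tracing is actually
 * requested.
 */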
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
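
/*
 * Worked example (illustrative, the subdir value is an assumption): for a
 * session whose consumer subdir is "/mysession", the kernel-domain copy
 * appends DEFAULT_KERNEL_TRACE_DIR to it while the UST copy appends
 * DEFAULT_UST_TRACE_DIR, so kernel and UST traces end up in separate
 * per-domain subdirectories of the same session output.
 */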
/*
 * Create an UST session and add it to the session ust list.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->path, session->id, domain);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
2118 static int create_kernel_session(struct ltt_session
*session
)
2122 DBG("Creating kernel session");
2124 ret
= kernel_create_session(session
, kernel_tracer_fd
);
2126 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
2130 /* Code flow safety */
2131 assert(session
->kernel_session
);
2133 /* Copy session output to the newly created Kernel session */
2134 ret
= copy_session_consumer(LTTNG_DOMAIN_KERNEL
, session
);
2135 if (ret
!= LTTNG_OK
) {
2139 /* Create directory(ies) on local filesystem. */
2140 if (session
->kernel_session
->consumer
->type
== CONSUMER_DST_LOCAL
&&
2141 strlen(session
->kernel_session
->consumer
->dst
.trace_path
) > 0) {
2142 ret
= run_as_mkdir_recursive(
2143 session
->kernel_session
->consumer
->dst
.trace_path
,
2144 S_IRWXU
| S_IRWXG
, session
->uid
, session
->gid
);
2146 if (ret
!= -EEXIST
) {
2147 ERR("Trace directory creation error");
2153 session
->kernel_session
->uid
= session
->uid
;
2154 session
->kernel_session
->gid
= session
->gid
;
2159 trace_kernel_destroy_session(session
->kernel_session
);
2160 session
->kernel_session
= NULL
;
/*
 * Count number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
		int *sock_error)
{
	int ret = LTTNG_OK;
	int need_tracing_session = 1;
	int need_domain;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_DESTROY_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_START_TRACE:
	case LTTNG_STOP_TRACE:
	case LTTNG_DATA_PENDING:
		need_domain = 0;
		break;
	default:
		need_domain = 1;
	}

	if (opt_no_kernel && need_domain
			&& cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
		} else {
			ret = LTTNG_ERR_KERN_NA;
		}
		goto error;
	}

	/* Deny register consumer if we already have a spawned consumer. */
	if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
		pthread_mutex_lock(&kconsumer_data.pid_mutex);
		if (kconsumer_data.pid > 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			goto error;
		}
		pthread_mutex_unlock(&kconsumer_data.pid_mutex);
	}

	/*
	 * Check for commands that don't need to allocate a returned payload. We do
	 * this here so we don't have to make the call for no payload at each
	 * command.
	 */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg(cmd_ctx, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}

	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CALIBRATE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			if (cmd_ctx->lsm->session.name != NULL) {
				ret = LTTNG_ERR_SESS_NOT_FOUND;
			} else {
				/* If no session name specified */
				ret = LTTNG_ERR_SELECT_SESS;
			}
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}

	if (!need_domain) {
		goto skip_domain;
	}

	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
					cmd_ctx->session->start_consumer) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned so we need to add the socket to
			 * the consumer output of the session if exist.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (consumerd64_bin[0] != '\0' &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
					cmd_ctx->session->start_consumer) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (consumerd32_bin[0] != '\0' &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
					cmd_ctx->session->start_consumer) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}
skip_domain:

	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		}
	}

	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}
2480 /* Process by command type */
2481 switch (cmd_ctx
->lsm
->cmd_type
) {
2482 case LTTNG_ADD_CONTEXT
:
2484 ret
= cmd_add_context(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2485 cmd_ctx
->lsm
->u
.context
.channel_name
,
2486 &cmd_ctx
->lsm
->u
.context
.ctx
, kernel_poll_pipe
[1]);
2489 case LTTNG_DISABLE_CHANNEL
:
2491 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2492 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2495 case LTTNG_DISABLE_EVENT
:
2497 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2498 cmd_ctx
->lsm
->u
.disable
.channel_name
,
2499 cmd_ctx
->lsm
->u
.disable
.name
);
2502 case LTTNG_DISABLE_ALL_EVENT
:
2504 DBG("Disabling all events");
2506 ret
= cmd_disable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2507 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2510 case LTTNG_DISABLE_CONSUMER
:
2512 ret
= cmd_disable_consumer(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
);
	case LTTNG_ENABLE_CHANNEL:
	{
		ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_CONSUMER:
	{
		/*
		 * XXX: 0 means that this URI should be applied on the session.
		 * Should be a DOMAIN enum.
		 */
		ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}

		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_UST, cmd_ctx->session);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session);
			}
		}
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_ALL_EVENT:
	{
		DBG("Enabling all events");

		ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
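
	/*
	 * Note on the reply layout used by the LIST_* commands: setup_lttng_msg()
	 * sizes cmd_ctx->llm so that a fixed header is followed by the payload
	 * requested here, and the raw array is then copied into llm->payload.
	 * The client presumably recovers the element count from the payload size
	 * divided by sizeof(struct lttng_event).
	 */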
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the field list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);

		/* Copy field list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			ret = LTTNG_ERR_SESSION_FAIL;
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				nb_uri, uris);
		if (ret != LTTNG_OK) {
			goto error;
		}

		/*
		 * XXX: 0 means that this URI should be applied on the session.
		 * Should be a DOMAIN enum.
		 */
		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
						nb_uri, uris);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session, nb_uri, uris);
			}
		}

		free(uris);
		break;
	}
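
	/*
	 * Note: commands carrying variable-length data (SET_CONSUMER_URI above,
	 * CREATE_SESSION and ENABLE_EVENT_WITH_FILTER below) share the same
	 * two-step protocol: the fixed-size lttcomm_session_msg announces the
	 * payload size (u.uri.size here), then the payload itself is read from
	 * the same client socket with lttcomm_recv_unix_sock() before the
	 * command is dispatched.
	 */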
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds);

		free(uris);
		break;
	}
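
	/*
	 * Note: the dtype check above rejects a session created with exactly one
	 * URI that is not a local path, presumably because a network output
	 * needs both a control and a data URI to be usable.
	 */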
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));

		/* Copy domain list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		ssize_t nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));

		/* Copy channel list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Fill the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
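
	/*
	 * Note: the session list stays locked from lttng_sessions_count() until
	 * after cmd_list_lttng_sessions() so the number of sessions cannot change
	 * between sizing the reply payload and filling it.
	 */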
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_ENABLE_EVENT_WITH_FILTER:
	{
		struct lttng_filter_bytecode *bytecode;

		if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
		if (bytecode == NULL) {
			ret = LTTNG_ERR_FILTER_NOMEM;
			goto error;
		}

		/* Receive var. len. data */
		DBG("Receiving var len data from client ...");
		ret = lttcomm_recv_unix_sock(sock, bytecode,
				cmd_ctx->lsm->u.enable.bytecode_len);
		if (ret <= 0) {
			DBG("Nothing recv() from client var len data... continuing");
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (bytecode->len + sizeof(*bytecode)
				!= cmd_ctx->lsm->u.enable.bytecode_len) {
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
		break;
	}
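
	/*
	 * Note: the filter bytecode declares its own length in its header, so the
	 * check above cross-validates that self-declared size (bytecode->len plus
	 * the header) against the length announced in the command; any mismatch
	 * is treated as an invalid filter rather than trusting either value.
	 */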
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}

error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}

	return ret;
}
/*
 * Thread managing the health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and the health socket.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the health check socket to the poll set */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the health check socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way,
		 * the show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		switch (msg.component) {
		case LTTNG_HEALTH_CMD:
			reply.ret_code = health_check_state(&health_thread_cmd);
			break;
		case LTTNG_HEALTH_APP_MANAGE:
			reply.ret_code = health_check_state(&health_thread_app_manage);
			break;
		case LTTNG_HEALTH_APP_REG:
			reply.ret_code = health_check_state(&health_thread_app_reg);
			break;
		case LTTNG_HEALTH_KERNEL:
			reply.ret_code = health_check_state(&health_thread_kernel);
			break;
		case LTTNG_HEALTH_CONSUMER:
			reply.ret_code = check_consumer_health();
			break;
		case LTTNG_HEALTH_ALL:
			reply.ret_code =
				health_check_state(&health_thread_app_manage) &&
				health_check_state(&health_thread_app_reg) &&
				health_check_state(&health_thread_cmd) &&
				health_check_state(&health_thread_kernel) &&
				check_consumer_health();
			break;
		default:
			reply.ret_code = LTTNG_ERR_UND;
			break;
		}

		/*
		 * Flip ret value since 0 is a success and 1 indicates a bad health for
		 * the client where in the sessiond it is the opposite. Again, this is
		 * just to make things easier for us poor developers who enjoy a lot of
		 * pain.
		 */
		if (reply.ret_code == 0 || reply.ret_code == 1) {
			reply.ret_code = !reply.ret_code;
		}

		DBG2("Health check return value %d", reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
	}
	if (new_sock >= 0) {
		ret = close(new_sock);
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
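
/*
 * Illustrative sketch (not part of the daemon): a minimal client for the
 * health socket served by thread_manage_health() above. It only relies on the
 * lttcomm_health_msg/lttcomm_health_data exchange visible in this file;
 * health_sock_path stands for the daemon's health_unix_sock_path, error
 * checking is left out, and any lttcomm_health_msg field beyond 'component'
 * is an assumption.
 *
 *	int fd = socket(PF_UNIX, SOCK_STREAM, 0);
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	strncpy(sun.sun_path, health_sock_path, sizeof(sun.sun_path) - 1);
 *	connect(fd, (struct sockaddr *) &sun, sizeof(sun));
 *
 *	struct lttcomm_health_msg msg = { .component = LTTNG_HEALTH_CMD };
 *	send(fd, &msg, sizeof(msg), 0);
 *
 *	struct lttcomm_health_data reply;
 *	recv(fd, &reply, sizeof(reply), 0);
 *	close(fd);
 *
 * After the flip performed by the daemon, reply.ret_code == 0 means the
 * queried component is healthy from the client's point of view.
 */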
/*
 * This thread manages all client requests on the client unix socket used for
 * communication with the lttng command line tool.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	if (testpoint(thread_manage_clients)) {
		goto error_testpoint;
	}

	health_code_update(&health_thread_cmd);

	ret = lttcomm_listen_unix_sock(client_sock);

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the client socket to the poll set */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify the parent pid that we are ready to accept commands on the
	 * client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	if (testpoint(thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update(&health_thread_cmd);

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_update(&health_thread_cmd);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_cmd);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_cmd);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the client socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update(&health_thread_cmd);

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update(&health_thread_cmd);

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * needed information for the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			if (sock_error) {
				ret = close(sock);
				sock = -1;
			}
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update(&health_thread_cmd);
	}

exit:
error:
	if (sock >= 0) {
		ret = close(sock);
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_create_poll:
error_testpoint:
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
	}

	if (err) {
		health_error(&health_thread_cmd);
		ERR("Health error occurred in %s", __func__);
	}

	health_exit(&health_thread_cmd);

	DBG("Client thread dying");

	rcu_unregister_thread();
	return NULL;
}
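
/*
 * Note on the client protocol handled above: each accepted connection carries
 * a single command. The client sends a fixed-size struct lttcomm_session_msg
 * (plus optional variable-length data for some commands), the daemon answers
 * with the lttcomm_lttng_msg built by process_client_msg(), and the socket is
 * closed at "End of transmission", so the lttng command line tool presumably
 * reconnects for every command it issues.
 */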
/*
 * Print usage on stderr.
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGUSR1 to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}
/*
 * Parse the daemon's command line arguments.
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			opt_tracing_group = optarg;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			lttng_opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			lttng_opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		default:
			/*
			 * Unknown option or other error.
			 * Error is printed by getopt, just return.
			 */
			return -1;
		}
	}

	return 0;
}
/*
 * Create the two sockets needed by the daemon.
 *		apps_sock - The communication socket for all UST apps.
 *		client_sock - The communication socket for the CLI tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
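
/*
 * Note: the two sockets deliberately get different modes: the client socket
 * is 660 so that only root and the tracing group (see set_permissions() below)
 * can drive the daemon, while the application socket is 666, presumably so
 * that any instrumented process, whatever its user, can register itself.
 */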
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there ? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	ret = allowed_group();
	if (ret < 0) {
		WARN("No tracing group detected");
		ret = 0;
		goto end;
	}

	gid = ret;

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/* Ensure tracing group can search the run dir */
	ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

end:
	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the consumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	/* Create the consumer error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon.
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}
/*
 * Setup signal handler for :
 *		SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
/*
 * main
 */
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path, *env_app_timeout;

	init_kernel_workarounds();

	rcu_register_thread();

	setup_consumerd_path();

	/* Parse arguments */
	progname = argv[0];
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	/* Daemonize */
	if (opt_daemon) {
		int i;

		/*
		 * fork
		 * child: setsid, close FD 0, 1, 2, chdir /
		 * parent: exit (if fork is successful)
		 */
		ret = daemon(0, 0);
		if (ret < 0) {
			PERROR("daemon");
			goto error;
		}
		/*
		 * We are in the child. Make sure all other file
		 * descriptors are closed, in case we are called with
		 * more opened file descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			ret = -EPERM;
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			ret = -ENOMEM;
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
		}
	}
	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32 bits consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64 bits consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);
	/*
	 * See if a daemon already exists.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	ust_app_ht_alloc();

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
			goto exit;
		}
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/*
	 * Get session list pointer. This pointer MUST NOT be free(). This list is
	 * statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();
	/* Init all health thread counters. */
	health_init(&health_thread_cmd);
	health_init(&health_thread_kernel);
	health_init(&health_thread_app_manage);
	health_init(&health_thread_app_reg);

	/*
	 * Init health counters of the consumer thread. We do a quick hack here so
	 * that the state of the consumer health is fine even if the thread is not
	 * started. Once the thread starts, the health state is updated with a poll
	 * value to set a health code path. This is simply to ease our life and has
	 * no cost whatsoever.
	 */
	health_init(&kconsumer_data.health);
	health_poll_update(&kconsumer_data.health);
	health_init(&ustconsumer32_data.health);
	health_poll_update(&ustconsumer32_data.health);
	health_init(&ustconsumer64_data.health);
	health_poll_update(&ustconsumer64_data.health);

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}
	/* Create thread to manage the health check socket */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit;
	}

	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret != 0) {
			PERROR("pthread_create kernel");
			goto exit;
		}
		ret = pthread_join(kernel_thread, &status);
		if (ret != 0) {
			PERROR("pthread_join");
			goto error;	/* join error, exit without cleanup */
		}
	}

	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
error:
	return ret;
}