Simply close socket when application registration is refused
[lttng-tools.git] / lttng-sessiond / main.c
1/*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; only version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#define _GNU_SOURCE
20#include <fcntl.h>
21#include <getopt.h>
22#include <grp.h>
23#include <limits.h>
24#include <pthread.h>
25#include <semaphore.h>
26#include <signal.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
30#include <sys/mman.h>
31#include <sys/mount.h>
32#include <sys/resource.h>
33#include <sys/socket.h>
34#include <sys/stat.h>
35#include <sys/types.h>
36#include <sys/wait.h>
37#include <urcu/futex.h>
38#include <unistd.h>
39#include <config.h>
40
41#include <lttng-consumerd.h>
42#include <lttng-sessiond-comm.h>
43#include <lttng/lttng-consumer.h>
44
45#include <lttngerr.h>
46
47#include "channel.h"
48#include "compat/poll.h"
49#include "context.h"
50#include "event.h"
51#include "futex.h"
52#include "hashtable.h"
53#include "kernel-ctl.h"
54#include "lttng-sessiond.h"
55#include "shm.h"
56#include "ust-app.h"
57#include "ust-ctl.h"
58#include "utils.h"
59
60struct consumer_data {
61 enum lttng_consumer_type type;
62
63 pthread_t thread; /* Worker thread interacting with the consumer */
64 sem_t sem;
65
66 /* Mutex to control consumerd pid assignation */
67 pthread_mutex_t pid_mutex;
68 pid_t pid;
69
70 int err_sock;
71 int cmd_sock;
72
73 /* consumer error and command Unix socket path */
74 char err_unix_sock_path[PATH_MAX];
75 char cmd_unix_sock_path[PATH_MAX];
76};
77
78/* Const values */
79const char default_home_dir[] = DEFAULT_HOME_DIR;
80const char default_tracing_group[] = LTTNG_DEFAULT_TRACING_GROUP;
81const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
82const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
83
84/* Variables */
85int opt_verbose; /* Not static for lttngerr.h */
86int opt_verbose_consumer; /* Not static for lttngerr.h */
87int opt_quiet; /* Not static for lttngerr.h */
88
89const char *progname;
90const char *opt_tracing_group;
91static int opt_sig_parent;
92static int opt_daemon;
93static int is_root; /* Set to 1 if the daemon is running as root */
94static pid_t ppid; /* Parent PID for --sig-parent option */
95
96/* Consumer daemon specific control data */
97static struct consumer_data kconsumer_data = {
98 .type = LTTNG_CONSUMER_KERNEL,
99};
100static struct consumer_data ustconsumer_data = {
101 .type = LTTNG_CONSUMER_UST,
102};
103
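/* Set to 1 by stop_threads() so the dispatch thread exits its loop */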
104static int dispatch_thread_exit;
105
106/* Global application Unix socket path */
107static char apps_unix_sock_path[PATH_MAX];
108/* Global client Unix socket path */
109static char client_unix_sock_path[PATH_MAX];
110/* global wait shm path for UST */
111static char wait_shm_path[PATH_MAX];
112
113/* Sockets and FDs */
114static int client_sock;
115static int apps_sock;
116static int kernel_tracer_fd;
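/*
 * Pipe used to wake up the kernel thread; a write on it tells the thread to
 * update its poll set (e.g. when a new kernel channel is created).
 */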
117static int kernel_poll_pipe[2];
118
119/*
120 * Quit pipe for all threads. This permits a single cancellation point
121 * for all threads when receiving an event on the pipe.
122 */
123static int thread_quit_pipe[2];
124
125/*
126 * This pipe is used to inform the thread managing application communication
127 * that a command is queued and ready to be processed.
128 */
129static int apps_cmd_pipe[2];
130
131/* Pthread, Mutexes and Semaphores */
132static pthread_t apps_thread;
133static pthread_t reg_apps_thread;
134static pthread_t client_thread;
135static pthread_t kernel_thread;
136static pthread_t dispatch_thread;
137
138
139/*
140 * UST registration command queue. This queue is tied to a futex and uses an
141 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h.
142 *
143 * The thread_manage_apps and thread_dispatch_ust_registration threads
144 * interact with this queue and the wait/wake scheme.
145 */
146static struct ust_cmd_queue ust_cmd_queue;
147
148/*
149 * Pointer initialized before thread creation.
150 *
151 * This points to the tracing session list containing the session count and a
152 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
153 * MUST NOT be taken if you call a public function in session.c.
154 *
155 * The lock is nested inside the structure: session_list_ptr->lock. Please use
156 * session_lock_list and session_unlock_list for lock acquisition.
157 */
158static struct ltt_session_list *session_list_ptr;
159
160int ust_consumer_fd;
161
162/*
163 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
164 */
165static int create_thread_poll_set(struct lttng_poll_event *events,
166 unsigned int size)
167{
168 int ret;
169
170 if (events == NULL || size == 0) {
171 ret = -1;
172 goto error;
173 }
174
175 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
176 if (ret < 0) {
177 goto error;
178 }
179
180 /* Add quit pipe */
181 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
182 if (ret < 0) {
183 goto error;
184 }
185
186 return 0;
187
188error:
189 return ret;
190}
191
192/*
193 * Check if the thread quit pipe was triggered.
194 *
195 * Return 1 if it was triggered else 0;
196 */
197static int check_thread_quit_pipe(int fd, uint32_t events)
198{
199 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
200 return 1;
201 }
202
203 return 0;
204}
205
206/*
207 * Remove modules in reverse load order.
208 */
209static int modprobe_remove_kernel_modules(void)
210{
211 int ret = 0, i;
212 char modprobe[256];
213
214 for (i = ARRAY_SIZE(kernel_modules_list) - 1; i >= 0; i--) {
215 ret = snprintf(modprobe, sizeof(modprobe),
216 "/sbin/modprobe -r -q %s",
217 kernel_modules_list[i].name);
218 if (ret < 0) {
219 perror("snprintf modprobe -r");
220 goto error;
221 }
222 modprobe[sizeof(modprobe) - 1] = '\0';
223 ret = system(modprobe);
224 if (ret == -1) {
225 ERR("Unable to launch modprobe -r for module %s",
226 kernel_modules_list[i].name);
227 } else if (kernel_modules_list[i].required
228 && WEXITSTATUS(ret) != 0) {
229 ERR("Unable to remove module %s",
230 kernel_modules_list[i].name);
231 } else {
232 DBG("Modprobe removal successful %s",
233 kernel_modules_list[i].name);
234 }
235 }
236
237error:
238 return ret;
239}
240
241/*
242 * Return group ID of the tracing group or -1 if not found.
243 */
244static gid_t allowed_group(void)
245{
246 struct group *grp;
247
248 if (opt_tracing_group) {
249 grp = getgrnam(opt_tracing_group);
250 } else {
251 grp = getgrnam(default_tracing_group);
252 }
253 if (!grp) {
254 return -1;
255 } else {
256 return grp->gr_gid;
257 }
258}
259
260/*
261 * Init thread quit pipe.
262 *
263 * Return -1 on error or 0 if all pipes are created.
264 */
265static int init_thread_quit_pipe(void)
266{
267 int ret;
268
269 ret = pipe2(thread_quit_pipe, O_CLOEXEC);
270 if (ret < 0) {
271 perror("thread quit pipe");
272 goto error;
273 }
274
275error:
276 return ret;
277}
278
279/*
280 * Complete teardown of a kernel session. This frees all data structures
281 * related to a kernel session and updates the counter.
282 */
283static void teardown_kernel_session(struct ltt_session *session)
284{
285 if (session->kernel_session != NULL) {
286 DBG("Tearing down kernel session");
287
288 /*
289 * If a custom kernel consumer was registered, close the socket before
290 * tearing down the complete kernel session structure
291 */
292 if (session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
293 lttcomm_close_unix_sock(session->kernel_session->consumer_fd);
294 }
295
296 trace_kernel_destroy_session(session->kernel_session);
297 /* Extra precaution */
298 session->kernel_session = NULL;
299 }
300}
301
302/*
303 * Complete teardown of the UST session. This will free everything in its
304 * path and destroy the core essence of the UST session :)
305 */
306static void teardown_ust_session(struct ltt_session *session)
307{
308 int ret;
309
310 DBG("Tearing down UST session(s)");
311
312 ret = ust_app_destroy_trace_all(session->ust_session);
313 if (ret) {
314 ERR("Error in ust_app_destroy_trace_all");
315 }
316 trace_ust_destroy_session(session->ust_session);
317}
318
319/*
320 * Stop all threads by closing the thread quit pipe.
321 */
322static void stop_threads(void)
323{
324 int ret;
325
326 /* Stopping all threads */
327 DBG("Terminating all threads");
328 ret = notify_thread_pipe(thread_quit_pipe[1]);
329 if (ret < 0) {
330 ERR("write error on thread quit pipe");
331 }
332
333 /* Dispatch thread */
334 dispatch_thread_exit = 1;
335 futex_nto1_wake(&ust_cmd_queue.futex);
336}
337
338/*
339 * Cleanup the daemon
340 */
341static void cleanup(void)
342{
343 int ret;
344 char *cmd;
345 struct ltt_session *sess, *stmp;
346
347 DBG("Cleaning up");
348
349 if (is_root) {
350 DBG("Removing %s directory", LTTNG_RUNDIR);
351 ret = asprintf(&cmd, "rm -rf " LTTNG_RUNDIR);
352 if (ret < 0) {
353 ERR("asprintf failed. Something is really wrong!");
354 }
355
356 /* Remove lttng run directory */
357 ret = system(cmd);
358 if (ret < 0) {
359 ERR("Unable to clean " LTTNG_RUNDIR);
360 }
361 }
362
363 DBG("Cleaning up all session");
364
365 /* Destroy session list mutex */
366 if (session_list_ptr != NULL) {
367 pthread_mutex_destroy(&session_list_ptr->lock);
368
369 /* Clean up ALL sessions */
370 cds_list_for_each_entry_safe(sess, stmp,
371 &session_list_ptr->head, list) {
372 teardown_kernel_session(sess);
373 teardown_ust_session(sess);
374 free(sess);
375 }
376 }
377
378 DBG("Closing all UST sockets");
379 ust_app_clean_list();
380
381 pthread_mutex_destroy(&kconsumer_data.pid_mutex);
382
383 DBG("Closing kernel fd");
384 close(kernel_tracer_fd);
385
386 if (is_root) {
387 DBG("Unloading kernel modules");
388 modprobe_remove_kernel_modules();
389 }
390
391 close(thread_quit_pipe[0]);
392 close(thread_quit_pipe[1]);
393
394 /* <fun> */
395 MSG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
396 "Matthew, BEET driven development works!%c[%dm",
397 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
398 /* </fun> */
399}
400
401/*
402 * Send data on a unix socket using the liblttsessiondcomm API.
403 *
404 * Return lttcomm error code.
405 */
406static int send_unix_sock(int sock, void *buf, size_t len)
407{
408 /* Check valid length */
409 if (len <= 0) {
410 return -1;
411 }
412
413 return lttcomm_send_unix_sock(sock, buf, len);
414}
415
416/*
417 * Free memory of a command context structure.
418 */
419static void clean_command_ctx(struct command_ctx **cmd_ctx)
420{
421 DBG("Clean command context structure");
422 if (*cmd_ctx) {
423 if ((*cmd_ctx)->llm) {
424 free((*cmd_ctx)->llm);
425 }
426 if ((*cmd_ctx)->lsm) {
427 free((*cmd_ctx)->lsm);
428 }
429 free(*cmd_ctx);
430 *cmd_ctx = NULL;
431 }
432}
433
434/*
435 * Send all stream fds of kernel channel to the consumer.
436 */
437static int send_kconsumer_channel_streams(struct consumer_data *consumer_data,
438 int sock, struct ltt_kernel_channel *channel)
439{
440 int ret;
441 struct ltt_kernel_stream *stream;
442 struct lttcomm_consumer_msg lkm;
443
444 DBG("Sending streams of channel %s to kernel consumer",
445 channel->channel->name);
446
447 /* Send channel */
448 lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
449 lkm.u.channel.channel_key = channel->fd;
450 lkm.u.channel.max_sb_size = channel->channel->attr.subbuf_size;
451 lkm.u.channel.mmap_len = 0; /* for kernel */
452 DBG("Sending channel %d to consumer", lkm.u.channel.channel_key);
453 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
454 if (ret < 0) {
455 perror("send consumer channel");
456 goto error;
457 }
458
459 /* Send streams: for each one, send its descriptor message then pass the stream fd itself over the unix socket */
460 cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
461 if (!stream->fd) {
462 continue;
463 }
464 lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
465 lkm.u.stream.channel_key = channel->fd;
466 lkm.u.stream.stream_key = stream->fd;
467 lkm.u.stream.state = stream->state;
468 lkm.u.stream.output = channel->channel->attr.output;
469 lkm.u.stream.mmap_len = 0; /* for kernel */
470 strncpy(lkm.u.stream.path_name, stream->pathname, PATH_MAX - 1);
471 lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
472 DBG("Sending stream %d to consumer", lkm.u.stream.stream_key);
473 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
474 if (ret < 0) {
475 perror("send consumer stream");
476 goto error;
477 }
478 ret = lttcomm_send_fds_unix_sock(sock, &stream->fd, 1);
479 if (ret < 0) {
480 perror("send consumer stream ancillary data");
481 goto error;
482 }
483 }
484
485 DBG("consumer channel streams sent");
486
487 return 0;
488
489error:
490 return ret;
491}
492
493/*
494 * Send all stream fds of the kernel session to the consumer.
495 */
496static int send_kconsumer_session_streams(struct consumer_data *consumer_data,
497 struct ltt_kernel_session *session)
498{
499 int ret;
500 struct ltt_kernel_channel *chan;
501 struct lttcomm_consumer_msg lkm;
502 int sock = session->consumer_fd;
503
504 DBG("Sending metadata stream fd");
505
506 /* Extra protection. It's NOT supposed to be set to 0 at this point */
507 if (session->consumer_fd == 0) {
508 session->consumer_fd = consumer_data->cmd_sock;
509 }
510
511 if (session->metadata_stream_fd != 0) {
512 /* Send metadata channel fd */
513 lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
514 lkm.u.channel.channel_key = session->metadata->fd;
515 lkm.u.channel.max_sb_size = session->metadata->conf->attr.subbuf_size;
516 lkm.u.channel.mmap_len = 0; /* for kernel */
517 DBG("Sending metadata channel %d to consumer", lkm.u.stream.stream_key);
518 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
519 if (ret < 0) {
520 perror("send consumer channel");
521 goto error;
522 }
523
524 /* Send metadata stream fd */
525 lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
526 lkm.u.stream.channel_key = session->metadata->fd;
527 lkm.u.stream.stream_key = session->metadata_stream_fd;
528 lkm.u.stream.state = LTTNG_CONSUMER_ACTIVE_STREAM;
529 lkm.u.stream.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
530 lkm.u.stream.mmap_len = 0; /* for kernel */
531 strncpy(lkm.u.stream.path_name, session->metadata->pathname, PATH_MAX - 1);
532 lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
533 DBG("Sending metadata stream %d to consumer", lkm.u.stream.stream_key);
534 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
535 if (ret < 0) {
536 perror("send consumer stream");
537 goto error;
538 }
539 ret = lttcomm_send_fds_unix_sock(sock, &session->metadata_stream_fd, 1);
540 if (ret < 0) {
541 perror("send consumer stream");
542 goto error;
543 }
544 }
545
546 cds_list_for_each_entry(chan, &session->channel_list.head, list) {
547 ret = send_kconsumer_channel_streams(consumer_data, sock, chan);
548 if (ret < 0) {
549 goto error;
550 }
551 }
552
553 DBG("consumer fds (metadata and channel streams) sent");
554
555 return 0;
556
557error:
558 return ret;
559}
560
561/*
562 * Notify UST applications using the shm mmap futex.
563 */
564static int notify_ust_apps(int active)
565{
566 char *wait_shm_mmap;
567
568 DBG("Notifying applications of session daemon state: %d", active);
569
570 /* See shm.c for this call implying mmap, shm and futex calls */
571 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
572 if (wait_shm_mmap == NULL) {
573 goto error;
574 }
575
576 /* Wake waiting process */
577 futex_wait_update((int32_t *) wait_shm_mmap, active);
578
579 /* Apps notified successfully */
580 return 0;
581
582error:
583 return -1;
584}
585
586/*
587 * Setup the outgoing data buffer for the response (llm) by allocating the
588 * right amount of memory and copying the original information from the lsm
589 * structure.
590 *
591 * Return the size of the payload buffer that follows the message header.
592 */
593static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
594{
595 int ret, buf_size;
596
597 buf_size = size;
598
599 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
600 if (cmd_ctx->llm == NULL) {
601 perror("zmalloc");
602 ret = -ENOMEM;
603 goto error;
604 }
605
606 /* Copy common data */
607 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
608 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
609
610 cmd_ctx->llm->data_size = size;
611 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
612
613 return buf_size;
614
615error:
616 return ret;
617}
618
619/*
620 * Update the kernel poll set with all channel fds available over all tracing
621 * sessions. Add the wakeup pipe at the end of the set.
622 */
623static int update_kernel_poll(struct lttng_poll_event *events)
624{
625 int ret;
626 struct ltt_session *session;
627 struct ltt_kernel_channel *channel;
628
629 DBG("Updating kernel poll set");
630
631 session_lock_list();
632 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
633 session_lock(session);
634 if (session->kernel_session == NULL) {
635 session_unlock(session);
636 continue;
637 }
638
639 cds_list_for_each_entry(channel,
640 &session->kernel_session->channel_list.head, list) {
641 /* Add channel fd to the kernel poll set */
642 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
643 if (ret < 0) {
644 session_unlock(session);
645 goto error;
646 }
647 DBG("Channel fd %d added to kernel set", channel->fd);
648 }
649 session_unlock(session);
650 }
651 session_unlock_list();
652
653 return 0;
654
655error:
656 session_unlock_list();
657 return -1;
658}
659
660/*
661 * Find the channel fd from 'fd' over all tracing sessions. When found, check
662 * for new channel streams and send those stream fds to the kernel consumer.
663 *
664 * Useful for the CPU hotplug feature.
665 */
666static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
667{
668 int ret = 0;
669 struct ltt_session *session;
670 struct ltt_kernel_channel *channel;
671
672 DBG("Updating kernel streams for channel fd %d", fd);
673
674 session_lock_list();
675 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
676 session_lock(session);
677 if (session->kernel_session == NULL) {
678 session_unlock(session);
679 continue;
680 }
681
682 /* This is not supposed to be 0, but this is an extra safety check */
683 if (session->kernel_session->consumer_fd == 0) {
684 session->kernel_session->consumer_fd = consumer_data->cmd_sock;
685 }
686
687 cds_list_for_each_entry(channel,
688 &session->kernel_session->channel_list.head, list) {
689 if (channel->fd == fd) {
690 DBG("Channel found, updating kernel streams");
691 ret = kernel_open_channel_stream(channel);
692 if (ret < 0) {
693 goto error;
694 }
695
696 /*
697 * Have we already sent fds to the consumer? If yes, it means
698 * that tracing is started so it is safe to send our updated
699 * stream fds.
700 */
701 if (session->kernel_session->consumer_fds_sent == 1) {
702 ret = send_kconsumer_channel_streams(consumer_data,
703 session->kernel_session->consumer_fd, channel);
704 if (ret < 0) {
705 goto error;
706 }
707 }
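				/* Channel found and handled; exit both loops through the unlock path below */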
708 goto error;
709 }
710 }
711 session_unlock(session);
712 }
713 session_unlock_list();
714 return ret;
715
716error:
717 session_unlock(session);
718 session_unlock_list();
719 return ret;
720}
721
722/*
723 * For each tracing session, update the newly registered app.
724 */
725static void update_ust_app(int app_sock)
726{
727 struct ltt_session *sess, *stmp;
728
729 /* For all tracing session(s) */
730 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
731 if (sess->ust_session) {
732 ust_app_global_update(sess->ust_session, app_sock);
733 }
734 }
735}
736
737/*
738 * This thread manages events coming from the kernel.
739 *
740 * Features supported in this thread:
741 * -) CPU Hotplug
742 */
743static void *thread_manage_kernel(void *data)
744{
745 int ret, i, pollfd, update_poll_flag = 1;
746 uint32_t revents, nb_fd;
747 char tmp;
748 struct lttng_poll_event events;
749
750 DBG("Thread manage kernel started");
751
752 ret = create_thread_poll_set(&events, 2);
753 if (ret < 0) {
754 goto error;
755 }
756
757 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
758 if (ret < 0) {
759 goto error;
760 }
761
762 while (1) {
763 if (update_poll_flag == 1) {
764 /*
765 * Reset the number of fds in the poll set. Always 2, since there are the
766 * thread quit pipe and the kernel pipe.
767 */
768 events.nb_fd = 2;
769
770 ret = update_kernel_poll(&events);
771 if (ret < 0) {
772 goto error;
773 }
774 update_poll_flag = 0;
775 }
776
777 nb_fd = LTTNG_POLL_GETNB(&events);
778
779 DBG("Thread kernel polling on %d fds", nb_fd);
780
781 /* Zero the poll events */
782 lttng_poll_reset(&events);
783
784 /* Poll with an infinite timeout */
785 ret = lttng_poll_wait(&events, -1);
786 if (ret < 0) {
787 goto error;
788 } else if (ret == 0) {
789 /* Should not happen since timeout is infinite */
790 ERR("Return value of poll is 0 with an infinite timeout.\n"
791 "This should not have happened! Continuing...");
792 continue;
793 }
794
795 for (i = 0; i < nb_fd; i++) {
796 /* Fetch the poll data once */
797 revents = LTTNG_POLL_GETEV(&events, i);
798 pollfd = LTTNG_POLL_GETFD(&events, i);
799
800 /* Thread quit pipe has been closed. Killing thread. */
801 ret = check_thread_quit_pipe(pollfd, revents);
802 if (ret) {
803 goto error;
804 }
805
806 /* Check for data on kernel pipe */
807 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
808 ret = read(kernel_poll_pipe[0], &tmp, 1);
809 update_poll_flag = 1;
810 continue;
811 } else {
812 /*
813 * New CPU detected by the kernel. Adding kernel stream to
814 * kernel session and updating the kernel consumer
815 */
816 if (revents & LPOLLIN) {
817 ret = update_kernel_stream(&kconsumer_data, pollfd);
818 if (ret < 0) {
819 continue;
820 }
821 break;
822 /*
823 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
824 * and unregister kernel stream at this point.
825 */
826 }
827 }
828 }
829 }
830
831error:
832 DBG("Kernel thread dying");
833 close(kernel_poll_pipe[0]);
834 close(kernel_poll_pipe[1]);
835
836 lttng_poll_clean(&events);
837
838 return NULL;
839}
840
841/*
842 * This thread manages consumer errors sent back to the session daemon.
843 */
844static void *thread_manage_consumer(void *data)
845{
846 int sock = 0, i, ret, pollfd;
847 uint32_t revents, nb_fd;
848 enum lttcomm_return_code code;
849 struct lttng_poll_event events;
850 struct consumer_data *consumer_data = data;
851
852 DBG("[thread] Manage consumer started");
853
854 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
855 if (ret < 0) {
856 goto error;
857 }
858
859 /*
860 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
861 * Nothing more will be added to this poll set.
862 */
863 ret = create_thread_poll_set(&events, 2);
864 if (ret < 0) {
865 goto error;
866 }
867
868 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
869 if (ret < 0) {
870 goto error;
871 }
872
873 nb_fd = LTTNG_POLL_GETNB(&events);
874
875 /* Infinite blocking call, waiting for transmission */
876 ret = lttng_poll_wait(&events, -1);
877 if (ret < 0) {
878 goto error;
879 }
880
881 for (i = 0; i < nb_fd; i++) {
882 /* Fetch the poll data once */
883 revents = LTTNG_POLL_GETEV(&events, i);
884 pollfd = LTTNG_POLL_GETFD(&events, i);
885
886 /* Thread quit pipe has been closed. Killing thread. */
887 ret = check_thread_quit_pipe(pollfd, revents);
888 if (ret) {
889 goto error;
890 }
891
892 /* Event on the registration socket */
893 if (pollfd == consumer_data->err_sock) {
894 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
895 ERR("consumer err socket poll error");
896 goto error;
897 }
898 }
899 }
900
901 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
902 if (sock < 0) {
903 goto error;
904 }
905
906 DBG2("Receiving code from consumer err_sock");
907
908 /* Getting status code from kconsumerd */
909 ret = lttcomm_recv_unix_sock(sock, &code,
910 sizeof(enum lttcomm_return_code));
911 if (ret <= 0) {
912 goto error;
913 }
914
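	/*
	 * If the consumerd reports its command socket as ready, connect to it and
	 * signal the waiting spawn_consumer_thread() through the semaphore.
	 */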
915 if (code == CONSUMERD_COMMAND_SOCK_READY) {
916 consumer_data->cmd_sock =
917 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
918 if (consumer_data->cmd_sock < 0) {
919 sem_post(&consumer_data->sem);
920 PERROR("consumer connect");
921 goto error;
922 }
923 /* Signal condition to tell that the kconsumerd is ready */
924 sem_post(&consumer_data->sem);
925 DBG("consumer command socket ready");
926 } else {
927 ERR("consumer error when waiting for SOCK_READY : %s",
928 lttcomm_get_readable_code(-code));
929 goto error;
930 }
931
932 /* Remove the kconsumerd error sock since we've established a connection */
933 ret = lttng_poll_del(&events, consumer_data->err_sock);
934 if (ret < 0) {
935 goto error;
936 }
937
938 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
939 if (ret < 0) {
940 goto error;
941 }
942
943 /* Update the number of fds */
944 nb_fd = LTTNG_POLL_GETNB(&events);
945
946 /* Infinite blocking call, waiting for transmission */
947 ret = lttng_poll_wait(&events, -1);
948 if (ret < 0) {
949 goto error;
950 }
951
952 for (i = 0; i < nb_fd; i++) {
953 /* Fetch the poll data once */
954 revents = LTTNG_POLL_GETEV(&events, i);
955 pollfd = LTTNG_POLL_GETFD(&events, i);
956
957 /* Thread quit pipe has been closed. Killing thread. */
958 ret = check_thread_quit_pipe(pollfd, revents);
959 if (ret) {
960 goto error;
961 }
962
963 /* Event on the kconsumerd socket */
964 if (pollfd == sock) {
965 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
966 ERR("consumer err socket second poll error");
967 goto error;
968 }
969 }
970 }
971
972 /* Wait for any kconsumerd error */
973 ret = lttcomm_recv_unix_sock(sock, &code,
974 sizeof(enum lttcomm_return_code));
975 if (ret <= 0) {
976 ERR("consumer closed the command socket");
977 goto error;
978 }
979
980 ERR("consumer return code : %s", lttcomm_get_readable_code(-code));
981
982error:
983 DBG("consumer thread dying");
984 close(consumer_data->err_sock);
985 close(consumer_data->cmd_sock);
986 close(sock);
987
988 unlink(consumer_data->err_unix_sock_path);
989 unlink(consumer_data->cmd_unix_sock_path);
990 consumer_data->pid = 0;
991
992 lttng_poll_clean(&events);
993
994 return NULL;
995}
996
997/*
998 * This thread manages application communication.
999 */
1000static void *thread_manage_apps(void *data)
1001{
1002 int i, ret, pollfd;
1003 uint32_t revents, nb_fd;
1004 struct ust_command ust_cmd;
1005 struct lttng_poll_event events;
1006
1007 DBG("[thread] Manage application started");
1008
1009 rcu_register_thread();
1010 rcu_thread_online();
1011
1012 ret = create_thread_poll_set(&events, 2);
1013 if (ret < 0) {
1014 goto error;
1015 }
1016
1017 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1018 if (ret < 0) {
1019 goto error;
1020 }
1021
1022 while (1) {
1023 /* Zero the events structure */
1024 lttng_poll_reset(&events);
1025
1026 nb_fd = LTTNG_POLL_GETNB(&events);
1027
1028 DBG("Apps thread polling on %d fds", nb_fd);
1029
1030 /* Infinite blocking call, waiting for transmission */
1031 ret = lttng_poll_wait(&events, -1);
1032 if (ret < 0) {
1033 goto error;
1034 }
1035
1036 for (i = 0; i < nb_fd; i++) {
1037 /* Fetch the poll data once */
1038 revents = LTTNG_POLL_GETEV(&events, i);
1039 pollfd = LTTNG_POLL_GETFD(&events, i);
1040
1041 /* Thread quit pipe has been closed. Killing thread. */
1042 ret = check_thread_quit_pipe(pollfd, revents);
1043 if (ret) {
1044 goto error;
1045 }
1046
1047 /* Inspect the apps cmd pipe */
1048 if (pollfd == apps_cmd_pipe[0]) {
1049 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1050 ERR("Apps command pipe error");
1051 goto error;
1052 } else if (revents & LPOLLIN) {
1053 /* Empty the pipe */
1054 ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
1055 if (ret < 0 || ret < sizeof(ust_cmd)) {
1056 perror("read apps cmd pipe");
1057 goto error;
1058 }
1059
1060 /* Register the application to the session daemon */
1061 ret = ust_app_register(&ust_cmd.reg_msg,
1062 ust_cmd.sock);
1063 if (ret == -ENOMEM) {
1064 goto error;
1065 } else if (ret < 0) {
1066 break;
1067 }
1068
1069 /*
1070 * Add channel(s) and event(s) to newly registered apps
1071 * from lttng global UST domain.
1072 */
1073 update_ust_app(ust_cmd.sock);
1074
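					/* Acknowledge to the application that its registration is done on the session daemon side */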
1075 ret = ustctl_register_done(ust_cmd.sock);
1076 if (ret < 0) {
1077 /*
1078 * If the registration is not possible, we simply
1079 * unregister the app and continue
1080 */
1081 ust_app_unregister(ust_cmd.sock);
1082 } else {
1083 /*
1084 * Here we just need to monitor the close of the UST
1085 * socket; the poll set monitors that by default.
1086 */
1087 ret = lttng_poll_add(&events, ust_cmd.sock, 0);
1088 if (ret < 0) {
1089 goto error;
1090 }
1091
1092 DBG("Apps with sock %d added to poll set",
1093 ust_cmd.sock);
1094 }
1095
1096 break;
1097 }
1098 } else {
1099 /*
1100 * At this point, we know that a registered application
1101 * triggered the event at poll_wait.
1102 */
1103 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1104 /* Removing from the poll set */
1105 ret = lttng_poll_del(&events, pollfd);
1106 if (ret < 0) {
1107 goto error;
1108 }
1109
1110 /* Socket closed on remote end. */
1111 ust_app_unregister(pollfd);
1112 break;
1113 }
1114 }
1115 }
1116 }
1117
1118error:
1119 DBG("Application communication apps dying");
1120 close(apps_cmd_pipe[0]);
1121 close(apps_cmd_pipe[1]);
1122
1123 lttng_poll_clean(&events);
1124
1125 rcu_thread_offline();
1126 rcu_unregister_thread();
1127 return NULL;
1128}
1129
1130/*
1131 * Dispatch requests from the registration thread to the application
1132 * communication thread.
1133 */
1134static void *thread_dispatch_ust_registration(void *data)
1135{
1136 int ret;
1137 struct cds_wfq_node *node;
1138 struct ust_command *ust_cmd = NULL;
1139
1140 DBG("[thread] Dispatch UST command started");
1141
1142 while (!dispatch_thread_exit) {
1143 /* Atomically prepare the queue futex */
1144 futex_nto1_prepare(&ust_cmd_queue.futex);
1145
1146 do {
1147 /* Dequeue command for registration */
1148 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1149 if (node == NULL) {
1150 DBG("Woken up but nothing in the UST command queue");
1151 /* Continue thread execution */
1152 break;
1153 }
1154
1155 ust_cmd = caa_container_of(node, struct ust_command, node);
1156
1157 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1158 " gid:%d sock:%d name:%s (version %d.%d)",
1159 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1160 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1161 ust_cmd->sock, ust_cmd->reg_msg.name,
1162 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1163 /*
1164 * Inform the apps thread of the new application registration. This
1165 * call is blocking, so we can be assured that the data will be read
1166 * at some point in time, or we will wait until the end of the world :)
1167 */
1168 ret = write(apps_cmd_pipe[1], ust_cmd,
1169 sizeof(struct ust_command));
1170 if (ret < 0) {
1171 perror("write apps cmd pipe");
1172 if (errno == EBADF) {
1173 /*
1174 * We can't inform the application thread to process
1175 * registration. We will exit or else application
1176 * registration will not occur and tracing will never
1177 * start.
1178 */
1179 goto error;
1180 }
1181 }
1182 free(ust_cmd);
1183 } while (node != NULL);
1184
1185 /* Futex wait on queue. Blocking call on futex() */
1186 futex_nto1_wait(&ust_cmd_queue.futex);
1187 }
1188
1189error:
1190 DBG("Dispatch thread dying");
1191 return NULL;
1192}
1193
1194/*
1195 * This thread manages application registration.
1196 */
1197static void *thread_registration_apps(void *data)
1198{
1199 int sock = 0, i, ret, pollfd;
1200 uint32_t revents, nb_fd;
1201 struct lttng_poll_event events;
1202 /*
1203 * Allocated in this thread, enqueued to a global queue, dequeued and
1204 * freed in the manage apps thread.
1205 */
1206 struct ust_command *ust_cmd = NULL;
1207
1208 DBG("[thread] Manage application registration started");
1209
1210 ret = lttcomm_listen_unix_sock(apps_sock);
1211 if (ret < 0) {
1212 goto error;
1213 }
1214
1215 /*
1216 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1217 * more will be added to this poll set.
1218 */
1219 ret = create_thread_poll_set(&events, 2);
1220 if (ret < 0) {
1221 goto error;
1222 }
1223
1224 /* Add the application registration socket */
1225 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1226 if (ret < 0) {
1227 goto error;
1228 }
1229
1230 /* Notify all applications to register */
1231 ret = notify_ust_apps(1);
1232 if (ret < 0) {
1233 ERR("Failed to notify applications or create the wait shared memory.\n"
1234 "Execution continues but there might be problem for already\n"
1235 "running applications that wishes to register.");
1236 }
1237
1238 while (1) {
1239 DBG("Accepting application registration");
1240
1241 nb_fd = LTTNG_POLL_GETNB(&events);
1242
1243 /* Infinite blocking call, waiting for transmission */
1244 ret = lttng_poll_wait(&events, -1);
1245 if (ret < 0) {
1246 goto error;
1247 }
1248
1249 for (i = 0; i < nb_fd; i++) {
1250 /* Fetch the poll data once */
1251 revents = LTTNG_POLL_GETEV(&events, i);
1252 pollfd = LTTNG_POLL_GETFD(&events, i);
1253
1254 /* Thread quit pipe has been closed. Killing thread. */
1255 ret = check_thread_quit_pipe(pollfd, revents);
1256 if (ret) {
1257 goto error;
1258 }
1259
1260 /* Event on the registration socket */
1261 if (pollfd == apps_sock) {
1262 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1263 ERR("Register apps socket poll error");
1264 goto error;
1265 } else if (revents & LPOLLIN) {
1266 sock = lttcomm_accept_unix_sock(apps_sock);
1267 if (sock < 0) {
1268 goto error;
1269 }
1270
1271 /* Create UST registration command for enqueuing */
1272 ust_cmd = zmalloc(sizeof(struct ust_command));
1273 if (ust_cmd == NULL) {
1274 perror("ust command zmalloc");
1275 goto error;
1276 }
1277
1278 /*
1279 * Using message-based transmissions to ensure we don't
1280 * have to deal with partially received messages.
1281 */
1282 ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
1283 sizeof(struct ust_register_msg));
1284 if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
1285 if (ret < 0) {
1286 perror("lttcomm_recv_unix_sock register apps");
1287 } else {
1288 ERR("Wrong size received on apps register");
1289 }
1290 free(ust_cmd);
1291 close(sock);
1292 continue;
1293 }
1294
1295 ust_cmd->sock = sock;
1296
1297 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1298 " gid:%d sock:%d name:%s (version %d.%d)",
1299 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1300 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1301 ust_cmd->sock, ust_cmd->reg_msg.name,
1302 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1303
1304 /*
1305 * Lock-free enqueue of the registration request. The red pill
1306 * has been taken! This app will be part of the *system*.
1307 */
1308 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1309
1310 /*
1311 * Wake the registration queue futex. Implicit memory
1312 * barrier with the exchange in cds_wfq_enqueue.
1313 */
1314 futex_nto1_wake(&ust_cmd_queue.futex);
1315 }
1316 }
1317 }
1318 }
1319
1320error:
1321 DBG("UST Registration thread dying");
1322
1323 /* Notify that the registration thread is gone */
1324 notify_ust_apps(0);
1325
1326 close(apps_sock);
1327 close(sock);
1328 unlink(apps_unix_sock_path);
1329
1330 lttng_poll_clean(&events);
1331
1332 return NULL;
1333}
1334
1335/*
1336 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1337 * exec or it will fail.
1338 */
1339static int spawn_consumer_thread(struct consumer_data *consumer_data)
1340{
1341 int ret;
1342 struct timespec timeout;
1343
1344 timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
1345 timeout.tv_nsec = 0;
1346
1347 /* Setup semaphore */
1348 ret = sem_init(&consumer_data->sem, 0, 0);
1349 if (ret < 0) {
1350 PERROR("sem_init consumer semaphore");
1351 goto error;
1352 }
1353
1354 ret = pthread_create(&consumer_data->thread, NULL,
1355 thread_manage_consumer, consumer_data);
1356 if (ret != 0) {
1357 PERROR("pthread_create consumer");
1358 ret = -1;
1359 goto error;
1360 }
1361
1362 /* Get time for sem_timedwait absolute timeout */
1363 ret = clock_gettime(CLOCK_REALTIME, &timeout);
1364 if (ret < 0) {
1365 PERROR("clock_gettime spawn consumer");
1366 /* Infinite wait for the kconsumerd thread to be ready */
1367 ret = sem_wait(&consumer_data->sem);
1368 } else {
1369 /* Normal timeout if the gettime was successful */
1370 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1371 ret = sem_timedwait(&consumer_data->sem, &timeout);
1372 }
1373
1374 if (ret < 0) {
1375 if (errno == ETIMEDOUT) {
1376 /*
1377 * Call has timed out so we kill the kconsumerd_thread and return
1378 * an error.
1379 */
1380 ERR("The consumer thread was never ready. Killing it");
1381 ret = pthread_cancel(consumer_data->thread);
1382 if (ret < 0) {
1383 PERROR("pthread_cancel consumer thread");
1384 }
1385 } else {
1386 PERROR("semaphore wait failed consumer thread");
1387 }
1388 goto error;
1389 }
1390
1391 pthread_mutex_lock(&consumer_data->pid_mutex);
1392 if (consumer_data->pid == 0) {
1393 ERR("Kconsumerd did not start");
1394 pthread_mutex_unlock(&consumer_data->pid_mutex);
1395 goto error;
1396 }
1397 pthread_mutex_unlock(&consumer_data->pid_mutex);
1398
1399 return 0;
1400
1401error:
1402 return ret;
1403}
1404
1405/*
1406 * Join consumer thread
1407 */
1408static int join_consumer_thread(struct consumer_data *consumer_data)
1409{
1410 void *status;
1411 int ret;
1412
1413 if (consumer_data->pid != 0) {
1414 ret = kill(consumer_data->pid, SIGTERM);
1415 if (ret) {
1416 ERR("Error killing consumer daemon");
1417 return ret;
1418 }
1419 return pthread_join(consumer_data->thread, &status);
1420 } else {
1421 return 0;
1422 }
1423}
1424
1425/*
1426 * Fork and exec a consumer daemon (consumerd).
1427 *
1429 * Return the child pid on success, else a negative error value.
1429 */
1430static pid_t spawn_consumerd(struct consumer_data *consumer_data)
1431{
1432 int ret;
1433 pid_t pid;
1434 const char *verbosity;
1435
1436 DBG("Spawning consumerd");
1437
1438 pid = fork();
1439 if (pid == 0) {
1440 /*
1441 * Exec consumerd.
1442 */
1443 if (opt_verbose > 1 || opt_verbose_consumer) {
1444 verbosity = "--verbose";
1445 } else {
1446 verbosity = "--quiet";
1447 }
1448 switch (consumer_data->type) {
1449 case LTTNG_CONSUMER_KERNEL:
1450 execl(INSTALL_BIN_PATH "/lttng-consumerd",
1451 "lttng-consumerd", verbosity, "-k", NULL);
1452 break;
1453 case LTTNG_CONSUMER_UST:
1454 execl(INSTALL_BIN_PATH "/lttng-consumerd",
1455 "lttng-consumerd", verbosity, "-u", NULL);
1456 break;
1457 default:
1458 perror("unknown consumer type");
1459 exit(EXIT_FAILURE);
1460 }
1461 if (errno != 0) {
1462 perror("kernel start consumer exec");
1463 }
1464 exit(EXIT_FAILURE);
1465 } else if (pid > 0) {
1466 ret = pid;
1467 } else {
1468 perror("start consumer fork");
1469 ret = -errno;
1470 }
1471 return ret;
1472}
1473
1474/*
1475 * Spawn the consumerd daemon and session daemon thread.
1476 */
1477static int start_consumerd(struct consumer_data *consumer_data)
1478{
1479 int ret;
1480
1481 pthread_mutex_lock(&consumer_data->pid_mutex);
1482 if (consumer_data->pid != 0) {
1483 pthread_mutex_unlock(&consumer_data->pid_mutex);
1484 goto end;
1485 }
1486
1487 ret = spawn_consumerd(consumer_data);
1488 if (ret < 0) {
1489 ERR("Spawning consumerd failed");
1490 pthread_mutex_unlock(&consumer_data->pid_mutex);
1491 goto error;
1492 }
1493
1494 /* Setting up the consumer_data pid */
1495 consumer_data->pid = ret;
1496 DBG2("Consumer pid %d", consumer_data->pid);
1497 pthread_mutex_unlock(&consumer_data->pid_mutex);
1498
1499 DBG2("Spawning consumer control thread");
1500 ret = spawn_consumer_thread(consumer_data);
1501 if (ret < 0) {
1502 ERR("Fatal error spawning consumer control thread");
1503 goto error;
1504 }
1505
1506end:
1507 return 0;
1508
1509error:
1510 return ret;
1511}
1512
1513/*
1514 * modprobe_kernel_modules - load the LTTng kernel modules using modprobe.
1515 */
1516static int modprobe_kernel_modules(void)
1517{
1518 int ret = 0, i;
1519 char modprobe[256];
1520
1521 for (i = 0; i < ARRAY_SIZE(kernel_modules_list); i++) {
1522 ret = snprintf(modprobe, sizeof(modprobe),
1523 "/sbin/modprobe %s%s",
1524 kernel_modules_list[i].required ? "" : "-q ",
1525 kernel_modules_list[i].name);
1526 if (ret < 0) {
1527 perror("snprintf modprobe");
1528 goto error;
1529 }
1530 modprobe[sizeof(modprobe) - 1] = '\0';
1531 ret = system(modprobe);
1532 if (ret == -1) {
1533 ERR("Unable to launch modprobe for module %s",
1534 kernel_modules_list[i].name);
1535 } else if (kernel_modules_list[i].required
1536 && WEXITSTATUS(ret) != 0) {
1537 ERR("Unable to load module %s",
1538 kernel_modules_list[i].name);
1539 } else {
1540 DBG("Modprobe successfully %s",
1541 kernel_modules_list[i].name);
1542 }
1543 }
1544
1545error:
1546 return ret;
1547}
1548
1549/*
1550 * mount_debugfs - mount a debugfs filesystem at the given path.
1551 */
1552static int mount_debugfs(char *path)
1553{
1554 int ret;
1555 char *type = "debugfs";
1556
1557 ret = mkdir_recursive(path, S_IRWXU | S_IRWXG, geteuid(), getegid());
1558 if (ret < 0) {
1559 PERROR("Cannot create debugfs path");
1560 goto error;
1561 }
1562
1563 ret = mount(type, path, type, 0, NULL);
1564 if (ret < 0) {
1565 PERROR("Cannot mount debugfs");
1566 goto error;
1567 }
1568
1569 DBG("Mounted debugfs successfully at %s", path);
1570
1571error:
1572 return ret;
1573}
1574
1575/*
1576 * Setup necessary data for kernel tracer action.
1577 */
1578static void init_kernel_tracer(void)
1579{
1580 int ret;
1581 char *proc_mounts = "/proc/mounts";
1582 char line[256];
1583 char *debugfs_path = NULL, *lttng_path = NULL;
1584 FILE *fp;
1585
1586 /* Detect debugfs */
1587 fp = fopen(proc_mounts, "r");
1588 if (fp == NULL) {
1589 ERR("Unable to probe %s", proc_mounts);
1590 goto error;
1591 }
1592
1593 while (fgets(line, sizeof(line), fp) != NULL) {
1594 if (strstr(line, "debugfs") != NULL) {
1595 /* Skip the first token (the mount source) */
1596 strtok(line, " ");
1597 /* Dup string here so we can reuse line later on */
1598 debugfs_path = strdup(strtok(NULL, " "));
1599 DBG("Got debugfs path : %s", debugfs_path);
1600 break;
1601 }
1602 }
1603
1604 fclose(fp);
1605
1606 /* Mount debugfs if needed */
1607 if (debugfs_path == NULL) {
1608 ret = asprintf(&debugfs_path, "/mnt/debugfs");
1609 if (ret < 0) {
1610 perror("asprintf debugfs path");
1611 goto error;
1612 }
1613 ret = mount_debugfs(debugfs_path);
1614 if (ret < 0) {
1615 perror("Cannot mount debugfs");
1616 goto error;
1617 }
1618 }
1619
1620 /* Modprobe lttng kernel modules */
1621 ret = modprobe_kernel_modules();
1622 if (ret < 0) {
1623 goto error;
1624 }
1625
1626 /* Setup lttng kernel path */
1627 ret = asprintf(&lttng_path, "%s/lttng", debugfs_path);
1628 if (ret < 0) {
1629 perror("asprintf lttng path");
1630 goto error;
1631 }
1632
1633 /* Open debugfs lttng */
1634 kernel_tracer_fd = open(lttng_path, O_RDWR);
1635 if (kernel_tracer_fd < 0) {
1636 DBG("Failed to open %s", lttng_path);
1637 goto error;
1638 }
1639
1640 free(lttng_path);
1641 free(debugfs_path);
1642 DBG("Kernel tracer fd %d", kernel_tracer_fd);
1643 return;
1644
1645error:
1646 if (lttng_path) {
1647 free(lttng_path);
1648 }
1649 if (debugfs_path) {
1650 free(debugfs_path);
1651 }
1652 WARN("No kernel tracer available");
1653 kernel_tracer_fd = 0;
1654 return;
1655}
1656
1657/*
1658 * Init tracing by creating the trace directory and sending fds to the kernel consumer.
1659 */
1660static int init_kernel_tracing(struct ltt_kernel_session *session)
1661{
1662 int ret = 0;
1663
1664 if (session->consumer_fds_sent == 0) {
1665 /*
1666 * Assign the default kernel consumer socket if no consumer is assigned to
1667 * the kernel session. At this point, it's NOT supposed to be 0, but this is
1668 * an extra safety check.
1669 */
1670 if (session->consumer_fd == 0) {
1671 session->consumer_fd = kconsumer_data.cmd_sock;
1672 }
1673
1674 ret = send_kconsumer_session_streams(&kconsumer_data, session);
1675 if (ret < 0) {
1676 ret = LTTCOMM_KERN_CONSUMER_FAIL;
1677 goto error;
1678 }
1679
1680 session->consumer_fds_sent = 1;
1681 }
1682
1683error:
1684 return ret;
1685}
1686
1687/*
1688 * Create a UST session and add it to the session's UST list.
1689 */
1690static int create_ust_session(struct ltt_session *session,
1691 struct lttng_domain *domain)
1692{
1693 int ret;
1694 unsigned int uid;
1695 struct ltt_ust_session *lus = NULL;
1696
1697 switch (domain->type) {
1698 case LTTNG_DOMAIN_UST:
1699 break;
1700 default:
1701 ret = LTTCOMM_UNKNOWN_DOMAIN;
1702 goto error;
1703 }
1704
1705 DBG("Creating UST session");
1706
1707 session_lock_list();
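	/* Despite its name, 'uid' holds the session count, used here as a unique session id (not a user id) */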
1708 uid = session_list_ptr->count;
1709 session_unlock_list();
1710
1711 lus = trace_ust_create_session(session->path, uid, domain);
1712 if (lus == NULL) {
1713 ret = LTTCOMM_UST_SESS_FAIL;
1714 goto error;
1715 }
1716
1717 ret = mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
1718 geteuid(), allowed_group());
1719 if (ret < 0) {
1720 if (ret != -EEXIST) {
1721 ERR("Trace directory creation error");
1722 ret = LTTCOMM_UST_SESS_FAIL;
1723 goto error;
1724 }
1725 }
1726
1727 /* The domain type dictates different actions on session creation */
1728 switch (domain->type) {
1729 case LTTNG_DOMAIN_UST:
1730 /* No ustctl for the global UST domain */
1731 break;
1732 default:
1733 ERR("Unknown UST domain on create session %d", domain->type);
1734 goto error;
1735 }
1736 session->ust_session = lus;
1737
1738 return LTTCOMM_OK;
1739
1740error:
1741 free(lus);
1742 return ret;
1743}
1744
1745/*
1746 * Create a kernel tracer session then create the default channel.
1747 */
1748static int create_kernel_session(struct ltt_session *session)
1749{
1750 int ret;
1751
1752 DBG("Creating kernel session");
1753
1754 ret = kernel_create_session(session, kernel_tracer_fd);
1755 if (ret < 0) {
1756 ret = LTTCOMM_KERN_SESS_FAIL;
1757 goto error;
1758 }
1759
1760 /* Set kernel consumer socket fd */
1761 if (kconsumer_data.cmd_sock) {
1762 session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
1763 }
1764
1765 ret = mkdir_recursive(session->kernel_session->trace_path,
1766 S_IRWXU | S_IRWXG, geteuid(), allowed_group());
1767 if (ret < 0) {
1768 if (ret != -EEXIST) {
1769 ERR("Trace directory creation error");
1770 goto error;
1771 }
1772 }
1773
1774error:
1775 return ret;
1776}
1777
1778/*
1779 * Using the session list, fill an lttng_session array to send back to the
1780 * client for session listing.
1781 *
1782 * The session list lock MUST be acquired before calling this function. Use
1783 * session_lock_list() and session_unlock_list().
1784 */
1785static void list_lttng_sessions(struct lttng_session *sessions)
1786{
1787 int i = 0;
1788 struct ltt_session *session;
1789
1790 DBG("Getting all available session");
1791 /*
1792 * Iterate over session list and append data after the control struct in
1793 * the buffer.
1794 */
1795 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
1796 strncpy(sessions[i].path, session->path, PATH_MAX);
1797 sessions[i].path[PATH_MAX - 1] = '\0';
1798 strncpy(sessions[i].name, session->name, NAME_MAX);
1799 sessions[i].name[NAME_MAX - 1] = '\0';
1800 sessions[i].enabled = session->enabled;
1801 i++;
1802 }
1803}
1804
1805/*
1806 * Fill lttng_channel array of all channels.
1807 */
1808static void list_lttng_channels(int domain, struct ltt_session *session,
1809 struct lttng_channel *channels)
1810{
1811 int i = 0;
1812 struct ltt_kernel_channel *kchan;
1813
1814 DBG("Listing channels for session %s", session->name);
1815
1816 switch (domain) {
1817 case LTTNG_DOMAIN_KERNEL:
1818 /* Kernel channels */
1819 if (session->kernel_session != NULL) {
1820 cds_list_for_each_entry(kchan,
1821 &session->kernel_session->channel_list.head, list) {
1822 /* Copy lttng_channel struct to array */
1823 memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
1824 channels[i].enabled = kchan->enabled;
1825 i++;
1826 }
1827 }
1828 break;
1829 case LTTNG_DOMAIN_UST:
1830 {
1831 struct cds_lfht_iter iter;
1832 struct ltt_ust_channel *uchan;
1833
1834 cds_lfht_for_each_entry(session->ust_session->domain_global.channels,
1835 &iter, uchan, node) {
1836 strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
1837 channels[i].attr.overwrite = uchan->attr.overwrite;
1838 channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
1839 channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
1840 channels[i].attr.switch_timer_interval =
1841 uchan->attr.switch_timer_interval;
1842 channels[i].attr.read_timer_interval =
1843 uchan->attr.read_timer_interval;
1844 channels[i].attr.output = uchan->attr.output;
1845 }
1846 break;
1847 }
1848 default:
1849 break;
1850 }
1851}
1852
1853/*
1854 * Create a list of ust global domain events.
1855 */
1856static int list_lttng_ust_global_events(char *channel_name,
1857 struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
1858{
1859 int i = 0, ret = 0;
1860 unsigned int nb_event = 0;
1861 struct cds_lfht_iter iter, uiter;
1862 struct ltt_ust_channel *uchan;
1863 struct ltt_ust_event *uevent;
1864 struct lttng_event *tmp;
1865
1866 DBG("Listing UST global events for channel %s", channel_name);
1867
1868 rcu_read_lock();
1869
1870 /* Count events in all channels */
1871 cds_lfht_for_each_entry(ust_global->channels, &iter, uchan, node) {
1872 nb_event += hashtable_get_count(uchan->events);
1873 }
1874
1875 if (nb_event == 0) {
1876 ret = nb_event;
1877 goto error;
1878 }
1879
1880 DBG3("Listing UST global %d events", nb_event);
1881
1882 tmp = zmalloc(nb_event * sizeof(struct lttng_event));
1883 if (tmp == NULL) {
1884 ret = -LTTCOMM_FATAL;
1885 goto error;
1886 }
1887
1888 cds_lfht_for_each_entry(ust_global->channels, &iter, uchan, node) {
1889 cds_lfht_for_each_entry(uchan->events, &uiter, uevent, node) {
1890 strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
1891 tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
1892 tmp[i].enabled = uevent->enabled;
1893 switch (uevent->attr.instrumentation) {
1894 case LTTNG_UST_TRACEPOINT:
1895 tmp[i].type = LTTNG_EVENT_TRACEPOINT;
1896 break;
1897 case LTTNG_UST_PROBE:
1898 tmp[i].type = LTTNG_EVENT_PROBE;
1899 break;
1900 case LTTNG_UST_FUNCTION:
1901 tmp[i].type = LTTNG_EVENT_FUNCTION;
1902 break;
1903 }
1904 i++;
1905 }
1906 }
1907
1908 ret = nb_event;
1909 *events = tmp;
1910
1911error:
1912 rcu_read_unlock();
1913 return ret;
1914}
1915
1916/*
1917 * Fill lttng_event array of all kernel events in the channel.
1918 */
1919static int list_lttng_kernel_events(char *channel_name,
1920 struct ltt_kernel_session *kernel_session, struct lttng_event **events)
1921{
1922 int i = 0, ret;
1923 unsigned int nb_event;
1924 struct ltt_kernel_event *event;
1925 struct ltt_kernel_channel *kchan;
1926
1927 kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
1928 if (kchan == NULL) {
1929 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
1930 goto error;
1931 }
1932
1933 nb_event = kchan->event_count;
1934
1935 DBG("Listing events for channel %s", kchan->channel->name);
1936
1937 if (nb_event == 0) {
1938 ret = nb_event;
1939 goto error;
1940 }
1941
1942 *events = zmalloc(nb_event * sizeof(struct lttng_event));
1943 if (*events == NULL) {
1944 ret = LTTCOMM_FATAL;
1945 goto error;
1946 }
1947
1948 /* Kernel events */
1949 cds_list_for_each_entry(event, &kchan->events_list.head , list) {
1950 strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
1951 (*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
1952 (*events)[i].enabled = event->enabled;
1953 switch (event->event->instrumentation) {
1954 case LTTNG_KERNEL_TRACEPOINT:
1955 (*events)[i].type = LTTNG_EVENT_TRACEPOINT;
1956 break;
1957 case LTTNG_KERNEL_KPROBE:
1958 case LTTNG_KERNEL_KRETPROBE:
1959 (*events)[i].type = LTTNG_EVENT_PROBE;
1960 memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
1961 sizeof(struct lttng_kernel_kprobe));
1962 break;
1963 case LTTNG_KERNEL_FUNCTION:
1964 (*events)[i].type = LTTNG_EVENT_FUNCTION;
1965 memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
1966 sizeof(struct lttng_kernel_function));
1967 break;
1968 case LTTNG_KERNEL_NOOP:
1969 (*events)[i].type = LTTNG_EVENT_NOOP;
1970 break;
1971 case LTTNG_KERNEL_SYSCALL:
1972 (*events)[i].type = LTTNG_EVENT_SYSCALL;
1973 break;
1974 case LTTNG_KERNEL_ALL:
1975 assert(0);
1976 break;
1977 }
1978 i++;
1979 }
1980
1981 return nb_event;
1982
1983error:
1984 return ret;
1985}
1986
1987/*
1988 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
1989 */
1990static int cmd_disable_channel(struct ltt_session *session,
1991 int domain, char *channel_name)
1992{
1993 int ret;
1994
1995 switch (domain) {
1996 case LTTNG_DOMAIN_KERNEL:
1997 ret = channel_kernel_disable(session->kernel_session,
1998 channel_name);
1999 if (ret != LTTCOMM_OK) {
2000 goto error;
2001 }
2002
2003 kernel_wait_quiescent(kernel_tracer_fd);
2004 break;
2005 case LTTNG_DOMAIN_UST_PID:
2006 break;
2007 default:
2008 ret = LTTCOMM_UNKNOWN_DOMAIN;
2009 goto error;
2010 }
2011
2012 ret = LTTCOMM_OK;
2013
2014error:
2015 return ret;
2016}
2017
2018/*
2019 * Copy channel from attributes and set it in the application channel list.
2020 */
2021/*
2022static int copy_ust_channel_to_app(struct ltt_ust_session *usess,
2023 struct lttng_channel *attr, struct ust_app *app)
2024{
2025 int ret;
2026 struct ltt_ust_channel *uchan, *new_chan;
2027
2028 uchan = trace_ust_get_channel_by_key(usess->channels, attr->name);
2029 if (uchan == NULL) {
2030 ret = LTTCOMM_FATAL;
2031 goto error;
2032 }
2033
2034 new_chan = trace_ust_create_channel(attr, usess->path);
2035 if (new_chan == NULL) {
2036 PERROR("malloc ltt_ust_channel");
2037 ret = LTTCOMM_FATAL;
2038 goto error;
2039 }
2040
2041 ret = channel_ust_copy(new_chan, uchan);
2042 if (ret < 0) {
2043 ret = LTTCOMM_FATAL;
2044 goto error;
2045 }
2046
2047error:
2048 return ret;
2049}
2050*/
2051
2052/*
2053 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
2054 */
2055static int cmd_enable_channel(struct ltt_session *session,
2056 struct lttng_domain *domain, struct lttng_channel *attr)
2057{
2058 int ret;
2059 struct ltt_ust_session *usess = session->ust_session;
2060
2061 DBG("Enabling channel %s for session %s", attr->name, session->name);
2062
2063 switch (domain->type) {
2064 case LTTNG_DOMAIN_KERNEL:
2065 {
2066 struct ltt_kernel_channel *kchan;
2067
2068 kchan = trace_kernel_get_channel_by_name(attr->name,
2069 session->kernel_session);
2070 if (kchan == NULL) {
2071 ret = channel_kernel_create(session->kernel_session,
2072 attr, kernel_poll_pipe[1]);
2073 } else {
2074 ret = channel_kernel_enable(session->kernel_session, kchan);
2075 }
2076
2077 if (ret != LTTCOMM_OK) {
2078 goto error;
2079 }
2080
2081 kernel_wait_quiescent(kernel_tracer_fd);
2082 break;
2083 }
2084 case LTTNG_DOMAIN_UST:
2085 {
2086 struct ltt_ust_channel *uchan;
2087
2088 DBG2("Enabling channel for LTTNG_DOMAIN_UST");
2089
2090 /* Get channel in global UST domain HT */
2091 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2092 attr->name);
2093 if (uchan == NULL) {
2094 uchan = trace_ust_create_channel(attr, usess->pathname);
2095 if (uchan == NULL) {
2096 ret = LTTCOMM_UST_CHAN_FAIL;
2097 goto error;
2098 }
2099
2100 rcu_read_lock();
2101 hashtable_add_unique(usess->domain_global.channels, &uchan->node);
2102 rcu_read_unlock();
2103 DBG2("UST channel %s added to global domain HT", attr->name);
2104 } else {
2105 ret = LTTCOMM_UST_CHAN_EXIST;
2106 goto error;
2107 }
2108
2109 /* Add channel to all registered applications */
2110 ret = ust_app_create_channel_all(usess, uchan);
2111 if (ret != 0) {
2112 goto error;
2113 }
2114
2115 uchan->enabled = 1;
2116
2117 break;
2118 }
2119 case LTTNG_DOMAIN_UST_PID:
2120 {
2121 /*
2122 int sock;
2123 struct ltt_ust_channel *uchan;
2124 struct ltt_ust_session *usess;
2125 struct ust_app *app;
2126
2127 usess = trace_ust_get_session_by_pid(&session->ust_session_list,
2128 domain->attr.pid);
2129 if (usess == NULL) {
2130 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2131 goto error;
2132 }
2133
2134 app = ust_app_get_by_pid(domain->attr.pid);
2135 if (app == NULL) {
2136 ret = LTTCOMM_APP_NOT_FOUND;
2137 goto error;
2138 }
2139 sock = app->sock;
2140
2141 uchan = trace_ust_get_channel_by_name(attr->name, usess);
2142 if (uchan == NULL) {
2143 ret = channel_ust_create(usess, attr, sock);
2144 } else {
2145 ret = channel_ust_enable(usess, uchan, sock);
2146 }
2147
2148 if (ret != LTTCOMM_OK) {
2149 goto error;
2150 }
2151
2152 ret = copy_ust_channel_to_app(usess, attr, app);
2153 if (ret != LTTCOMM_OK) {
2154 goto error;
2155 }
2156
2157 DBG("UST channel %s created for app sock %d with pid %d",
2158 attr->name, app->sock, domain->attr.pid);
2159 */
2160 ret = LTTCOMM_NOT_IMPLEMENTED;
2161 goto error;
2162 }
2163 default:
2164 ret = LTTCOMM_UNKNOWN_DOMAIN;
2165 goto error;
2166 }
2167
2168 ret = LTTCOMM_OK;
2169
2170error:
2171 return ret;
2172}
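
/*
 * Minimal usage sketch for cmd_enable_channel(): build default attributes
 * for a named channel and hand them to the command, mirroring what
 * cmd_enable_event() does further down with channel_new_default_attr().
 * Releasing the attributes with free() is an assumption; the attribute
 * object may have a dedicated destroy helper.
 */
static int example_enable_named_channel(struct ltt_session *session,
		struct lttng_domain *domain, const char *name)
{
	int ret;
	struct lttng_channel *attr;

	attr = channel_new_default_attr(domain->type);
	if (attr == NULL) {
		return LTTCOMM_FATAL;
	}
	snprintf(attr->name, NAME_MAX, "%s", name);

	ret = cmd_enable_channel(session, domain, attr);

	free(attr);
	return ret;
}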
2173
2174/*
2175 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2176 */
2177static int cmd_disable_event(struct ltt_session *session, int domain,
2178 char *channel_name, char *event_name)
2179{
2180 int ret;
2181
2182 switch (domain) {
2183 case LTTNG_DOMAIN_KERNEL:
2184 {
2185 struct ltt_kernel_channel *kchan;
2186
2187 kchan = trace_kernel_get_channel_by_name(channel_name,
2188 session->kernel_session);
2189 if (kchan == NULL) {
2190 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2191 goto error;
2192 }
2193
2194 ret = event_kernel_disable_tracepoint(session->kernel_session, kchan, event_name);
2195 if (ret != LTTCOMM_OK) {
2196 goto error;
2197 }
2198
2199 kernel_wait_quiescent(kernel_tracer_fd);
2200 break;
2201 }
2202 case LTTNG_DOMAIN_UST:
2203 case LTTNG_DOMAIN_UST_EXEC_NAME:
2204 case LTTNG_DOMAIN_UST_PID:
2205 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2206 default:
2207 /* TODO: Other UST domains */
2208 ret = LTTCOMM_NOT_IMPLEMENTED;
2209 goto error;
2210 }
2211
2212 ret = LTTCOMM_OK;
2213
2214error:
2215 return ret;
2216}
2217
2218/*
2219 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
2220 */
2221static int cmd_disable_event_all(struct ltt_session *session, int domain,
2222 char *channel_name)
2223{
2224 int ret;
2225 struct ltt_kernel_channel *kchan;
2226
2227 switch (domain) {
2228 case LTTNG_DOMAIN_KERNEL:
2229 kchan = trace_kernel_get_channel_by_name(channel_name,
2230 session->kernel_session);
2231 if (kchan == NULL) {
2232 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2233 goto error;
2234 }
2235
2236 ret = event_kernel_disable_all(session->kernel_session, kchan);
2237 if (ret != LTTCOMM_OK) {
2238 goto error;
2239 }
2240
2241 kernel_wait_quiescent(kernel_tracer_fd);
2242 break;
2243 default:
2244 /* TODO: Userspace tracing */
2245 ret = LTTCOMM_NOT_IMPLEMENTED;
2246 goto error;
2247 }
2248
2249 ret = LTTCOMM_OK;
2250
2251error:
2252 return ret;
2253}
2254
2255/*
2256 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2257 */
2258static int cmd_add_context(struct ltt_session *session, int domain,
2259 char *channel_name, char *event_name, struct lttng_event_context *ctx)
2260{
2261 int ret;
2262
2263 switch (domain) {
2264 case LTTNG_DOMAIN_KERNEL:
2265 /* Add kernel context to kernel tracer */
2266 ret = context_kernel_add(session->kernel_session, ctx,
2267 event_name, channel_name);
2268 if (ret != LTTCOMM_OK) {
2269 goto error;
2270 }
2271 break;
2272 case LTTNG_DOMAIN_UST:
2273 {
2274 /*
2275 struct ltt_ust_session *usess;
2276
2277 cds_list_for_each_entry(usess, &session->ust_session_list.head, list) {
2278 ret = context_ust_add(usess, ctx,
2279 event_name, channel_name, domain);
2280 if (ret != LTTCOMM_OK) {
2281 goto error;
2282 }
2283 }
2284 break;
2285 */
2286 }
2287 default:
2288 /* TODO: UST other domains */
2289 ret = LTTCOMM_NOT_IMPLEMENTED;
2290 goto error;
2291 }
2292
2293 ret = LTTCOMM_OK;
2294
2295error:
2296 return ret;
2297}
2298
2299/*
2300 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2301 */
2302static int cmd_enable_event(struct ltt_session *session, int domain,
2303 char *channel_name, struct lttng_event *event)
2304{
2305 int ret;
2306 struct lttng_channel *attr;
2307 struct ltt_ust_session *usess = session->ust_session;
2308
2309 switch (domain) {
2310 case LTTNG_DOMAIN_KERNEL:
2311 {
2312 struct ltt_kernel_channel *kchan;
2313
2314 kchan = trace_kernel_get_channel_by_name(channel_name,
2315 session->kernel_session);
2316 if (kchan == NULL) {
2317 attr = channel_new_default_attr(domain);
2318 if (attr == NULL) {
2319 ret = LTTCOMM_FATAL;
2320 goto error;
2321 }
2322 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2323
2324 /* This call will notify the kernel thread */
2325 ret = channel_kernel_create(session->kernel_session,
2326 attr, kernel_poll_pipe[1]);
2327 if (ret != LTTCOMM_OK) {
2328 goto error;
2329 }
2330 }
2331
2332 /* Get the newly created kernel channel pointer */
2333 kchan = trace_kernel_get_channel_by_name(channel_name,
2334 session->kernel_session);
2335 if (kchan == NULL) {
2336 /* This should not happen... */
2337 ret = LTTCOMM_FATAL;
2338 goto error;
2339 }
2340
2341 ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
2342 event);
2343 if (ret != LTTCOMM_OK) {
2344 goto error;
2345 }
2346
2347 kernel_wait_quiescent(kernel_tracer_fd);
2348 break;
2349 }
2350 case LTTNG_DOMAIN_UST:
2351 {
2352 struct ltt_ust_channel *uchan;
2353 struct ltt_ust_event *uevent;
2354
2355 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2356 channel_name);
2357 if (uchan == NULL) {
2358 /* TODO: Create default channel */
2359 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2360 goto error;
2361 }
2362
2363 uevent = trace_ust_find_event_by_name(uchan->events, event->name);
2364 if (uevent == NULL) {
2365 uevent = trace_ust_create_event(event);
2366 if (uevent == NULL) {
2367 ret = LTTCOMM_FATAL;
2368 goto error;
2369 }
2370
2371 }
2372
2373 ret = ust_app_create_event_all(usess, uchan, uevent);
2374 if (ret < 0) {
2375 ret = LTTCOMM_UST_ENABLE_FAIL;
2376 goto error;
2377 }
2378
2379 /* Add ltt ust event to channel */
2380 rcu_read_lock();
2381 hashtable_add_unique(uchan->events, &uevent->node);
2382 rcu_read_unlock();
2383
2384 uevent->enabled = 1;
2385
2386 DBG3("UST ltt event %s added to channel %s", uevent->attr.name,
2387 uchan->name);
2388 break;
2389 }
2390 case LTTNG_DOMAIN_UST_EXEC_NAME:
2391 case LTTNG_DOMAIN_UST_PID:
2392 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2393 default:
2394 ret = LTTCOMM_NOT_IMPLEMENTED;
2395 goto error;
2396 }
2397
2398 ret = LTTCOMM_OK;
2399
2400error:
2401 return ret;
2402}
2403
2404/*
2405 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
2406 */
2407static int cmd_enable_event_all(struct ltt_session *session, int domain,
2408 char *channel_name, int event_type)
2409{
2410 int ret;
2411 struct ltt_kernel_channel *kchan;
2412
2413 switch (domain) {
2414 case LTTNG_DOMAIN_KERNEL:
2415 kchan = trace_kernel_get_channel_by_name(channel_name,
2416 session->kernel_session);
2417 if (kchan == NULL) {
2418 /* This call will notify the kernel thread */
2419 ret = channel_kernel_create(session->kernel_session, NULL,
2420 kernel_poll_pipe[1]);
2421 if (ret != LTTCOMM_OK) {
2422 goto error;
2423 }
2424 }
2425
2426 /* Get the newly created kernel channel pointer */
2427 kchan = trace_kernel_get_channel_by_name(channel_name,
2428 session->kernel_session);
2429 if (kchan == NULL) {
2430 /* This should not happen... */
2431 ret = LTTCOMM_FATAL;
2432 goto error;
2433 }
2434
2435 switch (event_type) {
2436 case LTTNG_KERNEL_SYSCALL:
2437 ret = event_kernel_enable_all_syscalls(session->kernel_session,
2438 kchan, kernel_tracer_fd);
2439 break;
2440 case LTTNG_KERNEL_TRACEPOINT:
2441 /*
2442 * This call enables all LTTNG_KERNEL_TRACEPOINT events as well as the
2443 * events already registered to the channel.
2444 */
2445 ret = event_kernel_enable_all_tracepoints(session->kernel_session,
2446 kchan, kernel_tracer_fd);
2447 break;
2448 case LTTNG_KERNEL_ALL:
2449 /* Enable syscalls and tracepoints */
2450 ret = event_kernel_enable_all(session->kernel_session,
2451 kchan, kernel_tracer_fd);
2452 break;
2453 default:
2454 ret = LTTCOMM_KERN_ENABLE_FAIL;
2455 goto error;
2456 }
2457 if (ret != LTTCOMM_OK) {
2458 goto error;
2459 }
2460
2461 kernel_wait_quiescent(kernel_tracer_fd);
2462 break;
2463 default:
2464 /* TODO: Userspace tracing */
2465 ret = LTTCOMM_NOT_IMPLEMENTED;
2466 goto error;
2467 }
2468
2469 ret = LTTCOMM_OK;
2470
2471error:
2472 return ret;
2473}
2474
2475/*
2476 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2477 */
2478static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
2479{
2480 int ret;
2481 ssize_t nb_events = 0;
2482
2483 switch (domain) {
2484 case LTTNG_DOMAIN_KERNEL:
2485 nb_events = kernel_list_events(kernel_tracer_fd, events);
2486 if (nb_events < 0) {
2487 ret = LTTCOMM_KERN_LIST_FAIL;
2488 goto error;
2489 }
2490 break;
2491 case LTTNG_DOMAIN_UST:
2492 nb_events = ust_app_list_events(events);
2493 if (nb_events < 0) {
2494 ret = LTTCOMM_UST_LIST_FAIL;
2495 goto error;
2496 }
2497 break;
2498 default:
2499 ret = LTTCOMM_NOT_IMPLEMENTED;
2500 goto error;
2501 }
2502
2503 return nb_events;
2504
2505error:
2506 /* Return a negative value to differentiate an error code from a valid count */
2507 return -ret;
2508}
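
/*
 * A minimal sketch of how the negative-return convention above is meant to
 * be consumed: a negative value is a negated LTTCOMM_* code, anything else
 * is an element count (the same pattern process_client_msg() uses further
 * down for LTTNG_LIST_TRACEPOINTS).
 */
static int example_handle_list_result(ssize_t nb_events,
		struct lttng_event *events)
{
	if (nb_events < 0) {
		/* Flip the sign back to get the positive LTTCOMM_* code */
		return (int) -nb_events;
	}

	DBG("Listed %zd tracepoints", nb_events);
	free(events);
	return LTTCOMM_OK;
}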
2509
2510/*
2511 * Command LTTNG_START_TRACE processed by the client thread.
2512 */
2513static int cmd_start_trace(struct ltt_session *session)
2514{
2515 int ret;
2516 struct ltt_kernel_session *ksession;
2517 struct ltt_ust_session *usess;
2518
2519 /* Short cut */
2520 ksession = session->kernel_session;
2521 usess = session->ust_session;
2522
2523 if (session->enabled)
2524 return LTTCOMM_UST_START_FAIL;
2525 session->enabled = 1;
2526
2527 /* Kernel tracing */
2528 if (ksession != NULL) {
2529 struct ltt_kernel_channel *kchan;
2530
2531 /* Open kernel metadata */
2532 if (ksession->metadata == NULL) {
2533 ret = kernel_open_metadata(ksession, ksession->trace_path);
2534 if (ret < 0) {
2535 ret = LTTCOMM_KERN_META_FAIL;
2536 goto error;
2537 }
2538 }
2539
2540 /* Open kernel metadata stream */
2541 if (ksession->metadata_stream_fd == 0) {
2542 ret = kernel_open_metadata_stream(ksession);
2543 if (ret < 0) {
2544 ERR("Kernel create metadata stream failed");
2545 ret = LTTCOMM_KERN_STREAM_FAIL;
2546 goto error;
2547 }
2548 }
2549
2550 /* For each channel */
2551 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
2552 if (kchan->stream_count == 0) {
2553 ret = kernel_open_channel_stream(kchan);
2554 if (ret < 0) {
2555 ret = LTTCOMM_KERN_STREAM_FAIL;
2556 goto error;
2557 }
2558 /* Update the stream global counter */
2559 ksession->stream_count_global += ret;
2560 }
2561 }
2562
2563 /* Setup kernel consumer socket and send fds to it */
2564 ret = init_kernel_tracing(ksession);
2565 if (ret < 0) {
2566 ret = LTTCOMM_KERN_START_FAIL;
2567 goto error;
2568 }
2569
2570 /* This starts the kernel tracing */
2571 ret = kernel_start_session(ksession);
2572 if (ret < 0) {
2573 ret = LTTCOMM_KERN_START_FAIL;
2574 goto error;
2575 }
2576
2577 /* Quiescent wait after starting trace */
2578 kernel_wait_quiescent(kernel_tracer_fd);
2579 }
2580
2581 /* Flag session that trace should start automatically */
2582 if (usess) {
2583 usess->start_trace = 1;
2584
2585 ret = ust_app_start_trace_all(usess);
2586 if (ret < 0) {
2587 ret = LTTCOMM_UST_START_FAIL;
2588 goto error;
2589 }
2590 }
2591
2592 ret = LTTCOMM_OK;
2593
2594error:
2595 return ret;
2596}
2597
2598/*
2599 * Command LTTNG_STOP_TRACE processed by the client thread.
2600 */
2601static int cmd_stop_trace(struct ltt_session *session)
2602{
2603 int ret;
2604 struct ltt_kernel_channel *kchan;
2605 struct ltt_kernel_session *ksession;
2606 struct ltt_ust_session *usess;
2607
2608 /* Short cut */
2609 ksession = session->kernel_session;
2610 usess = session->ust_session;
2611
2612 if (!session->enabled)
2613 return LTTCOMM_UST_START_FAIL;
2614 session->enabled = 0;
2615
2616 /* Kernel tracer */
2617 if (ksession != NULL) {
2618 DBG("Stop kernel tracing");
2619
2620 /* Flush all buffers before stopping */
2621 ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
2622 if (ret < 0) {
2623 ERR("Kernel metadata flush failed");
2624 }
2625
2626 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
2627 ret = kernel_flush_buffer(kchan);
2628 if (ret < 0) {
2629 ERR("Kernel flush buffer error");
2630 }
2631 }
2632
2633 ret = kernel_stop_session(ksession);
2634 if (ret < 0) {
2635 ret = LTTCOMM_KERN_STOP_FAIL;
2636 goto error;
2637 }
2638
2639 kernel_wait_quiescent(kernel_tracer_fd);
2640 }
2641
2642 /* Flag session so that tracing no longer starts automatically */
2643 if (usess) {
2644 usess->start_trace = 0;
2645
2646 ret = ust_app_stop_trace_all(usess);
2647 if (ret < 0) {
2648 ret = LTTCOMM_UST_START_FAIL;
2649 goto error;
2650 }
2651 }
2652
2653 ret = LTTCOMM_OK;
2654
2655error:
2656 return ret;
2657}
2658
2659/*
2660 * Command LTTNG_CREATE_SESSION processed by the client thread.
2661 */
2662static int cmd_create_session(char *name, char *path)
2663{
2664 int ret;
2665
2666 ret = session_create(name, path);
2667 if (ret != LTTCOMM_OK) {
2668 goto error;
2669 }
2670
2671 ret = LTTCOMM_OK;
2672
2673error:
2674 return ret;
2675}
2676
2677/*
2678 * Command LTTNG_DESTROY_SESSION processed by the client thread.
2679 */
2680static int cmd_destroy_session(struct ltt_session *session, char *name)
2681{
2682 int ret;
2683
2684 /* Clean kernel session teardown */
2685 teardown_kernel_session(session);
2686 /* UST session teardown */
2687 teardown_ust_session(session);
2688
2689 /*
2690 * Must notify the kernel thread here to update its poll set in order
2691 * to remove the FDs of the channel(s) just destroyed.
2692 */
2693 ret = notify_thread_pipe(kernel_poll_pipe[1]);
2694 if (ret < 0) {
2695 perror("write kernel poll pipe");
2696 }
2697
2698 ret = session_destroy(session);
2699
2700 return ret;
2701}
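
/*
 * The wake-up above relies on the self-pipe idiom: writing a single byte
 * to kernel_poll_pipe[1] makes the kernel thread's blocking poll return so
 * it can rescan its FD set. A minimal sketch of that idiom follows; the
 * real notify_thread_pipe() lives elsewhere in the tree and may handle
 * errors differently.
 */
static int example_notify_pipe(int wfd)
{
	ssize_t ret;

	do {
		/* The byte value is irrelevant; only the wake-up matters. */
		ret = write(wfd, "!", 1);
	} while (ret < 0 && errno == EINTR);

	return (ret == 1) ? 0 : -1;
}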
2702
2703/*
2704 * Command LTTNG_CALIBRATE processed by the client thread.
2705 */
2706static int cmd_calibrate(int domain, struct lttng_calibrate *calibrate)
2707{
2708 int ret;
2709
2710 switch (domain) {
2711 case LTTNG_DOMAIN_KERNEL:
2712 {
2713 struct lttng_kernel_calibrate kcalibrate;
2714
2715 kcalibrate.type = calibrate->type;
2716 ret = kernel_calibrate(kernel_tracer_fd, &kcalibrate);
2717 if (ret < 0) {
2718 ret = LTTCOMM_KERN_ENABLE_FAIL;
2719 goto error;
2720 }
2721 break;
2722 }
2723 default:
2724 /* TODO: Userspace tracing */
2725 ret = LTTCOMM_NOT_IMPLEMENTED;
2726 goto error;
2727 }
2728
2729 ret = LTTCOMM_OK;
2730
2731error:
2732 return ret;
2733}
2734
2735/*
2736 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
2737 */
2738static int cmd_register_consumer(struct ltt_session *session, int domain,
2739 char *sock_path)
2740{
2741 int ret, sock;
2742
2743 switch (domain) {
2744 case LTTNG_DOMAIN_KERNEL:
2745 /* Can't register a consumer if there is already one */
2746 if (session->kernel_session->consumer_fd != 0) {
2747 ret = LTTCOMM_KERN_CONSUMER_FAIL;
2748 goto error;
2749 }
2750
2751 sock = lttcomm_connect_unix_sock(sock_path);
2752 if (sock < 0) {
2753 ret = LTTCOMM_CONNECT_FAIL;
2754 goto error;
2755 }
2756
2757 session->kernel_session->consumer_fd = sock;
2758 break;
2759 default:
2760 /* TODO: Userspace tracing */
2761 ret = LTTCOMM_NOT_IMPLEMENTED;
2762 goto error;
2763 }
2764
2765 ret = LTTCOMM_OK;
2766
2767error:
2768 return ret;
2769}
2770
2771/*
2772 * Command LTTNG_LIST_DOMAINS processed by the client thread.
2773 */
2774static ssize_t cmd_list_domains(struct ltt_session *session,
2775 struct lttng_domain **domains)
2776{
2777 int ret, index = 0;
2778 ssize_t nb_dom = 0;
2779
2780 if (session->kernel_session != NULL) {
2781 DBG3("Listing domains found kernel domain");
2782 nb_dom++;
2783 }
2784
2785 if (session->ust_session != NULL) {
2786 DBG3("Listing domains found UST global domain");
2787 nb_dom++;
2788 }
2789
2790 *domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
2791 if (*domains == NULL) {
2792 ret = -LTTCOMM_FATAL;
2793 goto error;
2794 }
2795
2796 if (session->kernel_session != NULL) {
2797 (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
2798 index++;
2799 }
2800
2801 if (session->ust_session != NULL) {
2802 (*domains)[index].type = LTTNG_DOMAIN_UST;
2803 index++;
2804 }
2805
2806 return nb_dom;
2807
2808error:
2809 return ret;
2810}
2811
2812/*
2813 * Command LTTNG_LIST_CHANNELS processed by the client thread.
2814 */
2815static ssize_t cmd_list_channels(int domain, struct ltt_session *session,
2816 struct lttng_channel **channels)
2817{
2818 int ret;
2819 ssize_t nb_chan = 0;
2820
2821 switch (domain) {
2822 case LTTNG_DOMAIN_KERNEL:
2823 if (session->kernel_session != NULL) {
2824 nb_chan = session->kernel_session->channel_count;
2825 }
2826 DBG3("Number of kernel channels %zd", nb_chan);
2827 break;
2828 case LTTNG_DOMAIN_UST:
2829 if (session->ust_session != NULL) {
2830 nb_chan = hashtable_get_count(
2831 session->ust_session->domain_global.channels);
2832 }
2833 DBG3("Number of UST global channels %zd", nb_chan);
2834 break;
2835 default:
2836 *channels = NULL;
2837 ret = -LTTCOMM_NOT_IMPLEMENTED;
2838 goto error;
2839 }
2840
2841 if (nb_chan > 0) {
2842 *channels = zmalloc(nb_chan * sizeof(struct lttng_channel));
2843 if (*channels == NULL) {
2844 ret = -LTTCOMM_FATAL;
2845 goto error;
2846 }
2847
2848 list_lttng_channels(domain, session, *channels);
2849 } else {
2850 *channels = NULL;
2851 }
2852
2853 return nb_chan;
2854
2855error:
2856 return ret;
2857}
2858
2859/*
2860 * Command LTTNG_LIST_EVENTS processed by the client thread.
2861 */
2862static ssize_t cmd_list_events(int domain, struct ltt_session *session,
2863 char *channel_name, struct lttng_event **events)
2864{
2865 int ret = 0;
2866 ssize_t nb_event = 0;
2867
2868 switch (domain) {
2869 case LTTNG_DOMAIN_KERNEL:
2870 if (session->kernel_session != NULL) {
2871 nb_event = list_lttng_kernel_events(channel_name,
2872 session->kernel_session, events);
2873 }
2874 break;
2875 case LTTNG_DOMAIN_UST:
2876 {
2877 if (session->ust_session != NULL) {
2878 nb_event = list_lttng_ust_global_events(channel_name,
2879 &session->ust_session->domain_global, events);
2880 }
2881 break;
2882 }
2883 default:
2884 ret = -LTTCOMM_NOT_IMPLEMENTED;
2885 goto error;
2886 }
2887
2888 ret = nb_event;
2889
2890error:
2891 return ret;
2892}
2893
2894/*
2895 * Process the command requested by the lttng client within the command
2896 * context structure. This function makes sure that the return structure (llm)
2897 * is set and ready for transmission before returning.
2898 *
2899 * Return any error encountered or 0 for success.
2900 */
2901static int process_client_msg(struct command_ctx *cmd_ctx)
2902{
2903 int ret = LTTCOMM_OK;
2904 int need_tracing_session = 1;
2905
2906 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2907
2908 /*
2909 * Check for commands that don't need to allocate a returned payload. We do
2910 * this here so we don't have to make the no-payload setup call for each
2911 * command.
2912 */
2913 switch(cmd_ctx->lsm->cmd_type) {
2914 case LTTNG_LIST_SESSIONS:
2915 case LTTNG_LIST_TRACEPOINTS:
2916 case LTTNG_LIST_DOMAINS:
2917 case LTTNG_LIST_CHANNELS:
2918 case LTTNG_LIST_EVENTS:
2919 break;
2920 default:
2921 /* Setup lttng message with no payload */
2922 ret = setup_lttng_msg(cmd_ctx, 0);
2923 if (ret < 0) {
2924 /* This label does not try to unlock the session */
2925 goto init_setup_error;
2926 }
2927 }
2928
2929 /* Commands that DO NOT need a session. */
2930 switch (cmd_ctx->lsm->cmd_type) {
2931 case LTTNG_CALIBRATE:
2932 case LTTNG_CREATE_SESSION:
2933 case LTTNG_LIST_SESSIONS:
2934 case LTTNG_LIST_TRACEPOINTS:
2935 need_tracing_session = 0;
2936 break;
2937 default:
2938 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2939 session_lock_list();
2940 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2941 session_unlock_list();
2942 if (cmd_ctx->session == NULL) {
2943 if (cmd_ctx->lsm->session.name != NULL) {
2944 ret = LTTCOMM_SESS_NOT_FOUND;
2945 } else {
2946 /* If no session name specified */
2947 ret = LTTCOMM_SELECT_SESS;
2948 }
2949 goto error;
2950 } else {
2951 /* Acquire lock for the session */
2952 session_lock(cmd_ctx->session);
2953 }
2954 break;
2955 }
2956
2957 /*
2958 * Check domain type for specific "pre-action".
2959 */
2960 switch (cmd_ctx->lsm->domain.type) {
2961 case LTTNG_DOMAIN_KERNEL:
2962 /* Kernel tracer check */
2963 if (kernel_tracer_fd == 0) {
2964 /* Basically, load kernel tracer modules */
2965 init_kernel_tracer();
2966 if (kernel_tracer_fd == 0) {
2967 ret = LTTCOMM_KERN_NA;
2968 goto error;
2969 }
2970 }
2971
2972 /* Need a session for kernel command */
2973 if (need_tracing_session) {
2974 if (cmd_ctx->session->kernel_session == NULL) {
2975 ret = create_kernel_session(cmd_ctx->session);
2976 if (ret < 0) {
2977 ret = LTTCOMM_KERN_SESS_FAIL;
2978 goto error;
2979 }
2980 }
2981
2982 /* Start the kernel consumer daemon */
2983 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2984 if (kconsumer_data.pid == 0 &&
2985 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2986 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2987 ret = start_consumerd(&kconsumer_data);
2988 if (ret < 0) {
2989 ret = LTTCOMM_KERN_CONSUMER_FAIL;
2990 goto error;
2991 }
2992 }
2993 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2994 }
2995 break;
2996 case LTTNG_DOMAIN_UST:
2997 {
2998 if (need_tracing_session) {
2999 if (cmd_ctx->session->ust_session == NULL) {
3000 ret = create_ust_session(cmd_ctx->session,
3001 &cmd_ctx->lsm->domain);
3002 if (ret != LTTCOMM_OK) {
3003 goto error;
3004 }
3005 }
3006 /* Start the UST consumer daemon */
3007 pthread_mutex_lock(&ustconsumer_data.pid_mutex);
3008 if (ustconsumer_data.pid == 0 &&
3009 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3010 pthread_mutex_unlock(&ustconsumer_data.pid_mutex);
3011 ret = start_consumerd(&ustconsumer_data);
3012 if (ret < 0) {
3013 ret = LTTCOMM_KERN_CONSUMER_FAIL;
3014 goto error;
3015 }
3016
3017 ust_consumer_fd = ustconsumer_data.cmd_sock;
3018 }
3019 pthread_mutex_unlock(&ustconsumer_data.pid_mutex);
3020 }
3021 break;
3022 }
3023 default:
3024 break;
3025 }
3026
3027 /* Process by command type */
3028 switch (cmd_ctx->lsm->cmd_type) {
3029 case LTTNG_ADD_CONTEXT:
3030 {
3031 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3032 cmd_ctx->lsm->u.context.channel_name,
3033 cmd_ctx->lsm->u.context.event_name,
3034 &cmd_ctx->lsm->u.context.ctx);
3035 break;
3036 }
3037 case LTTNG_DISABLE_CHANNEL:
3038 {
3039 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3040 cmd_ctx->lsm->u.disable.channel_name);
3041 break;
3042 }
3043 case LTTNG_DISABLE_EVENT:
3044 {
3045 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3046 cmd_ctx->lsm->u.disable.channel_name,
3047 cmd_ctx->lsm->u.disable.name);
3049 break;
3050 }
3051 case LTTNG_DISABLE_ALL_EVENT:
3052 {
3053 DBG("Disabling all events");
3054
3055 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3056 cmd_ctx->lsm->u.disable.channel_name);
3057 break;
3058 }
3059 case LTTNG_ENABLE_CHANNEL:
3060 {
3061 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
3062 &cmd_ctx->lsm->u.channel.chan);
3063 break;
3064 }
3065 case LTTNG_ENABLE_EVENT:
3066 {
3067 ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3068 cmd_ctx->lsm->u.enable.channel_name,
3069 &cmd_ctx->lsm->u.enable.event);
3070 break;
3071 }
3072 case LTTNG_ENABLE_ALL_EVENT:
3073 {
3074 DBG("Enabling all events");
3075
3076 ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3077 cmd_ctx->lsm->u.enable.channel_name,
3078 cmd_ctx->lsm->u.enable.event.type);
3079 break;
3080 }
3081 case LTTNG_LIST_TRACEPOINTS:
3082 {
3083 struct lttng_event *events;
3084 ssize_t nb_events;
3085
3086 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3087 if (nb_events < 0) {
3088 ret = -nb_events;
3089 goto error;
3090 }
3091
3092 /*
3093 * Setup lttng message with payload size set to the event list size in
3094 * bytes and then copy list into the llm payload.
3095 */
3096 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3097 if (ret < 0) {
3098 free(events);
3099 goto setup_error;
3100 }
3101
3102 /* Copy event list into message payload */
3103 memcpy(cmd_ctx->llm->payload, events,
3104 sizeof(struct lttng_event) * nb_events);
3105
3106 free(events);
3107
3108 ret = LTTCOMM_OK;
3109 break;
3110 }
3111 case LTTNG_START_TRACE:
3112 {
3113 ret = cmd_start_trace(cmd_ctx->session);
3114 break;
3115 }
3116 case LTTNG_STOP_TRACE:
3117 {
3118 ret = cmd_stop_trace(cmd_ctx->session);
3119 break;
3120 }
3121 case LTTNG_CREATE_SESSION:
3122 {
3123 ret = cmd_create_session(cmd_ctx->lsm->session.name,
3124 cmd_ctx->lsm->session.path);
3125 break;
3126 }
3127 case LTTNG_DESTROY_SESSION:
3128 {
3129 ret = cmd_destroy_session(cmd_ctx->session,
3130 cmd_ctx->lsm->session.name);
3131 break;
3132 }
3133 case LTTNG_LIST_DOMAINS:
3134 {
3135 ssize_t nb_dom;
3136 struct lttng_domain *domains;
3137
3138 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3139 if (nb_dom < 0) {
3140 ret = -nb_dom;
3141 goto error;
3142 }
3143
3144 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3145 if (ret < 0) {
3146 goto setup_error;
3147 }
3148
3149 /* Copy domain list into message payload */
3150 memcpy(cmd_ctx->llm->payload, domains,
3151 nb_dom * sizeof(struct lttng_domain));
3152
3153 free(domains);
3154
3155 ret = LTTCOMM_OK;
3156 break;
3157 }
3158 case LTTNG_LIST_CHANNELS:
3159 {
3160 ssize_t nb_chan;
3161 struct lttng_channel *channels;
3162
3163 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3164 cmd_ctx->session, &channels);
3165 if (nb_chan < 0) {
3166 ret = -nb_chan;
3167 goto error;
3168 }
3169
3170 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3171 if (ret < 0) {
3172 goto setup_error;
3173 }
3174
3175 /* Copy channel list into message payload */
3176 memcpy(cmd_ctx->llm->payload, channels,
3177 nb_chan * sizeof(struct lttng_channel));
3178
3179 free(channels);
3180
3181 ret = LTTCOMM_OK;
3182 break;
3183 }
3184 case LTTNG_LIST_EVENTS:
3185 {
3186 ssize_t nb_event;
3187 struct lttng_event *events = NULL;
3188
3189 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3190 cmd_ctx->lsm->u.list.channel_name, &events);
3191 if (nb_event < 0) {
3192 ret = -nb_event;
3193 goto error;
3194 }
3195
3196 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3197 if (ret < 0) {
3198 goto setup_error;
3199 }
3200
3201 /* Copy event list into message payload */
3202 memcpy(cmd_ctx->llm->payload, events,
3203 nb_event * sizeof(struct lttng_event));
3204
3205 free(events);
3206
3207 ret = LTTCOMM_OK;
3208 break;
3209 }
3210 case LTTNG_LIST_SESSIONS:
3211 {
3212 session_lock_list();
3213
3214 if (session_list_ptr->count == 0) {
3215 ret = LTTCOMM_NO_SESSION;
3216 session_unlock_list();
3217 goto error;
3218 }
3219
3220 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) *
3221 session_list_ptr->count);
3222 if (ret < 0) {
3223 session_unlock_list();
3224 goto setup_error;
3225 }
3226
3227 /* Fill the session array */
3228 list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload));
3229
3230 session_unlock_list();
3231
3232 ret = LTTCOMM_OK;
3233 break;
3234 }
3235 case LTTNG_CALIBRATE:
3236 {
3237 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3238 &cmd_ctx->lsm->u.calibrate);
3239 break;
3240 }
3241 case LTTNG_REGISTER_CONSUMER:
3242 {
3243 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3244 cmd_ctx->lsm->u.reg.path);
3245 break;
3246 }
3247 default:
3248 ret = LTTCOMM_UND;
3249 break;
3250 }
3251
3252error:
3253 if (cmd_ctx->llm == NULL) {
3254 DBG("Missing llm structure. Allocating one.");
3255 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3256 goto setup_error;
3257 }
3258 }
3259 /* Set return code */
3260 cmd_ctx->llm->ret_code = ret;
3261setup_error:
3262 if (cmd_ctx->session) {
3263 session_unlock(cmd_ctx->session);
3264 }
3265init_setup_error:
3266 return ret;
3267}
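
/*
 * Client-side view of the exchange handled above: a fixed-size
 * lttcomm_session_msg request followed by a reply whose header carries
 * ret_code and whose payload is command specific. A minimal sketch,
 * assuming lttcomm_send_unix_sock() exists alongside the connect/recv
 * helpers used in this file, that the reply header type is struct
 * lttcomm_lttng_msg, and that session.name is a fixed-size array.
 */
static int example_client_start_trace(const char *sock_path,
		const char *session_name)
{
	int sock, ret;
	struct lttcomm_session_msg lsm;
	struct lttcomm_lttng_msg llm;

	memset(&lsm, 0, sizeof(lsm));
	lsm.cmd_type = LTTNG_START_TRACE;
	snprintf(lsm.session.name, sizeof(lsm.session.name), "%s", session_name);

	sock = lttcomm_connect_unix_sock(sock_path);
	if (sock < 0) {
		return -1;
	}

	ret = lttcomm_send_unix_sock(sock, &lsm, sizeof(lsm));
	if (ret < 0) {
		goto end;
	}

	/* Read back at least the fixed-size reply header. */
	ret = lttcomm_recv_unix_sock(sock, &llm, sizeof(llm));
	if (ret <= 0) {
		ret = -1;
		goto end;
	}

	ret = llm.ret_code;	/* LTTCOMM_OK on success */
end:
	close(sock);
	return ret;
}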
3268
3269/*
3270 * This thread manages all client requests using the unix client socket for
3271 * communication.
3272 */
3273static void *thread_manage_clients(void *data)
3274{
3275 int sock = 0, ret, i, pollfd;
3276 uint32_t revents, nb_fd;
3277 struct command_ctx *cmd_ctx = NULL;
3278 struct lttng_poll_event events;
3279
3280 DBG("[thread] Manage client started");
3281
3282 rcu_register_thread();
3283
3284 ret = lttcomm_listen_unix_sock(client_sock);
3285 if (ret < 0) {
3286 goto error;
3287 }
3288
3289 /*
3290 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3291 * more will be added to this poll set.
3292 */
3293 ret = create_thread_poll_set(&events, 2);
3294 if (ret < 0) {
3295 goto error;
3296 }
3297
3298 /* Add the client socket to the poll set */
3299 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3300 if (ret < 0) {
3301 goto error;
3302 }
3303
3304 /*
3305 * Notify the parent pid that we are ready to accept commands on the client side.
3306 */
3307 if (opt_sig_parent) {
3308 kill(ppid, SIGCHLD);
3309 }
3310
3311 while (1) {
3312 DBG("Accepting client command ...");
3313
3314 nb_fd = LTTNG_POLL_GETNB(&events);
3315
3316 /* Infinite blocking call, waiting for transmission */
3317 ret = lttng_poll_wait(&events, -1);
3318 if (ret < 0) {
3319 goto error;
3320 }
3321
3322 for (i = 0; i < nb_fd; i++) {
3323 /* Fetch once the poll data */
3324 revents = LTTNG_POLL_GETEV(&events, i);
3325 pollfd = LTTNG_POLL_GETFD(&events, i);
3326
3327 /* Thread quit pipe has been closed. Killing thread. */
3328 ret = check_thread_quit_pipe(pollfd, revents);
3329 if (ret) {
3330 goto error;
3331 }
3332
3333 /* Event on the registration socket */
3334 if (pollfd == client_sock) {
3335 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3336 ERR("Client socket poll error");
3337 goto error;
3338 }
3339 }
3340 }
3341
3342 DBG("Wait for client connection");
3343
3344 sock = lttcomm_accept_unix_sock(client_sock);
3345 if (sock < 0) {
3346 goto error;
3347 }
3348
3349 /* Allocate context command to process the client request */
3350 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3351 if (cmd_ctx == NULL) {
3352 perror("zmalloc cmd_ctx");
3353 goto error;
3354 }
3355
3356 /* Allocate data buffer for reception */
3357 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3358 if (cmd_ctx->lsm == NULL) {
3359 perror("zmalloc cmd_ctx->lsm");
3360 goto error;
3361 }
3362
3363 cmd_ctx->llm = NULL;
3364 cmd_ctx->session = NULL;
3365
3366 /*
3367 * Data is received from the lttng client. The struct
3368 * lttcomm_session_msg (lsm) contains the command and data request of
3369 * the client.
3370 */
3371 DBG("Receiving data from client ...");
3372 ret = lttcomm_recv_unix_sock(sock, cmd_ctx->lsm,
3373 sizeof(struct lttcomm_session_msg));
3374 if (ret <= 0) {
3375 DBG("Nothing recv() from client... continuing");
3376 close(sock);
3377 free(cmd_ctx);
3378 continue;
3379 }
3380
3381 // TODO: Validate cmd_ctx, including sanity checks for
3382 // security purposes.
3383
3384 rcu_thread_online();
3385 /*
3386 * This function dispatches the work to the kernel or userspace tracer
3387 * libs and fills the lttcomm_lttng_msg data structure with all the
3388 * information needed by the client. The command context struct contains
3389 * everything this function may need.
3390 */
3391 ret = process_client_msg(cmd_ctx);
3392 rcu_thread_offline();
3393 if (ret < 0) {
3394 /*
3395 * TODO: Inform client somehow of the fatal error. At
3396 * this point, ret < 0 means that a zmalloc failed
3397 * (ENOMEM). Error detected but still accept command.
3398 */
3399 clean_command_ctx(&cmd_ctx);
3400 continue;
3401 }
3402
3403 DBG("Sending response (size: %d, retcode: %s)",
3404 cmd_ctx->lttng_msg_size,
3405 lttng_strerror(-cmd_ctx->llm->ret_code));
3406 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3407 if (ret < 0) {
3408 ERR("Failed to send data back to client");
3409 }
3410
3411 clean_command_ctx(&cmd_ctx);
3412
3413 /* End of transmission */
3414 close(sock);
3415 }
3416
3417error:
3418 DBG("Client thread dying");
3419 unlink(client_unix_sock_path);
3420 close(client_sock);
3421 close(sock);
3422
3423 lttng_poll_clean(&events);
3424 clean_command_ctx(&cmd_ctx);
3425
3426 rcu_unregister_thread();
3427 return NULL;
3428}
3429
3430
3431/*
3432 * usage function on stderr
3433 */
3434static void usage(void)
3435{
3436 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3437 fprintf(stderr, " -h, --help Display this usage.\n");
3438 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3439 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3440 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3441 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3442 fprintf(stderr, " --ustconsumerd-err-sock PATH Specify path for the UST consumer error socket\n");
3443 fprintf(stderr, " --ustconsumerd-cmd-sock PATH Specify path for the UST consumer command socket\n");
3444 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3445 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3446 fprintf(stderr, " -V, --version Show version number.\n");
3447 fprintf(stderr, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
3448 fprintf(stderr, " -q, --quiet No output at all.\n");
3449 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3450 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3451}
3452
3453/*
3454 * daemon argument parsing
3455 */
3456static int parse_args(int argc, char **argv)
3457{
3458 int c;
3459
3460 static struct option long_options[] = {
3461 { "client-sock", 1, 0, 'c' },
3462 { "apps-sock", 1, 0, 'a' },
3463 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3464 { "kconsumerd-err-sock", 1, 0, 'E' },
3465 { "ustconsumerd-cmd-sock", 1, 0, 'D' },
3466 { "ustconsumerd-err-sock", 1, 0, 'F' },
3467 { "daemonize", 0, 0, 'd' },
3468 { "sig-parent", 0, 0, 'S' },
3469 { "help", 0, 0, 'h' },
3470 { "group", 1, 0, 'g' },
3471 { "version", 0, 0, 'V' },
3472 { "quiet", 0, 0, 'q' },
3473 { "verbose", 0, 0, 'v' },
3474 { "verbose-consumer", 0, 0, 'Z' },
3475 { NULL, 0, 0, 0 }
3476 };
3477
3478 while (1) {
3479 int option_index = 0;
3480 c = getopt_long(argc, argv, "dhqvVS" "a:c:g:s:C:E:D:F:Z",
3481 long_options, &option_index);
3482 if (c == -1) {
3483 break;
3484 }
3485
3486 switch (c) {
3487 case 0:
3488 fprintf(stderr, "option %s", long_options[option_index].name);
3489 if (optarg) {
3490 fprintf(stderr, " with arg %s\n", optarg);
3491 }
3492 break;
3493 case 'c':
3494 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
3495 break;
3496 case 'a':
3497 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
3498 break;
3499 case 'd':
3500 opt_daemon = 1;
3501 break;
3502 case 'g':
3503 opt_tracing_group = strdup(optarg);
3504 break;
3505 case 'h':
3506 usage();
3507 exit(EXIT_FAILURE);
3508 case 'V':
3509 fprintf(stdout, "%s\n", VERSION);
3510 exit(EXIT_SUCCESS);
3511 case 'S':
3512 opt_sig_parent = 1;
3513 break;
3514 case 'E':
3515 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3516 break;
3517 case 'C':
3518 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3519 break;
3520 case 'F':
3521 snprintf(ustconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3522 break;
3523 case 'D':
3524 snprintf(ustconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3525 break;
3526 case 'q':
3527 opt_quiet = 1;
3528 break;
3529 case 'v':
3530 /* Verbose level can increase using multiple -v */
3531 opt_verbose += 1;
3532 break;
3533 case 'Z':
3534 opt_verbose_consumer += 1;
3535 break;
3536 default:
3537 /* Unknown option or other error.
3538 * Error is printed by getopt, just return */
3539 return -1;
3540 }
3541 }
3542
3543 return 0;
3544}
3545
3546/*
3547 * Creates the two sockets needed by the daemon.
3548 * apps_sock - The communication socket for all UST apps.
3549 * client_sock - The communication socket for the cli tool (lttng).
3550 */
3551static int init_daemon_socket(void)
3552{
3553 int ret = 0;
3554 mode_t old_umask;
3555
3556 old_umask = umask(0);
3557
3558 /* Create client tool unix socket */
3559 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
3560 if (client_sock < 0) {
3561 ERR("Create unix sock failed: %s", client_unix_sock_path);
3562 ret = -1;
3563 goto end;
3564 }
3565
3566 /* File permission MUST be 660 */
3567 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3568 if (ret < 0) {
3569 ERR("Set file permissions failed: %s", client_unix_sock_path);
3570 perror("chmod");
3571 goto end;
3572 }
3573
3574 /* Create the application unix socket */
3575 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
3576 if (apps_sock < 0) {
3577 ERR("Create unix sock failed: %s", apps_unix_sock_path);
3578 ret = -1;
3579 goto end;
3580 }
3581
3582 /* File permission MUST be 666 */
3583 ret = chmod(apps_unix_sock_path,
3584 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
3585 if (ret < 0) {
3586 ERR("Set file permissions failed: %s", apps_unix_sock_path);
3587 perror("chmod");
3588 goto end;
3589 }
3590
3591end:
3592 umask(old_umask);
3593 return ret;
3594}
3595
3596/*
3597 * Check if the global socket is available, and if a daemon is answering at the
3598 * other side. If so, an error is returned.
3599 */
3600static int check_existing_daemon(void)
3601{
3602 if (access(client_unix_sock_path, F_OK) < 0 &&
3603 access(apps_unix_sock_path, F_OK) < 0) {
3604 return 0;
3605 }
3606
3607 /* Is there anybody out there ? */
3608 if (lttng_session_daemon_alive()) {
3609 return -EEXIST;
3610 } else {
3611 return 0;
3612 }
3613}
3614
3615/*
3616 * Set the tracing group gid onto the client socket.
3617 *
3618 * Race window between mkdir and chown is OK because we are going from more
3619 * permissive (root.root) to less permissive (root.tracing).
3620 */
3621static int set_permissions(void)
3622{
3623 int ret;
3624 gid_t gid;
3625
3626 gid = allowed_group();
3627 if (gid < 0) {
3628 if (is_root) {
3629 WARN("No tracing group detected");
3630 ret = 0;
3631 } else {
3632 ERR("Missing tracing group. Aborting execution.");
3633 ret = -1;
3634 }
3635 goto end;
3636 }
3637
3638 /* Set lttng run dir */
3639 ret = chown(LTTNG_RUNDIR, 0, gid);
3640 if (ret < 0) {
3641 ERR("Unable to set group on " LTTNG_RUNDIR);
3642 perror("chown");
3643 }
3644
3645 /* lttng client socket path */
3646 ret = chown(client_unix_sock_path, 0, gid);
3647 if (ret < 0) {
3648 ERR("Unable to set group on %s", client_unix_sock_path);
3649 perror("chown");
3650 }
3651
3652 /* kconsumer error socket path */
3653 ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
3654 if (ret < 0) {
3655 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
3656 perror("chown");
3657 }
3658
3659 /* ustconsumer error socket path */
3660 ret = chown(ustconsumer_data.err_unix_sock_path, 0, gid);
3661 if (ret < 0) {
3662 ERR("Unable to set group on %s", ustconsumer_data.err_unix_sock_path);
3663 perror("chown");
3664 }
3665
3666 DBG("All permissions are set");
3667
3668end:
3669 return ret;
3670}
3671
3672/*
3673 * Create the pipe used to wake up the kernel thread.
3674 */
3675static int create_kernel_poll_pipe(void)
3676{
3677 return pipe2(kernel_poll_pipe, O_CLOEXEC);
3678}
3679
3680/*
3681 * Create the application command pipe to wake thread_manage_apps.
3682 */
3683static int create_apps_cmd_pipe(void)
3684{
3685 return pipe2(apps_cmd_pipe, O_CLOEXEC);
3686}
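
/*
 * pipe2(O_CLOEXEC) is Linux specific. On systems without it, the same
 * effect can be approximated (with a small fork/exec race window) by
 * pipe() followed by fcntl(FD_CLOEXEC). Minimal sketch of that fallback:
 */
static int example_pipe_cloexec_fallback(int fds[2])
{
	int i, ret;

	ret = pipe(fds);
	if (ret < 0) {
		return ret;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(fds[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}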
3687
3688/*
3689 * Create the lttng run directory needed for all global sockets and pipe.
3690 */
3691static int create_lttng_rundir(void)
3692{
3693 int ret;
3694
3695 ret = mkdir(LTTNG_RUNDIR, S_IRWXU | S_IRWXG );
3696 if (ret < 0) {
3697 if (errno != EEXIST) {
3698 ERR("Unable to create " LTTNG_RUNDIR);
3699 goto error;
3700 } else {
3701 ret = 0;
3702 }
3703 }
3704
3705error:
3706 return ret;
3707}
3708
3709/*
3710 * Set up the sockets and directory needed for the consumer daemon (kernel or
3711 * UST) to communicate with the session daemon.
3712 */
3713static int set_consumer_sockets(struct consumer_data *consumer_data)
3714{
3715 int ret;
3716 const char *path = consumer_data->type == LTTNG_CONSUMER_KERNEL ?
3717 KCONSUMERD_PATH : USTCONSUMERD_PATH;
3718
3719 if (strlen(consumer_data->err_unix_sock_path) == 0) {
3720 snprintf(consumer_data->err_unix_sock_path, PATH_MAX,
3721 consumer_data->type == LTTNG_CONSUMER_KERNEL ?
3722 KCONSUMERD_ERR_SOCK_PATH :
3723 USTCONSUMERD_ERR_SOCK_PATH);
3724 }
3725
3726 if (strlen(consumer_data->cmd_unix_sock_path) == 0) {
3727 snprintf(consumer_data->cmd_unix_sock_path, PATH_MAX,
3728 consumer_data->type == LTTNG_CONSUMER_KERNEL ?
3729 KCONSUMERD_CMD_SOCK_PATH :
3730 USTCONSUMERD_CMD_SOCK_PATH);
3731 }
3732
3733 ret = mkdir(path, S_IRWXU | S_IRWXG);
3734 if (ret < 0) {
3735 if (errno != EEXIST) {
3736 ERR("Failed to create %s", path);
3737 goto error;
3738 }
3739 ret = 0;
3740 }
3741
3742 /* Create the consumer daemon error unix socket */
3743 consumer_data->err_sock =
3744 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
3745 if (consumer_data->err_sock < 0) {
3746 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
3747 ret = -1;
3748 goto error;
3749 }
3750
3751 /* File permission MUST be 660 */
3752 ret = chmod(consumer_data->err_unix_sock_path,
3753 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3754 if (ret < 0) {
3755 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
3756 perror("chmod");
3757 goto error;
3758 }
3759
3760error:
3761 return ret;
3762}
3763
3764/*
3765 * Signal handler for the daemon
3766 *
3767 * Simply stop all worker threads, letting main() return gracefully after
3768 * joining all threads and calling cleanup().
3769 */
3770static void sighandler(int sig)
3771{
3772 switch (sig) {
3773 case SIGPIPE:
3774 DBG("SIGPIPE caught");
3775 return;
3776 case SIGINT:
3777 DBG("SIGINT caught");
3778 stop_threads();
3779 break;
3780 case SIGTERM:
3781 DBG("SIGTERM caught");
3782 stop_threads();
3783 break;
3784 default:
3785 break;
3786 }
3787}
3788
3789/*
3790 * Set up the signal handlers for:
3791 * SIGINT, SIGTERM, SIGPIPE
3792 */
3793static int set_signal_handler(void)
3794{
3795 int ret = 0;
3796 struct sigaction sa;
3797 sigset_t sigset;
3798
3799 if ((ret = sigemptyset(&sigset)) < 0) {
3800 perror("sigemptyset");
3801 return ret;
3802 }
3803
3804 sa.sa_handler = sighandler;
3805 sa.sa_mask = sigset;
3806 sa.sa_flags = 0;
3807 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
3808 perror("sigaction");
3809 return ret;
3810 }
3811
3812 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
3813 perror("sigaction");
3814 return ret;
3815 }
3816
3817 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
3818 perror("sigaction");
3819 return ret;
3820 }
3821
3822 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
3823
3824 return ret;
3825}
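
/*
 * Worker threads that must never run the handler above can block these
 * signals before entering their event loop, leaving delivery to the main
 * thread. A minimal sketch of that option (a design alternative, not what
 * this daemon currently does):
 */
static int example_block_term_signals(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGTERM);
	sigaddset(&set, SIGPIPE);

	return pthread_sigmask(SIG_BLOCK, &set, NULL);
}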
3826
3827/*
3828 * Raise the open files limit. This daemon can open a large number of file
3829 * descriptors in order to consume multiple kernel traces.
3830 */
3831static void set_ulimit(void)
3832{
3833 int ret;
3834 struct rlimit lim;
3835
3836 /* The kernel does not allow an infinite limit for open files */
3837 lim.rlim_cur = 65535;
3838 lim.rlim_max = 65535;
3839
3840 ret = setrlimit(RLIMIT_NOFILE, &lim);
3841 if (ret < 0) {
3842 perror("failed to set open files limit");
3843 }
3844}
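
/*
 * When debugging FD exhaustion it helps to log the limit actually in
 * effect after the call above; getrlimit() reports both the soft and hard
 * values. Minimal sketch:
 */
static void example_log_nofile_limit(void)
{
	struct rlimit lim;

	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
		DBG("RLIMIT_NOFILE soft %llu hard %llu",
				(unsigned long long) lim.rlim_cur,
				(unsigned long long) lim.rlim_max);
	}
}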
3845
3846/*
3847 * main
3848 */
3849int main(int argc, char **argv)
3850{
3851 int ret = 0;
3852 void *status;
3853 const char *home_path;
3854
3855 rcu_register_thread();
3856
3857 /* Create thread quit pipe */
3858 if ((ret = init_thread_quit_pipe()) < 0) {
3859 goto error;
3860 }
3861
3862 /* Parse arguments */
3863 progname = argv[0];
3864 if ((ret = parse_args(argc, argv)) < 0) {
3865 goto error;
3866 }
3867
3868 /* Daemonize */
3869 if (opt_daemon) {
3870 ret = daemon(0, 0);
3871 if (ret < 0) {
3872 perror("daemon");
3873 goto error;
3874 }
3875 }
3876
3877 /* Check if daemon is UID = 0 */
3878 is_root = !getuid();
3879
3880 if (is_root) {
3881 ret = create_lttng_rundir();
3882 if (ret < 0) {
3883 goto error;
3884 }
3885
3886 if (strlen(apps_unix_sock_path) == 0) {
3887 snprintf(apps_unix_sock_path, PATH_MAX,
3888 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
3889 }
3890
3891 if (strlen(client_unix_sock_path) == 0) {
3892 snprintf(client_unix_sock_path, PATH_MAX,
3893 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
3894 }
3895
3896 /* Set global SHM for ust */
3897 if (strlen(wait_shm_path) == 0) {
3898 snprintf(wait_shm_path, PATH_MAX,
3899 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
3900 }
3901 } else {
3902 home_path = get_home_dir();
3903 if (home_path == NULL) {
3904 /* TODO: Add --socket PATH option */
3905 ERR("Can't get HOME directory for sockets creation.");
3906 ret = -EPERM;
3907 goto error;
3908 }
3909
3910 if (strlen(apps_unix_sock_path) == 0) {
3911 snprintf(apps_unix_sock_path, PATH_MAX,
3912 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
3913 }
3914
3915 /* Set the cli tool unix socket path */
3916 if (strlen(client_unix_sock_path) == 0) {
3917 snprintf(client_unix_sock_path, PATH_MAX,
3918 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
3919 }
3920
3921 /* Set global SHM for ust */
3922 if (strlen(wait_shm_path) == 0) {
3923 snprintf(wait_shm_path, PATH_MAX,
3924 DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
3925 }
3926 }
3927
3928 DBG("Client socket path %s", client_unix_sock_path);
3929 DBG("Application socket path %s", apps_unix_sock_path);
3930
3931 /*
3932 * See if a daemon already exists.
3933 */
3934 if ((ret = check_existing_daemon()) < 0) {
3935 ERR("A session daemon is already running.\n");
3936 /*
3937 * We do not goto exit because we must not call cleanup();
3938 * another daemon is already running.
3939 */
3940 goto error;
3941 }
3942
3943 /* After this point, we can safely call cleanup() with "goto exit" */
3944
3945 /*
3946 * These actions must be executed as root. We do that *after* setting up
3947 * the sockets path because we MUST make the check for another daemon using
3948 * those paths *before* trying to set the kernel consumer sockets and init
3949 * kernel tracer.
3950 */
3951 if (is_root) {
3952 ret = set_consumer_sockets(&kconsumer_data);
3953 if (ret < 0) {
3954 goto exit;
3955 }
3956
3957 ret = set_consumer_sockets(&ustconsumer_data);
3958 if (ret < 0) {
3959 goto exit;
3960 }
3961 /* Setup kernel tracer */
3962 init_kernel_tracer();
3963
3964 /* Set ulimit for open files */
3965 set_ulimit();
3966 }
3967
3968 if ((ret = set_signal_handler()) < 0) {
3969 goto exit;
3970 }
3971
3972 /* Setup the needed unix socket */
3973 if ((ret = init_daemon_socket()) < 0) {
3974 goto exit;
3975 }
3976
3977 /* Set credentials to socket */
3978 if (is_root && ((ret = set_permissions()) < 0)) {
3979 goto exit;
3980 }
3981
3982 /* Get parent pid if -S, --sig-parent is specified. */
3983 if (opt_sig_parent) {
3984 ppid = getppid();
3985 }
3986
3987 /* Setup the kernel pipe for waking up the kernel thread */
3988 if ((ret = create_kernel_poll_pipe()) < 0) {
3989 goto exit;
3990 }
3991
3992 /* Setup the thread apps communication pipe. */
3993 if ((ret = create_apps_cmd_pipe()) < 0) {
3994 goto exit;
3995 }
3996
3997 /* Init UST command queue. */
3998 cds_wfq_init(&ust_cmd_queue.queue);
3999
4000 /* Init UST app hash table */
4001 ust_app_ht_alloc();
4002
4003 /*
4004 * Get session list pointer. This pointer MUST NOT be freed. This list is
4005 * statically declared in session.c
4006 */
4007 session_list_ptr = session_get_list();
4008
4009 /* Set up max poll set size */
4010 lttng_poll_set_max_size();
4011
4012 /* Create thread to manage the client socket */
4013 ret = pthread_create(&client_thread, NULL,
4014 thread_manage_clients, (void *) NULL);
4015 if (ret != 0) {
4016 perror("pthread_create clients");
4017 goto exit_client;
4018 }
4019
4020 /* Create thread to dispatch registration */
4021 ret = pthread_create(&dispatch_thread, NULL,
4022 thread_dispatch_ust_registration, (void *) NULL);
4023 if (ret != 0) {
4024 perror("pthread_create dispatch");
4025 goto exit_dispatch;
4026 }
4027
4028 /* Create thread to manage application registration. */
4029 ret = pthread_create(&reg_apps_thread, NULL,
4030 thread_registration_apps, (void *) NULL);
4031 if (ret != 0) {
4032 perror("pthread_create registration");
4033 goto exit_reg_apps;
4034 }
4035
4036 /* Create thread to manage application socket */
4037 ret = pthread_create(&apps_thread, NULL,
4038 thread_manage_apps, (void *) NULL);
4039 if (ret != 0) {
4040 perror("pthread_create apps");
4041 goto exit_apps;
4042 }
4043
4044 /* Create kernel thread to manage kernel event */
4045 ret = pthread_create(&kernel_thread, NULL,
4046 thread_manage_kernel, (void *) NULL);
4047 if (ret != 0) {
4048 perror("pthread_create kernel");
4049 goto exit_kernel;
4050 }
4051
4052 ret = pthread_join(kernel_thread, &status);
4053 if (ret != 0) {
4054 perror("pthread_join");
4055 goto error; /* join error, exit without cleanup */
4056 }
4057
4058exit_kernel:
4059 ret = pthread_join(apps_thread, &status);
4060 if (ret != 0) {
4061 perror("pthread_join");
4062 goto error; /* join error, exit without cleanup */
4063 }
4064
4065exit_apps:
4066 ret = pthread_join(reg_apps_thread, &status);
4067 if (ret != 0) {
4068 perror("pthread_join");
4069 goto error; /* join error, exit without cleanup */
4070 }
4071
4072exit_reg_apps:
4073 ret = pthread_join(dispatch_thread, &status);
4074 if (ret != 0) {
4075 perror("pthread_join");
4076 goto error; /* join error, exit without cleanup */
4077 }
4078
4079exit_dispatch:
4080 ret = pthread_join(client_thread, &status);
4081 if (ret != 0) {
4082 perror("pthread_join");
4083 goto error; /* join error, exit without cleanup */
4084 }
4085
4086 ret = join_consumer_thread(&kconsumer_data);
4087 if (ret != 0) {
4088 perror("join_consumer");
4089 goto error; /* join error, exit without cleanup */
4090 }
4091
4092exit_client:
4093exit:
4094 /*
4095 * cleanup() is called when no other thread is running.
4096 */
4097 rcu_thread_online();
4098 cleanup();
4099 rcu_thread_offline();
4100 rcu_unregister_thread();
4101 if (!ret)
4102 exit(EXIT_SUCCESS);
4103error:
4104 exit(EXIT_FAILURE);
4105}