Implement UST stop command
[lttng-tools.git] / lttng-sessiond / main.c
1/*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; only version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#define _GNU_SOURCE
20#include <fcntl.h>
21#include <getopt.h>
22#include <grp.h>
23#include <limits.h>
24#include <pthread.h>
25#include <semaphore.h>
26#include <signal.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
30#include <sys/mman.h>
31#include <sys/mount.h>
32#include <sys/resource.h>
33#include <sys/socket.h>
34#include <sys/stat.h>
35#include <sys/types.h>
36#include <sys/wait.h>
37#include <urcu/futex.h>
38#include <unistd.h>
39#include <config.h>
40
41#include <lttng-consumerd.h>
42#include <lttng-sessiond-comm.h>
43#include <lttng/lttng-consumer.h>
44
45#include <lttngerr.h>
46
47#include "channel.h"
48#include "compat/poll.h"
49#include "context.h"
50#include "event.h"
51#include "futex.h"
52#include "hashtable.h"
53#include "kernel-ctl.h"
54#include "lttng-sessiond.h"
55#include "shm.h"
56#include "ust-app.h"
57#include "ust-ctl.h"
58#include "utils.h"
59
60struct consumer_data {
61 enum lttng_consumer_type type;
62
63 pthread_t thread; /* Worker thread interacting with the consumer */
64 sem_t sem;
65
66	/* Mutex to control consumerd pid assignment */
67 pthread_mutex_t pid_mutex;
68 pid_t pid;
69
70 int err_sock;
71 int cmd_sock;
72
73 /* consumer error and command Unix socket path */
74 char err_unix_sock_path[PATH_MAX];
75 char cmd_unix_sock_path[PATH_MAX];
76};
77
78/* Const values */
79const char default_home_dir[] = DEFAULT_HOME_DIR;
80const char default_tracing_group[] = LTTNG_DEFAULT_TRACING_GROUP;
81const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
82const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
83
84/* Variables */
85int opt_verbose; /* Not static for lttngerr.h */
86int opt_verbose_consumer; /* Not static for lttngerr.h */
87int opt_quiet; /* Not static for lttngerr.h */
88
89const char *progname;
90const char *opt_tracing_group;
91static int opt_sig_parent;
92static int opt_daemon;
93static int is_root; /* Set to 1 if the daemon is running as root */
94static pid_t ppid; /* Parent PID for --sig-parent option */
95
96/* Consumer daemon specific control data */
97static struct consumer_data kconsumer_data = {
98 .type = LTTNG_CONSUMER_KERNEL,
99};
100static struct consumer_data ustconsumer_data = {
101 .type = LTTNG_CONSUMER_UST,
102};
103
104static int dispatch_thread_exit;
105
106/* Global application Unix socket path */
107static char apps_unix_sock_path[PATH_MAX];
108/* Global client Unix socket path */
109static char client_unix_sock_path[PATH_MAX];
110/* global wait shm path for UST */
111static char wait_shm_path[PATH_MAX];
112
113/* Sockets and FDs */
114static int client_sock;
115static int apps_sock;
116static int kernel_tracer_fd;
117static int kernel_poll_pipe[2];
118
119/*
120 * Quit pipe for all threads. This permits a single cancellation point
121 * for all threads when receiving an event on the pipe.
122 */
123static int thread_quit_pipe[2];
124
125/*
126 * This pipe is used to inform the thread managing application communication
127 * that a command is queued and ready to be processed.
128 */
129static int apps_cmd_pipe[2];
130
131/* Pthread, Mutexes and Semaphores */
132static pthread_t apps_thread;
133static pthread_t reg_apps_thread;
134static pthread_t client_thread;
135static pthread_t kernel_thread;
136static pthread_t dispatch_thread;
137
138
139/*
140 * UST registration command queue. This queue is tied to a futex and uses an
141 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
142 *
143 * The thread_manage_apps and thread_dispatch_ust_registration interact with
144 * this queue and the wait/wake scheme.
145 */
146static struct ust_cmd_queue ust_cmd_queue;
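/*
 * Illustrative sketch (not part of the original code) of the N-wakers /
 * 1-waiter scheme around this queue, mirroring how the registration and
 * dispatch threads below use it:
 *
 *   // Wakers (thread_registration_apps):
 *   cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *   futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   // Waiter (thread_dispatch_ust_registration):
 *   futex_nto1_prepare(&ust_cmd_queue.futex);
 *   while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue)) != NULL) {
 *           ... dispatch the command on apps_cmd_pipe ...
 *   }
 *   futex_nto1_wait(&ust_cmd_queue.futex);
 */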
147
148/*
149 * Pointer initialized before thread creation.
150 *
151 * This points to the tracing session list containing the session count and a
152 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
153 * MUST NOT be taken if you call a public function in session.c.
154 *
155 * The lock is nested inside the structure: session_list_ptr->lock. Please use
156 * session_lock_list and session_unlock_list for lock acquisition.
157 */
158static struct ltt_session_list *session_list_ptr;
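/*
 * Illustrative sketch (not part of the original code) of the expected locking
 * discipline when iterating over the list, as done in update_kernel_poll()
 * below:
 *
 *   session_lock_list();
 *   cds_list_for_each_entry(session, &session_list_ptr->head, list) {
 *           session_lock(session);
 *           ... inspect or modify the session ...
 *           session_unlock(session);
 *   }
 *   session_unlock_list();
 */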
159
160int ust_consumer_fd;
161
162/*
163 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
164 */
165static int create_thread_poll_set(struct lttng_poll_event *events,
166 unsigned int size)
167{
168 int ret;
169
170 if (events == NULL || size == 0) {
171 ret = -1;
172 goto error;
173 }
174
175 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
176 if (ret < 0) {
177 goto error;
178 }
179
180 /* Add quit pipe */
181 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
182 if (ret < 0) {
183 goto error;
184 }
185
186 return 0;
187
188error:
189 return ret;
190}
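/*
 * Usage sketch (not part of the original code) of the poll-set helpers shared
 * by the worker threads below; "my_fd" is a placeholder for the fd each thread
 * actually monitors:
 *
 *   ret = create_thread_poll_set(&events, 2);
 *   ret = lttng_poll_add(&events, my_fd, LPOLLIN | LPOLLRDHUP);
 *   while (1) {
 *           ret = lttng_poll_wait(&events, -1);
 *           for (i = 0; i < LTTNG_POLL_GETNB(&events); i++) {
 *                   revents = LTTNG_POLL_GETEV(&events, i);
 *                   pollfd = LTTNG_POLL_GETFD(&events, i);
 *                   if (check_thread_quit_pipe(pollfd, revents)) {
 *                           goto error;
 *                   }
 *                   ... handle my_fd events ...
 *           }
 *   }
 */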
191
192/*
193 * Check if the thread quit pipe was triggered.
194 *
195 * Return 1 if it was triggered, else 0.
196 */
197static int check_thread_quit_pipe(int fd, uint32_t events)
198{
199 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
200 return 1;
201 }
202
203 return 0;
204}
205
206/*
207 * Remove modules in reverse load order.
208 */
209static int modprobe_remove_kernel_modules(void)
210{
211 int ret = 0, i;
212 char modprobe[256];
213
214 for (i = ARRAY_SIZE(kernel_modules_list) - 1; i >= 0; i--) {
215 ret = snprintf(modprobe, sizeof(modprobe),
216 "/sbin/modprobe -r -q %s",
217 kernel_modules_list[i].name);
218 if (ret < 0) {
219 perror("snprintf modprobe -r");
220 goto error;
221 }
222 modprobe[sizeof(modprobe) - 1] = '\0';
223 ret = system(modprobe);
224 if (ret == -1) {
225 ERR("Unable to launch modprobe -r for module %s",
226 kernel_modules_list[i].name);
227 } else if (kernel_modules_list[i].required
228 && WEXITSTATUS(ret) != 0) {
229 ERR("Unable to remove module %s",
230 kernel_modules_list[i].name);
231 } else {
232 DBG("Modprobe removal successful %s",
233 kernel_modules_list[i].name);
234 }
235 }
236
237error:
238 return ret;
239}
240
241/*
242 * Return group ID of the tracing group or -1 if not found.
243 */
244static gid_t allowed_group(void)
245{
246 struct group *grp;
247
248 if (opt_tracing_group) {
249 grp = getgrnam(opt_tracing_group);
250 } else {
251 grp = getgrnam(default_tracing_group);
252 }
253 if (!grp) {
254 return -1;
255 } else {
256 return grp->gr_gid;
257 }
258}
259
260/*
261 * Init thread quit pipe.
262 *
263 * Return -1 on error or 0 if all pipes are created.
264 */
265static int init_thread_quit_pipe(void)
266{
267 int ret;
268
269 ret = pipe2(thread_quit_pipe, O_CLOEXEC);
270 if (ret < 0) {
271 perror("thread quit pipe");
272 goto error;
273 }
274
275error:
276 return ret;
277}
278
279/*
280 * Complete teardown of a kernel session. This frees all data structures related
281 * to a kernel session and updates the counter.
282 */
283static void teardown_kernel_session(struct ltt_session *session)
284{
285 if (session->kernel_session != NULL) {
286 DBG("Tearing down kernel session");
287
288 /*
289 * If a custom kernel consumer was registered, close the socket before
290 * tearing down the complete kernel session structure
291 */
292 if (session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
293 lttcomm_close_unix_sock(session->kernel_session->consumer_fd);
294 }
295
296 trace_kernel_destroy_session(session->kernel_session);
297 /* Extra precaution */
298 session->kernel_session = NULL;
299 }
300}
301
302/*
303 * Complete teardown of all UST sessions. This will free everything in its path
304 * and destroy the core essence of all UST sessions :)
305 */
306static void teardown_ust_session(struct ltt_session *session)
307{
308 DBG("Tearing down UST session(s)");
309
310 trace_ust_destroy_session(session->ust_session);
311}
312
313/*
314 * Stop all threads by closing the thread quit pipe.
315 */
316static void stop_threads(void)
317{
318 int ret;
319
320 /* Stopping all threads */
321 DBG("Terminating all threads");
322 ret = notify_thread_pipe(thread_quit_pipe[1]);
323 if (ret < 0) {
324 ERR("write error on thread quit pipe");
325 }
326
327 /* Dispatch thread */
328 dispatch_thread_exit = 1;
329 futex_nto1_wake(&ust_cmd_queue.futex);
330}
331
332/*
333 * Cleanup the daemon
334 */
335static void cleanup(void)
336{
337 int ret;
338 char *cmd;
339 struct ltt_session *sess, *stmp;
340
341 DBG("Cleaning up");
342
343 if (is_root) {
344 DBG("Removing %s directory", LTTNG_RUNDIR);
345 ret = asprintf(&cmd, "rm -rf " LTTNG_RUNDIR);
346 if (ret < 0) {
347 ERR("asprintf failed. Something is really wrong!");
348 }
349
350 /* Remove lttng run directory */
351 ret = system(cmd);
352 if (ret < 0) {
353 ERR("Unable to clean " LTTNG_RUNDIR);
354 }
355 }
356
357 DBG("Cleaning up all session");
358
359 /* Destroy session list mutex */
360 if (session_list_ptr != NULL) {
361 pthread_mutex_destroy(&session_list_ptr->lock);
362
363		/* Cleanup ALL sessions */
364 cds_list_for_each_entry_safe(sess, stmp,
365 &session_list_ptr->head, list) {
366 teardown_kernel_session(sess);
367 teardown_ust_session(sess);
368 free(sess);
369 }
370 }
371
372 DBG("Closing all UST sockets");
373 ust_app_clean_list();
374
375 pthread_mutex_destroy(&kconsumer_data.pid_mutex);
376
377 DBG("Closing kernel fd");
378 close(kernel_tracer_fd);
379
380 if (is_root) {
381 DBG("Unloading kernel modules");
382 modprobe_remove_kernel_modules();
383 }
384
385 close(thread_quit_pipe[0]);
386 close(thread_quit_pipe[1]);
387
388 /* <fun> */
389 MSG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
390 "Matthew, BEET driven development works!%c[%dm",
391 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
392 /* </fun> */
393}
394
395/*
396 * Send data on a unix socket using the liblttsessiondcomm API.
397 *
398 * Return lttcomm error code.
399 */
400static int send_unix_sock(int sock, void *buf, size_t len)
401{
402 /* Check valid length */
403 if (len <= 0) {
404 return -1;
405 }
406
407 return lttcomm_send_unix_sock(sock, buf, len);
408}
409
410/*
411 * Free memory of a command context structure.
412 */
413static void clean_command_ctx(struct command_ctx **cmd_ctx)
414{
415 DBG("Clean command context structure");
416 if (*cmd_ctx) {
417 if ((*cmd_ctx)->llm) {
418 free((*cmd_ctx)->llm);
419 }
420 if ((*cmd_ctx)->lsm) {
421 free((*cmd_ctx)->lsm);
422 }
423 free(*cmd_ctx);
424 *cmd_ctx = NULL;
425 }
426}
427
428/*
429 * Send all stream fds of kernel channel to the consumer.
430 */
431static int send_kconsumer_channel_streams(struct consumer_data *consumer_data,
432 int sock, struct ltt_kernel_channel *channel)
433{
434 int ret;
435 struct ltt_kernel_stream *stream;
436 struct lttcomm_consumer_msg lkm;
437
438 DBG("Sending streams of channel %s to kernel consumer",
439 channel->channel->name);
440
441 /* Send channel */
442 lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
443 lkm.u.channel.channel_key = channel->fd;
444 lkm.u.channel.max_sb_size = channel->channel->attr.subbuf_size;
445 lkm.u.channel.mmap_len = 0; /* for kernel */
446 DBG("Sending channel %d to consumer", lkm.u.channel.channel_key);
447 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
448 if (ret < 0) {
449 perror("send consumer channel");
450 goto error;
451 }
452
453 /* Send streams */
454 cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
455 if (!stream->fd) {
456 continue;
457 }
458 lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
459 lkm.u.stream.channel_key = channel->fd;
460 lkm.u.stream.stream_key = stream->fd;
461 lkm.u.stream.state = stream->state;
462 lkm.u.stream.output = channel->channel->attr.output;
463 lkm.u.stream.mmap_len = 0; /* for kernel */
464 strncpy(lkm.u.stream.path_name, stream->pathname, PATH_MAX - 1);
465 lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
466 DBG("Sending stream %d to consumer", lkm.u.stream.stream_key);
467 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
468 if (ret < 0) {
469 perror("send consumer stream");
470 goto error;
471 }
472 ret = lttcomm_send_fds_unix_sock(sock, &stream->fd, 1);
473 if (ret < 0) {
474 perror("send consumer stream ancillary data");
475 goto error;
476 }
477 }
478
479 DBG("consumer channel streams sent");
480
481 return 0;
482
483error:
484 return ret;
485}
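/*
 * Wire-sequence sketch for the exchange above: one ADD_CHANNEL message, then,
 * for each stream, an ADD_STREAM message immediately followed by the stream fd
 * passed as ancillary data.
 *
 *   LTTNG_CONSUMER_ADD_CHANNEL   -> lttcomm_send_unix_sock()
 *   LTTNG_CONSUMER_ADD_STREAM    -> lttcomm_send_unix_sock()
 *   stream fd                    -> lttcomm_send_fds_unix_sock()
 *   (ADD_STREAM + fd repeated per stream)
 */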
486
487/*
488 * Send all stream fds of the kernel session to the consumer.
489 */
490static int send_kconsumer_session_streams(struct consumer_data *consumer_data,
491 struct ltt_kernel_session *session)
492{
493 int ret;
494 struct ltt_kernel_channel *chan;
495 struct lttcomm_consumer_msg lkm;
496 int sock = session->consumer_fd;
497
498 DBG("Sending metadata stream fd");
499
500 /* Extra protection. It's NOT supposed to be set to 0 at this point */
501 if (session->consumer_fd == 0) {
502 session->consumer_fd = consumer_data->cmd_sock;
503 }
504
505 if (session->metadata_stream_fd != 0) {
506 /* Send metadata channel fd */
507 lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
508 lkm.u.channel.channel_key = session->metadata->fd;
509 lkm.u.channel.max_sb_size = session->metadata->conf->attr.subbuf_size;
510 lkm.u.channel.mmap_len = 0; /* for kernel */
511 DBG("Sending metadata channel %d to consumer", lkm.u.stream.stream_key);
512 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
513 if (ret < 0) {
514 perror("send consumer channel");
515 goto error;
516 }
517
518 /* Send metadata stream fd */
519 lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
520 lkm.u.stream.channel_key = session->metadata->fd;
521 lkm.u.stream.stream_key = session->metadata_stream_fd;
522 lkm.u.stream.state = LTTNG_CONSUMER_ACTIVE_STREAM;
523 lkm.u.stream.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
524 lkm.u.stream.mmap_len = 0; /* for kernel */
525 strncpy(lkm.u.stream.path_name, session->metadata->pathname, PATH_MAX - 1);
526 lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
527 DBG("Sending metadata stream %d to consumer", lkm.u.stream.stream_key);
528 ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
529 if (ret < 0) {
530 perror("send consumer stream");
531 goto error;
532 }
533 ret = lttcomm_send_fds_unix_sock(sock, &session->metadata_stream_fd, 1);
534 if (ret < 0) {
535 perror("send consumer stream");
536 goto error;
537 }
538 }
539
540 cds_list_for_each_entry(chan, &session->channel_list.head, list) {
541 ret = send_kconsumer_channel_streams(consumer_data, sock, chan);
542 if (ret < 0) {
543 goto error;
544 }
545 }
546
547 DBG("consumer fds (metadata and channel streams) sent");
548
549 return 0;
550
551error:
552 return ret;
553}
554
555/*
556 * Notify UST applications using the shm mmap futex.
557 */
558static int notify_ust_apps(int active)
559{
560 char *wait_shm_mmap;
561
562 DBG("Notifying applications of session daemon state: %d", active);
563
564 /* See shm.c for this call implying mmap, shm and futex calls */
565 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
566 if (wait_shm_mmap == NULL) {
567 goto error;
568 }
569
570 /* Wake waiting process */
571 futex_wait_update((int32_t *) wait_shm_mmap, active);
572
573 /* Apps notified successfully */
574 return 0;
575
576error:
577 return -1;
578}
579
580/*
581 * Setup the outgoing data buffer for the response (llm) by allocating the
582 * right amount of memory and copying the original information from the lsm
583 * structure.
584 *
585 * Return the size of the allocated payload buffer, or a negative value on error.
586 */
587static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
588{
589 int ret, buf_size;
590
591 buf_size = size;
592
593 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
594 if (cmd_ctx->llm == NULL) {
595 perror("zmalloc");
596 ret = -ENOMEM;
597 goto error;
598 }
599
600 /* Copy common data */
601 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
602 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
603
604 cmd_ctx->llm->data_size = size;
605 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
606
607 return buf_size;
608
609error:
610 return ret;
611}
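/*
 * Usage sketch (hypothetical handler, not part of the original code): size the
 * reply payload, then copy the data right after the lttcomm_lttng_msg header
 * allocated above. "sessions_array" and "nb_sessions" are placeholders.
 *
 *   ret = setup_lttng_msg(cmd_ctx, nb_sessions * sizeof(struct lttng_session));
 *   if (ret < 0) {
 *           goto setup_error;
 *   }
 *   memcpy(((char *) cmd_ctx->llm) + sizeof(struct lttcomm_lttng_msg),
 *                   sessions_array, ret);
 */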
612
613/*
614 * Update the kernel poll set with all channel fds available over all tracing
615 * sessions. Add the wakeup pipe at the end of the set.
616 */
617static int update_kernel_poll(struct lttng_poll_event *events)
618{
619 int ret;
620 struct ltt_session *session;
621 struct ltt_kernel_channel *channel;
622
623 DBG("Updating kernel poll set");
624
625 session_lock_list();
626 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
627 session_lock(session);
628 if (session->kernel_session == NULL) {
629 session_unlock(session);
630 continue;
631 }
632
633 cds_list_for_each_entry(channel,
634 &session->kernel_session->channel_list.head, list) {
635 /* Add channel fd to the kernel poll set */
636 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
637 if (ret < 0) {
638 session_unlock(session);
639 goto error;
640 }
641 DBG("Channel fd %d added to kernel set", channel->fd);
642 }
643 session_unlock(session);
644 }
645 session_unlock_list();
646
647 return 0;
648
649error:
650 session_unlock_list();
651 return -1;
652}
653
654/*
655 * Find the channel fd from 'fd' over all tracing sessions. When found, check
656 * for new channel streams and send those stream fds to the kernel consumer.
657 *
658 * Useful for CPU hotplug feature.
659 */
660static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
661{
662 int ret = 0;
663 struct ltt_session *session;
664 struct ltt_kernel_channel *channel;
665
666 DBG("Updating kernel streams for channel fd %d", fd);
667
668 session_lock_list();
669 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
670 session_lock(session);
671 if (session->kernel_session == NULL) {
672 session_unlock(session);
673 continue;
674 }
675
676		/* This is not supposed to be 0 but this is an extra security check */
677 if (session->kernel_session->consumer_fd == 0) {
678 session->kernel_session->consumer_fd = consumer_data->cmd_sock;
679 }
680
681 cds_list_for_each_entry(channel,
682 &session->kernel_session->channel_list.head, list) {
683 if (channel->fd == fd) {
684 DBG("Channel found, updating kernel streams");
685 ret = kernel_open_channel_stream(channel);
686 if (ret < 0) {
687 goto error;
688 }
689
690 /*
691 * Have we already sent fds to the consumer? If yes, it means
692 * that tracing is started so it is safe to send our updated
693 * stream fds.
694 */
695 if (session->kernel_session->consumer_fds_sent == 1) {
696 ret = send_kconsumer_channel_streams(consumer_data,
697 session->kernel_session->consumer_fd, channel);
698 if (ret < 0) {
699 goto error;
700 }
701 }
702 goto error;
703 }
704 }
705 session_unlock(session);
706 }
707 session_unlock_list();
708 return ret;
709
710error:
711 session_unlock(session);
712 session_unlock_list();
713 return ret;
714}
715
716/*
717 * For each tracing session, update newly registered apps.
718 */
719static void update_ust_app(int app_sock)
720{
721 struct ltt_session *sess, *stmp;
722
723 /* For all tracing session(s) */
724 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
725 if (sess->ust_session) {
726 ust_app_global_update(sess->ust_session, app_sock);
727 }
728 }
729}
730
731/*
732 * This thread manages events coming from the kernel.
733 *
734 * Features supported in this thread:
735 * -) CPU Hotplug
736 */
737static void *thread_manage_kernel(void *data)
738{
739 int ret, i, pollfd, update_poll_flag = 1;
740 uint32_t revents, nb_fd;
741 char tmp;
742 struct lttng_poll_event events;
743
744 DBG("Thread manage kernel started");
745
746 ret = create_thread_poll_set(&events, 2);
747 if (ret < 0) {
748 goto error;
749 }
750
751 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
752 if (ret < 0) {
753 goto error;
754 }
755
756 while (1) {
757 if (update_poll_flag == 1) {
758 /*
759			 * Reset the number of fds in the poll set. Always 2 since there is the thread
760 * quit pipe and the kernel pipe.
761 */
762 events.nb_fd = 2;
763
764 ret = update_kernel_poll(&events);
765 if (ret < 0) {
766 goto error;
767 }
768 update_poll_flag = 0;
769 }
770
771 nb_fd = LTTNG_POLL_GETNB(&events);
772
773 DBG("Thread kernel polling on %d fds", nb_fd);
774
775		/* Zero the poll events */
776 lttng_poll_reset(&events);
777
778		/* Poll with an infinite timeout */
779 ret = lttng_poll_wait(&events, -1);
780 if (ret < 0) {
781 goto error;
782 } else if (ret == 0) {
783 /* Should not happen since timeout is infinite */
784 ERR("Return value of poll is 0 with an infinite timeout.\n"
785 "This should not have happened! Continuing...");
786 continue;
787 }
788
789 for (i = 0; i < nb_fd; i++) {
790 /* Fetch once the poll data */
791 revents = LTTNG_POLL_GETEV(&events, i);
792 pollfd = LTTNG_POLL_GETFD(&events, i);
793
794 /* Thread quit pipe has been closed. Killing thread. */
795 ret = check_thread_quit_pipe(pollfd, revents);
796 if (ret) {
797 goto error;
798 }
799
800 /* Check for data on kernel pipe */
801 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
802 ret = read(kernel_poll_pipe[0], &tmp, 1);
803 update_poll_flag = 1;
804 continue;
805 } else {
806 /*
807 * New CPU detected by the kernel. Adding kernel stream to
808 * kernel session and updating the kernel consumer
809 */
810 if (revents & LPOLLIN) {
811 ret = update_kernel_stream(&kconsumer_data, pollfd);
812 if (ret < 0) {
813 continue;
814 }
815 break;
816 /*
817 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
818 * and unregister kernel stream at this point.
819 */
820 }
821 }
822 }
823 }
824
825error:
826 DBG("Kernel thread dying");
827 close(kernel_poll_pipe[0]);
828 close(kernel_poll_pipe[1]);
829
830 lttng_poll_clean(&events);
831
832 return NULL;
833}
834
835/*
836 * This thread manages consumer errors sent back to the session daemon.
837 */
838static void *thread_manage_consumer(void *data)
839{
840 int sock = 0, i, ret, pollfd;
841 uint32_t revents, nb_fd;
842 enum lttcomm_return_code code;
843 struct lttng_poll_event events;
844 struct consumer_data *consumer_data = data;
845
846 DBG("[thread] Manage consumer started");
847
848 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
849 if (ret < 0) {
850 goto error;
851 }
852
853 /*
854 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
855 * Nothing more will be added to this poll set.
856 */
857 ret = create_thread_poll_set(&events, 2);
858 if (ret < 0) {
859 goto error;
860 }
861
862 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
863 if (ret < 0) {
864 goto error;
865 }
866
867 nb_fd = LTTNG_POLL_GETNB(&events);
868
869	/* Infinite blocking call, waiting for transmission */
870 ret = lttng_poll_wait(&events, -1);
871 if (ret < 0) {
872 goto error;
873 }
874
875 for (i = 0; i < nb_fd; i++) {
876 /* Fetch once the poll data */
877 revents = LTTNG_POLL_GETEV(&events, i);
878 pollfd = LTTNG_POLL_GETFD(&events, i);
879
880 /* Thread quit pipe has been closed. Killing thread. */
881 ret = check_thread_quit_pipe(pollfd, revents);
882 if (ret) {
883 goto error;
884 }
885
886 /* Event on the registration socket */
887 if (pollfd == consumer_data->err_sock) {
888 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
889 ERR("consumer err socket poll error");
890 goto error;
891 }
892 }
893 }
894
895 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
896 if (sock < 0) {
897 goto error;
898 }
899
900 DBG2("Receiving code from consumer err_sock");
901
902 /* Getting status code from kconsumerd */
903 ret = lttcomm_recv_unix_sock(sock, &code,
904 sizeof(enum lttcomm_return_code));
905 if (ret <= 0) {
906 goto error;
907 }
908
909 if (code == CONSUMERD_COMMAND_SOCK_READY) {
910 consumer_data->cmd_sock =
911 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
912 if (consumer_data->cmd_sock < 0) {
913 sem_post(&consumer_data->sem);
914 PERROR("consumer connect");
915 goto error;
916 }
917 /* Signal condition to tell that the kconsumerd is ready */
918 sem_post(&consumer_data->sem);
919 DBG("consumer command socket ready");
920 } else {
921 ERR("consumer error when waiting for SOCK_READY : %s",
922 lttcomm_get_readable_code(-code));
923 goto error;
924 }
925
926	/* Remove the kconsumerd error sock since we've established a connection */
927 ret = lttng_poll_del(&events, consumer_data->err_sock);
928 if (ret < 0) {
929 goto error;
930 }
931
932 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
933 if (ret < 0) {
934 goto error;
935 }
936
937 /* Update number of fd */
938 nb_fd = LTTNG_POLL_GETNB(&events);
939
940	/* Infinite blocking call, waiting for transmission */
941 ret = lttng_poll_wait(&events, -1);
942 if (ret < 0) {
943 goto error;
944 }
945
946 for (i = 0; i < nb_fd; i++) {
947 /* Fetch once the poll data */
948 revents = LTTNG_POLL_GETEV(&events, i);
949 pollfd = LTTNG_POLL_GETFD(&events, i);
950
951 /* Thread quit pipe has been closed. Killing thread. */
952 ret = check_thread_quit_pipe(pollfd, revents);
953 if (ret) {
954 goto error;
955 }
956
957 /* Event on the kconsumerd socket */
958 if (pollfd == sock) {
959 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
960 ERR("consumer err socket second poll error");
961 goto error;
962 }
963 }
964 }
965
966 /* Wait for any kconsumerd error */
967 ret = lttcomm_recv_unix_sock(sock, &code,
968 sizeof(enum lttcomm_return_code));
969 if (ret <= 0) {
970 ERR("consumer closed the command socket");
971 goto error;
972 }
973
974 ERR("consumer return code : %s", lttcomm_get_readable_code(-code));
975
976error:
977 DBG("consumer thread dying");
978 close(consumer_data->err_sock);
979 close(consumer_data->cmd_sock);
980 close(sock);
981
982 unlink(consumer_data->err_unix_sock_path);
983 unlink(consumer_data->cmd_unix_sock_path);
984 consumer_data->pid = 0;
985
986 lttng_poll_clean(&events);
987
988 return NULL;
989}
990
991/*
992 * This thread manages application communication.
993 */
994static void *thread_manage_apps(void *data)
995{
996 int i, ret, pollfd;
997 uint32_t revents, nb_fd;
998 struct ust_command ust_cmd;
999 struct lttng_poll_event events;
1000
1001 DBG("[thread] Manage application started");
1002
1003 rcu_register_thread();
1004 rcu_thread_online();
1005
1006 ret = create_thread_poll_set(&events, 2);
1007 if (ret < 0) {
1008 goto error;
1009 }
1010
1011 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1012 if (ret < 0) {
1013 goto error;
1014 }
1015
1016 while (1) {
1017		/* Zero the events structure */
1018 lttng_poll_reset(&events);
1019
1020 nb_fd = LTTNG_POLL_GETNB(&events);
1021
1022 DBG("Apps thread polling on %d fds", nb_fd);
1023
1024		/* Infinite blocking call, waiting for transmission */
1025 ret = lttng_poll_wait(&events, -1);
1026 if (ret < 0) {
1027 goto error;
1028 }
1029
1030 for (i = 0; i < nb_fd; i++) {
1031 /* Fetch once the poll data */
1032 revents = LTTNG_POLL_GETEV(&events, i);
1033 pollfd = LTTNG_POLL_GETFD(&events, i);
1034
1035 /* Thread quit pipe has been closed. Killing thread. */
1036 ret = check_thread_quit_pipe(pollfd, revents);
1037 if (ret) {
1038 goto error;
1039 }
1040
1041 /* Inspect the apps cmd pipe */
1042 if (pollfd == apps_cmd_pipe[0]) {
1043 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1044 ERR("Apps command pipe error");
1045 goto error;
1046 } else if (revents & LPOLLIN) {
1047 /* Empty pipe */
1048 ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
1049 if (ret < 0 || ret < sizeof(ust_cmd)) {
1050 perror("read apps cmd pipe");
1051 goto error;
1052 }
1053
1054					/* Register the application to the session daemon */
1055 ret = ust_app_register(&ust_cmd.reg_msg,
1056 ust_cmd.sock);
1057 if (ret < 0) {
1058 /* Only critical ENOMEM error can be returned here */
1059 goto error;
1060 }
1061
1062 /*
1063 * Add channel(s) and event(s) to newly registered apps
1064 * from lttng global UST domain.
1065 */
1066 update_ust_app(ust_cmd.sock);
1067
1068 ret = ustctl_register_done(ust_cmd.sock);
1069 if (ret < 0) {
1070 /*
1071 * If the registration is not possible, we simply
1072 * unregister the apps and continue
1073 */
1074 ust_app_unregister(ust_cmd.sock);
1075 } else {
1076 /*
1077						 * Here we just need to monitor the close of the UST
1078						 * socket; the poll set monitors that by default.
1079 */
1080 ret = lttng_poll_add(&events, ust_cmd.sock, 0);
1081 if (ret < 0) {
1082 goto error;
1083 }
1084
1085 DBG("Apps with sock %d added to poll set",
1086 ust_cmd.sock);
1087 }
1088
1089 break;
1090 }
1091 } else {
1092 /*
1093				 * At this point, we know that a registered application triggered
1094 * the event at poll_wait.
1095 */
1096 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1097 /* Removing from the poll set */
1098 ret = lttng_poll_del(&events, pollfd);
1099 if (ret < 0) {
1100 goto error;
1101 }
1102
1103 /* Socket closed on remote end. */
1104 ust_app_unregister(pollfd);
1105 break;
1106 }
1107 }
1108 }
1109 }
1110
1111error:
1112 DBG("Application communication apps dying");
1113 close(apps_cmd_pipe[0]);
1114 close(apps_cmd_pipe[1]);
1115
1116 lttng_poll_clean(&events);
1117
1118 rcu_thread_offline();
1119 rcu_unregister_thread();
1120 return NULL;
1121}
1122
1123/*
1124 * Dispatch requests from the registration thread to the application
1125 * communication thread.
1126 */
1127static void *thread_dispatch_ust_registration(void *data)
1128{
1129 int ret;
1130 struct cds_wfq_node *node;
1131 struct ust_command *ust_cmd = NULL;
1132
1133 DBG("[thread] Dispatch UST command started");
1134
1135 while (!dispatch_thread_exit) {
1136 /* Atomically prepare the queue futex */
1137 futex_nto1_prepare(&ust_cmd_queue.futex);
1138
1139 do {
1140 /* Dequeue command for registration */
1141 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1142 if (node == NULL) {
1143 DBG("Woken up but nothing in the UST command queue");
1144 /* Continue thread execution */
1145 break;
1146 }
1147
1148 ust_cmd = caa_container_of(node, struct ust_command, node);
1149
1150 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1151 " gid:%d sock:%d name:%s (version %d.%d)",
1152 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1153 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1154 ust_cmd->sock, ust_cmd->reg_msg.name,
1155 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1156 /*
1157 * Inform apps thread of the new application registration. This
1158 * call is blocking so we can be assured that the data will be read
1159			 * at some point in time or wait until the end of the world :)
1160 */
1161 ret = write(apps_cmd_pipe[1], ust_cmd,
1162 sizeof(struct ust_command));
1163 if (ret < 0) {
1164 perror("write apps cmd pipe");
1165 if (errno == EBADF) {
1166 /*
1167 * We can't inform the application thread to process
1168 * registration. We will exit or else application
1169 * registration will not occur and tracing will never
1170 * start.
1171 */
1172 goto error;
1173 }
1174 }
1175 free(ust_cmd);
1176 } while (node != NULL);
1177
1178 /* Futex wait on queue. Blocking call on futex() */
1179 futex_nto1_wait(&ust_cmd_queue.futex);
1180 }
1181
1182error:
1183 DBG("Dispatch thread dying");
1184 return NULL;
1185}
1186
1187/*
1188 * This thread manages application registration.
1189 */
1190static void *thread_registration_apps(void *data)
1191{
1192 int sock = 0, i, ret, pollfd;
1193 uint32_t revents, nb_fd;
1194 struct lttng_poll_event events;
1195 /*
1196	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
1197 * freed in the manage apps thread.
1198 */
1199 struct ust_command *ust_cmd = NULL;
1200
1201 DBG("[thread] Manage application registration started");
1202
1203 ret = lttcomm_listen_unix_sock(apps_sock);
1204 if (ret < 0) {
1205 goto error;
1206 }
1207
1208 /*
1209 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1210 * more will be added to this poll set.
1211 */
1212 ret = create_thread_poll_set(&events, 2);
1213 if (ret < 0) {
1214 goto error;
1215 }
1216
1217 /* Add the application registration socket */
1218 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1219 if (ret < 0) {
1220 goto error;
1221 }
1222
1223 /* Notify all applications to register */
1224 ret = notify_ust_apps(1);
1225 if (ret < 0) {
1226 ERR("Failed to notify applications or create the wait shared memory.\n"
1227				"Execution continues but there might be problems for already\n"
1228				"running applications that wish to register.");
1229 }
1230
1231 while (1) {
1232 DBG("Accepting application registration");
1233
1234 nb_fd = LTTNG_POLL_GETNB(&events);
1235
1236		/* Infinite blocking call, waiting for transmission */
1237 ret = lttng_poll_wait(&events, -1);
1238 if (ret < 0) {
1239 goto error;
1240 }
1241
1242 for (i = 0; i < nb_fd; i++) {
1243 /* Fetch once the poll data */
1244 revents = LTTNG_POLL_GETEV(&events, i);
1245 pollfd = LTTNG_POLL_GETFD(&events, i);
1246
1247 /* Thread quit pipe has been closed. Killing thread. */
1248 ret = check_thread_quit_pipe(pollfd, revents);
1249 if (ret) {
1250 goto error;
1251 }
1252
1253 /* Event on the registration socket */
1254 if (pollfd == apps_sock) {
1255 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1256 ERR("Register apps socket poll error");
1257 goto error;
1258 } else if (revents & LPOLLIN) {
1259 sock = lttcomm_accept_unix_sock(apps_sock);
1260 if (sock < 0) {
1261 goto error;
1262 }
1263
1264 /* Create UST registration command for enqueuing */
1265 ust_cmd = zmalloc(sizeof(struct ust_command));
1266 if (ust_cmd == NULL) {
1267 perror("ust command zmalloc");
1268 goto error;
1269 }
1270
1271 /*
1272 * Using message-based transmissions to ensure we don't
1273 * have to deal with partially received messages.
1274 */
1275 ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
1276 sizeof(struct ust_register_msg));
1277 if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
1278 if (ret < 0) {
1279 perror("lttcomm_recv_unix_sock register apps");
1280 } else {
1281 ERR("Wrong size received on apps register");
1282 }
1283 free(ust_cmd);
1284 close(sock);
1285 continue;
1286 }
1287
1288 ust_cmd->sock = sock;
1289
1290 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1291 " gid:%d sock:%d name:%s (version %d.%d)",
1292 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1293 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1294 ust_cmd->sock, ust_cmd->reg_msg.name,
1295 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1296
1297 /*
1298					 * Lock-free enqueue of the registration request. The red pill
1299					 * has been taken! This app will be part of the *system*.
1300 */
1301 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1302
1303 /*
1304 * Wake the registration queue futex. Implicit memory
1305 * barrier with the exchange in cds_wfq_enqueue.
1306 */
1307 futex_nto1_wake(&ust_cmd_queue.futex);
1308 }
1309 }
1310 }
1311 }
1312
1313error:
1314 DBG("UST Registration thread dying");
1315
1316 /* Notify that the registration thread is gone */
1317 notify_ust_apps(0);
1318
1319 close(apps_sock);
1320 close(sock);
1321 unlink(apps_unix_sock_path);
1322
1323 lttng_poll_clean(&events);
1324
1325 return NULL;
1326}
1327
1328/*
1329 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1330 * exec or it will fail.
1331 */
1332static int spawn_consumer_thread(struct consumer_data *consumer_data)
1333{
1334 int ret;
1335 struct timespec timeout;
1336
1337 timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
1338 timeout.tv_nsec = 0;
1339
1340 /* Setup semaphore */
1341 ret = sem_init(&consumer_data->sem, 0, 0);
1342 if (ret < 0) {
1343 PERROR("sem_init consumer semaphore");
1344 goto error;
1345 }
1346
1347 ret = pthread_create(&consumer_data->thread, NULL,
1348 thread_manage_consumer, consumer_data);
1349 if (ret != 0) {
1350 PERROR("pthread_create consumer");
1351 ret = -1;
1352 goto error;
1353 }
1354
1355 /* Get time for sem_timedwait absolute timeout */
1356 ret = clock_gettime(CLOCK_REALTIME, &timeout);
1357 if (ret < 0) {
1358 PERROR("clock_gettime spawn consumer");
1359 /* Infinite wait for the kconsumerd thread to be ready */
1360 ret = sem_wait(&consumer_data->sem);
1361 } else {
1362 /* Normal timeout if the gettime was successful */
1363 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1364 ret = sem_timedwait(&consumer_data->sem, &timeout);
1365 }
1366
1367 if (ret < 0) {
1368 if (errno == ETIMEDOUT) {
1369 /*
1370 * Call has timed out so we kill the kconsumerd_thread and return
1371 * an error.
1372 */
1373 ERR("The consumer thread was never ready. Killing it");
1374 ret = pthread_cancel(consumer_data->thread);
1375 if (ret < 0) {
1376 PERROR("pthread_cancel consumer thread");
1377 }
1378 } else {
1379 PERROR("semaphore wait failed consumer thread");
1380 }
1381 goto error;
1382 }
1383
1384 pthread_mutex_lock(&consumer_data->pid_mutex);
1385 if (consumer_data->pid == 0) {
1386 ERR("Kconsumerd did not start");
1387 pthread_mutex_unlock(&consumer_data->pid_mutex);
1388 goto error;
1389 }
1390 pthread_mutex_unlock(&consumer_data->pid_mutex);
1391
1392 return 0;
1393
1394error:
1395 return ret;
1396}
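/*
 * Readiness handshake sketch: thread_manage_consumer() above posts
 * consumer_data->sem after receiving CONSUMERD_COMMAND_SOCK_READY (whether or
 * not its connect to the command socket succeeds); the sem_timedwait() here,
 * bounded by DEFAULT_SEM_WAIT_TIMEOUT, waits for that post and then checks
 * consumer_data->pid to confirm the consumer actually started.
 */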
1397
1398/*
1399 * Join consumer thread
1400 */
1401static int join_consumer_thread(struct consumer_data *consumer_data)
1402{
1403 void *status;
1404 int ret;
1405
1406 if (consumer_data->pid != 0) {
1407 ret = kill(consumer_data->pid, SIGTERM);
1408 if (ret) {
1409 ERR("Error killing consumer daemon");
1410 return ret;
1411 }
1412 return pthread_join(consumer_data->thread, &status);
1413 } else {
1414 return 0;
1415 }
1416}
1417
1418/*
1419 * Fork and exec a consumer daemon (consumerd).
1420 *
1421 * Return pid if successful else -1.
1422 */
1423static pid_t spawn_consumerd(struct consumer_data *consumer_data)
1424{
1425 int ret;
1426 pid_t pid;
1427 const char *verbosity;
1428
1429 DBG("Spawning consumerd");
1430
1431 pid = fork();
1432 if (pid == 0) {
1433 /*
1434 * Exec consumerd.
1435 */
1436 if (opt_verbose > 1 || opt_verbose_consumer) {
1437 verbosity = "--verbose";
1438 } else {
1439 verbosity = "--quiet";
1440 }
1441 switch (consumer_data->type) {
1442 case LTTNG_CONSUMER_KERNEL:
1443 execl(INSTALL_BIN_PATH "/lttng-consumerd",
1444 "lttng-consumerd", verbosity, "-k", NULL);
1445 break;
1446 case LTTNG_CONSUMER_UST:
1447 execl(INSTALL_BIN_PATH "/lttng-consumerd",
1448 "lttng-consumerd", verbosity, "-u", NULL);
1449 break;
1450 default:
1451 perror("unknown consumer type");
1452 exit(EXIT_FAILURE);
1453 }
1454 if (errno != 0) {
1455 perror("kernel start consumer exec");
1456 }
1457 exit(EXIT_FAILURE);
1458 } else if (pid > 0) {
1459 ret = pid;
1460 } else {
1461 perror("start consumer fork");
1462 ret = -errno;
1463 }
1464 return ret;
1465}
1466
1467/*
1468 * Spawn the consumerd daemon and session daemon thread.
1469 */
1470static int start_consumerd(struct consumer_data *consumer_data)
1471{
1472 int ret;
1473
1474 pthread_mutex_lock(&consumer_data->pid_mutex);
1475 if (consumer_data->pid != 0) {
1476 pthread_mutex_unlock(&consumer_data->pid_mutex);
1477 goto end;
1478 }
1479
1480 ret = spawn_consumerd(consumer_data);
1481 if (ret < 0) {
1482 ERR("Spawning consumerd failed");
1483 pthread_mutex_unlock(&consumer_data->pid_mutex);
1484 goto error;
1485 }
1486
1487 /* Setting up the consumer_data pid */
1488 consumer_data->pid = ret;
1489 DBG2("Consumer pid %d", consumer_data->pid);
1490 pthread_mutex_unlock(&consumer_data->pid_mutex);
1491
1492 DBG2("Spawning consumer control thread");
1493 ret = spawn_consumer_thread(consumer_data);
1494 if (ret < 0) {
1495 ERR("Fatal error spawning consumer control thread");
1496 goto error;
1497 }
1498
1499end:
1500 return 0;
1501
1502error:
1503 return ret;
1504}
1505
1506/*
1507 * modprobe_kernel_modules
1508 */
1509static int modprobe_kernel_modules(void)
1510{
1511 int ret = 0, i;
1512 char modprobe[256];
1513
1514 for (i = 0; i < ARRAY_SIZE(kernel_modules_list); i++) {
1515 ret = snprintf(modprobe, sizeof(modprobe),
1516 "/sbin/modprobe %s%s",
1517 kernel_modules_list[i].required ? "" : "-q ",
1518 kernel_modules_list[i].name);
1519 if (ret < 0) {
1520 perror("snprintf modprobe");
1521 goto error;
1522 }
1523 modprobe[sizeof(modprobe) - 1] = '\0';
1524 ret = system(modprobe);
1525 if (ret == -1) {
1526 ERR("Unable to launch modprobe for module %s",
1527 kernel_modules_list[i].name);
1528 } else if (kernel_modules_list[i].required
1529 && WEXITSTATUS(ret) != 0) {
1530 ERR("Unable to load module %s",
1531 kernel_modules_list[i].name);
1532 } else {
1533 DBG("Modprobe successfully %s",
1534 kernel_modules_list[i].name);
1535 }
1536 }
1537
1538error:
1539 return ret;
1540}
1541
1542/*
1543 * mount_debugfs
1544 */
1545static int mount_debugfs(char *path)
1546{
1547 int ret;
1548 char *type = "debugfs";
1549
1550 ret = mkdir_recursive(path, S_IRWXU | S_IRWXG, geteuid(), getegid());
1551 if (ret < 0) {
1552 PERROR("Cannot create debugfs path");
1553 goto error;
1554 }
1555
1556 ret = mount(type, path, type, 0, NULL);
1557 if (ret < 0) {
1558 PERROR("Cannot mount debugfs");
1559 goto error;
1560 }
1561
1562 DBG("Mounted debugfs successfully at %s", path);
1563
1564error:
1565 return ret;
1566}
1567
1568/*
1569 * Setup necessary data for kernel tracer action.
1570 */
1571static void init_kernel_tracer(void)
1572{
1573 int ret;
1574 char *proc_mounts = "/proc/mounts";
1575 char line[256];
1576 char *debugfs_path = NULL, *lttng_path = NULL;
1577 FILE *fp;
1578
1579 /* Detect debugfs */
1580 fp = fopen(proc_mounts, "r");
1581 if (fp == NULL) {
1582 ERR("Unable to probe %s", proc_mounts);
1583 goto error;
1584 }
1585
1586 while (fgets(line, sizeof(line), fp) != NULL) {
1587 if (strstr(line, "debugfs") != NULL) {
1588 /* Remove first string */
1589 strtok(line, " ");
1590 /* Dup string here so we can reuse line later on */
1591 debugfs_path = strdup(strtok(NULL, " "));
1592 DBG("Got debugfs path : %s", debugfs_path);
1593 break;
1594 }
1595 }
1596
1597 fclose(fp);
1598
1599	/* Mount debugfs if needed */
1600 if (debugfs_path == NULL) {
1601 ret = asprintf(&debugfs_path, "/mnt/debugfs");
1602 if (ret < 0) {
1603 perror("asprintf debugfs path");
1604 goto error;
1605 }
1606 ret = mount_debugfs(debugfs_path);
1607 if (ret < 0) {
1608 perror("Cannot mount debugfs");
1609 goto error;
1610 }
1611 }
1612
1613 /* Modprobe lttng kernel modules */
1614 ret = modprobe_kernel_modules();
1615 if (ret < 0) {
1616 goto error;
1617 }
1618
1619 /* Setup lttng kernel path */
1620 ret = asprintf(&lttng_path, "%s/lttng", debugfs_path);
1621 if (ret < 0) {
1622 perror("asprintf lttng path");
1623 goto error;
1624 }
1625
1626 /* Open debugfs lttng */
1627 kernel_tracer_fd = open(lttng_path, O_RDWR);
1628 if (kernel_tracer_fd < 0) {
1629 DBG("Failed to open %s", lttng_path);
1630 goto error;
1631 }
1632
1633 free(lttng_path);
1634 free(debugfs_path);
1635 DBG("Kernel tracer fd %d", kernel_tracer_fd);
1636 return;
1637
1638error:
1639 if (lttng_path) {
1640 free(lttng_path);
1641 }
1642 if (debugfs_path) {
1643 free(debugfs_path);
1644 }
1645 WARN("No kernel tracer available");
1646 kernel_tracer_fd = 0;
1647 return;
1648}
1649
1650/*
1651 * Init tracing by creating the trace directory and sending fds to the kernel consumer.
1652 */
1653static int init_kernel_tracing(struct ltt_kernel_session *session)
1654{
1655 int ret = 0;
1656
1657 if (session->consumer_fds_sent == 0) {
1658 /*
1659 * Assign default kernel consumer socket if no consumer assigned to the
1660		 * kernel session. At this point, it's NOT supposed to be 0 but this is
1661 * an extra security check.
1662 */
1663 if (session->consumer_fd == 0) {
1664 session->consumer_fd = kconsumer_data.cmd_sock;
1665 }
1666
1667 ret = send_kconsumer_session_streams(&kconsumer_data, session);
1668 if (ret < 0) {
1669 ret = LTTCOMM_KERN_CONSUMER_FAIL;
1670 goto error;
1671 }
1672
1673 session->consumer_fds_sent = 1;
1674 }
1675
1676error:
1677 return ret;
1678}
1679
1680/*
1681 * Create a UST session and add it to the session UST list.
1682 */
1683static int create_ust_session(struct ltt_session *session,
1684 struct lttng_domain *domain)
1685{
1686 int ret;
1687 unsigned int uid;
1688 struct ltt_ust_session *lus = NULL;
1689
1690 switch (domain->type) {
1691 case LTTNG_DOMAIN_UST:
1692 break;
1693 default:
1694 ret = LTTCOMM_UNKNOWN_DOMAIN;
1695 goto error;
1696 }
1697
1698 DBG("Creating UST session");
1699
1700 session_lock_list();
1701 uid = session_list_ptr->count;
1702 session_unlock_list();
1703
1704 lus = trace_ust_create_session(session->path, uid, domain);
1705 if (lus == NULL) {
1706 ret = LTTCOMM_UST_SESS_FAIL;
1707 goto error;
1708 }
1709
1710 ret = mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
1711 geteuid(), allowed_group());
1712 if (ret < 0) {
1713 if (ret != -EEXIST) {
1714 ERR("Trace directory creation error");
1715 ret = LTTCOMM_UST_SESS_FAIL;
1716 goto error;
1717 }
1718 }
1719
1720	/* The domain type dictates different actions on session creation */
1721 switch (domain->type) {
1722 case LTTNG_DOMAIN_UST:
1723 /* No ustctl for the global UST domain */
1724 break;
1725 default:
1726 ERR("Unknown UST domain on create session %d", domain->type);
1727 goto error;
1728 }
1729 session->ust_session = lus;
1730
1731 return LTTCOMM_OK;
1732
1733error:
1734 free(lus);
1735 return ret;
1736}
1737
1738/*
1739 * Create a kernel tracer session then create the default channel.
1740 */
1741static int create_kernel_session(struct ltt_session *session)
1742{
1743 int ret;
1744
1745 DBG("Creating kernel session");
1746
1747 ret = kernel_create_session(session, kernel_tracer_fd);
1748 if (ret < 0) {
1749 ret = LTTCOMM_KERN_SESS_FAIL;
1750 goto error;
1751 }
1752
1753 /* Set kernel consumer socket fd */
1754 if (kconsumer_data.cmd_sock) {
1755 session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
1756 }
1757
1758 ret = mkdir_recursive(session->kernel_session->trace_path,
1759 S_IRWXU | S_IRWXG, geteuid(), allowed_group());
1760 if (ret < 0) {
1761 if (ret != -EEXIST) {
1762 ERR("Trace directory creation error");
1763 goto error;
1764 }
1765 }
1766
1767error:
1768 return ret;
1769}
1770
1771/*
1772 * Using the session list, fill a lttng_session array to send back to the
1773 * client for session listing.
1774 *
1775 * The session list lock MUST be acquired before calling this function. Use
1776 * session_lock_list() and session_unlock_list().
1777 */
1778static void list_lttng_sessions(struct lttng_session *sessions)
1779{
1780 int i = 0;
1781 struct ltt_session *session;
1782
1783 DBG("Getting all available session");
1784 /*
1785 * Iterate over session list and append data after the control struct in
1786 * the buffer.
1787 */
1788 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
1789 strncpy(sessions[i].path, session->path, PATH_MAX);
1790 sessions[i].path[PATH_MAX - 1] = '\0';
1791 strncpy(sessions[i].name, session->name, NAME_MAX);
1792 sessions[i].name[NAME_MAX - 1] = '\0';
1793 sessions[i].enabled = session->enabled;
1794 i++;
1795 }
1796}
1797
1798/*
1799 * Fill lttng_channel array of all channels.
1800 */
1801static void list_lttng_channels(int domain, struct ltt_session *session,
1802 struct lttng_channel *channels)
1803{
1804 int i = 0;
1805 struct ltt_kernel_channel *kchan;
1806
1807 DBG("Listing channels for session %s", session->name);
1808
1809 switch (domain) {
1810 case LTTNG_DOMAIN_KERNEL:
1811 /* Kernel channels */
1812 if (session->kernel_session != NULL) {
1813 cds_list_for_each_entry(kchan,
1814 &session->kernel_session->channel_list.head, list) {
1815 /* Copy lttng_channel struct to array */
1816 memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
1817 channels[i].enabled = kchan->enabled;
1818 i++;
1819 }
1820 }
1821 break;
1822 case LTTNG_DOMAIN_UST:
1823 {
1824 struct cds_lfht_iter iter;
1825 struct ltt_ust_channel *uchan;
1826
1827 cds_lfht_for_each_entry(session->ust_session->domain_global.channels,
1828 &iter, uchan, node) {
1829 strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
1830 channels[i].attr.overwrite = uchan->attr.overwrite;
1831 channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
1832 channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
1833 channels[i].attr.switch_timer_interval =
1834 uchan->attr.switch_timer_interval;
1835 channels[i].attr.read_timer_interval =
1836 uchan->attr.read_timer_interval;
1837 channels[i].attr.output = uchan->attr.output;
1838 }
1839 break;
1840 }
1841 default:
1842 break;
1843 }
1844}
1845
1846/*
1847 * Create a list of ust global domain events.
1848 */
1849static int list_lttng_ust_global_events(char *channel_name,
1850 struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
1851{
1852 int i = 0, ret = 0;
1853 unsigned int nb_event = 0;
1854 struct cds_lfht_iter iter;
1855 struct ltt_ust_channel *uchan;
1856 struct ltt_ust_event *uevent;
1857 struct lttng_event *tmp;
1858
1859 DBG("Listing UST global events for channel %s", channel_name);
1860
1861 rcu_read_lock();
1862
1863 /* Count events in all channels */
1864 cds_lfht_for_each_entry(ust_global->channels, &iter, uchan, node) {
1865 nb_event += hashtable_get_count(uchan->events);
1866 }
1867
1868 if (nb_event == 0) {
1869 ret = nb_event;
1870 goto error;
1871 }
1872
1873 DBG3("Listing UST global %d events", nb_event);
1874
1875 tmp = zmalloc(nb_event * sizeof(struct lttng_event));
1876 if (tmp == NULL) {
1877 ret = -LTTCOMM_FATAL;
1878 goto error;
1879 }
1880
1881 cds_lfht_for_each_entry(ust_global->channels, &iter, uchan, node) {
1882 cds_lfht_for_each_entry(uchan->events, &iter, uevent, node) {
1883 strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
1884 tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
1885 tmp[i].enabled = uevent->enabled;
1886 switch (uevent->attr.instrumentation) {
1887 case LTTNG_UST_TRACEPOINT:
1888 tmp[i].type = LTTNG_EVENT_TRACEPOINT;
1889 break;
1890 case LTTNG_UST_PROBE:
1891 tmp[i].type = LTTNG_EVENT_PROBE;
1892 break;
1893 case LTTNG_UST_FUNCTION:
1894 tmp[i].type = LTTNG_EVENT_FUNCTION;
1895 break;
1896 }
1897 i++;
1898 }
1899 }
1900
1901 ret = nb_event;
1902 *events = tmp;
1903
1904error:
1905 rcu_read_unlock();
1906 return ret;
1907}
1908
1909/*
1910 * Fill lttng_event array of all kernel events in the channel.
1911 */
1912static int list_lttng_kernel_events(char *channel_name,
1913 struct ltt_kernel_session *kernel_session, struct lttng_event **events)
1914{
1915 int i = 0, ret;
1916 unsigned int nb_event;
1917 struct ltt_kernel_event *event;
1918 struct ltt_kernel_channel *kchan;
1919
1920 kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
1921 if (kchan == NULL) {
1922 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
1923 goto error;
1924 }
1925
1926 nb_event = kchan->event_count;
1927
1928 DBG("Listing events for channel %s", kchan->channel->name);
1929
1930 if (nb_event == 0) {
1931 ret = nb_event;
1932 goto error;
1933 }
1934
1935 *events = zmalloc(nb_event * sizeof(struct lttng_event));
1936 if (*events == NULL) {
1937 ret = LTTCOMM_FATAL;
1938 goto error;
1939 }
1940
1941 /* Kernel channels */
1942 cds_list_for_each_entry(event, &kchan->events_list.head , list) {
1943 strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
1944 (*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
1945 (*events)[i].enabled = event->enabled;
1946 switch (event->event->instrumentation) {
1947 case LTTNG_KERNEL_TRACEPOINT:
1948 (*events)[i].type = LTTNG_EVENT_TRACEPOINT;
1949 break;
1950 case LTTNG_KERNEL_KPROBE:
1951 case LTTNG_KERNEL_KRETPROBE:
1952 (*events)[i].type = LTTNG_EVENT_PROBE;
1953 memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
1954 sizeof(struct lttng_kernel_kprobe));
1955 break;
1956 case LTTNG_KERNEL_FUNCTION:
1957 (*events)[i].type = LTTNG_EVENT_FUNCTION;
1958 memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
1959 sizeof(struct lttng_kernel_function));
1960 break;
1961 case LTTNG_KERNEL_NOOP:
1962 (*events)[i].type = LTTNG_EVENT_NOOP;
1963 break;
1964 case LTTNG_KERNEL_SYSCALL:
1965 (*events)[i].type = LTTNG_EVENT_SYSCALL;
1966 break;
1967 case LTTNG_KERNEL_ALL:
1968 assert(0);
1969 break;
1970 }
1971 i++;
1972 }
1973
1974 return nb_event;
1975
1976error:
1977 return ret;
1978}
1979
1980/*
1981 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
1982 */
1983static int cmd_disable_channel(struct ltt_session *session,
1984 int domain, char *channel_name)
1985{
1986 int ret;
1987
1988 switch (domain) {
1989 case LTTNG_DOMAIN_KERNEL:
1990 ret = channel_kernel_disable(session->kernel_session,
1991 channel_name);
1992 if (ret != LTTCOMM_OK) {
1993 goto error;
1994 }
1995
1996 kernel_wait_quiescent(kernel_tracer_fd);
1997 break;
1998 case LTTNG_DOMAIN_UST_PID:
1999 break;
2000 default:
2001 ret = LTTCOMM_UNKNOWN_DOMAIN;
2002 goto error;
2003 }
2004
2005 ret = LTTCOMM_OK;
2006
2007error:
2008 return ret;
2009}
2010
2011/*
2012 * Copy channel from attributes and set it in the application channel list.
2013 */
2014/*
2015static int copy_ust_channel_to_app(struct ltt_ust_session *usess,
2016 struct lttng_channel *attr, struct ust_app *app)
2017{
2018 int ret;
2019 struct ltt_ust_channel *uchan, *new_chan;
2020
2021 uchan = trace_ust_get_channel_by_key(usess->channels, attr->name);
2022 if (uchan == NULL) {
2023 ret = LTTCOMM_FATAL;
2024 goto error;
2025 }
2026
2027 new_chan = trace_ust_create_channel(attr, usess->path);
2028 if (new_chan == NULL) {
2029 PERROR("malloc ltt_ust_channel");
2030 ret = LTTCOMM_FATAL;
2031 goto error;
2032 }
2033
2034 ret = channel_ust_copy(new_chan, uchan);
2035 if (ret < 0) {
2036 ret = LTTCOMM_FATAL;
2037 goto error;
2038 }
2039
2040error:
2041 return ret;
2042}
2043*/
2044
2045/*
2046 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
2047 */
2048static int cmd_enable_channel(struct ltt_session *session,
2049 struct lttng_domain *domain, struct lttng_channel *attr)
2050{
2051 int ret;
2052 struct ltt_ust_session *usess = session->ust_session;
2053
2054 DBG("Enabling channel %s for session %s", attr->name, session->name);
2055
2056 switch (domain->type) {
2057 case LTTNG_DOMAIN_KERNEL:
2058 {
2059 struct ltt_kernel_channel *kchan;
2060
2061 kchan = trace_kernel_get_channel_by_name(attr->name,
2062 session->kernel_session);
2063 if (kchan == NULL) {
2064 ret = channel_kernel_create(session->kernel_session,
2065 attr, kernel_poll_pipe[1]);
2066 } else {
2067 ret = channel_kernel_enable(session->kernel_session, kchan);
2068 }
2069
2070 if (ret != LTTCOMM_OK) {
2071 goto error;
2072 }
2073
2074 kernel_wait_quiescent(kernel_tracer_fd);
2075 break;
2076 }
2077 case LTTNG_DOMAIN_UST:
2078 {
2079 struct ltt_ust_channel *uchan;
2080
2081 DBG2("Enabling channel for LTTNG_DOMAIN_UST");
2082
2083 /* Get channel in global UST domain HT */
2084 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2085 attr->name);
2086 if (uchan == NULL) {
2087 uchan = trace_ust_create_channel(attr, usess->pathname);
2088 if (uchan == NULL) {
2089 ret = LTTCOMM_UST_CHAN_FAIL;
2090 goto error;
2091 }
2092
2093 rcu_read_lock();
2094 hashtable_add_unique(usess->domain_global.channels, &uchan->node);
2095 rcu_read_unlock();
2096 DBG2("UST channel %s added to global domain HT", attr->name);
2097 } else {
2098 ret = LTTCOMM_UST_CHAN_EXIST;
2099 goto error;
2100 }
2101
2102 /* Add channel to all registered applications */
2103 ret = ust_app_create_channel_all(usess, uchan);
2104 if (ret != 0) {
2105 goto error;
2106 }
2107
2108 uchan->enabled = 1;
2109
2110 break;
2111 }
2112 case LTTNG_DOMAIN_UST_PID:
2113 {
2114 /*
2115 int sock;
2116 struct ltt_ust_channel *uchan;
2117 struct ltt_ust_session *usess;
2118 struct ust_app *app;
2119
2120 usess = trace_ust_get_session_by_pid(&session->ust_session_list,
2121 domain->attr.pid);
2122 if (usess == NULL) {
2123 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2124 goto error;
2125 }
2126
2127 app = ust_app_get_by_pid(domain->attr.pid);
2128 if (app == NULL) {
2129 ret = LTTCOMM_APP_NOT_FOUND;
2130 goto error;
2131 }
2132 sock = app->sock;
2133
2134 uchan = trace_ust_get_channel_by_name(attr->name, usess);
2135 if (uchan == NULL) {
2136 ret = channel_ust_create(usess, attr, sock);
2137 } else {
2138 ret = channel_ust_enable(usess, uchan, sock);
2139 }
2140
2141 if (ret != LTTCOMM_OK) {
2142 goto error;
2143 }
2144
2145 ret = copy_ust_channel_to_app(usess, attr, app);
2146 if (ret != LTTCOMM_OK) {
2147 goto error;
2148 }
2149
2150 DBG("UST channel %s created for app sock %d with pid %d",
2151 attr->name, app->sock, domain->attr.pid);
2152 */
2153 ret = LTTCOMM_NOT_IMPLEMENTED;
2154 goto error;
2155 }
2156 default:
2157 ret = LTTCOMM_UNKNOWN_DOMAIN;
2158 goto error;
2159 }
2160
2161 ret = LTTCOMM_OK;
2162
2163error:
2164 return ret;
2165}
2166
2167/*
2168 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2169 */
2170static int cmd_disable_event(struct ltt_session *session, int domain,
2171 char *channel_name, char *event_name)
2172{
2173 int ret;
2174
2175 switch (domain) {
2176 case LTTNG_DOMAIN_KERNEL:
2177 {
2178 struct ltt_kernel_channel *kchan;
2179
2180 kchan = trace_kernel_get_channel_by_name(channel_name,
2181 session->kernel_session);
2182 if (kchan == NULL) {
2183 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2184 goto error;
2185 }
2186
2187 ret = event_kernel_disable_tracepoint(session->kernel_session, kchan, event_name);
2188 if (ret != LTTCOMM_OK) {
2189 goto error;
2190 }
2191
2192 kernel_wait_quiescent(kernel_tracer_fd);
2193 break;
2194 }
2195 case LTTNG_DOMAIN_UST:
2196 case LTTNG_DOMAIN_UST_EXEC_NAME:
2197 case LTTNG_DOMAIN_UST_PID:
2198 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2199 default:
2200 /* TODO: Other UST domains */
2201 ret = LTTCOMM_NOT_IMPLEMENTED;
2202 goto error;
2203 }
2204
2205 ret = LTTCOMM_OK;
2206
2207error:
2208 return ret;
2209}
2210
2211/*
2212 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
2213 */
2214static int cmd_disable_event_all(struct ltt_session *session, int domain,
2215 char *channel_name)
2216{
2217 int ret;
2218 struct ltt_kernel_channel *kchan;
2219
2220 switch (domain) {
2221 case LTTNG_DOMAIN_KERNEL:
2222 kchan = trace_kernel_get_channel_by_name(channel_name,
2223 session->kernel_session);
2224 if (kchan == NULL) {
2225 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2226 goto error;
2227 }
2228
2229 ret = event_kernel_disable_all(session->kernel_session, kchan);
2230 if (ret != LTTCOMM_OK) {
2231 goto error;
2232 }
2233
2234 kernel_wait_quiescent(kernel_tracer_fd);
2235 break;
2236 default:
2237 /* TODO: Userspace tracing */
2238 ret = LTTCOMM_NOT_IMPLEMENTED;
2239 goto error;
2240 }
2241
2242 ret = LTTCOMM_OK;
2243
2244error:
2245 return ret;
2246}
2247
2248/*
2249 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2250 */
2251static int cmd_add_context(struct ltt_session *session, int domain,
2252 char *channel_name, char *event_name, struct lttng_event_context *ctx)
2253{
2254 int ret;
2255
2256 switch (domain) {
2257 case LTTNG_DOMAIN_KERNEL:
2258 /* Add kernel context to kernel tracer */
2259 ret = context_kernel_add(session->kernel_session, ctx,
2260 event_name, channel_name);
2261 if (ret != LTTCOMM_OK) {
2262 goto error;
2263 }
2264 break;
2265 case LTTNG_DOMAIN_UST:
2266 {
2267 /*
2268 struct ltt_ust_session *usess;
2269
2270 cds_list_for_each_entry(usess, &session->ust_session_list.head, list) {
2271 ret = context_ust_add(usess, ctx,
2272 event_name, channel_name, domain);
2273 if (ret != LTTCOMM_OK) {
2274 goto error;
2275 }
2276 }
2277 break;
2278 */
2279 }
2280 default:
2281 /* TODO: UST other domains */
2282 ret = LTTCOMM_NOT_IMPLEMENTED;
2283 goto error;
2284 }
2285
2286 ret = LTTCOMM_OK;
2287
2288error:
2289 return ret;
2290}
2291
2292/*
2293 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2294 */
2295static int cmd_enable_event(struct ltt_session *session, int domain,
2296 char *channel_name, struct lttng_event *event)
2297{
2298 int ret;
2299 struct lttng_channel *attr;
2300 struct ltt_ust_session *usess = session->ust_session;
2301
2302 switch (domain) {
2303 case LTTNG_DOMAIN_KERNEL:
2304 {
2305 struct ltt_kernel_channel *kchan;
2306
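		/* Look up the channel; if missing, it is created below with default attributes under the requested name */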
2307 kchan = trace_kernel_get_channel_by_name(channel_name,
2308 session->kernel_session);
2309 if (kchan == NULL) {
2310 attr = channel_new_default_attr(domain);
2311 if (attr == NULL) {
2312 ret = LTTCOMM_FATAL;
2313 goto error;
2314 }
2315 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2316
2317 /* This call will notify the kernel thread */
2318 ret = channel_kernel_create(session->kernel_session,
2319 attr, kernel_poll_pipe[1]);
2320 if (ret != LTTCOMM_OK) {
2321 goto error;
2322 }
2323 }
2324
2325 /* Get the newly created kernel channel pointer */
2326 kchan = trace_kernel_get_channel_by_name(channel_name,
2327 session->kernel_session);
2328 if (kchan == NULL) {
2329 /* This should not happen... */
2330 ret = LTTCOMM_FATAL;
2331 goto error;
2332 }
2333
2334 ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
2335 event);
2336 if (ret != LTTCOMM_OK) {
2337 goto error;
2338 }
2339
2340 kernel_wait_quiescent(kernel_tracer_fd);
2341 break;
2342 }
2343 case LTTNG_DOMAIN_UST:
2344 {
2345 struct ltt_ust_channel *uchan;
2346 struct ltt_ust_event *uevent;
2347
2348 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2349 channel_name);
2350 if (uchan == NULL) {
2351 /* TODO: Create default channel */
2352 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2353 goto error;
2354 }
2355
2356 uevent = trace_ust_find_event_by_name(uchan->events, event->name);
2357 if (uevent == NULL) {
2358 uevent = trace_ust_create_event(event);
2359 if (uevent == NULL) {
2360 ret = LTTCOMM_FATAL;
2361 goto error;
2362 }
2363
2364 }
2365
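		/* Create the event in every registered application before adding it to the channel hash table */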
2366 ret = ust_app_create_event_all(usess, uchan, uevent);
2367 if (ret < 0) {
2368 ret = LTTCOMM_UST_ENABLE_FAIL;
2369 goto error;
2370 }
2371
2372 /* Add ltt ust event to channel */
2373 rcu_read_lock();
2374 hashtable_add_unique(uchan->events, &uevent->node);
2375 rcu_read_unlock();
2376
2377 uevent->enabled = 1;
2378
2379 DBG3("UST ltt event %s added to channel %s", uevent->attr.name,
2380 uchan->name);
2381 break;
2382 }
2383 case LTTNG_DOMAIN_UST_EXEC_NAME:
2384 case LTTNG_DOMAIN_UST_PID:
2385 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2386 default:
2387 ret = LTTCOMM_NOT_IMPLEMENTED;
2388 goto error;
2389 }
2390
2391 ret = LTTCOMM_OK;
2392
2393error:
2394 return ret;
2395}
2396
2397/*
2398 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
2399 */
2400static int cmd_enable_event_all(struct ltt_session *session, int domain,
2401 char *channel_name, int event_type)
2402{
2403 int ret;
2404 struct ltt_kernel_channel *kchan;
2405
2406 switch (domain) {
2407 case LTTNG_DOMAIN_KERNEL:
2408 kchan = trace_kernel_get_channel_by_name(channel_name,
2409 session->kernel_session);
2410 if (kchan == NULL) {
2411 /* This call will notify the kernel thread */
2412 ret = channel_kernel_create(session->kernel_session, NULL,
2413 kernel_poll_pipe[1]);
2414 if (ret != LTTCOMM_OK) {
2415 goto error;
2416 }
2417 }
2418
2419 /* Get the newly created kernel channel pointer */
2420 kchan = trace_kernel_get_channel_by_name(channel_name,
2421 session->kernel_session);
2422 if (kchan == NULL) {
2423 /* This should not happen... */
2424 ret = LTTCOMM_FATAL;
2425 goto error;
2426 }
2427
2428 switch (event_type) {
2429 case LTTNG_KERNEL_SYSCALL:
2430 ret = event_kernel_enable_all_syscalls(session->kernel_session,
2431 kchan, kernel_tracer_fd);
2432 break;
2433 case LTTNG_KERNEL_TRACEPOINT:
2434 /*
2435 * This call enables all LTTNG_KERNEL_TRACEPOINTS and
2436 * events already registered to the channel.
2437 */
2438 ret = event_kernel_enable_all_tracepoints(session->kernel_session,
2439 kchan, kernel_tracer_fd);
2440 break;
2441 case LTTNG_KERNEL_ALL:
2442 /* Enable syscalls and tracepoints */
2443 ret = event_kernel_enable_all(session->kernel_session,
2444 kchan, kernel_tracer_fd);
2445 break;
2446 default:
2447 ret = LTTCOMM_KERN_ENABLE_FAIL;
2448 goto error;
2449 }
2450 if (ret != LTTCOMM_OK) {
2451 goto error;
2452 }
2453
2454 kernel_wait_quiescent(kernel_tracer_fd);
2455 break;
2456 default:
2457 /* TODO: Userspace tracing */
2458 ret = LTTCOMM_NOT_IMPLEMENTED;
2459 goto error;
2460 }
2461
2462 ret = LTTCOMM_OK;
2463
2464error:
2465 return ret;
2466}
2467
2468/*
2469 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2470 */
2471static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
2472{
2473 int ret;
2474 ssize_t nb_events = 0;
2475
2476 switch (domain) {
2477 case LTTNG_DOMAIN_KERNEL:
2478 nb_events = kernel_list_events(kernel_tracer_fd, events);
2479 if (nb_events < 0) {
2480 ret = LTTCOMM_KERN_LIST_FAIL;
2481 goto error;
2482 }
2483 break;
2484 case LTTNG_DOMAIN_UST:
2485 nb_events = ust_app_list_events(events);
2486 if (nb_events < 0) {
2487 ret = LTTCOMM_UST_LIST_FAIL;
2488 goto error;
2489 }
2490 break;
2491 default:
2492 ret = LTTCOMM_NOT_IMPLEMENTED;
2493 goto error;
2494 }
2495
2496 return nb_events;
2497
2498error:
2499 /* Return negative value to differentiate return code */
2500 return -ret;
2501}
2502
2503/*
2504 * Command LTTNG_START_TRACE processed by the client thread.
2505 */
2506static int cmd_start_trace(struct ltt_session *session)
2507{
2508 int ret;
2509 struct ltt_kernel_session *ksession;
2510 struct ltt_ust_session *usess;
2511
2512 /* Short cut */
2513 ksession = session->kernel_session;
2514 usess = session->ust_session;
2515
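	/* A session can only be started once */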
2516 if (session->enabled)
2517 return LTTCOMM_UST_START_FAIL;
2518 session->enabled = 1;
2519
2520 /* Kernel tracing */
2521 if (ksession != NULL) {
2522 struct ltt_kernel_channel *kchan;
2523
2524 /* Open kernel metadata */
2525 if (ksession->metadata == NULL) {
2526 ret = kernel_open_metadata(ksession, ksession->trace_path);
2527 if (ret < 0) {
2528 ret = LTTCOMM_KERN_META_FAIL;
2529 goto error;
2530 }
2531 }
2532
2533 /* Open kernel metadata stream */
2534 if (ksession->metadata_stream_fd == 0) {
2535 ret = kernel_open_metadata_stream(ksession);
2536 if (ret < 0) {
2537 ERR("Kernel create metadata stream failed");
2538 ret = LTTCOMM_KERN_STREAM_FAIL;
2539 goto error;
2540 }
2541 }
2542
2543 /* For each channel */
2544 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
2545 if (kchan->stream_count == 0) {
2546 ret = kernel_open_channel_stream(kchan);
2547 if (ret < 0) {
2548 ret = LTTCOMM_KERN_STREAM_FAIL;
2549 goto error;
2550 }
2551 /* Update the stream global counter */
2552 ksession->stream_count_global += ret;
2553 }
2554 }
2555
2556 /* Setup kernel consumer socket and send fds to it */
2557 ret = init_kernel_tracing(ksession);
2558 if (ret < 0) {
2559 ret = LTTCOMM_KERN_START_FAIL;
2560 goto error;
2561 }
2562
2563 /* This starts the kernel tracing */
2564 ret = kernel_start_session(ksession);
2565 if (ret < 0) {
2566 ret = LTTCOMM_KERN_START_FAIL;
2567 goto error;
2568 }
2569
2570 /* Quiescent wait after starting trace */
2571 kernel_wait_quiescent(kernel_tracer_fd);
2572 }
2573
2574 /* Flag the UST session and start tracing for all registered applications */
2575 if (usess) {
2576 usess->start_trace = 1;
2577
2578 ret = ust_app_start_trace_all(usess);
2579 if (ret < 0) {
2580 ret = LTTCOMM_UST_START_FAIL;
2581 goto error;
2582 }
2583 }
2584
2585 ret = LTTCOMM_OK;
2586
2587error:
2588 return ret;
2589}
2590
2591/*
2592 * Command LTTNG_STOP_TRACE processed by the client thread.
2593 */
2594static int cmd_stop_trace(struct ltt_session *session)
2595{
2596 int ret;
2597 struct ltt_kernel_channel *kchan;
2598 struct ltt_kernel_session *ksession;
2599 struct ltt_ust_session *usess;
2600
2601 /* Short cut */
2602 ksession = session->kernel_session;
2603 usess = session->ust_session;
2604
2605 if (!session->enabled)
2606 return LTTCOMM_UST_START_FAIL;
2607 session->enabled = 0;
2608
2609 /* Kernel tracer */
2610 if (ksession != NULL) {
2611 DBG("Stop kernel tracing");
2612
2613 /* Flush all buffers before stopping */
2614 ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
2615 if (ret < 0) {
2616 ERR("Kernel metadata flush failed");
2617 }
2618
2619 cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
2620 ret = kernel_flush_buffer(kchan);
2621 if (ret < 0) {
2622 ERR("Kernel flush buffer error");
2623 }
2624 }
2625
2626 ret = kernel_stop_session(ksession);
2627 if (ret < 0) {
2628 ret = LTTCOMM_KERN_STOP_FAIL;
2629 goto error;
2630 }
2631
2632 kernel_wait_quiescent(kernel_tracer_fd);
2633 }
2634
2635 /* Clear the UST session flag and stop tracing for all registered applications */
2636 if (usess) {
2637 usess->start_trace = 0;
2638
2639 ret = ust_app_stop_trace_all(usess);
2640 if (ret < 0) {
2641 ret = LTTCOMM_UST_START_FAIL;
2642 goto error;
2643 }
2644 }
2645
2646 ret = LTTCOMM_OK;
2647
2648error:
2649 return ret;
2650}
2651
2652/*
2653 * Command LTTNG_CREATE_SESSION processed by the client thread.
2654 */
2655static int cmd_create_session(char *name, char *path)
2656{
2657 int ret;
2658
2659 ret = session_create(name, path);
2660 if (ret != LTTCOMM_OK) {
2661 goto error;
2662 }
2663
2664 ret = LTTCOMM_OK;
2665
2666error:
2667 return ret;
2668}
2669
2670/*
2671 * Command LTTNG_DESTROY_SESSION processed by the client thread.
2672 */
2673static int cmd_destroy_session(struct ltt_session *session, char *name)
2674{
2675 int ret;
2676
2677 /* Clean kernel session teardown */
2678 teardown_kernel_session(session);
2679
2680 /*
2681 * Must notify the kernel thread here to update its poll set in order
2682 * to remove the fd(s) of the channel(s) just destroyed.
2683 */
2684 ret = notify_thread_pipe(kernel_poll_pipe[1]);
2685 if (ret < 0) {
2686 perror("write kernel poll pipe");
2687 }
2688
2689 ret = session_destroy(session);
2690
2691 return ret;
2692}
2693
2694/*
2695 * Command LTTNG_CALIBRATE processed by the client thread.
2696 */
2697static int cmd_calibrate(int domain, struct lttng_calibrate *calibrate)
2698{
2699 int ret;
2700
2701 switch (domain) {
2702 case LTTNG_DOMAIN_KERNEL:
2703 {
2704 struct lttng_kernel_calibrate kcalibrate;
2705
2706 kcalibrate.type = calibrate->type;
2707 ret = kernel_calibrate(kernel_tracer_fd, &kcalibrate);
2708 if (ret < 0) {
2709 ret = LTTCOMM_KERN_ENABLE_FAIL;
2710 goto error;
2711 }
2712 break;
2713 }
2714 default:
2715 /* TODO: Userspace tracing */
2716 ret = LTTCOMM_NOT_IMPLEMENTED;
2717 goto error;
2718 }
2719
2720 ret = LTTCOMM_OK;
2721
2722error:
2723 return ret;
2724}
2725
2726/*
2727 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
2728 */
2729static int cmd_register_consumer(struct ltt_session *session, int domain,
2730 char *sock_path)
2731{
2732 int ret, sock;
2733
2734 switch (domain) {
2735 case LTTNG_DOMAIN_KERNEL:
2736 /* Can't register a consumer if there is already one */
2737 if (session->kernel_session->consumer_fd != 0) {
2738 ret = LTTCOMM_KERN_CONSUMER_FAIL;
2739 goto error;
2740 }
2741
2742 sock = lttcomm_connect_unix_sock(sock_path);
2743 if (sock < 0) {
2744 ret = LTTCOMM_CONNECT_FAIL;
2745 goto error;
2746 }
2747
2748 session->kernel_session->consumer_fd = sock;
2749 break;
2750 default:
2751 /* TODO: Userspace tracing */
2752 ret = LTTCOMM_NOT_IMPLEMENTED;
2753 goto error;
2754 }
2755
2756 ret = LTTCOMM_OK;
2757
2758error:
2759 return ret;
2760}
2761
2762/*
2763 * Command LTTNG_LIST_DOMAINS processed by the client thread.
2764 */
2765static ssize_t cmd_list_domains(struct ltt_session *session,
2766 struct lttng_domain **domains)
2767{
2768 int ret, index = 0;
2769 ssize_t nb_dom = 0;
2770
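	/* Count the domains present in this session before allocating the array */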
2771 if (session->kernel_session != NULL) {
2772 DBG3("Listing domains found kernel domain");
2773 nb_dom++;
2774 }
2775
2776 if (session->ust_session != NULL) {
2777 DBG3("Listing domains found UST global domain");
2778 nb_dom++;
2779 }
2780
2781 *domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
2782 if (*domains == NULL) {
2783 ret = -LTTCOMM_FATAL;
2784 goto error;
2785 }
2786
2787 if (session->kernel_session != NULL) {
2788 (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
2789 index++;
2790 }
2791
2792 if (session->ust_session != NULL) {
2793 (*domains)[index].type = LTTNG_DOMAIN_UST;
2794 index++;
2795 }
2796
2797 return nb_dom;
2798
2799error:
2800 return ret;
2801}
2802
2803/*
2804 * Command LTTNG_LIST_CHANNELS processed by the client thread.
2805 */
2806static ssize_t cmd_list_channels(int domain, struct ltt_session *session,
2807 struct lttng_channel **channels)
2808{
2809 int ret;
2810 ssize_t nb_chan = 0;
2811
2812 switch (domain) {
2813 case LTTNG_DOMAIN_KERNEL:
2814 if (session->kernel_session != NULL) {
2815 nb_chan = session->kernel_session->channel_count;
2816 }
2817 DBG3("Number of kernel channels %ld", nb_chan);
2818 break;
2819 case LTTNG_DOMAIN_UST:
2820 if (session->ust_session != NULL) {
2821 nb_chan = hashtable_get_count(
2822 session->ust_session->domain_global.channels);
2823 }
2824 DBG3("Number of UST global channels %ld", nb_chan);
2825 break;
2826 default:
2827 *channels = NULL;
2828 ret = -LTTCOMM_NOT_IMPLEMENTED;
2829 goto error;
2830 }
2831
2832 if (nb_chan > 0) {
2833 *channels = zmalloc(nb_chan * sizeof(struct lttng_channel));
2834 if (*channels == NULL) {
2835 ret = -LTTCOMM_FATAL;
2836 goto error;
2837 }
2838
2839 list_lttng_channels(domain, session, *channels);
2840 } else {
2841 *channels = NULL;
2842 }
2843
2844 return nb_chan;
2845
2846error:
2847 return ret;
2848}
2849
2850/*
2851 * Command LTTNG_LIST_EVENTS processed by the client thread.
2852 */
2853static ssize_t cmd_list_events(int domain, struct ltt_session *session,
2854 char *channel_name, struct lttng_event **events)
2855{
2856 int ret = 0;
2857 ssize_t nb_event = 0;
2858
2859 switch (domain) {
2860 case LTTNG_DOMAIN_KERNEL:
2861 if (session->kernel_session != NULL) {
2862 nb_event = list_lttng_kernel_events(channel_name,
2863 session->kernel_session, events);
2864 }
2865 break;
2866 case LTTNG_DOMAIN_UST:
2867 {
2868 if (session->ust_session != NULL) {
2869 nb_event = list_lttng_ust_global_events(channel_name,
2870 &session->ust_session->domain_global, events);
2871 }
2872 break;
2873 }
2874 default:
2875 ret = -LTTCOMM_NOT_IMPLEMENTED;
2876 goto error;
2877 }
2878
2879 ret = nb_event;
2880
2881error:
2882 return ret;
2883}
2884
2885/*
2886 * Process the command requested by the lttng client within the command
2887 * context structure. This function makes sure that the return structure (llm)
2888 * is set and ready for transmission before returning.
2889 *
2890 * Return any error encountered or 0 for success.
2891 */
2892static int process_client_msg(struct command_ctx *cmd_ctx)
2893{
2894 int ret = LTTCOMM_OK;
2895 int need_tracing_session = 1;
2896
2897 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2898
2899 /*
2900 * Check for commands that don't need to allocate a returned payload. We do
2901 * this here so we don't have to make the call for no payload at each
2902 * command.
2903 */
2904 switch(cmd_ctx->lsm->cmd_type) {
2905 case LTTNG_LIST_SESSIONS:
2906 case LTTNG_LIST_TRACEPOINTS:
2907 case LTTNG_LIST_DOMAINS:
2908 case LTTNG_LIST_CHANNELS:
2909 case LTTNG_LIST_EVENTS:
2910 break;
2911 default:
2912 /* Setup lttng message with no payload */
2913 ret = setup_lttng_msg(cmd_ctx, 0);
2914 if (ret < 0) {
2915 /* This label does not try to unlock the session */
2916 goto init_setup_error;
2917 }
2918 }
2919
2920 /* Commands that DO NOT need a session. */
2921 switch (cmd_ctx->lsm->cmd_type) {
2922 case LTTNG_CALIBRATE:
2923 case LTTNG_CREATE_SESSION:
2924 case LTTNG_LIST_SESSIONS:
2925 case LTTNG_LIST_TRACEPOINTS:
2926 need_tracing_session = 0;
2927 break;
2928 default:
2929 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2930 session_lock_list();
2931 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2932 session_unlock_list();
2933 if (cmd_ctx->session == NULL) {
2934 if (cmd_ctx->lsm->session.name != NULL) {
2935 ret = LTTCOMM_SESS_NOT_FOUND;
2936 } else {
2937 /* If no session name specified */
2938 ret = LTTCOMM_SELECT_SESS;
2939 }
2940 goto error;
2941 } else {
2942 /* Acquire lock for the session */
2943 session_lock(cmd_ctx->session);
2944 }
2945 break;
2946 }
2947
2948 /*
2949 * Check domain type for specific "pre-action".
2950 */
2951 switch (cmd_ctx->lsm->domain.type) {
2952 case LTTNG_DOMAIN_KERNEL:
2953 /* Kernel tracer check */
2954 if (kernel_tracer_fd == 0) {
2955 /* Basically, load kernel tracer modules */
2956 init_kernel_tracer();
2957 if (kernel_tracer_fd == 0) {
2958 ret = LTTCOMM_KERN_NA;
2959 goto error;
2960 }
2961 }
2962
2963 /* Need a session for kernel command */
2964 if (need_tracing_session) {
2965 if (cmd_ctx->session->kernel_session == NULL) {
2966 ret = create_kernel_session(cmd_ctx->session);
2967 if (ret < 0) {
2968 ret = LTTCOMM_KERN_SESS_FAIL;
2969 goto error;
2970 }
2971 }
2972
2973 /* Start the kernel consumer daemon */
2974 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2975 if (kconsumer_data.pid == 0 &&
2976 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2977 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2978 ret = start_consumerd(&kconsumer_data);
2979 if (ret < 0) {
2980 ret = LTTCOMM_KERN_CONSUMER_FAIL;
2981 goto error;
2982 }
2983 }
2984 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2985 }
2986 break;
2987 case LTTNG_DOMAIN_UST:
2988 {
2989 if (need_tracing_session) {
2990 if (cmd_ctx->session->ust_session == NULL) {
2991 ret = create_ust_session(cmd_ctx->session,
2992 &cmd_ctx->lsm->domain);
2993 if (ret != LTTCOMM_OK) {
2994 goto error;
2995 }
2996 }
2997 /* Start the UST consumer daemon */
2998 pthread_mutex_lock(&ustconsumer_data.pid_mutex);
2999 if (ustconsumer_data.pid == 0 &&
3000 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3001 pthread_mutex_unlock(&ustconsumer_data.pid_mutex);
3002 ret = start_consumerd(&ustconsumer_data);
3003 if (ret < 0) {
3004 ret = LTTCOMM_KERN_CONSUMER_FAIL;
3005 goto error;
3006 }
3007
3008 ust_consumer_fd = ustconsumer_data.cmd_sock;
3009 }
3010 pthread_mutex_unlock(&ustconsumer_data.pid_mutex);
3011 }
3012 break;
3013 }
3014 default:
3015 break;
3016 }
3017
3018 /* Process by command type */
3019 switch (cmd_ctx->lsm->cmd_type) {
3020 case LTTNG_ADD_CONTEXT:
3021 {
3022 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3023 cmd_ctx->lsm->u.context.channel_name,
3024 cmd_ctx->lsm->u.context.event_name,
3025 &cmd_ctx->lsm->u.context.ctx);
3026 break;
3027 }
3028 case LTTNG_DISABLE_CHANNEL:
3029 {
3030 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3031 cmd_ctx->lsm->u.disable.channel_name);
3032 break;
3033 }
3034 case LTTNG_DISABLE_EVENT:
3035 {
3036 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3037 cmd_ctx->lsm->u.disable.channel_name,
3038 cmd_ctx->lsm->u.disable.name);
3040 break;
3041 }
3042 case LTTNG_DISABLE_ALL_EVENT:
3043 {
3044 DBG("Disabling all events");
3045
3046 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3047 cmd_ctx->lsm->u.disable.channel_name);
3048 break;
3049 }
3050 case LTTNG_ENABLE_CHANNEL:
3051 {
3052 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
3053 &cmd_ctx->lsm->u.channel.chan);
3054 break;
3055 }
3056 case LTTNG_ENABLE_EVENT:
3057 {
3058 ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3059 cmd_ctx->lsm->u.enable.channel_name,
3060 &cmd_ctx->lsm->u.enable.event);
3061 break;
3062 }
3063 case LTTNG_ENABLE_ALL_EVENT:
3064 {
3065 DBG("Enabling all events");
3066
3067 ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3068 cmd_ctx->lsm->u.enable.channel_name,
3069 cmd_ctx->lsm->u.enable.event.type);
3070 break;
3071 }
3072 case LTTNG_LIST_TRACEPOINTS:
3073 {
3074 struct lttng_event *events;
3075 ssize_t nb_events;
3076
3077 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3078 if (nb_events < 0) {
3079 ret = -nb_events;
3080 goto error;
3081 }
3082
3083 /*
3084 * Setup lttng message with payload size set to the event list size in
3085 * bytes and then copy list into the llm payload.
3086 */
3087 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3088 if (ret < 0) {
3089 free(events);
3090 goto setup_error;
3091 }
3092
3093 /* Copy event list into message payload */
3094 memcpy(cmd_ctx->llm->payload, events,
3095 sizeof(struct lttng_event) * nb_events);
3096
3097 free(events);
3098
3099 ret = LTTCOMM_OK;
3100 break;
3101 }
3102 case LTTNG_START_TRACE:
3103 {
3104 ret = cmd_start_trace(cmd_ctx->session);
3105 break;
3106 }
3107 case LTTNG_STOP_TRACE:
3108 {
3109 ret = cmd_stop_trace(cmd_ctx->session);
3110 break;
3111 }
3112 case LTTNG_CREATE_SESSION:
3113 {
3114 ret = cmd_create_session(cmd_ctx->lsm->session.name,
3115 cmd_ctx->lsm->session.path);
3116 break;
3117 }
3118 case LTTNG_DESTROY_SESSION:
3119 {
3120 ret = cmd_destroy_session(cmd_ctx->session,
3121 cmd_ctx->lsm->session.name);
3122 break;
3123 }
3124 case LTTNG_LIST_DOMAINS:
3125 {
3126 ssize_t nb_dom;
3127 struct lttng_domain *domains;
3128
3129 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3130 if (nb_dom < 0) {
3131 ret = -nb_dom;
3132 goto error;
3133 }
3134
3135 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3136 if (ret < 0) {
3137 goto setup_error;
3138 }
3139
3140 /* Copy domain list into message payload */
3141 memcpy(cmd_ctx->llm->payload, domains,
3142 nb_dom * sizeof(struct lttng_domain));
3143
3144 free(domains);
3145
3146 ret = LTTCOMM_OK;
3147 break;
3148 }
3149 case LTTNG_LIST_CHANNELS:
3150 {
3151 ssize_t nb_chan;
3152 struct lttng_channel *channels;
3153
3154 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3155 cmd_ctx->session, &channels);
3156 if (nb_chan < 0) {
3157 ret = -nb_chan;
3158 goto error;
3159 }
3160
3161 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3162 if (ret < 0) {
3163 goto setup_error;
3164 }
3165
3166 /* Copy channel list into message payload */
3167 memcpy(cmd_ctx->llm->payload, channels,
3168 nb_chan * sizeof(struct lttng_channel));
3169
3170 free(channels);
3171
3172 ret = LTTCOMM_OK;
3173 break;
3174 }
3175 case LTTNG_LIST_EVENTS:
3176 {
3177 ssize_t nb_event;
3178 struct lttng_event *events = NULL;
3179
3180 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3181 cmd_ctx->lsm->u.list.channel_name, &events);
3182 if (nb_event < 0) {
3183 ret = -nb_event;
3184 goto error;
3185 }
3186
3187 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3188 if (ret < 0) {
3189 goto setup_error;
3190 }
3191
3192 /* Copy event list into message payload */
3193 memcpy(cmd_ctx->llm->payload, events,
3194 nb_event * sizeof(struct lttng_event));
3195
3196 free(events);
3197
3198 ret = LTTCOMM_OK;
3199 break;
3200 }
3201 case LTTNG_LIST_SESSIONS:
3202 {
3203 session_lock_list();
3204
3205 if (session_list_ptr->count == 0) {
3206 ret = LTTCOMM_NO_SESSION;
3207 session_unlock_list();
3208 goto error;
3209 }
3210
3211 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) *
3212 session_list_ptr->count);
3213 if (ret < 0) {
3214 session_unlock_list();
3215 goto setup_error;
3216 }
3217
3218 /* Fill the session array */
3219 list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload));
3220
3221 session_unlock_list();
3222
3223 ret = LTTCOMM_OK;
3224 break;
3225 }
3226 case LTTNG_CALIBRATE:
3227 {
3228 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3229 &cmd_ctx->lsm->u.calibrate);
3230 break;
3231 }
3232 case LTTNG_REGISTER_CONSUMER:
3233 {
3234 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3235 cmd_ctx->lsm->u.reg.path);
3236 break;
3237 }
3238 default:
3239 ret = LTTCOMM_UND;
3240 break;
3241 }
3242
3243error:
3244 if (cmd_ctx->llm == NULL) {
3245 DBG("Missing llm structure. Allocating one.");
3246 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3247 goto setup_error;
3248 }
3249 }
3250 /* Set return code */
3251 cmd_ctx->llm->ret_code = ret;
3252setup_error:
3253 if (cmd_ctx->session) {
3254 session_unlock(cmd_ctx->session);
3255 }
3256init_setup_error:
3257 return ret;
3258}
3259
3260/*
3261 * This thread manages all client requests using the unix client socket for
3262 * communication.
3263 */
3264static void *thread_manage_clients(void *data)
3265{
3266 int sock = 0, ret, i, pollfd;
3267 uint32_t revents, nb_fd;
3268 struct command_ctx *cmd_ctx = NULL;
3269 struct lttng_poll_event events;
3270
3271 DBG("[thread] Manage client started");
3272
3273 rcu_register_thread();
3274
3275 ret = lttcomm_listen_unix_sock(client_sock);
3276 if (ret < 0) {
3277 goto error;
3278 }
3279
3280 /*
3281 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3282 * more will be added to this poll set.
3283 */
3284 ret = create_thread_poll_set(&events, 2);
3285 if (ret < 0) {
3286 goto error;
3287 }
3288
3289 /* Add the application registration socket */
3290 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3291 if (ret < 0) {
3292 goto error;
3293 }
3294
3295 /*
3296 * Notify the parent pid that we are ready to accept client commands.
3297 */
3298 if (opt_sig_parent) {
3299 kill(ppid, SIGCHLD);
3300 }
3301
3302 while (1) {
3303 DBG("Accepting client command ...");
3304
3305 nb_fd = LTTNG_POLL_GETNB(&events);
3306
3307 /* Infinite blocking call, waiting for transmission */
3308 ret = lttng_poll_wait(&events, -1);
3309 if (ret < 0) {
3310 goto error;
3311 }
3312
3313 for (i = 0; i < nb_fd; i++) {
3314 /* Fetch once the poll data */
3315 revents = LTTNG_POLL_GETEV(&events, i);
3316 pollfd = LTTNG_POLL_GETFD(&events, i);
3317
3318 /* Thread quit pipe has been closed. Killing thread. */
3319 ret = check_thread_quit_pipe(pollfd, revents);
3320 if (ret) {
3321 goto error;
3322 }
3323
3324 /* Event on the registration socket */
3325 if (pollfd == client_sock) {
3326 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3327 ERR("Client socket poll error");
3328 goto error;
3329 }
3330 }
3331 }
3332
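		/* At this point, the only possible event left is a connection attempt on the client socket */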
3333 DBG("Wait for client response");
3334
3335 sock = lttcomm_accept_unix_sock(client_sock);
3336 if (sock < 0) {
3337 goto error;
3338 }
3339
3340 /* Allocate context command to process the client request */
3341 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3342 if (cmd_ctx == NULL) {
3343 perror("zmalloc cmd_ctx");
3344 goto error;
3345 }
3346
3347 /* Allocate data buffer for reception */
3348 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3349 if (cmd_ctx->lsm == NULL) {
3350 perror("zmalloc cmd_ctx->lsm");
3351 goto error;
3352 }
3353
3354 cmd_ctx->llm = NULL;
3355 cmd_ctx->session = NULL;
3356
3357 /*
3358 * Data is received from the lttng client. The struct
3359 * lttcomm_session_msg (lsm) contains the command and data request of
3360 * the client.
3361 */
3362 DBG("Receiving data from client ...");
3363 ret = lttcomm_recv_unix_sock(sock, cmd_ctx->lsm,
3364 sizeof(struct lttcomm_session_msg));
3365 if (ret <= 0) {
3366 DBG("Nothing recv() from client... continuing");
3367 close(sock);
3368 free(cmd_ctx);
3369 continue;
3370 }
3371
3372 // TODO: Validate cmd_ctx including sanity check for
3373 // security purposes.
3374
3375 rcu_thread_online();
3376 /*
3377 * This function dispatches the work to the kernel or userspace tracer
3378 * libs and fills the lttcomm_lttng_msg data structure with all the
3379 * information needed by the client. The command context struct contains
3380 * everything this function may need.
3381 */
3382 ret = process_client_msg(cmd_ctx);
3383 rcu_thread_offline();
3384 if (ret < 0) {
3385 /*
3386 * TODO: Inform client somehow of the fatal error. At
3387 * this point, ret < 0 means that a zmalloc failed
3388 * (ENOMEM). Error detected but still accept command.
3389 */
3390 clean_command_ctx(&cmd_ctx);
3391 continue;
3392 }
3393
3394 DBG("Sending response (size: %d, retcode: %s)",
3395 cmd_ctx->lttng_msg_size,
3396 lttng_strerror(-cmd_ctx->llm->ret_code));
3397 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3398 if (ret < 0) {
3399 ERR("Failed to send data back to client");
3400 }
3401
3402 clean_command_ctx(&cmd_ctx);
3403
3404 /* End of transmission */
3405 close(sock);
3406 }
3407
3408error:
3409 DBG("Client thread dying");
3410 unlink(client_unix_sock_path);
3411 close(client_sock);
3412 close(sock);
3413
3414 lttng_poll_clean(&events);
3415 clean_command_ctx(&cmd_ctx);
3416
3417 rcu_unregister_thread();
3418 return NULL;
3419}
3420
3421
3422/*
3423 * usage function on stderr
3424 */
3425static void usage(void)
3426{
3427 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3428 fprintf(stderr, " -h, --help Display this usage.\n");
3429 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3430 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3431 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3432 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3433 fprintf(stderr, " --ustconsumerd-err-sock PATH Specify path for the UST consumer error socket\n");
3434 fprintf(stderr, " --ustconsumerd-cmd-sock PATH Specify path for the UST consumer command socket\n");
3435 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3436 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3437 fprintf(stderr, " -V, --version Show version number.\n");
3438 fprintf(stderr, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
3439 fprintf(stderr, " -q, --quiet No output at all.\n");
3440 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3441 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3442}
3443
3444/*
3445 * daemon argument parsing
3446 */
3447static int parse_args(int argc, char **argv)
3448{
3449 int c;
3450
3451 static struct option long_options[] = {
3452 { "client-sock", 1, 0, 'c' },
3453 { "apps-sock", 1, 0, 'a' },
3454 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3455 { "kconsumerd-err-sock", 1, 0, 'E' },
3456 { "ustconsumerd-cmd-sock", 1, 0, 'D' },
3457 { "ustconsumerd-err-sock", 1, 0, 'F' },
3458 { "daemonize", 0, 0, 'd' },
3459 { "sig-parent", 0, 0, 'S' },
3460 { "help", 0, 0, 'h' },
3461 { "group", 1, 0, 'g' },
3462 { "version", 0, 0, 'V' },
3463 { "quiet", 0, 0, 'q' },
3464 { "verbose", 0, 0, 'v' },
3465 { "verbose-consumer", 0, 0, 'Z' },
3466 { NULL, 0, 0, 0 }
3467 };
3468
3469 while (1) {
3470 int option_index = 0;
3471 c = getopt_long(argc, argv, "dhqvVS" "a:c:g:s:C:E:D:F:Z",
3472 long_options, &option_index);
3473 if (c == -1) {
3474 break;
3475 }
3476
3477 switch (c) {
3478 case 0:
3479 fprintf(stderr, "option %s", long_options[option_index].name);
3480 if (optarg) {
3481 fprintf(stderr, " with arg %s\n", optarg);
3482 }
3483 break;
3484 case 'c':
3485 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
3486 break;
3487 case 'a':
3488 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
3489 break;
3490 case 'd':
3491 opt_daemon = 1;
3492 break;
3493 case 'g':
3494 opt_tracing_group = strdup(optarg);
3495 break;
3496 case 'h':
3497 usage();
3498 exit(EXIT_FAILURE);
3499 case 'V':
3500 fprintf(stdout, "%s\n", VERSION);
3501 exit(EXIT_SUCCESS);
3502 case 'S':
3503 opt_sig_parent = 1;
3504 break;
3505 case 'E':
3506 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3507 break;
3508 case 'C':
3509 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3510 break;
3511 case 'F':
3512 snprintf(ustconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3513 break;
3514 case 'D':
3515 snprintf(ustconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3516 break;
3517 case 'q':
3518 opt_quiet = 1;
3519 break;
3520 case 'v':
3521 /* Verbose level can increase using multiple -v */
3522 opt_verbose += 1;
3523 break;
3524 case 'Z':
3525 opt_verbose_consumer += 1;
3526 break;
3527 default:
3528 /* Unknown option or other error.
3529 * Error is printed by getopt, just return */
3530 return -1;
3531 }
3532 }
3533
3534 return 0;
3535}
3536
3537/*
3538 * Create the two sockets needed by the daemon.
3539 * apps_sock - The communication socket for all UST apps.
3540 * client_sock - The communication socket for the cli tool (lttng).
3541 */
3542static int init_daemon_socket(void)
3543{
3544 int ret = 0;
3545 mode_t old_umask;
3546
3547 old_umask = umask(0);
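	/* Clear the umask while the sockets are created; the chmod() calls below set the final permissions */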
3548
3549 /* Create client tool unix socket */
3550 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
3551 if (client_sock < 0) {
3552 ERR("Create unix sock failed: %s", client_unix_sock_path);
3553 ret = -1;
3554 goto end;
3555 }
3556
3557 /* File permission MUST be 660 */
3558 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3559 if (ret < 0) {
3560 ERR("Set file permissions failed: %s", client_unix_sock_path);
3561 perror("chmod");
3562 goto end;
3563 }
3564
3565 /* Create the application unix socket */
3566 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
3567 if (apps_sock < 0) {
3568 ERR("Create unix sock failed: %s", apps_unix_sock_path);
3569 ret = -1;
3570 goto end;
3571 }
3572
3573 /* File permission MUST be 666 */
3574 ret = chmod(apps_unix_sock_path,
3575 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
3576 if (ret < 0) {
3577 ERR("Set file permissions failed: %s", apps_unix_sock_path);
3578 perror("chmod");
3579 goto end;
3580 }
3581
3582end:
3583 umask(old_umask);
3584 return ret;
3585}
3586
3587/*
3588 * Check if the global sockets are available and if a daemon is answering on
3589 * the other side. If so, an error is returned.
3590 */
3591static int check_existing_daemon(void)
3592{
3593 if (access(client_unix_sock_path, F_OK) < 0 &&
3594 access(apps_unix_sock_path, F_OK) < 0) {
3595 return 0;
3596 }
3597
3598 /* Is there anybody out there ? */
3599 if (lttng_session_daemon_alive()) {
3600 return -EEXIST;
3601 } else {
3602 return 0;
3603 }
3604}
3605
3606/*
3607 * Set the tracing group gid onto the client socket.
3608 *
3609 * Race window between mkdir and chown is OK because we are going from more
3610 * permissive (root.root) to less permissive (root.tracing).
3611 */
3612static int set_permissions(void)
3613{
3614 int ret;
3615 gid_t gid;
3616
3617 gid = allowed_group();
3618 if (gid < 0) {
3619 if (is_root) {
3620 WARN("No tracing group detected");
3621 ret = 0;
3622 } else {
3623 ERR("Missing tracing group. Aborting execution.");
3624 ret = -1;
3625 }
3626 goto end;
3627 }
3628
3629 /* Set lttng run dir */
3630 ret = chown(LTTNG_RUNDIR, 0, gid);
3631 if (ret < 0) {
3632 ERR("Unable to set group on " LTTNG_RUNDIR);
3633 perror("chown");
3634 }
3635
3636 /* lttng client socket path */
3637 ret = chown(client_unix_sock_path, 0, gid);
3638 if (ret < 0) {
3639 ERR("Unable to set group on %s", client_unix_sock_path);
3640 perror("chown");
3641 }
3642
3643 /* kconsumer error socket path */
3644 ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
3645 if (ret < 0) {
3646 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
3647 perror("chown");
3648 }
3649
3650 /* ustconsumer error socket path */
3651 ret = chown(ustconsumer_data.err_unix_sock_path, 0, gid);
3652 if (ret < 0) {
3653 ERR("Unable to set group on %s", ustconsumer_data.err_unix_sock_path);
3654 perror("chown");
3655 }
3656
3657 DBG("All permissions are set");
3658
3659end:
3660 return ret;
3661}
3662
3663/*
3664 * Create the pipe used to wake up the kernel thread.
3665 */
3666static int create_kernel_poll_pipe(void)
3667{
3668 return pipe2(kernel_poll_pipe, O_CLOEXEC);
3669}
3670
3671/*
3672 * Create the application command pipe to wake thread_manage_apps.
3673 */
3674static int create_apps_cmd_pipe(void)
3675{
3676 return pipe2(apps_cmd_pipe, O_CLOEXEC);
3677}
3678
3679/*
3680 * Create the lttng run directory needed for all global sockets and pipe.
3681 */
3682static int create_lttng_rundir(void)
3683{
3684 int ret;
3685
3686 ret = mkdir(LTTNG_RUNDIR, S_IRWXU | S_IRWXG );
3687 if (ret < 0) {
3688 if (errno != EEXIST) {
3689 ERR("Unable to create " LTTNG_RUNDIR);
3690 goto error;
3691 } else {
3692 ret = 0;
3693 }
3694 }
3695
3696error:
3697 return ret;
3698}
3699
3700/*
3701 * Setup sockets and directory needed by the kconsumerd communication with the
3702 * session daemon.
3703 */
3704static int set_consumer_sockets(struct consumer_data *consumer_data)
3705{
3706 int ret;
3707 const char *path = consumer_data->type == LTTNG_CONSUMER_KERNEL ?
3708 KCONSUMERD_PATH : USTCONSUMERD_PATH;
3709
3710 if (strlen(consumer_data->err_unix_sock_path) == 0) {
3711 snprintf(consumer_data->err_unix_sock_path, PATH_MAX,
3712 consumer_data->type == LTTNG_CONSUMER_KERNEL ?
3713 KCONSUMERD_ERR_SOCK_PATH :
3714 USTCONSUMERD_ERR_SOCK_PATH);
3715 }
3716
3717 if (strlen(consumer_data->cmd_unix_sock_path) == 0) {
3718 snprintf(consumer_data->cmd_unix_sock_path, PATH_MAX,
3719 consumer_data->type == LTTNG_CONSUMER_KERNEL ?
3720 KCONSUMERD_CMD_SOCK_PATH :
3721 USTCONSUMERD_CMD_SOCK_PATH);
3722 }
3723
3724 ret = mkdir(path, S_IRWXU | S_IRWXG);
3725 if (ret < 0) {
3726 if (errno != EEXIST) {
3727 ERR("Failed to create %s", path);
3728 goto error;
3729 }
3730 ret = 0;
3731 }
3732
3733 /* Create the kconsumerd error unix socket */
3734 consumer_data->err_sock =
3735 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
3736 if (consumer_data->err_sock < 0) {
3737 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
3738 ret = -1;
3739 goto error;
3740 }
3741
3742 /* File permission MUST be 660 */
3743 ret = chmod(consumer_data->err_unix_sock_path,
3744 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3745 if (ret < 0) {
3746 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
3747 perror("chmod");
3748 goto error;
3749 }
3750
3751error:
3752 return ret;
3753}
3754
3755/*
3756 * Signal handler for the daemon
3757 *
3758 * Simply stop all worker threads, leaving main() return gracefully after
3759 * joining all threads and calling cleanup().
3760 */
3761static void sighandler(int sig)
3762{
3763 switch (sig) {
3764 case SIGPIPE:
3765 DBG("SIGPIPE catched");
3766 return;
3767 case SIGINT:
3768 DBG("SIGINT catched");
3769 stop_threads();
3770 break;
3771 case SIGTERM:
3772 DBG("SIGTERM catched");
3773 stop_threads();
3774 break;
3775 default:
3776 break;
3777 }
3778}
3779
3780/*
3781 * Setup signal handler for :
3782 * SIGINT, SIGTERM, SIGPIPE
3783 */
3784static int set_signal_handler(void)
3785{
3786 int ret = 0;
3787 struct sigaction sa;
3788 sigset_t sigset;
3789
3790 if ((ret = sigemptyset(&sigset)) < 0) {
3791 perror("sigemptyset");
3792 return ret;
3793 }
3794
3795 sa.sa_handler = sighandler;
3796 sa.sa_mask = sigset;
3797 sa.sa_flags = 0;
3798 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
3799 perror("sigaction");
3800 return ret;
3801 }
3802
3803 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
3804 perror("sigaction");
3805 return ret;
3806 }
3807
3808 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
3809 perror("sigaction");
3810 return ret;
3811 }
3812
3813 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
3814
3815 return ret;
3816}
3817
3818/*
3819 * Raise the open files limit. This daemon can open a large number of
3820 * file descriptors in order to consume multiple kernel traces.
3821 */
3822static void set_ulimit(void)
3823{
3824 int ret;
3825 struct rlimit lim;
3826
3827 /* The kernel does not allow an infinite limit for open files */
3828 lim.rlim_cur = 65535;
3829 lim.rlim_max = 65535;
3830
3831 ret = setrlimit(RLIMIT_NOFILE, &lim);
3832 if (ret < 0) {
3833 perror("failed to set open files limit");
3834 }
3835}
3836
3837/*
3838 * main
3839 */
3840int main(int argc, char **argv)
3841{
3842 int ret = 0;
3843 void *status;
3844 const char *home_path;
3845
3846 rcu_register_thread();
3847
3848 /* Create thread quit pipe */
3849 if ((ret = init_thread_quit_pipe()) < 0) {
3850 goto error;
3851 }
3852
3853 /* Parse arguments */
3854 progname = argv[0];
3855 if ((ret = parse_args(argc, argv)) < 0) {
3856 goto error;
3857 }
3858
3859 /* Daemonize */
3860 if (opt_daemon) {
3861 ret = daemon(0, 0);
3862 if (ret < 0) {
3863 perror("daemon");
3864 goto error;
3865 }
3866 }
3867
3868 /* Check if daemon is UID = 0 */
3869 is_root = !getuid();
3870
3871 if (is_root) {
3872 ret = create_lttng_rundir();
3873 if (ret < 0) {
3874 goto error;
3875 }
3876
3877 if (strlen(apps_unix_sock_path) == 0) {
3878 snprintf(apps_unix_sock_path, PATH_MAX,
3879 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
3880 }
3881
3882 if (strlen(client_unix_sock_path) == 0) {
3883 snprintf(client_unix_sock_path, PATH_MAX,
3884 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
3885 }
3886
3887 /* Set global SHM for ust */
3888 if (strlen(wait_shm_path) == 0) {
3889 snprintf(wait_shm_path, PATH_MAX,
3890 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
3891 }
3892 } else {
3893 home_path = get_home_dir();
3894 if (home_path == NULL) {
3895 /* TODO: Add --socket PATH option */
3896 ERR("Can't get HOME directory for sockets creation.");
3897 ret = -EPERM;
3898 goto error;
3899 }
3900
3901 if (strlen(apps_unix_sock_path) == 0) {
3902 snprintf(apps_unix_sock_path, PATH_MAX,
3903 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
3904 }
3905
3906 /* Set the cli tool unix socket path */
3907 if (strlen(client_unix_sock_path) == 0) {
3908 snprintf(client_unix_sock_path, PATH_MAX,
3909 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
3910 }
3911
3912 /* Set global SHM for ust */
3913 if (strlen(wait_shm_path) == 0) {
3914 snprintf(wait_shm_path, PATH_MAX,
3915 DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
3916 }
3917 }
3918
3919 DBG("Client socket path %s", client_unix_sock_path);
3920 DBG("Application socket path %s", apps_unix_sock_path);
3921
3922 /*
3923 * Check if a daemon is already running.
3924 */
3925 if ((ret = check_existing_daemon()) < 0) {
3926 ERR("Already running daemon.\n");
3927 /*
3928 * We do not goto exit because we must not cleanup()
3929 * because a daemon is already running.
3930 */
3931 goto error;
3932 }
3933
3934 /* After this point, we can safely call cleanup() with "goto exit" */
3935
3936 /*
3937 * These actions must be executed as root. We do that *after* setting up
3938 * the sockets path because we MUST make the check for another daemon using
3939 * those paths *before* trying to set the kernel consumer sockets and init
3940 * kernel tracer.
3941 */
3942 if (is_root) {
3943 ret = set_consumer_sockets(&kconsumer_data);
3944 if (ret < 0) {
3945 goto exit;
3946 }
3947
3948 ret = set_consumer_sockets(&ustconsumer_data);
3949 if (ret < 0) {
3950 goto exit;
3951 }
3952 /* Setup kernel tracer */
3953 init_kernel_tracer();
3954
3955 /* Set ulimit for open files */
3956 set_ulimit();
3957 }
3958
3959 if ((ret = set_signal_handler()) < 0) {
3960 goto exit;
3961 }
3962
3963 /* Setup the needed unix socket */
3964 if ((ret = init_daemon_socket()) < 0) {
3965 goto exit;
3966 }
3967
3968 /* Set credentials to socket */
3969 if (is_root && ((ret = set_permissions()) < 0)) {
3970 goto exit;
3971 }
3972
3973 /* Get parent pid if -S, --sig-parent is specified. */
3974 if (opt_sig_parent) {
3975 ppid = getppid();
3976 }
3977
3978 /* Setup the kernel pipe for waking up the kernel thread */
3979 if ((ret = create_kernel_poll_pipe()) < 0) {
3980 goto exit;
3981 }
3982
3983 /* Setup the thread apps communication pipe. */
3984 if ((ret = create_apps_cmd_pipe()) < 0) {
3985 goto exit;
3986 }
3987
3988 /* Init UST command queue. */
3989 cds_wfq_init(&ust_cmd_queue.queue);
3990
3991 /* Init UST app hash table */
3992 ust_app_ht_alloc();
3993
3994 /*
3995 * Get session list pointer. This pointer MUST NOT be free(). This list is
3996 * statically declared in session.c
3997 */
3998 session_list_ptr = session_get_list();
3999
4000 /* Set up max poll set size */
4001 lttng_poll_set_max_size();
4002
4003 /* Create thread to manage the client socket */
4004 ret = pthread_create(&client_thread, NULL,
4005 thread_manage_clients, (void *) NULL);
4006 if (ret != 0) {
4007 perror("pthread_create clients");
4008 goto exit_client;
4009 }
4010
4011 /* Create thread to dispatch registration */
4012 ret = pthread_create(&dispatch_thread, NULL,
4013 thread_dispatch_ust_registration, (void *) NULL);
4014 if (ret != 0) {
4015 perror("pthread_create dispatch");
4016 goto exit_dispatch;
4017 }
4018
4019 /* Create thread to manage application registration. */
4020 ret = pthread_create(&reg_apps_thread, NULL,
4021 thread_registration_apps, (void *) NULL);
4022 if (ret != 0) {
4023 perror("pthread_create registration");
4024 goto exit_reg_apps;
4025 }
4026
4027 /* Create thread to manage application socket */
4028 ret = pthread_create(&apps_thread, NULL,
4029 thread_manage_apps, (void *) NULL);
4030 if (ret != 0) {
4031 perror("pthread_create apps");
4032 goto exit_apps;
4033 }
4034
4035 /* Create kernel thread to manage kernel event */
4036 ret = pthread_create(&kernel_thread, NULL,
4037 thread_manage_kernel, (void *) NULL);
4038 if (ret != 0) {
4039 perror("pthread_create kernel");
4040 goto exit_kernel;
4041 }
4042
4043 ret = pthread_join(kernel_thread, &status);
4044 if (ret != 0) {
4045 perror("pthread_join");
4046 goto error; /* join error, exit without cleanup */
4047 }
4048
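	/*
	 * Each exit label below joins the threads that were successfully
	 * created before the corresponding failure point, in reverse order
	 * of creation; on the normal path, execution falls through them all.
	 */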
4049exit_kernel:
4050 ret = pthread_join(apps_thread, &status);
4051 if (ret != 0) {
4052 perror("pthread_join");
4053 goto error; /* join error, exit without cleanup */
4054 }
4055
4056exit_apps:
4057 ret = pthread_join(reg_apps_thread, &status);
4058 if (ret != 0) {
4059 perror("pthread_join");
4060 goto error; /* join error, exit without cleanup */
4061 }
4062
4063exit_reg_apps:
4064 ret = pthread_join(dispatch_thread, &status);
4065 if (ret != 0) {
4066 perror("pthread_join");
4067 goto error; /* join error, exit without cleanup */
4068 }
4069
4070exit_dispatch:
4071 ret = pthread_join(client_thread, &status);
4072 if (ret != 0) {
4073 perror("pthread_join");
4074 goto error; /* join error, exit without cleanup */
4075 }
4076
4077 ret = join_consumer_thread(&kconsumer_data);
4078 if (ret != 0) {
4079 perror("join_consumer");
4080 goto error; /* join error, exit without cleanup */
4081 }
4082
4083exit_client:
4084exit:
4085 /*
4086 * cleanup() is called when no other thread is running.
4087 */
4088 rcu_thread_online();
4089 cleanup();
4090 rcu_thread_offline();
4091 rcu_unregister_thread();
4092 if (!ret)
4093 exit(EXIT_SUCCESS);
4094error:
4095 exit(EXIT_FAILURE);
4096}