6ea615e10bd67435866598632c592d4f3562dc68
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <semaphore.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/poll.h>
42 #include <common/compat/socket.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/ust-consumer/ust-consumer.h>
46 #include <common/futex.h>
47
48 #include "lttng-sessiond.h"
49 #include "channel.h"
50 #include "context.h"
51 #include "event.h"
52 #include "kernel.h"
53 #include "kernel-consumer.h"
54 #include "modprobe.h"
55 #include "shm.h"
56 #include "ust-ctl.h"
57 #include "utils.h"
58 #include "fd-limit.h"
59
60 #define CONSUMERD_FILE "lttng-consumerd"
61
62 /* Const values */
63 const char default_home_dir[] = DEFAULT_HOME_DIR;
64 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
65 const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
66 const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
67
68 const char *progname;
69 const char *opt_tracing_group;
70 static int opt_sig_parent;
71 static int opt_verbose_consumer;
72 static int opt_daemon;
73 static int opt_no_kernel;
74 static int is_root; /* Set to 1 if the daemon is running as root */
75 static pid_t ppid; /* Parent PID for --sig-parent option */
76 static char *rundir;
77
78 /* Consumer daemon specific control data */
79 static struct consumer_data kconsumer_data = {
80 .type = LTTNG_CONSUMER_KERNEL,
81 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
82 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
83 .err_sock = -1,
84 .cmd_sock = -1,
85 };
86 static struct consumer_data ustconsumer64_data = {
87 .type = LTTNG_CONSUMER64_UST,
88 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
89 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
90 .err_sock = -1,
91 .cmd_sock = -1,
92 };
93 static struct consumer_data ustconsumer32_data = {
94 .type = LTTNG_CONSUMER32_UST,
95 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
96 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
97 .err_sock = -1,
98 .cmd_sock = -1,
99 };
100
101 static int dispatch_thread_exit;
102
103 /* Global application Unix socket path */
104 static char apps_unix_sock_path[PATH_MAX];
105 /* Global client Unix socket path */
106 static char client_unix_sock_path[PATH_MAX];
107 /* global wait shm path for UST */
108 static char wait_shm_path[PATH_MAX];
109
110 /* Sockets and FDs */
111 static int client_sock = -1;
112 static int apps_sock = -1;
113 static int kernel_tracer_fd = -1;
114 static int kernel_poll_pipe[2] = { -1, -1 };
115
116 /*
117 * Quit pipe for all threads. This permits a single cancellation point
118 * for all threads when receiving an event on the pipe.
119 */
120 static int thread_quit_pipe[2] = { -1, -1 };
121
122 /*
123 * This pipe is used to inform the thread managing application communication
124 * that a command is queued and ready to be processed.
125 */
126 static int apps_cmd_pipe[2] = { -1, -1 };
127
128 /* Pthread, Mutexes and Semaphores */
129 static pthread_t apps_thread;
130 static pthread_t reg_apps_thread;
131 static pthread_t client_thread;
132 static pthread_t kernel_thread;
133 static pthread_t dispatch_thread;
134
135
136 /*
137 * UST registration command queue. This queue is tied with a futex and uses a N
138 * wakers / 1 waiter implemented and detailed in futex.c/.h
139 *
140 * The thread_manage_apps and thread_dispatch_ust_registration interact with
141 * this queue and the wait/wake scheme.
142 */
143 static struct ust_cmd_queue ust_cmd_queue;
144
145 /*
146 * Pointer initialized before thread creation.
147 *
148 * This points to the tracing session list containing the session count and a
149 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
150 * MUST NOT be taken if you call a public function in session.c.
151 *
152 * The lock is nested inside the structure: session_list_ptr->lock. Please use
153 * session_lock_list and session_unlock_list for lock acquisition.
154 */
155 static struct ltt_session_list *session_list_ptr;
156
157 int ust_consumerd64_fd = -1;
158 int ust_consumerd32_fd = -1;
159
160 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
161 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
162 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
163 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
164
165 /*
166 * Consumer daemon state which is changed when spawning it, killing it or in
167 * case of a fatal error.
168 */
169 enum consumerd_state {
170 CONSUMER_STARTED = 1,
171 CONSUMER_STOPPED = 2,
172 CONSUMER_ERROR = 3,
173 };
174
175 /*
176 * This consumer daemon state is used to validate if a client command will be
177 * able to reach the consumer. If not, the client is informed. For instance,
178 * doing a "lttng start" when the consumer state is set to ERROR will return an
179 * error to the client.
180 *
181 * The following example shows a possible race condition of this scheme:
182 *
183 * consumer thread error happens
184 * client cmd arrives
185 * client cmd checks state -> still OK
186 * consumer thread exit, sets error
187 * client cmd try to talk to consumer
188 * ...
189 *
190 * However, since the consumer is a different daemon, we have no way of making
191 * sure the command will reach it safely even with this state flag. This is why
192 * we consider that up to the state validation during command processing, the
193 * command is safe. After that, we can not guarantee the correctness of the
194 * client request vis-a-vis the consumer.
195 */
196 static enum consumerd_state ust_consumerd_state;
197 static enum consumerd_state kernel_consumerd_state;
198
199 static
200 void setup_consumerd_path(void)
201 {
202 const char *bin, *libdir;
203
204 /*
205 * Allow INSTALL_BIN_PATH to be used as a target path for the
206 * native architecture size consumer if CONFIG_CONSUMER*_PATH
207 * has not been defined.
208 */
209 #if (CAA_BITS_PER_LONG == 32)
210 if (!consumerd32_bin[0]) {
211 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
212 }
213 if (!consumerd32_libdir[0]) {
214 consumerd32_libdir = INSTALL_LIB_PATH;
215 }
216 #elif (CAA_BITS_PER_LONG == 64)
217 if (!consumerd64_bin[0]) {
218 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
219 }
220 if (!consumerd64_libdir[0]) {
221 consumerd64_libdir = INSTALL_LIB_PATH;
222 }
223 #else
224 #error "Unknown bitness"
225 #endif
226
227 /*
228 * runtime env. var. overrides the build default.
229 */
230 bin = getenv("LTTNG_CONSUMERD32_BIN");
231 if (bin) {
232 consumerd32_bin = bin;
233 }
234 bin = getenv("LTTNG_CONSUMERD64_BIN");
235 if (bin) {
236 consumerd64_bin = bin;
237 }
238 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
239 if (libdir) {
240 consumerd32_libdir = libdir;
241 }
242 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
243 if (libdir) {
244 consumerd64_libdir = libdir;
245 }
246 }
247
248 /*
249 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
250 */
251 static int create_thread_poll_set(struct lttng_poll_event *events,
252 unsigned int size)
253 {
254 int ret;
255
256 if (events == NULL || size == 0) {
257 ret = -1;
258 goto error;
259 }
260
261 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
262 if (ret < 0) {
263 goto error;
264 }
265
266 /* Add quit pipe */
267 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
268 if (ret < 0) {
269 goto error;
270 }
271
272 return 0;
273
274 error:
275 return ret;
276 }
277
278 /*
279 * Check if the thread quit pipe was triggered.
280 *
281 * Return 1 if it was triggered else 0;
282 */
283 static int check_thread_quit_pipe(int fd, uint32_t events)
284 {
285 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
286 return 1;
287 }
288
289 return 0;
290 }
291
292 /*
293 * Return group ID of the tracing group or -1 if not found.
294 */
295 static gid_t allowed_group(void)
296 {
297 struct group *grp;
298
299 if (opt_tracing_group) {
300 grp = getgrnam(opt_tracing_group);
301 } else {
302 grp = getgrnam(default_tracing_group);
303 }
304 if (!grp) {
305 return -1;
306 } else {
307 return grp->gr_gid;
308 }
309 }
310
311 /*
312 * Init thread quit pipe.
313 *
314 * Return -1 on error or 0 if all pipes are created.
315 */
316 static int init_thread_quit_pipe(void)
317 {
318 int ret, i;
319
320 ret = pipe(thread_quit_pipe);
321 if (ret < 0) {
322 PERROR("thread quit pipe");
323 goto error;
324 }
325
326 for (i = 0; i < 2; i++) {
327 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
328 if (ret < 0) {
329 PERROR("fcntl");
330 goto error;
331 }
332 }
333
334 error:
335 return ret;
336 }
337
/*
 * Complete teardown of a kernel session. This free all data structure related
 * to a kernel session and update counter.
 */
static void teardown_kernel_session(struct ltt_session *session)
{
	if (!session->kernel_session) {
		/* Session never had a kernel domain; nothing to release. */
		DBG3("No kernel session when tearing down session");
		return;
	}

	DBG("Tearing down kernel session");

	/*
	 * If a custom kernel consumer was registered, close the socket before
	 * tearing down the complete kernel session structure
	 */
	if (kconsumer_data.cmd_sock >= 0 &&
			session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
		lttcomm_close_unix_sock(session->kernel_session->consumer_fd);
	}

	/*
	 * Destroys the kernel session structure; presumably frees channels,
	 * streams and metadata too — see trace-kernel.c to confirm.
	 */
	trace_kernel_destroy_session(session->kernel_session);
}
362
363 /*
364 * Complete teardown of all UST sessions. This will free everything on his path
365 * and destroy the core essence of all ust sessions :)
366 */
367 static void teardown_ust_session(struct ltt_session *session)
368 {
369 int ret;
370
371 if (!session->ust_session) {
372 DBG3("No UST session when tearing down session");
373 return;
374 }
375
376 DBG("Tearing down UST session(s)");
377
378 ret = ust_app_destroy_trace_all(session->ust_session);
379 if (ret) {
380 ERR("Error in ust_app_destroy_trace_all");
381 }
382
383 trace_ust_destroy_session(session->ust_session);
384 }
385
386 /*
387 * Stop all threads by closing the thread quit pipe.
388 */
389 static void stop_threads(void)
390 {
391 int ret;
392
393 /* Stopping all threads */
394 DBG("Terminating all threads");
395 ret = notify_thread_pipe(thread_quit_pipe[1]);
396 if (ret < 0) {
397 ERR("write error on thread quit pipe");
398 }
399
400 /* Dispatch thread */
401 dispatch_thread_exit = 1;
402 futex_nto1_wake(&ust_cmd_queue.futex);
403 }
404
405 /*
406 * Cleanup the daemon
407 */
408 static void cleanup(void)
409 {
410 int ret;
411 char *cmd;
412 struct ltt_session *sess, *stmp;
413
414 DBG("Cleaning up");
415
416 DBG("Removing %s directory", rundir);
417 ret = asprintf(&cmd, "rm -rf %s", rundir);
418 if (ret < 0) {
419 ERR("asprintf failed. Something is really wrong!");
420 }
421
422 /* Remove lttng run directory */
423 ret = system(cmd);
424 if (ret < 0) {
425 ERR("Unable to clean %s", rundir);
426 }
427 free(cmd);
428
429 DBG("Cleaning up all sessions");
430
431 /* Destroy session list mutex */
432 if (session_list_ptr != NULL) {
433 pthread_mutex_destroy(&session_list_ptr->lock);
434
435 /* Cleanup ALL session */
436 cds_list_for_each_entry_safe(sess, stmp,
437 &session_list_ptr->head, list) {
438 teardown_kernel_session(sess);
439 teardown_ust_session(sess);
440 free(sess);
441 }
442 }
443
444 DBG("Closing all UST sockets");
445 ust_app_clean_list();
446
447 pthread_mutex_destroy(&kconsumer_data.pid_mutex);
448
449 if (is_root && !opt_no_kernel) {
450 DBG2("Closing kernel fd");
451 if (kernel_tracer_fd >= 0) {
452 ret = close(kernel_tracer_fd);
453 if (ret) {
454 PERROR("close");
455 }
456 }
457 DBG("Unloading kernel modules");
458 modprobe_remove_lttng_all();
459 }
460 utils_close_pipe(kernel_poll_pipe);
461 utils_close_pipe(thread_quit_pipe);
462 utils_close_pipe(apps_cmd_pipe);
463
464 /* <fun> */
465 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
466 "Matthew, BEET driven development works!%c[%dm",
467 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
468 /* </fun> */
469 }
470
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code, or -1 on an empty payload.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/*
	 * Reject empty payloads. len is unsigned (size_t), so the previous
	 * "len <= 0" test could only ever mean "len == 0"; spell it that
	 * way to avoid the misleading signed-style comparison.
	 */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
485
486 /*
487 * Free memory of a command context structure.
488 */
489 static void clean_command_ctx(struct command_ctx **cmd_ctx)
490 {
491 DBG("Clean command context structure");
492 if (*cmd_ctx) {
493 if ((*cmd_ctx)->llm) {
494 free((*cmd_ctx)->llm);
495 }
496 if ((*cmd_ctx)->lsm) {
497 free((*cmd_ctx)->lsm);
498 }
499 free(*cmd_ctx);
500 *cmd_ctx = NULL;
501 }
502 }
503
504 /*
505 * Notify UST applications using the shm mmap futex.
506 */
507 static int notify_ust_apps(int active)
508 {
509 char *wait_shm_mmap;
510
511 DBG("Notifying applications of session daemon state: %d", active);
512
513 /* See shm.c for this call implying mmap, shm and futex calls */
514 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
515 if (wait_shm_mmap == NULL) {
516 goto error;
517 }
518
519 /* Wake waiting process */
520 futex_wait_update((int32_t *) wait_shm_mmap, active);
521
522 /* Apps notified successfully */
523 return 0;
524
525 error:
526 return -1;
527 }
528
529 /*
530 * Setup the outgoing data buffer for the response (llm) by allocating the
531 * right amount of memory and copying the original information from the lsm
532 * structure.
533 *
534 * Return total size of the buffer pointed by buf.
535 */
536 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
537 {
538 int ret, buf_size;
539
540 buf_size = size;
541
542 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
543 if (cmd_ctx->llm == NULL) {
544 PERROR("zmalloc");
545 ret = -ENOMEM;
546 goto error;
547 }
548
549 /* Copy common data */
550 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
551 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
552
553 cmd_ctx->llm->data_size = size;
554 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
555
556 return buf_size;
557
558 error:
559 return ret;
560 }
561
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * session. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	/*
	 * Lock order: session list lock first, then each session's own lock
	 * while it is inspected. Both are released before returning.
	 */
	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			/* No kernel domain for this session; skip it. */
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				/* Drop the session lock before the shared error path. */
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
602
/*
 * Find the channel fd from 'fd' over all tracing session. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		/* This is not suppose to be -1 but this is an extra security check */
		if (session->kernel_session->consumer_fd < 0) {
			session->kernel_session->consumer_fd = consumer_data->cmd_sock;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (session->kernel_session->consumer_fds_sent == 1) {
					ret = kernel_consumer_send_channel_stream(consumer_data,
							channel, session->uid, session->gid);
					if (ret < 0) {
						goto error;
					}
				}
				/*
				 * Intentional: the "error" label doubles as the found-exit.
				 * At this point the session is still locked (we jumped out
				 * of both loops), ret is 0, and the label unlocks the
				 * session and the list before returning.
				 */
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	/* Reached only with 'session' locked; release both locks. */
	session_unlock(session);
	session_unlock_list();
	return ret;
}
664
665 /*
666 * For each tracing session, update newly registered apps.
667 */
668 static void update_ust_app(int app_sock)
669 {
670 struct ltt_session *sess, *stmp;
671
672 session_lock_list();
673
674 /* For all tracing session(s) */
675 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
676 session_lock(sess);
677 if (sess->ust_session) {
678 ust_app_global_update(sess->ust_session, app_sock);
679 }
680 session_unlock(sess);
681 }
682
683 session_unlock_list();
684 }
685
/*
 * This thread manage event coming from the kernel.
 *
 * Features supported in this thread:
 * -) CPU Hotplug
 *
 * Polls the kernel notification pipe plus every kernel channel fd; when a
 * channel fd becomes readable, a new CPU stream is opened and (if tracing
 * already started) forwarded to the kernel consumer.
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("Thread manage kernel started");

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		if (update_poll_flag == 1) {
			/*
			 * Reset number of fd in the poll set. Always 2 since there is the thread
			 * quit pipe and the kernel pipe.
			 */
			events.nb_fd = 2;

			/* Re-add every known kernel channel fd to the set. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Thread kernel polling on %d fds", nb_fd);

		/* Zeroed the poll events */
		lttng_poll_reset(&events);

		/* Poll infinite value of time */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto error;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				/*
				 * Drain one notification byte and rebuild the poll set
				 * on the next loop iteration.
				 * NOTE(review): the read() return value is ignored here.
				 */
				ret = read(kernel_poll_pipe[0], &tmp, 1);
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					/* Restart the outer loop to re-poll cleanly. */
					break;
					/*
					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
					 * and unregister kernel stream at this point.
					 */
				}
			}
		}
	}

error:
	lttng_poll_clean(&events);
error_poll_create:
	DBG("Kernel thread dying");
	return NULL;
}
793
/*
 * This thread manage the consumer error sent back to the session daemon.
 *
 * Two-phase protocol: first wait on the consumer error socket for the
 * CONSUMERD_COMMAND_SOCK_READY code and connect the command socket (the
 * semaphore is posted either way so the spawner is never left blocked);
 * then switch the poll set to the accepted socket and block until the
 * consumer reports an error or closes. Any exit marks the matching
 * consumerd state as CONSUMER_ERROR.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Inifinite blocking call, waiting for transmission */
restart:
	ret = lttng_poll_wait(&events, -1);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			goto error;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	if (code == CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
				lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* Post anyway so the spawning thread is not left blocked. */
			sem_post(&consumer_data->sem);
			PERROR("consumer connect");
			goto error;
		}
		/* Signal condition to tell that the kconsumerd is ready */
		sem_post(&consumer_data->sem);
		DBG("consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connexion */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* From now on, only the accepted error socket is monitored. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Update number of fd */
	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Inifinite blocking call, waiting for transmission */
restart_poll:
	ret = lttng_poll_wait(&events, -1);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			goto error;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	/* Close all sockets that may still be open; ignore unopened ones. */
	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);
error_poll:
error_listen:
	DBG("consumer thread cleanup completed");

	return NULL;
}
991
/*
 * This thread manage application communication.
 *
 * Consumes registration commands from apps_cmd_pipe (written by the
 * dispatch thread), registers each application, validates its UST version,
 * pushes the current global UST configuration to it, then keeps the app
 * socket in the poll set only to detect remote close (unregistration).
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	/* This thread reads RCU-protected app structures. */
	rcu_register_thread();
	rcu_thread_online();

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		/* Zeroed the events structure */
		lttng_poll_reset(&events);

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Apps thread polling on %d fds", nb_fd);

		/* Inifinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto error;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					/* Empty pipe */
					ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						/* Short read: the command would be truncated. */
						PERROR("read apps cmd pipe");
						goto error;
					}

					/* Register applicaton to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						goto error;
					} else if (ret < 0) {
						/* Registration failed; drop this app, keep running. */
						break;
					}

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We just need here to monitor the close of the UST
						 * socket and poll set monitor those by default.
						 * Listen on POLLIN (even if we never expect any
						 * data) to ensure that hangup wakes us.
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);
						if (ret < 0) {
							goto error;
						}

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}
		}
	}

error:
	lttng_poll_clean(&events);
error_poll_create:
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
1136
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 *
 * Implements the waiter side of the N-wakers/1-waiter futex scheme
 * (see futex.c): drain the wait-free queue, forward each command over
 * apps_cmd_pipe, then block on the futex until woken. Exits when
 * stop_threads() sets dispatch_thread_exit and wakes the futex.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!dispatch_thread_exit) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			/* Commands were enqueued by the registration thread. */
			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			ret = write(apps_cmd_pipe[1], ust_cmd,
					sizeof(struct ust_command));
			if (ret < 0) {
				PERROR("write apps cmd pipe");
				if (errno == EBADF) {
					/*
					 * We can't inform the application thread to process
					 * registration. We will exit or else application
					 * registration will not occur and tracing will never
					 * start.
					 */
					goto error;
				}
			}
			/* Ownership: commands are freed here after dispatch. */
			free(ust_cmd);
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}

error:
	DBG("Dispatch thread dying");
	return NULL;
}
1200
/*
 * This thread manage application registration.
 *
 * Listens on the apps unix socket, reads exactly one registration message
 * per accepted connection, wraps it in a heap-allocated ust_command and
 * enqueues it on the lock-free ust_cmd_queue for the dispatch thread.
 * Ownership of the accepted socket transfers to the enqueued command.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Get allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problem for already\n"
			"running applications that wishes to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Inifinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto error;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					/* Reserve one fd slot before keeping the app socket. */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						/* Short read: drop command, socket and fd slot. */
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}

					/* Socket ownership moves into the queued command. */
					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		/* A socket was accepted but never handed to a command: clean up. */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
	DBG("UST Registration thread cleanup complete");

	return NULL;
}
1378
1379 /*
1380 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1381 * exec or it will fails.
1382 */
1383 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1384 {
1385 int ret;
1386 struct timespec timeout;
1387
1388 timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
1389 timeout.tv_nsec = 0;
1390
1391 /* Setup semaphore */
1392 ret = sem_init(&consumer_data->sem, 0, 0);
1393 if (ret < 0) {
1394 PERROR("sem_init consumer semaphore");
1395 goto error;
1396 }
1397
1398 ret = pthread_create(&consumer_data->thread, NULL,
1399 thread_manage_consumer, consumer_data);
1400 if (ret != 0) {
1401 PERROR("pthread_create consumer");
1402 ret = -1;
1403 goto error;
1404 }
1405
1406 /* Get time for sem_timedwait absolute timeout */
1407 ret = clock_gettime(CLOCK_REALTIME, &timeout);
1408 if (ret < 0) {
1409 PERROR("clock_gettime spawn consumer");
1410 /* Infinite wait for the kconsumerd thread to be ready */
1411 ret = sem_wait(&consumer_data->sem);
1412 } else {
1413 /* Normal timeout if the gettime was successful */
1414 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1415 ret = sem_timedwait(&consumer_data->sem, &timeout);
1416 }
1417
1418 if (ret < 0) {
1419 if (errno == ETIMEDOUT) {
1420 /*
1421 * Call has timed out so we kill the kconsumerd_thread and return
1422 * an error.
1423 */
1424 ERR("The consumer thread was never ready. Killing it");
1425 ret = pthread_cancel(consumer_data->thread);
1426 if (ret < 0) {
1427 PERROR("pthread_cancel consumer thread");
1428 }
1429 } else {
1430 PERROR("semaphore wait failed consumer thread");
1431 }
1432 goto error;
1433 }
1434
1435 pthread_mutex_lock(&consumer_data->pid_mutex);
1436 if (consumer_data->pid == 0) {
1437 ERR("Kconsumerd did not start");
1438 pthread_mutex_unlock(&consumer_data->pid_mutex);
1439 goto error;
1440 }
1441 pthread_mutex_unlock(&consumer_data->pid_mutex);
1442
1443 return 0;
1444
1445 error:
1446 return ret;
1447 }
1448
1449 /*
1450 * Join consumer thread
1451 */
1452 static int join_consumer_thread(struct consumer_data *consumer_data)
1453 {
1454 void *status;
1455 int ret;
1456
1457 if (consumer_data->pid != 0) {
1458 ret = kill(consumer_data->pid, SIGTERM);
1459 if (ret) {
1460 ERR("Error killing consumer daemon");
1461 return ret;
1462 }
1463 return pthread_join(consumer_data->thread, &status);
1464 } else {
1465 return 0;
1466 }
1467 }
1468
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * The child selects the consumerd binary matching the consumer type,
 * optionally prepends the matching libdir to LD_LIBRARY_PATH, then execs.
 * The parent only records the child's pid.
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one,
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3(" 1) %s", consumerd64_bin);
			DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3(" 3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				/* Build "LD_LIBRARY_PATH=<libdir>[:<previous value>]". */
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				/* putenv() keeps tmpnew referenced: do not free it yet. */
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			/* Only reached when execl() itself failed. */
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				/* Build "LD_LIBRARY_PATH=<libdir>[:<previous value>]". */
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				/* putenv() keeps tmpnew referenced: do not free it yet. */
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			/* Only reached when execl() itself failed. */
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		/* Any exec failure leaves errno set; report it before exiting. */
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		/* Parent: report the child's pid. */
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
1630
1631 /*
1632 * Spawn the consumerd daemon and session daemon thread.
1633 */
1634 static int start_consumerd(struct consumer_data *consumer_data)
1635 {
1636 int ret;
1637
1638 pthread_mutex_lock(&consumer_data->pid_mutex);
1639 if (consumer_data->pid != 0) {
1640 pthread_mutex_unlock(&consumer_data->pid_mutex);
1641 goto end;
1642 }
1643
1644 ret = spawn_consumerd(consumer_data);
1645 if (ret < 0) {
1646 ERR("Spawning consumerd failed");
1647 pthread_mutex_unlock(&consumer_data->pid_mutex);
1648 goto error;
1649 }
1650
1651 /* Setting up the consumer_data pid */
1652 consumer_data->pid = ret;
1653 DBG2("Consumer pid %d", consumer_data->pid);
1654 pthread_mutex_unlock(&consumer_data->pid_mutex);
1655
1656 DBG2("Spawning consumer control thread");
1657 ret = spawn_consumer_thread(consumer_data);
1658 if (ret < 0) {
1659 ERR("Fatal error spawning consumer control thread");
1660 goto error;
1661 }
1662
1663 end:
1664 return 0;
1665
1666 error:
1667 return ret;
1668 }
1669
/*
 * Check version of the lttng-modules.
 *
 * Thin wrapper delegating to the kernel tracer ABI version check through
 * the already-opened kernel_tracer_fd.
 */
static int validate_lttng_modules_version(void)
{
	return kernel_validate_version(kernel_tracer_fd);
}
1677
/*
 * Setup necessary data for kernel tracer action.
 *
 * Loads the LTTng control modules, opens the kernel tracer proc file and
 * validates the module version, then loads the data (probe) modules.
 * On success kernel_tracer_fd is usable; on any failure it is reset to -1,
 * modules loaded so far are unloaded and an LTTCOMM code is returned.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = validate_lttng_modules_version();
	if (ret < 0) {
		goto error_version;
	}

	/* Control side validated: now load the data modules. */
	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	/* Version mismatch: unload control modules, then close the tracer fd. */
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTCOMM_KERN_VERSION;

error_modules:
	/* Data module load failed: close the fd before unloading control. */
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTCOMM_NEED_ROOT_SESSIOND;
	} else {
		return LTTCOMM_KERN_NA;
	}
}
1740
1741 /*
1742 * Init tracing by creating trace directory and sending fds kernel consumer.
1743 */
1744 static int init_kernel_tracing(struct ltt_kernel_session *session)
1745 {
1746 int ret = 0;
1747
1748 if (session->consumer_fds_sent == 0) {
1749 /*
1750 * Assign default kernel consumer socket if no consumer assigned to the
1751 * kernel session. At this point, it's NOT supposed to be -1 but this is
1752 * an extra security check.
1753 */
1754 if (session->consumer_fd < 0) {
1755 session->consumer_fd = kconsumer_data.cmd_sock;
1756 }
1757
1758 ret = kernel_consumer_send_session(&kconsumer_data, session);
1759 if (ret < 0) {
1760 ret = LTTCOMM_KERN_CONSUMER_FAIL;
1761 goto error;
1762 }
1763
1764 session->consumer_fds_sent = 1;
1765 }
1766
1767 error:
1768 return ret;
1769 }
1770
1771 /*
1772 * Create an UST session and add it to the session ust list.
1773 */
1774 static int create_ust_session(struct ltt_session *session,
1775 struct lttng_domain *domain)
1776 {
1777 struct ltt_ust_session *lus = NULL;
1778 int ret;
1779
1780 switch (domain->type) {
1781 case LTTNG_DOMAIN_UST:
1782 break;
1783 default:
1784 ret = LTTCOMM_UNKNOWN_DOMAIN;
1785 goto error;
1786 }
1787
1788 DBG("Creating UST session");
1789
1790 lus = trace_ust_create_session(session->path, session->id, domain);
1791 if (lus == NULL) {
1792 ret = LTTCOMM_UST_SESS_FAIL;
1793 goto error;
1794 }
1795
1796 ret = run_as_mkdir_recursive(lus->pathname, S_IRWXU | S_IRWXG,
1797 session->uid, session->gid);
1798 if (ret < 0) {
1799 if (ret != -EEXIST) {
1800 ERR("Trace directory creation error");
1801 ret = LTTCOMM_UST_SESS_FAIL;
1802 goto error;
1803 }
1804 }
1805
1806 /* The domain type dictate different actions on session creation */
1807 switch (domain->type) {
1808 case LTTNG_DOMAIN_UST:
1809 /* No ustctl for the global UST domain */
1810 break;
1811 default:
1812 ERR("Unknown UST domain on create session %d", domain->type);
1813 goto error;
1814 }
1815 lus->uid = session->uid;
1816 lus->gid = session->gid;
1817 session->ust_session = lus;
1818
1819 return LTTCOMM_OK;
1820
1821 error:
1822 free(lus);
1823 return ret;
1824 }
1825
1826 /*
1827 * Create a kernel tracer session then create the default channel.
1828 */
1829 static int create_kernel_session(struct ltt_session *session)
1830 {
1831 int ret;
1832
1833 DBG("Creating kernel session");
1834
1835 ret = kernel_create_session(session, kernel_tracer_fd);
1836 if (ret < 0) {
1837 ret = LTTCOMM_KERN_SESS_FAIL;
1838 goto error;
1839 }
1840
1841 /* Set kernel consumer socket fd */
1842 if (kconsumer_data.cmd_sock >= 0) {
1843 session->kernel_session->consumer_fd = kconsumer_data.cmd_sock;
1844 }
1845
1846 ret = run_as_mkdir_recursive(session->kernel_session->trace_path,
1847 S_IRWXU | S_IRWXG, session->uid, session->gid);
1848 if (ret < 0) {
1849 if (ret != -EEXIST) {
1850 ERR("Trace directory creation error");
1851 goto error;
1852 }
1853 }
1854 session->kernel_session->uid = session->uid;
1855 session->kernel_session->gid = session->gid;
1856
1857 error:
1858 return ret;
1859 }
1860
1861 /*
1862 * Check if the UID or GID match the session. Root user has access to all
1863 * sessions.
1864 */
1865 static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
1866 {
1867 if (uid != session->uid && gid != session->gid && uid != 0) {
1868 return 0;
1869 } else {
1870 return 1;
1871 }
1872 }
1873
1874 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
1875 {
1876 unsigned int i = 0;
1877 struct ltt_session *session;
1878
1879 DBG("Counting number of available session for UID %d GID %d",
1880 uid, gid);
1881 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
1882 /*
1883 * Only list the sessions the user can control.
1884 */
1885 if (!session_access_ok(session, uid, gid)) {
1886 continue;
1887 }
1888 i++;
1889 }
1890 return i;
1891 }
1892
/*
 * Using the session list, filled a lttng_session array to send back to the
 * client for session listing.
 *
 * The session list lock MUST be acquired before calling this function. Use
 * session_lock_list() and session_unlock_list().
 *
 * The caller is expected to have sized sessions[] with
 * lttng_sessions_count() for the same uid/gid.
 */
static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
		gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Getting all available session for UID %d GID %d",
			uid, gid);
	/*
	 * Iterate over session list and append data after the control struct in
	 * the buffer.
	 */
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		/*
		 * strncpy is deliberate: it zero-pads the fixed-size fields of a
		 * struct that is sent back to the client, and termination is
		 * enforced explicitly below.
		 */
		strncpy(sessions[i].path, session->path, PATH_MAX);
		sessions[i].path[PATH_MAX - 1] = '\0';
		strncpy(sessions[i].name, session->name, NAME_MAX);
		sessions[i].name[NAME_MAX - 1] = '\0';
		sessions[i].enabled = session->enabled;
		i++;
	}
}
1927
1928 /*
1929 * Fill lttng_channel array of all channels.
1930 */
1931 static void list_lttng_channels(int domain, struct ltt_session *session,
1932 struct lttng_channel *channels)
1933 {
1934 int i = 0;
1935 struct ltt_kernel_channel *kchan;
1936
1937 DBG("Listing channels for session %s", session->name);
1938
1939 switch (domain) {
1940 case LTTNG_DOMAIN_KERNEL:
1941 /* Kernel channels */
1942 if (session->kernel_session != NULL) {
1943 cds_list_for_each_entry(kchan,
1944 &session->kernel_session->channel_list.head, list) {
1945 /* Copy lttng_channel struct to array */
1946 memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
1947 channels[i].enabled = kchan->enabled;
1948 i++;
1949 }
1950 }
1951 break;
1952 case LTTNG_DOMAIN_UST:
1953 {
1954 struct lttng_ht_iter iter;
1955 struct ltt_ust_channel *uchan;
1956
1957 cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
1958 &iter.iter, uchan, node.node) {
1959 strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
1960 channels[i].attr.overwrite = uchan->attr.overwrite;
1961 channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
1962 channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
1963 channels[i].attr.switch_timer_interval =
1964 uchan->attr.switch_timer_interval;
1965 channels[i].attr.read_timer_interval =
1966 uchan->attr.read_timer_interval;
1967 channels[i].enabled = uchan->enabled;
1968 switch (uchan->attr.output) {
1969 case LTTNG_UST_MMAP:
1970 default:
1971 channels[i].attr.output = LTTNG_EVENT_MMAP;
1972 break;
1973 }
1974 i++;
1975 }
1976 break;
1977 }
1978 default:
1979 break;
1980 }
1981 }
1982
/*
 * Create a list of ust global domain events.
 *
 * On success returns the number of events and stores a newly allocated
 * array in *events (owned by the caller); returns 0 with *events untouched
 * when the channel has no events, or a negative LTTCOMM code on error.
 */
static int list_lttng_ust_global_events(char *channel_name,
		struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ltt_ust_channel *uchan;
	struct ltt_ust_event *uevent;
	struct lttng_event *tmp;

	DBG("Listing UST global events for channel %s", channel_name);

	/* The channel/event hash tables are RCU-protected. */
	rcu_read_lock();

	lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		ret = -LTTCOMM_UST_CHAN_NOT_FOUND;
		goto error;
	}

	uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);

	nb_event += lttng_ht_get_count(uchan->events);

	if (nb_event == 0) {
		/* Empty channel: report zero events, allocate nothing. */
		ret = nb_event;
		goto error;
	}

	DBG3("Listing UST global %d events", nb_event);

	tmp = zmalloc(nb_event * sizeof(struct lttng_event));
	if (tmp == NULL) {
		ret = -LTTCOMM_FATAL;
		goto error;
	}

	/* Translate each internal UST event into the public lttng_event form. */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
		tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		tmp[i].enabled = uevent->enabled;
		switch (uevent->attr.instrumentation) {
		case LTTNG_UST_TRACEPOINT:
			tmp[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_UST_PROBE:
			tmp[i].type = LTTNG_EVENT_PROBE;
			break;
		case LTTNG_UST_FUNCTION:
			tmp[i].type = LTTNG_EVENT_FUNCTION;
			break;
		}
		tmp[i].loglevel = uevent->attr.loglevel;
		switch (uevent->attr.loglevel_type) {
		case LTTNG_UST_LOGLEVEL_ALL:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
			break;
		case LTTNG_UST_LOGLEVEL_RANGE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
			break;
		case LTTNG_UST_LOGLEVEL_SINGLE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
			break;
		}
		i++;
	}

	ret = nb_event;
	*events = tmp;

error:
	rcu_read_unlock();
	return ret;
}
2062
/*
 * Fill lttng_event array of all kernel events in the channel.
 *
 * On success returns the number of events and stores a newly allocated
 * array in *events (owned by the caller); returns 0 when the channel has
 * no events.
 *
 * NOTE(review): on failure this returns POSITIVE LTTCOMM codes
 * (LTTCOMM_KERN_CHAN_NOT_FOUND, LTTCOMM_FATAL) while the UST counterpart
 * returns NEGATIVE ones — callers must be aware of the asymmetry; verify
 * before unifying.
 */
static int list_lttng_kernel_events(char *channel_name,
		struct ltt_kernel_session *kernel_session, struct lttng_event **events)
{
	int i = 0, ret;
	unsigned int nb_event;
	struct ltt_kernel_event *event;
	struct ltt_kernel_channel *kchan;

	kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
	if (kchan == NULL) {
		ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
		goto error;
	}

	nb_event = kchan->event_count;

	DBG("Listing events for channel %s", kchan->channel->name);

	if (nb_event == 0) {
		/* Empty channel: report zero events, allocate nothing. */
		ret = nb_event;
		goto error;
	}

	*events = zmalloc(nb_event * sizeof(struct lttng_event));
	if (*events == NULL) {
		ret = LTTCOMM_FATAL;
		goto error;
	}

	/* Kernel channels */
	cds_list_for_each_entry(event, &kchan->events_list.head , list) {
		strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
		(*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		(*events)[i].enabled = event->enabled;
		switch (event->event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
			(*events)[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_KERNEL_KPROBE:
		case LTTNG_KERNEL_KRETPROBE:
			(*events)[i].type = LTTNG_EVENT_PROBE;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_FUNCTION:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
					sizeof(struct lttng_kernel_function));
			break;
		case LTTNG_KERNEL_NOOP:
			(*events)[i].type = LTTNG_EVENT_NOOP;
			break;
		case LTTNG_KERNEL_SYSCALL:
			(*events)[i].type = LTTNG_EVENT_SYSCALL;
			break;
		case LTTNG_KERNEL_ALL:
			/* LTTNG_KERNEL_ALL is never stored on a concrete event. */
			assert(0);
			break;
		}
		i++;
	}

	return nb_event;

error:
	return ret;
}
2133
2134 /*
2135 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
2136 */
2137 static int cmd_disable_channel(struct ltt_session *session,
2138 int domain, char *channel_name)
2139 {
2140 int ret;
2141 struct ltt_ust_session *usess;
2142
2143 usess = session->ust_session;
2144
2145 switch (domain) {
2146 case LTTNG_DOMAIN_KERNEL:
2147 {
2148 ret = channel_kernel_disable(session->kernel_session,
2149 channel_name);
2150 if (ret != LTTCOMM_OK) {
2151 goto error;
2152 }
2153
2154 kernel_wait_quiescent(kernel_tracer_fd);
2155 break;
2156 }
2157 case LTTNG_DOMAIN_UST:
2158 {
2159 struct ltt_ust_channel *uchan;
2160 struct lttng_ht *chan_ht;
2161
2162 chan_ht = usess->domain_global.channels;
2163
2164 uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
2165 if (uchan == NULL) {
2166 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2167 goto error;
2168 }
2169
2170 ret = channel_ust_disable(usess, domain, uchan);
2171 if (ret != LTTCOMM_OK) {
2172 goto error;
2173 }
2174 break;
2175 }
2176 #if 0
2177 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2178 case LTTNG_DOMAIN_UST_EXEC_NAME:
2179 case LTTNG_DOMAIN_UST_PID:
2180 #endif
2181 default:
2182 ret = LTTCOMM_UNKNOWN_DOMAIN;
2183 goto error;
2184 }
2185
2186 ret = LTTCOMM_OK;
2187
2188 error:
2189 return ret;
2190 }
2191
2192 /*
2193 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
2194 */
2195 static int cmd_enable_channel(struct ltt_session *session,
2196 int domain, struct lttng_channel *attr)
2197 {
2198 int ret;
2199 struct ltt_ust_session *usess = session->ust_session;
2200 struct lttng_ht *chan_ht;
2201
2202 DBG("Enabling channel %s for session %s", attr->name, session->name);
2203
2204 switch (domain) {
2205 case LTTNG_DOMAIN_KERNEL:
2206 {
2207 struct ltt_kernel_channel *kchan;
2208
2209 kchan = trace_kernel_get_channel_by_name(attr->name,
2210 session->kernel_session);
2211 if (kchan == NULL) {
2212 ret = channel_kernel_create(session->kernel_session,
2213 attr, kernel_poll_pipe[1]);
2214 } else {
2215 ret = channel_kernel_enable(session->kernel_session, kchan);
2216 }
2217
2218 if (ret != LTTCOMM_OK) {
2219 goto error;
2220 }
2221
2222 kernel_wait_quiescent(kernel_tracer_fd);
2223 break;
2224 }
2225 case LTTNG_DOMAIN_UST:
2226 {
2227 struct ltt_ust_channel *uchan;
2228
2229 chan_ht = usess->domain_global.channels;
2230
2231 uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
2232 if (uchan == NULL) {
2233 ret = channel_ust_create(usess, domain, attr);
2234 } else {
2235 ret = channel_ust_enable(usess, domain, uchan);
2236 }
2237 break;
2238 }
2239 #if 0
2240 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2241 case LTTNG_DOMAIN_UST_EXEC_NAME:
2242 case LTTNG_DOMAIN_UST_PID:
2243 #endif
2244 default:
2245 ret = LTTCOMM_UNKNOWN_DOMAIN;
2246 goto error;
2247 }
2248
2249 error:
2250 return ret;
2251 }
2252
2253 /*
2254 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2255 */
2256 static int cmd_disable_event(struct ltt_session *session, int domain,
2257 char *channel_name, char *event_name)
2258 {
2259 int ret;
2260
2261 switch (domain) {
2262 case LTTNG_DOMAIN_KERNEL:
2263 {
2264 struct ltt_kernel_channel *kchan;
2265 struct ltt_kernel_session *ksess;
2266
2267 ksess = session->kernel_session;
2268
2269 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
2270 if (kchan == NULL) {
2271 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2272 goto error;
2273 }
2274
2275 ret = event_kernel_disable_tracepoint(ksess, kchan, event_name);
2276 if (ret != LTTCOMM_OK) {
2277 goto error;
2278 }
2279
2280 kernel_wait_quiescent(kernel_tracer_fd);
2281 break;
2282 }
2283 case LTTNG_DOMAIN_UST:
2284 {
2285 struct ltt_ust_channel *uchan;
2286 struct ltt_ust_session *usess;
2287
2288 usess = session->ust_session;
2289
2290 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2291 channel_name);
2292 if (uchan == NULL) {
2293 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2294 goto error;
2295 }
2296
2297 ret = event_ust_disable_tracepoint(usess, domain, uchan, event_name);
2298 if (ret != LTTCOMM_OK) {
2299 goto error;
2300 }
2301
2302 DBG3("Disable UST event %s in channel %s completed", event_name,
2303 channel_name);
2304 break;
2305 }
2306 #if 0
2307 case LTTNG_DOMAIN_UST_EXEC_NAME:
2308 case LTTNG_DOMAIN_UST_PID:
2309 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2310 #endif
2311 default:
2312 ret = LTTCOMM_UND;
2313 goto error;
2314 }
2315
2316 ret = LTTCOMM_OK;
2317
2318 error:
2319 return ret;
2320 }
2321
2322 /*
2323 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
2324 */
2325 static int cmd_disable_event_all(struct ltt_session *session, int domain,
2326 char *channel_name)
2327 {
2328 int ret;
2329
2330 switch (domain) {
2331 case LTTNG_DOMAIN_KERNEL:
2332 {
2333 struct ltt_kernel_session *ksess;
2334 struct ltt_kernel_channel *kchan;
2335
2336 ksess = session->kernel_session;
2337
2338 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
2339 if (kchan == NULL) {
2340 ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
2341 goto error;
2342 }
2343
2344 ret = event_kernel_disable_all(ksess, kchan);
2345 if (ret != LTTCOMM_OK) {
2346 goto error;
2347 }
2348
2349 kernel_wait_quiescent(kernel_tracer_fd);
2350 break;
2351 }
2352 case LTTNG_DOMAIN_UST:
2353 {
2354 struct ltt_ust_session *usess;
2355 struct ltt_ust_channel *uchan;
2356
2357 usess = session->ust_session;
2358
2359 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2360 channel_name);
2361 if (uchan == NULL) {
2362 ret = LTTCOMM_UST_CHAN_NOT_FOUND;
2363 goto error;
2364 }
2365
2366 ret = event_ust_disable_all_tracepoints(usess, domain, uchan);
2367 if (ret != 0) {
2368 goto error;
2369 }
2370
2371 DBG3("Disable all UST events in channel %s completed", channel_name);
2372
2373 break;
2374 }
2375 #if 0
2376 case LTTNG_DOMAIN_UST_EXEC_NAME:
2377 case LTTNG_DOMAIN_UST_PID:
2378 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2379 #endif
2380 default:
2381 ret = LTTCOMM_UND;
2382 goto error;
2383 }
2384
2385 ret = LTTCOMM_OK;
2386
2387 error:
2388 return ret;
2389 }
2390
2391 /*
2392 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2393 */
2394 static int cmd_add_context(struct ltt_session *session, int domain,
2395 char *channel_name, char *event_name, struct lttng_event_context *ctx)
2396 {
2397 int ret;
2398
2399 switch (domain) {
2400 case LTTNG_DOMAIN_KERNEL:
2401 /* Add kernel context to kernel tracer */
2402 ret = context_kernel_add(session->kernel_session, ctx,
2403 event_name, channel_name);
2404 if (ret != LTTCOMM_OK) {
2405 goto error;
2406 }
2407 break;
2408 case LTTNG_DOMAIN_UST:
2409 {
2410 struct ltt_ust_session *usess = session->ust_session;
2411
2412 ret = context_ust_add(usess, domain, ctx, event_name, channel_name);
2413 if (ret != LTTCOMM_OK) {
2414 goto error;
2415 }
2416 break;
2417 }
2418 #if 0
2419 case LTTNG_DOMAIN_UST_EXEC_NAME:
2420 case LTTNG_DOMAIN_UST_PID:
2421 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2422 #endif
2423 default:
2424 ret = LTTCOMM_UND;
2425 goto error;
2426 }
2427
2428 ret = LTTCOMM_OK;
2429
2430 error:
2431 return ret;
2432 }
2433
2434 /*
2435 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2436 */
2437 static int cmd_enable_event(struct ltt_session *session, int domain,
2438 char *channel_name, struct lttng_event *event)
2439 {
2440 int ret;
2441 struct lttng_channel *attr;
2442 struct ltt_ust_session *usess = session->ust_session;
2443
2444 switch (domain) {
2445 case LTTNG_DOMAIN_KERNEL:
2446 {
2447 struct ltt_kernel_channel *kchan;
2448
2449 kchan = trace_kernel_get_channel_by_name(channel_name,
2450 session->kernel_session);
2451 if (kchan == NULL) {
2452 attr = channel_new_default_attr(domain);
2453 if (attr == NULL) {
2454 ret = LTTCOMM_FATAL;
2455 goto error;
2456 }
2457 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2458
2459 /* This call will notify the kernel thread */
2460 ret = channel_kernel_create(session->kernel_session,
2461 attr, kernel_poll_pipe[1]);
2462 if (ret != LTTCOMM_OK) {
2463 free(attr);
2464 goto error;
2465 }
2466 free(attr);
2467 }
2468
2469 /* Get the newly created kernel channel pointer */
2470 kchan = trace_kernel_get_channel_by_name(channel_name,
2471 session->kernel_session);
2472 if (kchan == NULL) {
2473 /* This sould not happen... */
2474 ret = LTTCOMM_FATAL;
2475 goto error;
2476 }
2477
2478 ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
2479 event);
2480 if (ret != LTTCOMM_OK) {
2481 goto error;
2482 }
2483
2484 kernel_wait_quiescent(kernel_tracer_fd);
2485 break;
2486 }
2487 case LTTNG_DOMAIN_UST:
2488 {
2489 struct lttng_channel *attr;
2490 struct ltt_ust_channel *uchan;
2491
2492 /* Get channel from global UST domain */
2493 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2494 channel_name);
2495 if (uchan == NULL) {
2496 /* Create default channel */
2497 attr = channel_new_default_attr(domain);
2498 if (attr == NULL) {
2499 ret = LTTCOMM_FATAL;
2500 goto error;
2501 }
2502 snprintf(attr->name, NAME_MAX, "%s", channel_name);
2503 attr->name[NAME_MAX - 1] = '\0';
2504
2505 ret = channel_ust_create(usess, domain, attr);
2506 if (ret != LTTCOMM_OK) {
2507 free(attr);
2508 goto error;
2509 }
2510 free(attr);
2511
2512 /* Get the newly created channel reference back */
2513 uchan = trace_ust_find_channel_by_name(
2514 usess->domain_global.channels, channel_name);
2515 if (uchan == NULL) {
2516 /* Something is really wrong */
2517 ret = LTTCOMM_FATAL;
2518 goto error;
2519 }
2520 }
2521
2522 /* At this point, the session and channel exist on the tracer */
2523 ret = event_ust_enable_tracepoint(usess, domain, uchan, event);
2524 if (ret != LTTCOMM_OK) {
2525 goto error;
2526 }
2527 break;
2528 }
2529 #if 0
2530 case LTTNG_DOMAIN_UST_EXEC_NAME:
2531 case LTTNG_DOMAIN_UST_PID:
2532 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
2533 #endif
2534 default:
2535 ret = LTTCOMM_UND;
2536 goto error;
2537 }
2538
2539 ret = LTTCOMM_OK;
2540
2541 error:
2542 return ret;
2543 }
2544
2545 /*
2546 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
2547 */
/*
 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
 *
 * Enable all events of the requested type (tracepoints, syscalls or
 * both) in one channel of the session. If the channel does not exist
 * yet, it is created first (with default attributes), then the events
 * are enabled on it.
 *
 * Returns LTTCOMM_OK on success or an LTTCOMM error code.
 */
static int cmd_enable_event_all(struct ltt_session *session, int domain,
		char *channel_name, int event_type)
{
	int ret;
	struct ltt_kernel_channel *kchan;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			/*
			 * Channel not found: create it with default attributes.
			 * This call will notify the kernel thread.
			 */
			ret = channel_kernel_create(session->kernel_session, NULL,
					kernel_poll_pipe[1]);
			if (ret != LTTCOMM_OK) {
				goto error;
			}

			/* Get the newly created kernel channel pointer */
			kchan = trace_kernel_get_channel_by_name(channel_name,
					session->kernel_session);
			if (kchan == NULL) {
				/* This should not happen... */
				ret = LTTCOMM_FATAL;
				goto error;
			}

		}

		/* Dispatch on the requested event class. */
		switch (event_type) {
		case LTTNG_EVENT_SYSCALL:
			ret = event_kernel_enable_all_syscalls(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_TRACEPOINT:
			/*
			 * This call enables all LTTNG_KERNEL_TRACEPOINTS and
			 * events already registered to the channel.
			 */
			ret = event_kernel_enable_all_tracepoints(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_ALL:
			/* Enable syscalls and tracepoints */
			ret = event_kernel_enable_all(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		default:
			ret = LTTCOMM_KERN_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value of the enable call above. */
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_channel *attr;
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess = session->ust_session;

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(domain);
			if (attr == NULL) {
				ret = LTTCOMM_FATAL;
				goto error;
			}
			snprintf(attr->name, NAME_MAX, "%s", channel_name);
			attr->name[NAME_MAX - 1] = '\0';

			/* Use the internal command enable channel */
			ret = channel_ust_create(usess, domain, attr);
			if (ret != LTTCOMM_OK) {
				free(attr);
				goto error;
			}
			free(attr);

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
			if (uchan == NULL) {
				/* Something is really wrong */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */

		switch (event_type) {
		case LTTNG_EVENT_ALL:
		case LTTNG_EVENT_TRACEPOINT:
			/* For UST, ALL and TRACEPOINT are handled identically. */
			ret = event_ust_enable_all_tracepoints(usess, domain, uchan);
			if (ret != LTTCOMM_OK) {
				goto error;
			}
			break;
		default:
			/* Syscall tracing is not available for the UST domain. */
			ret = LTTCOMM_UST_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value */
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		break;
	}
#if 0
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
#endif
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
2681
2682 /*
2683 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2684 */
2685 static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
2686 {
2687 int ret;
2688 ssize_t nb_events = 0;
2689
2690 switch (domain) {
2691 case LTTNG_DOMAIN_KERNEL:
2692 nb_events = kernel_list_events(kernel_tracer_fd, events);
2693 if (nb_events < 0) {
2694 ret = LTTCOMM_KERN_LIST_FAIL;
2695 goto error;
2696 }
2697 break;
2698 case LTTNG_DOMAIN_UST:
2699 nb_events = ust_app_list_events(events);
2700 if (nb_events < 0) {
2701 ret = LTTCOMM_UST_LIST_FAIL;
2702 goto error;
2703 }
2704 break;
2705 default:
2706 ret = LTTCOMM_UND;
2707 goto error;
2708 }
2709
2710 return nb_events;
2711
2712 error:
2713 /* Return negative value to differentiate return code */
2714 return -ret;
2715 }
2716
2717 /*
2718 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
2719 */
2720 static ssize_t cmd_list_tracepoint_fields(int domain,
2721 struct lttng_event_field **fields)
2722 {
2723 int ret;
2724 ssize_t nb_fields = 0;
2725
2726 switch (domain) {
2727 case LTTNG_DOMAIN_UST:
2728 nb_fields = ust_app_list_event_fields(fields);
2729 if (nb_fields < 0) {
2730 ret = LTTCOMM_UST_LIST_FAIL;
2731 goto error;
2732 }
2733 break;
2734 case LTTNG_DOMAIN_KERNEL:
2735 default: /* fall-through */
2736 ret = LTTCOMM_UND;
2737 goto error;
2738 }
2739
2740 return nb_fields;
2741
2742 error:
2743 /* Return negative value to differentiate return code */
2744 return -ret;
2745 }
2746
2747 /*
2748 * Command LTTNG_START_TRACE processed by the client thread.
2749 */
/*
 * Command LTTNG_START_TRACE processed by the client thread.
 *
 * Start tracing for every domain active in the session. For the kernel
 * domain this lazily opens the metadata, the metadata stream and every
 * channel stream, pushes the fds to the consumer and starts the kernel
 * session. For UST, the session is flagged started and all registered
 * applications are told to start.
 *
 * Returns LTTCOMM_OK on success or an LTTCOMM error code.
 */
static int cmd_start_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	/* Short cut */
	ksession = session->kernel_session;
	usess = session->ust_session;

	if (session->enabled) {
		/* Already started. */
		ret = LTTCOMM_TRACE_ALREADY_STARTED;
		goto error;
	}

	/* Mark started before touching the tracers. */
	session->enabled = 1;

	/* Kernel tracing */
	if (ksession != NULL) {
		struct ltt_kernel_channel *kchan;

		/* Open kernel metadata (only on first start). */
		if (ksession->metadata == NULL) {
			ret = kernel_open_metadata(ksession, ksession->trace_path);
			if (ret < 0) {
				ret = LTTCOMM_KERN_META_FAIL;
				goto error;
			}
		}

		/* Open kernel metadata stream (only if not already open). */
		if (ksession->metadata_stream_fd < 0) {
			ret = kernel_open_metadata_stream(ksession);
			if (ret < 0) {
				ERR("Kernel create metadata stream failed");
				ret = LTTCOMM_KERN_STREAM_FAIL;
				goto error;
			}
		}

		/* For each channel, open its streams if not done yet. */
		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			if (kchan->stream_count == 0) {
				ret = kernel_open_channel_stream(kchan);
				if (ret < 0) {
					ret = LTTCOMM_KERN_STREAM_FAIL;
					goto error;
				}
				/*
				 * Update the stream global counter; on success
				 * ret is the number of streams just opened.
				 */
				ksession->stream_count_global += ret;
			}
		}

		/* Setup kernel consumer socket and send fds to it */
		ret = init_kernel_tracing(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* This start the kernel tracing */
		ret = kernel_start_session(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* Quiescent wait after starting trace */
		kernel_wait_quiescent(kernel_tracer_fd);
	}

	/* Flag session that trace should start automatically */
	if (usess) {
		usess->start_trace = 1;

		/* Tell every registered application to start tracing. */
		ret = ust_app_start_trace_all(usess);
		if (ret < 0) {
			ret = LTTCOMM_UST_START_FAIL;
			goto error;
		}
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
2838
2839 /*
2840 * Command LTTNG_STOP_TRACE processed by the client thread.
2841 */
/*
 * Command LTTNG_STOP_TRACE processed by the client thread.
 *
 * Stop tracing for every domain active in the session. Kernel buffers
 * (metadata and per-channel) are flushed before the session is stopped;
 * flush failures are logged but do not abort the stop. For UST, the
 * start flag is cleared and all applications are told to stop.
 *
 * Returns LTTCOMM_OK on success or an LTTCOMM error code.
 */
static int cmd_stop_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_channel *kchan;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	/* Short cut */
	ksession = session->kernel_session;
	usess = session->ust_session;

	if (!session->enabled) {
		/* Nothing to do if the session is not started. */
		ret = LTTCOMM_TRACE_ALREADY_STOPPED;
		goto error;
	}

	session->enabled = 0;

	/* Kernel tracer */
	if (ksession != NULL) {
		DBG("Stop kernel tracing");

		/* Flush all buffers before stopping; best-effort only. */
		ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
		if (ret < 0) {
			ERR("Kernel metadata flush failed");
		}

		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			ret = kernel_flush_buffer(kchan);
			if (ret < 0) {
				ERR("Kernel flush buffer error");
			}
		}

		ret = kernel_stop_session(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_STOP_FAIL;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
	}

	if (usess) {
		/* Clear the auto-start flag and stop all applications. */
		usess->start_trace = 0;

		ret = ust_app_stop_trace_all(usess);
		if (ret < 0) {
			ret = LTTCOMM_UST_STOP_FAIL;
			goto error;
		}
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
2901
2902 /*
2903 * Command LTTNG_CREATE_SESSION processed by the client thread.
2904 */
2905 static int cmd_create_session(char *name, char *path, lttng_sock_cred *creds)
2906 {
2907 int ret;
2908
2909 ret = session_create(name, path, LTTNG_SOCK_GET_UID_CRED(creds),
2910 LTTNG_SOCK_GET_GID_CRED(creds));
2911 if (ret != LTTCOMM_OK) {
2912 goto error;
2913 }
2914
2915 ret = LTTCOMM_OK;
2916
2917 error:
2918 return ret;
2919 }
2920
2921 /*
2922 * Command LTTNG_DESTROY_SESSION processed by the client thread.
2923 */
2924 static int cmd_destroy_session(struct ltt_session *session, char *name)
2925 {
2926 int ret;
2927
2928 /* Clean kernel session teardown */
2929 teardown_kernel_session(session);
2930 /* UST session teardown */
2931 teardown_ust_session(session);
2932
2933 /*
2934 * Must notify the kernel thread here to update it's poll setin order
2935 * to remove the channel(s)' fd just destroyed.
2936 */
2937 ret = notify_thread_pipe(kernel_poll_pipe[1]);
2938 if (ret < 0) {
2939 PERROR("write kernel poll pipe");
2940 }
2941
2942 ret = session_destroy(session);
2943
2944 return ret;
2945 }
2946
2947 /*
2948 * Command LTTNG_CALIBRATE processed by the client thread.
2949 */
2950 static int cmd_calibrate(int domain, struct lttng_calibrate *calibrate)
2951 {
2952 int ret;
2953
2954 switch (domain) {
2955 case LTTNG_DOMAIN_KERNEL:
2956 {
2957 struct lttng_kernel_calibrate kcalibrate;
2958
2959 kcalibrate.type = calibrate->type;
2960 ret = kernel_calibrate(kernel_tracer_fd, &kcalibrate);
2961 if (ret < 0) {
2962 ret = LTTCOMM_KERN_ENABLE_FAIL;
2963 goto error;
2964 }
2965 break;
2966 }
2967 case LTTNG_DOMAIN_UST:
2968 {
2969 struct lttng_ust_calibrate ucalibrate;
2970
2971 ucalibrate.type = calibrate->type;
2972 ret = ust_app_calibrate_glb(&ucalibrate);
2973 if (ret < 0) {
2974 ret = LTTCOMM_UST_CALIBRATE_FAIL;
2975 goto error;
2976 }
2977 break;
2978 }
2979 default:
2980 ret = LTTCOMM_UND;
2981 goto error;
2982 }
2983
2984 ret = LTTCOMM_OK;
2985
2986 error:
2987 return ret;
2988 }
2989
2990 /*
2991 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
2992 */
2993 static int cmd_register_consumer(struct ltt_session *session, int domain,
2994 char *sock_path)
2995 {
2996 int ret, sock;
2997
2998 switch (domain) {
2999 case LTTNG_DOMAIN_KERNEL:
3000 /* Can't register a consumer if there is already one */
3001 if (session->kernel_session->consumer_fds_sent != 0) {
3002 ret = LTTCOMM_KERN_CONSUMER_FAIL;
3003 goto error;
3004 }
3005
3006 sock = lttcomm_connect_unix_sock(sock_path);
3007 if (sock < 0) {
3008 ret = LTTCOMM_CONNECT_FAIL;
3009 goto error;
3010 }
3011
3012 session->kernel_session->consumer_fd = sock;
3013 break;
3014 default:
3015 /* TODO: Userspace tracing */
3016 ret = LTTCOMM_UND;
3017 goto error;
3018 }
3019
3020 ret = LTTCOMM_OK;
3021
3022 error:
3023 return ret;
3024 }
3025
3026 /*
3027 * Command LTTNG_LIST_DOMAINS processed by the client thread.
3028 */
3029 static ssize_t cmd_list_domains(struct ltt_session *session,
3030 struct lttng_domain **domains)
3031 {
3032 int ret, index = 0;
3033 ssize_t nb_dom = 0;
3034
3035 if (session->kernel_session != NULL) {
3036 DBG3("Listing domains found kernel domain");
3037 nb_dom++;
3038 }
3039
3040 if (session->ust_session != NULL) {
3041 DBG3("Listing domains found UST global domain");
3042 nb_dom++;
3043 }
3044
3045 *domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
3046 if (*domains == NULL) {
3047 ret = -LTTCOMM_FATAL;
3048 goto error;
3049 }
3050
3051 if (session->kernel_session != NULL) {
3052 (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
3053 index++;
3054 }
3055
3056 if (session->ust_session != NULL) {
3057 (*domains)[index].type = LTTNG_DOMAIN_UST;
3058 index++;
3059 }
3060
3061 return nb_dom;
3062
3063 error:
3064 return ret;
3065 }
3066
3067 /*
3068 * Command LTTNG_LIST_CHANNELS processed by the client thread.
3069 */
3070 static ssize_t cmd_list_channels(int domain, struct ltt_session *session,
3071 struct lttng_channel **channels)
3072 {
3073 int ret;
3074 ssize_t nb_chan = 0;
3075
3076 switch (domain) {
3077 case LTTNG_DOMAIN_KERNEL:
3078 if (session->kernel_session != NULL) {
3079 nb_chan = session->kernel_session->channel_count;
3080 }
3081 DBG3("Number of kernel channels %zd", nb_chan);
3082 break;
3083 case LTTNG_DOMAIN_UST:
3084 if (session->ust_session != NULL) {
3085 nb_chan = lttng_ht_get_count(
3086 session->ust_session->domain_global.channels);
3087 }
3088 DBG3("Number of UST global channels %zd", nb_chan);
3089 break;
3090 default:
3091 *channels = NULL;
3092 ret = -LTTCOMM_UND;
3093 goto error;
3094 }
3095
3096 if (nb_chan > 0) {
3097 *channels = zmalloc(nb_chan * sizeof(struct lttng_channel));
3098 if (*channels == NULL) {
3099 ret = -LTTCOMM_FATAL;
3100 goto error;
3101 }
3102
3103 list_lttng_channels(domain, session, *channels);
3104 } else {
3105 *channels = NULL;
3106 }
3107
3108 return nb_chan;
3109
3110 error:
3111 return ret;
3112 }
3113
3114 /*
3115 * Command LTTNG_LIST_EVENTS processed by the client thread.
3116 */
3117 static ssize_t cmd_list_events(int domain, struct ltt_session *session,
3118 char *channel_name, struct lttng_event **events)
3119 {
3120 int ret = 0;
3121 ssize_t nb_event = 0;
3122
3123 switch (domain) {
3124 case LTTNG_DOMAIN_KERNEL:
3125 if (session->kernel_session != NULL) {
3126 nb_event = list_lttng_kernel_events(channel_name,
3127 session->kernel_session, events);
3128 }
3129 break;
3130 case LTTNG_DOMAIN_UST:
3131 {
3132 if (session->ust_session != NULL) {
3133 nb_event = list_lttng_ust_global_events(channel_name,
3134 &session->ust_session->domain_global, events);
3135 }
3136 break;
3137 }
3138 default:
3139 ret = -LTTCOMM_UND;
3140 goto error;
3141 }
3142
3143 ret = nb_event;
3144
3145 error:
3146 return ret;
3147 }
3148
3149 /*
3150 * Process the command requested by the lttng client within the command
3151 * context structure. This function make sure that the return structure (llm)
3152 * is set and ready for transmission before returning.
3153 *
3154 * Return any error encountered or 0 for success.
3155 */
3156 static int process_client_msg(struct command_ctx *cmd_ctx)
3157 {
3158 int ret = LTTCOMM_OK;
3159 int need_tracing_session = 1;
3160 int need_domain;
3161
3162 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
3163
3164 switch (cmd_ctx->lsm->cmd_type) {
3165 case LTTNG_CREATE_SESSION:
3166 case LTTNG_DESTROY_SESSION:
3167 case LTTNG_LIST_SESSIONS:
3168 case LTTNG_LIST_DOMAINS:
3169 case LTTNG_START_TRACE:
3170 case LTTNG_STOP_TRACE:
3171 need_domain = 0;
3172 break;
3173 default:
3174 need_domain = 1;
3175 }
3176
3177 if (opt_no_kernel && need_domain
3178 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
3179 if (!is_root) {
3180 ret = LTTCOMM_NEED_ROOT_SESSIOND;
3181 } else {
3182 ret = LTTCOMM_KERN_NA;
3183 }
3184 goto error;
3185 }
3186
3187 /*
3188 * Check for command that don't needs to allocate a returned payload. We do
3189 * this here so we don't have to make the call for no payload at each
3190 * command.
3191 */
3192 switch(cmd_ctx->lsm->cmd_type) {
3193 case LTTNG_LIST_SESSIONS:
3194 case LTTNG_LIST_TRACEPOINTS:
3195 case LTTNG_LIST_TRACEPOINT_FIELDS:
3196 case LTTNG_LIST_DOMAINS:
3197 case LTTNG_LIST_CHANNELS:
3198 case LTTNG_LIST_EVENTS:
3199 break;
3200 default:
3201 /* Setup lttng message with no payload */
3202 ret = setup_lttng_msg(cmd_ctx, 0);
3203 if (ret < 0) {
3204 /* This label does not try to unlock the session */
3205 goto init_setup_error;
3206 }
3207 }
3208
3209 /* Commands that DO NOT need a session. */
3210 switch (cmd_ctx->lsm->cmd_type) {
3211 case LTTNG_CREATE_SESSION:
3212 case LTTNG_CALIBRATE:
3213 case LTTNG_LIST_SESSIONS:
3214 case LTTNG_LIST_TRACEPOINTS:
3215 case LTTNG_LIST_TRACEPOINT_FIELDS:
3216 need_tracing_session = 0;
3217 break;
3218 default:
3219 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
3220 /*
3221 * We keep the session list lock across _all_ commands
3222 * for now, because the per-session lock does not
3223 * handle teardown properly.
3224 */
3225 session_lock_list();
3226 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
3227 if (cmd_ctx->session == NULL) {
3228 if (cmd_ctx->lsm->session.name != NULL) {
3229 ret = LTTCOMM_SESS_NOT_FOUND;
3230 } else {
3231 /* If no session name specified */
3232 ret = LTTCOMM_SELECT_SESS;
3233 }
3234 goto error;
3235 } else {
3236 /* Acquire lock for the session */
3237 session_lock(cmd_ctx->session);
3238 }
3239 break;
3240 }
3241
3242 if (!need_domain) {
3243 goto skip_domain;
3244 }
3245 /*
3246 * Check domain type for specific "pre-action".
3247 */
3248 switch (cmd_ctx->lsm->domain.type) {
3249 case LTTNG_DOMAIN_KERNEL:
3250 if (!is_root) {
3251 ret = LTTCOMM_NEED_ROOT_SESSIOND;
3252 goto error;
3253 }
3254
3255 /* Kernel tracer check */
3256 if (kernel_tracer_fd == -1) {
3257 /* Basically, load kernel tracer modules */
3258 ret = init_kernel_tracer();
3259 if (ret != 0) {
3260 goto error;
3261 }
3262 }
3263
3264 /* Consumer is in an ERROR state. Report back to client */
3265 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
3266 ret = LTTCOMM_NO_KERNCONSUMERD;
3267 goto error;
3268 }
3269
3270 /* Need a session for kernel command */
3271 if (need_tracing_session) {
3272 if (cmd_ctx->session->kernel_session == NULL) {
3273 ret = create_kernel_session(cmd_ctx->session);
3274 if (ret < 0) {
3275 ret = LTTCOMM_KERN_SESS_FAIL;
3276 goto error;
3277 }
3278 }
3279
3280 /* Start the kernel consumer daemon */
3281 pthread_mutex_lock(&kconsumer_data.pid_mutex);
3282 if (kconsumer_data.pid == 0 &&
3283 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3284 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
3285 ret = start_consumerd(&kconsumer_data);
3286 if (ret < 0) {
3287 ret = LTTCOMM_KERN_CONSUMER_FAIL;
3288 goto error;
3289 }
3290 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
3291 } else {
3292 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
3293 }
3294 }
3295
3296 break;
3297 case LTTNG_DOMAIN_UST:
3298 {
3299 /* Consumer is in an ERROR state. Report back to client */
3300 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
3301 ret = LTTCOMM_NO_USTCONSUMERD;
3302 goto error;
3303 }
3304
3305 if (need_tracing_session) {
3306 if (cmd_ctx->session->ust_session == NULL) {
3307 ret = create_ust_session(cmd_ctx->session,
3308 &cmd_ctx->lsm->domain);
3309 if (ret != LTTCOMM_OK) {
3310 goto error;
3311 }
3312 }
3313 /* Start the UST consumer daemons */
3314 /* 64-bit */
3315 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
3316 if (consumerd64_bin[0] != '\0' &&
3317 ustconsumer64_data.pid == 0 &&
3318 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3319 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
3320 ret = start_consumerd(&ustconsumer64_data);
3321 if (ret < 0) {
3322 ret = LTTCOMM_UST_CONSUMER64_FAIL;
3323 ust_consumerd64_fd = -EINVAL;
3324 goto error;
3325 }
3326
3327 ust_consumerd64_fd = ustconsumer64_data.cmd_sock;
3328 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
3329 } else {
3330 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
3331 }
3332 /* 32-bit */
3333 if (consumerd32_bin[0] != '\0' &&
3334 ustconsumer32_data.pid == 0 &&
3335 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3336 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
3337 ret = start_consumerd(&ustconsumer32_data);
3338 if (ret < 0) {
3339 ret = LTTCOMM_UST_CONSUMER32_FAIL;
3340 ust_consumerd32_fd = -EINVAL;
3341 goto error;
3342 }
3343
3344 ust_consumerd32_fd = ustconsumer32_data.cmd_sock;
3345 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
3346 } else {
3347 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
3348 }
3349 }
3350 break;
3351 }
3352 default:
3353 break;
3354 }
3355 skip_domain:
3356
3357 /* Validate consumer daemon state when start/stop trace command */
3358 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
3359 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
3360 switch (cmd_ctx->lsm->domain.type) {
3361 case LTTNG_DOMAIN_UST:
3362 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
3363 ret = LTTCOMM_NO_USTCONSUMERD;
3364 goto error;
3365 }
3366 break;
3367 case LTTNG_DOMAIN_KERNEL:
3368 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
3369 ret = LTTCOMM_NO_KERNCONSUMERD;
3370 goto error;
3371 }
3372 break;
3373 }
3374 }
3375
3376 /*
3377 * Check that the UID or GID match that of the tracing session.
3378 * The root user can interact with all sessions.
3379 */
3380 if (need_tracing_session) {
3381 if (!session_access_ok(cmd_ctx->session,
3382 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3383 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
3384 ret = LTTCOMM_EPERM;
3385 goto error;
3386 }
3387 }
3388
3389 /* Process by command type */
3390 switch (cmd_ctx->lsm->cmd_type) {
3391 case LTTNG_ADD_CONTEXT:
3392 {
3393 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3394 cmd_ctx->lsm->u.context.channel_name,
3395 cmd_ctx->lsm->u.context.event_name,
3396 &cmd_ctx->lsm->u.context.ctx);
3397 break;
3398 }
3399 case LTTNG_DISABLE_CHANNEL:
3400 {
3401 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3402 cmd_ctx->lsm->u.disable.channel_name);
3403 break;
3404 }
3405 case LTTNG_DISABLE_EVENT:
3406 {
3407 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3408 cmd_ctx->lsm->u.disable.channel_name,
3409 cmd_ctx->lsm->u.disable.name);
3410 break;
3411 }
3412 case LTTNG_DISABLE_ALL_EVENT:
3413 {
3414 DBG("Disabling all events");
3415
3416 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3417 cmd_ctx->lsm->u.disable.channel_name);
3418 break;
3419 }
3420 case LTTNG_ENABLE_CHANNEL:
3421 {
3422 ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3423 &cmd_ctx->lsm->u.channel.chan);
3424 break;
3425 }
3426 case LTTNG_ENABLE_EVENT:
3427 {
3428 ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3429 cmd_ctx->lsm->u.enable.channel_name,
3430 &cmd_ctx->lsm->u.enable.event);
3431 break;
3432 }
3433 case LTTNG_ENABLE_ALL_EVENT:
3434 {
3435 DBG("Enabling all events");
3436
3437 ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3438 cmd_ctx->lsm->u.enable.channel_name,
3439 cmd_ctx->lsm->u.enable.event.type);
3440 break;
3441 }
3442 case LTTNG_LIST_TRACEPOINTS:
3443 {
3444 struct lttng_event *events;
3445 ssize_t nb_events;
3446
3447 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3448 if (nb_events < 0) {
3449 ret = -nb_events;
3450 goto error;
3451 }
3452
3453 /*
3454 * Setup lttng message with payload size set to the event list size in
3455 * bytes and then copy list into the llm payload.
3456 */
3457 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3458 if (ret < 0) {
3459 free(events);
3460 goto setup_error;
3461 }
3462
3463 /* Copy event list into message payload */
3464 memcpy(cmd_ctx->llm->payload, events,
3465 sizeof(struct lttng_event) * nb_events);
3466
3467 free(events);
3468
3469 ret = LTTCOMM_OK;
3470 break;
3471 }
3472 case LTTNG_LIST_TRACEPOINT_FIELDS:
3473 {
3474 struct lttng_event_field *fields;
3475 ssize_t nb_fields;
3476
3477 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type, &fields);
3478 if (nb_fields < 0) {
3479 ret = -nb_fields;
3480 goto error;
3481 }
3482
3483 /*
3484 * Setup lttng message with payload size set to the event list size in
3485 * bytes and then copy list into the llm payload.
3486 */
3487 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event_field) * nb_fields);
3488 if (ret < 0) {
3489 free(fields);
3490 goto setup_error;
3491 }
3492
3493 /* Copy event list into message payload */
3494 memcpy(cmd_ctx->llm->payload, fields,
3495 sizeof(struct lttng_event_field) * nb_fields);
3496
3497 free(fields);
3498
3499 ret = LTTCOMM_OK;
3500 break;
3501 }
3502
3503 case LTTNG_START_TRACE:
3504 {
3505 ret = cmd_start_trace(cmd_ctx->session);
3506 break;
3507 }
3508 case LTTNG_STOP_TRACE:
3509 {
3510 ret = cmd_stop_trace(cmd_ctx->session);
3511 break;
3512 }
3513 case LTTNG_CREATE_SESSION:
3514 {
3515 ret = cmd_create_session(cmd_ctx->lsm->session.name,
3516 cmd_ctx->lsm->session.path, &cmd_ctx->creds);
3517 break;
3518 }
3519 case LTTNG_DESTROY_SESSION:
3520 {
3521 ret = cmd_destroy_session(cmd_ctx->session,
3522 cmd_ctx->lsm->session.name);
3523 /*
3524 * Set session to NULL so we do not unlock it after
3525 * free.
3526 */
3527 cmd_ctx->session = NULL;
3528 break;
3529 }
3530 case LTTNG_LIST_DOMAINS:
3531 {
3532 ssize_t nb_dom;
3533 struct lttng_domain *domains;
3534
3535 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3536 if (nb_dom < 0) {
3537 ret = -nb_dom;
3538 goto error;
3539 }
3540
3541 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3542 if (ret < 0) {
3543 goto setup_error;
3544 }
3545
3546 /* Copy event list into message payload */
3547 memcpy(cmd_ctx->llm->payload, domains,
3548 nb_dom * sizeof(struct lttng_domain));
3549
3550 free(domains);
3551
3552 ret = LTTCOMM_OK;
3553 break;
3554 }
3555 case LTTNG_LIST_CHANNELS:
3556 {
3557 int nb_chan;
3558 struct lttng_channel *channels;
3559
3560 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3561 cmd_ctx->session, &channels);
3562 if (nb_chan < 0) {
3563 ret = -nb_chan;
3564 goto error;
3565 }
3566
3567 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3568 if (ret < 0) {
3569 goto setup_error;
3570 }
3571
3572 /* Copy event list into message payload */
3573 memcpy(cmd_ctx->llm->payload, channels,
3574 nb_chan * sizeof(struct lttng_channel));
3575
3576 free(channels);
3577
3578 ret = LTTCOMM_OK;
3579 break;
3580 }
3581 case LTTNG_LIST_EVENTS:
3582 {
3583 ssize_t nb_event;
3584 struct lttng_event *events = NULL;
3585
3586 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3587 cmd_ctx->lsm->u.list.channel_name, &events);
3588 if (nb_event < 0) {
3589 ret = -nb_event;
3590 goto error;
3591 }
3592
3593 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3594 if (ret < 0) {
3595 goto setup_error;
3596 }
3597
3598 /* Copy event list into message payload */
3599 memcpy(cmd_ctx->llm->payload, events,
3600 nb_event * sizeof(struct lttng_event));
3601
3602 free(events);
3603
3604 ret = LTTCOMM_OK;
3605 break;
3606 }
3607 case LTTNG_LIST_SESSIONS:
3608 {
3609 unsigned int nr_sessions;
3610
3611 session_lock_list();
3612 nr_sessions = lttng_sessions_count(
3613 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3614 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3615
3616 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3617 if (ret < 0) {
3618 session_unlock_list();
3619 goto setup_error;
3620 }
3621
3622 /* Filled the session array */
3623 list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3624 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3625 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3626
3627 session_unlock_list();
3628
3629 ret = LTTCOMM_OK;
3630 break;
3631 }
3632 case LTTNG_CALIBRATE:
3633 {
3634 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3635 &cmd_ctx->lsm->u.calibrate);
3636 break;
3637 }
3638 case LTTNG_REGISTER_CONSUMER:
3639 {
3640 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3641 cmd_ctx->lsm->u.reg.path);
3642 break;
3643 }
3644 default:
3645 ret = LTTCOMM_UND;
3646 break;
3647 }
3648
3649 error:
3650 if (cmd_ctx->llm == NULL) {
3651 DBG("Missing llm structure. Allocating one.");
3652 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3653 goto setup_error;
3654 }
3655 }
3656 /* Set return code */
3657 cmd_ctx->llm->ret_code = ret;
3658 setup_error:
3659 if (cmd_ctx->session) {
3660 session_unlock(cmd_ctx->session);
3661 }
3662 if (need_tracing_session) {
3663 session_unlock_list();
3664 }
3665 init_setup_error:
3666 return ret;
3667 }
3668
3669 /*
3670 * This thread manage all clients request using the unix client socket for
3671 * communication.
3672 */
3673 static void *thread_manage_clients(void *data)
3674 {
3675 int sock = -1, ret, i, pollfd;
3676 uint32_t revents, nb_fd;
3677 struct command_ctx *cmd_ctx = NULL;
3678 struct lttng_poll_event events;
3679
3680 DBG("[thread] Manage client started");
3681
3682 rcu_register_thread();
3683
3684 ret = lttcomm_listen_unix_sock(client_sock);
3685 if (ret < 0) {
3686 goto error;
3687 }
3688
3689 /*
3690 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3691 * more will be added to this poll set.
3692 */
3693 ret = create_thread_poll_set(&events, 2);
3694 if (ret < 0) {
3695 goto error;
3696 }
3697
3698 /* Add the application registration socket */
3699 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3700 if (ret < 0) {
3701 goto error;
3702 }
3703
3704 /*
3705 * Notify parent pid that we are ready to accept command for client side.
3706 */
3707 if (opt_sig_parent) {
3708 kill(ppid, SIGUSR1);
3709 }
3710
3711 while (1) {
3712 DBG("Accepting client command ...");
3713
3714 nb_fd = LTTNG_POLL_GETNB(&events);
3715
3716 		/* Infinite blocking call, waiting for transmission */
3717 restart:
3718 ret = lttng_poll_wait(&events, -1);
3719 if (ret < 0) {
3720 /*
3721 * Restart interrupted system call.
3722 */
3723 if (errno == EINTR) {
3724 goto restart;
3725 }
3726 goto error;
3727 }
3728
3729 for (i = 0; i < nb_fd; i++) {
3730 /* Fetch once the poll data */
3731 revents = LTTNG_POLL_GETEV(&events, i);
3732 pollfd = LTTNG_POLL_GETFD(&events, i);
3733
3734 /* Thread quit pipe has been closed. Killing thread. */
3735 ret = check_thread_quit_pipe(pollfd, revents);
3736 if (ret) {
3737 goto error;
3738 }
3739
3740 /* Event on the registration socket */
3741 if (pollfd == client_sock) {
3742 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3743 ERR("Client socket poll error");
3744 goto error;
3745 }
3746 }
3747 }
3748
3749 DBG("Wait for client response");
3750
3751 sock = lttcomm_accept_unix_sock(client_sock);
3752 if (sock < 0) {
3753 goto error;
3754 }
3755
3756 /* Set socket option for credentials retrieval */
3757 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3758 if (ret < 0) {
3759 goto error;
3760 }
3761
3762 /* Allocate context command to process the client request */
3763 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3764 if (cmd_ctx == NULL) {
3765 PERROR("zmalloc cmd_ctx");
3766 goto error;
3767 }
3768
3769 /* Allocate data buffer for reception */
3770 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3771 if (cmd_ctx->lsm == NULL) {
3772 PERROR("zmalloc cmd_ctx->lsm");
3773 goto error;
3774 }
3775
3776 cmd_ctx->llm = NULL;
3777 cmd_ctx->session = NULL;
3778
3779 /*
3780 * Data is received from the lttng client. The struct
3781 * lttcomm_session_msg (lsm) contains the command and data request of
3782 * the client.
3783 */
3784 DBG("Receiving data from client ...");
3785 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3786 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3787 if (ret <= 0) {
3788 DBG("Nothing recv() from client... continuing");
3789 ret = close(sock);
3790 if (ret) {
3791 PERROR("close");
3792 }