/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <paths.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <urcu/uatomic.h>
#include <unistd.h>
#include <ctype.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/compat/getenv.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/session-config.h>
#include <common/dynamic-buffer.h>
#include <lttng/userspace-probe-internal.h>
#include <lttng/event-internal.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "channel.h"
#include "cmd.h"
#include "consumer.h"
#include "context.h"
#include "event.h"
#include "kernel.h"
#include "kernel-consumer.h"
#include "modprobe.h"
#include "shm.h"
#include "ust-ctl.h"
#include "ust-consumer.h"
#include "utils.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "agent-thread.h"
#include "save.h"
#include "load-session-thread.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "rotation-thread.h"
#include "lttng-syscall.h"
#include "agent.h"
#include "ht-cleanup.h"
#include "sessiond-config.h"
#include "timer.h"
#include "thread.h"

static const char *help_msg =
#ifdef LTTNG_EMBED_HELP
#include <lttng-sessiond.8.h>
#else
NULL
#endif
;

const char *progname;
static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;

static struct lttng_kernel_tracer_version kernel_tracer_version;
static struct lttng_kernel_tracer_abi_version kernel_tracer_abi_version;

/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", required_argument, 0, 'c' },
	{ "apps-sock", required_argument, 0, 'a' },
	{ "kconsumerd-cmd-sock", required_argument, 0, '\0' },
	{ "kconsumerd-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-err-sock", required_argument, 0, '\0' },
	{ "consumerd32-path", required_argument, 0, '\0' },
	{ "consumerd32-libdir", required_argument, 0, '\0' },
	{ "consumerd64-path", required_argument, 0, '\0' },
	{ "consumerd64-libdir", required_argument, 0, '\0' },
	{ "daemonize", no_argument, 0, 'd' },
	{ "background", no_argument, 0, 'b' },
	{ "sig-parent", no_argument, 0, 'S' },
	{ "help", no_argument, 0, 'h' },
	{ "group", required_argument, 0, 'g' },
	{ "version", no_argument, 0, 'V' },
	{ "quiet", no_argument, 0, 'q' },
	{ "verbose", no_argument, 0, 'v' },
	{ "verbose-consumer", no_argument, 0, '\0' },
	{ "no-kernel", no_argument, 0, '\0' },
	{ "pidfile", required_argument, 0, 'p' },
	{ "agent-tcp-port", required_argument, 0, '\0' },
	{ "config", required_argument, 0, 'f' },
	{ "load", required_argument, 0, 'l' },
	{ "kmod-probes", required_argument, 0, '\0' },
	{ "extra-kmod-probes", required_argument, 0, '\0' },
	{ NULL, 0, 0, 0 }
};

/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };

/* Shared between threads */
static int dispatch_thread_exit;

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t agent_reg_thread;
static pthread_t load_session_thread;
static pthread_t timer_thread;

/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. Down the line, the
 * thread_manage_apps thread receives each new application socket and
 * monitors it for any I/O error or clean close that triggers an
 * unregistration of the application.
 */
static struct ust_cmd_queue ust_cmd_queue;
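
/*
 * Illustrative sketch (not compiled) of the N-wakers / 1-waiter scheme
 * around ust_cmd_queue, using only the calls that appear later in this
 * file:
 *
 *   Waker side (thread_registration_apps):
 *     cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail,
 *             &ust_cmd->node);
 *     futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   Waiter side (thread_dispatch_ust_registration):
 *     for (;;) {
 *         futex_nto1_prepare(&ust_cmd_queue.futex);
 *         while ((node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head,
 *                 &ust_cmd_queue.tail)) != NULL) {
 *             ... dispatch the command ...
 *         }
 *         futex_nto1_wait(&ust_cmd_queue.futex);
 *     }
 */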

static const char *module_proc_lttng = "/proc/lttng";

/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 *   consumer thread error happens
 *                                        client cmd arrives
 *                                        client cmd checks state -> still OK
 *   consumer thread exit, sets error
 *                                        client cmd try to talk to consumer
 *                                        ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we cannot guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
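
/*
 * Illustrative sketch (not compiled): the validation pattern described
 * above, as used for instance by update_ust_app() below. The error mapping
 * shown is hypothetical; real commands translate the failure into an
 * appropriate lttng error code.
 *
 *   if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
 *       return -LTTNG_ERR_FATAL;  // hypothetical mapping
 *   }
 *   // Past this point, delivery to the consumerd is best-effort only.
 */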

/* Load session thread information to operate. */
static struct load_session_thread_data *load_info;

/*
 * Section name to look for in the daemon configuration file.
 */
static const char * const config_section_name = "sessiond";

/* Am I root or not. Set to 1 if the daemon is running as root. */
static int is_root;

/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = sessiond_notify_quit_pipe();
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}

/*
 * Close all consumer sockets.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
	if (kconsumer_data.channel_monitor_pipe >= 0) {
		ret = close(kconsumer_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("kernel consumer channel monitor pipe close");
		}
	}
	if (ustconsumer32_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer32_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd32 channel monitor pipe close");
		}
	}
	if (ustconsumer64_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer64_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd64 channel monitor pipe close");
		}
	}
}

/*
 * Wait on consumer process termination.
 *
 * Must be called with the consumer data lock held or from a context
 * ensuring no concurrent access to data (e.g: cleanup).
 */
static void wait_consumer(struct consumer_data *consumer_data)
{
	pid_t ret;
	int status;

	if (consumer_data->pid <= 0) {
		return;
	}

	DBG("Waiting for complete teardown of consumerd (PID: %d)",
			consumer_data->pid);
	ret = waitpid(consumer_data->pid, &status, 0);
	if (ret == -1) {
		PERROR("consumerd waitpid pid: %d", consumer_data->pid);
	} else if (!WIFEXITED(status)) {
		ERR("consumerd termination with error: %d",
				WEXITSTATUS(status));
	}
	consumer_data->pid = 0;
}
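
/*
 * Note: when consumerd is killed by a signal, WIFEXITED() is false and
 * WEXITSTATUS() is not meaningful. A sketch of finer-grained reporting,
 * assuming only standard waitpid() semantics (not currently done here):
 *
 *   if (WIFSIGNALED(status)) {
 *       ERR("consumerd terminated by signal %d", WTERMSIG(status));
 *   }
 */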

/*
 * Cleanup the session daemon's data structures.
 */
static void sessiond_cleanup(void)
{
	int ret;
	struct ltt_session_list *session_list = session_get_list();

	DBG("Cleanup sessiond");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	sessiond_close_quit_pipe();

	ret = remove(config.pid_file_path.value);
	if (ret < 0) {
		PERROR("remove pidfile %s", config.pid_file_path.value);
	}

	DBG("Removing sessiond and consumerd content of directory %s",
			config.rundir.value);

	/* sessiond */
	DBG("Removing %s", config.pid_file_path.value);
	(void) unlink(config.pid_file_path.value);

	DBG("Removing %s", config.agent_port_file_path.value);
	(void) unlink(config.agent_port_file_path.value);

	/* kconsumerd */
	DBG("Removing %s", kconsumer_data.err_unix_sock_path);
	(void) unlink(kconsumer_data.err_unix_sock_path);

	DBG("Removing directory %s", config.kconsumerd_path.value);
	(void) rmdir(config.kconsumerd_path.value);

	/* ust consumerd 32 */
	DBG("Removing %s", config.consumerd32_err_unix_sock_path.value);
	(void) unlink(config.consumerd32_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd32_path.value);
	(void) rmdir(config.consumerd32_path.value);

	/* ust consumerd 64 */
	DBG("Removing %s", config.consumerd64_err_unix_sock_path.value);
	(void) unlink(config.consumerd64_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd64_path.value);
	(void) rmdir(config.consumerd64_path.value);

	pthread_mutex_destroy(&session_list->lock);

	wait_consumer(&kconsumer_data);
	wait_consumer(&ustconsumer64_data);
	wait_consumer(&ustconsumer32_data);

	DBG("Cleaning up all agent apps");
	agent_app_ht_clean();

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !config.no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
		free(syscall_table);
	}

	close_consumer_sockets();

	if (load_info) {
		load_session_destroy_data(load_info);
		free(load_info);
	}

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */
}

/*
 * Cleanup the daemon's option data structures.
 */
static void sessiond_cleanup_options(void)
{
	DBG("Cleaning up options");

	sessiond_config_fini(&config);

	run_as_destroy_worker();
}

/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}

/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(config.wait_shm_path.value, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting processes */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
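
/*
 * Illustrative sketch (not compiled), assuming liblttng-ust blocks on the
 * same shared-memory futex word before registering: the daemon stores the
 * "active" state and wakes; applications wait until it is non-zero.
 *
 *   int32_t *state = (int32_t *) wait_shm_mmap;
 *   while (!uatomic_read(state)) {
 *       // e.g. futex(state, FUTEX_WAIT, 0, NULL, NULL, 0)
 *   }
 *   // Session daemon is up: connect to apps_sock and register.
 */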

/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return 0 on success, negative value on error.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx,
		const void *payload_buf, size_t payload_len,
		const void *cmd_header_buf, size_t cmd_header_len)
{
	int ret = 0;
	const size_t header_len = sizeof(struct lttcomm_lttng_msg);
	const size_t cmd_header_offset = header_len;
	const size_t payload_offset = cmd_header_offset + cmd_header_len;
	const size_t total_msg_size = header_len + cmd_header_len + payload_len;

	cmd_ctx->llm = zmalloc(total_msg_size);

	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto end;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
	cmd_ctx->llm->cmd_header_size = cmd_header_len;
	cmd_ctx->llm->data_size = payload_len;
	cmd_ctx->lttng_msg_size = total_msg_size;

	/* Copy command header */
	if (cmd_header_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + cmd_header_offset, cmd_header_buf,
				cmd_header_len);
	}

	/* Copy payload */
	if (payload_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + payload_offset, payload_buf,
				payload_len);
	}

end:
	return ret;
}
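
/*
 * Resulting llm buffer layout, using the offsets computed above:
 *
 *   +----------------------------+ offset 0
 *   | struct lttcomm_lttng_msg   |  header_len bytes
 *   +----------------------------+ cmd_header_offset
 *   | command header (optional)  |  cmd_header_len bytes
 *   +----------------------------+ payload_offset
 *   | payload (optional)         |  payload_len bytes
 *   +----------------------------+ total_msg_size
 */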

/*
 * Version of setup_lttng_msg() without command header.
 */
static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
		void *payload_buf, size_t payload_len)
{
	return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
}

/*
 * Update the kernel poll set with all the channel fds available over all
 * tracing sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_kernel_channel *channel;
	struct ltt_session *session;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				session_put(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd matching 'fd' over all tracing sessions. When found,
 * check for new channel streams and send those stream fds to the kernel
 * consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_streams(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_put(session);
	session_unlock_list();
	return ret;
}

/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;
	const struct ltt_session_list *session_list = session_get_list();

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list->head, list) {
		struct ust_app *app;

		if (!session_get(sess)) {
			continue;
		}
		session_lock(sess);
		if (!sess->ust_session) {
			goto unlock_session;
		}

		rcu_read_lock();
		assert(app_sock >= 0);
		app = ust_app_find_by_sock(app_sock);
		if (app == NULL) {
			/*
			 * The application may have unregistered already, so
			 * this is possible; simply stop the update.
			 */
			DBG3("UST app update failed to find app sock %d",
					app_sock);
			goto unlock_rcu;
		}
		ust_app_global_update(sess->ust_session, app);
	unlock_rcu:
		rcu_read_unlock();
	unlock_session:
		session_unlock(sess);
		session_put(sess);
	}
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which can
	 * end up freeing non-NULL pointers, so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (revents & LPOLLIN) {
				if (pollfd == kernel_poll_pipe[0]) {
					(void) lttng_read(kernel_poll_pipe[0],
							&tmp, 1);
					/*
					 * The return value is not checked here: if this
					 * pipe sees any activity, an update is required
					 * anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Adding kernel
					 * stream to kernel session and updating the
					 * kernel consumer.
					 */
					ret = update_kernel_stream(pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}

/*
 * Signal the consumer data's pthread condition so that waiters are notified
 * of the thread's state.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
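
/*
 * Sketch of the waiting side (roughly what spawn_consumer_thread() below
 * sets up before blocking; the condition uses CLOCK_MONOTONIC):
 *
 *   pthread_mutex_lock(&consumer_data->cond_mutex);
 *   while (consumer_data->consumer_thread_is_ready == 0) {
 *       pthread_cond_timedwait(&consumer_data->cond,
 *               &consumer_data->cond_mutex, &timeout);
 *   }
 *   pthread_mutex_unlock(&consumer_data->cond_mutex);
 *   // A negative state signalled here means the bootstrap failed.
 */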

/*
 * This thread manages consumer errors sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;
	struct consumer_socket *cmd_socket_wrapper = NULL;

	DBG("[thread] Manage consumer started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & LPOLLIN) {
				continue;
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Connect both command and metadata sockets. */
	consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
		PERROR("consumer connect cmd socket");
		/* On error, signal condition and quit. */
		signal_consumer_condition(consumer_data, -1);
		goto error;
	}

	consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;

	/* Create metadata socket lock. */
	consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
	if (consumer_data->metadata_sock.lock == NULL) {
		PERROR("zmalloc pthread mutex");
		goto error;
	}
	pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

	DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
	DBG("Consumer metadata socket ready (fd: %d)",
			consumer_data->metadata_fd);

	/*
	 * Remove the consumerd error sock since we've established a connection.
	 */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Transfer the write-end of the channel monitoring and rotate pipe
	 * to the consumer by issuing a SET_CHANNEL_MONITOR_PIPE command.
	 */
	cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
	if (!cmd_socket_wrapper) {
		goto error;
	}
	cmd_socket_wrapper->lock = &consumer_data->lock;

	ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
			consumer_data->channel_monitor_pipe);
	if (ret) {
		goto error;
	}

	/* Discard the socket wrapper as it is no longer needed. */
	consumer_destroy_socket(cmd_socket_wrapper);
	cmd_socket_wrapper = NULL;

	/* The thread is completely initialized, signal that it is ready. */
	signal_consumer_condition(consumer_data, 1);

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	while (1) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/*
			 * Thread quit pipe has been triggered, flag that we should stop
			 * but continue the current loop to handle potential data from
			 * consumer.
			 */
			should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err metadata socket second poll error");
					goto error;
				}
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch all FDs are tested prior. */
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them, so get exclusive access, which will abort
	 * all other consumer commands issued by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
			*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);

	if (cmd_socket_wrapper) {
		consumer_destroy_socket(cmd_socket_wrapper);
	}
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	rcu_thread_offline();
	rcu_unregister_thread();

	return NULL;
}

/*
 * This thread receives application command sockets (FDs) on the
 * apps_cmd_pipe and waits (polls) on them until they are closed
 * or an error occurs.
 *
 * At that point, it flushes the data (tracing and metadata) associated
 * with this application and tears down ust app sessions and other
 * associated data structures through ust_app_unregister().
 *
 * Note that this thread never sends commands to the applications
 * through the command sockets; it merely listens for hang-ups
 * and errors on those sockets and cleans-up as they occur.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Apps thread return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & LPOLLIN) {
					int sock;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < (ssize_t) sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * Since this is a command socket (write then read),
					 * we only monitor the error events of the socket.
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else {
					ERR("Unknown poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

/*
 * Send a socket to a thread. This is called from the dispatch UST
 * registration thread once all sockets are set for the application.
 *
 * The sock value can be invalid; we don't really care since the thread
 * will handle it and make the necessary cleanup if so.
 *
 * On success, return 0; on error, return a negative value corresponding
 * to errno as set by write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is concurrently set to -1 just before this
	 * function is called, when the thread is in a shutdown state.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < (ssize_t) sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return (int) ret;
}
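
/*
 * The receiving end performs the matching fixed-size read; see
 * thread_manage_apps() above:
 *
 *   size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
 *   if (size_ret < (ssize_t) sizeof(sock)) {
 *       // A partial read would desynchronize the pipe; treat as fatal.
 *   }
 */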

/*
 * Sanitize the wait queue of the dispatch registration thread, i.e. remove
 * invalid nodes from it. This avoids memory leaks when the UST notify
 * socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				/*
				 * Silence warning of use-after-free in
				 * cds_list_for_each_entry_safe which uses
				 * __typeof__(*wait_node).
				 */
				wait_node = NULL;
				break;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}

/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	for (;;) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
			break;
		}

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify sockets that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage, the received socket is
				 * basically useless, so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this is
					 * not an internal error of this thread, thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this is
					 * not an internal error of this thread, thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	/* Empty command queue. */
	for (;;) {
		/* Dequeue command for registration */
		node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
		if (node == NULL) {
			break;
		}
		ust_cmd = caa_container_of(node, struct ust_command, node);
		ret = close(ust_cmd->sock);
		if (ret < 0) {
			PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ust_cmd);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_unregister_thread();
	return NULL;
}
1865
1866 /*
1867 * This thread manage application registration.
1868 */
1869 static void *thread_registration_apps(void *data)
1870 {
1871 int sock = -1, i, ret, pollfd, err = -1;
1872 uint32_t revents, nb_fd;
1873 struct lttng_poll_event events;
1874 /*
1875 * Get allocated in this thread, enqueued to a global queue, dequeued and
1876 * freed in the manage apps thread.
1877 */
1878 struct ust_command *ust_cmd = NULL;
1879
1880 DBG("[thread] Manage application registration started");
1881
1882 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
1883
1884 if (testpoint(sessiond_thread_registration_apps)) {
1885 goto error_testpoint;
1886 }
1887
1888 ret = lttcomm_listen_unix_sock(apps_sock);
1889 if (ret < 0) {
1890 goto error_listen;
1891 }
1892
1893 /*
1894 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1895 * more will be added to this poll set.
1896 */
1897 ret = sessiond_set_thread_pollset(&events, 2);
1898 if (ret < 0) {
1899 goto error_create_poll;
1900 }
1901
1902 /* Add the application registration socket */
1903 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1904 if (ret < 0) {
1905 goto error_poll_add;
1906 }
1907
1908 /* Notify all applications to register */
1909 ret = notify_ust_apps(1);
1910 if (ret < 0) {
1911 ERR("Failed to notify applications or create the wait shared memory.\n"
1912 "Execution continues but there might be problem for already\n"
1913 "running applications that wishes to register.");
1914 }
1915
1916 while (1) {
1917 DBG("Accepting application registration");
1918
1919 /* Inifinite blocking call, waiting for transmission */
1920 restart:
1921 health_poll_entry();
1922 ret = lttng_poll_wait(&events, -1);
1923 health_poll_exit();
1924 if (ret < 0) {
1925 /*
1926 * Restart interrupted system call.
1927 */
1928 if (errno == EINTR) {
1929 goto restart;
1930 }
1931 goto error;
1932 }
1933
1934 nb_fd = ret;
1935
1936 for (i = 0; i < nb_fd; i++) {
1937 health_code_update();
1938
1939 /* Fetch once the poll data */
1940 revents = LTTNG_POLL_GETEV(&events, i);
1941 pollfd = LTTNG_POLL_GETFD(&events, i);
1942
1943 if (!revents) {
1944 /* No activity for this FD (poll implementation). */
1945 continue;
1946 }
1947
1948 /* Thread quit pipe has been closed. Killing thread. */
1949 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1950 if (ret) {
1951 err = 0;
1952 goto exit;
1953 }
1954
1955 /* Event on the registration socket */
1956 if (pollfd == apps_sock) {
1957 if (revents & LPOLLIN) {
1958 sock = lttcomm_accept_unix_sock(apps_sock);
1959 if (sock < 0) {
1960 goto error;
1961 }
1962
1963 /*
1964 * Set socket timeout for both receiving and ending.
1965 * app_socket_timeout is in seconds, whereas
1966 * lttcomm_setsockopt_rcv_timeout and
1967 * lttcomm_setsockopt_snd_timeout expect msec as
1968 * parameter.
1969 */
1970 if (config.app_socket_timeout >= 0) {
1971 (void) lttcomm_setsockopt_rcv_timeout(sock,
1972 config.app_socket_timeout * 1000);
1973 (void) lttcomm_setsockopt_snd_timeout(sock,
1974 config.app_socket_timeout * 1000);
1975 }
1976
1977 /*
1978 * Set the CLOEXEC flag. Return code is useless because
1979 * either way, the show must go on.
1980 */
1981 (void) utils_set_fd_cloexec(sock);
1982
1983 /* Create UST registration command for enqueuing */
1984 ust_cmd = zmalloc(sizeof(struct ust_command));
1985 if (ust_cmd == NULL) {
1986 PERROR("ust command zmalloc");
1987 ret = close(sock);
1988 if (ret) {
1989 PERROR("close");
1990 }
1991 goto error;
1992 }
1993
1994 /*
1995 * Using message-based transmissions to ensure we don't
1996 * have to deal with partially received messages.
1997 */
1998 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1999 if (ret < 0) {
2000 ERR("Exhausted file descriptors allowed for applications.");
2001 free(ust_cmd);
2002 ret = close(sock);
2003 if (ret) {
2004 PERROR("close");
2005 }
2006 sock = -1;
2007 continue;
2008 }
2009
2010 health_code_update();
2011 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
2012 if (ret < 0) {
2013 free(ust_cmd);
2014 /* Close socket of the application. */
2015 ret = close(sock);
2016 if (ret) {
2017 PERROR("close");
2018 }
2019 lttng_fd_put(LTTNG_FD_APPS, 1);
2020 sock = -1;
2021 continue;
2022 }
2023 health_code_update();
2024
2025 ust_cmd->sock = sock;
2026 sock = -1;
2027
2028 DBG("UST registration received with pid:%d ppid:%d uid:%d"
2029 " gid:%d sock:%d name:%s (version %d.%d)",
2030 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
2031 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
2032 ust_cmd->sock, ust_cmd->reg_msg.name,
2033 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
2034
2035 /*
2036 * Lock-free enqueue of the registration request. The red pill
2037 * has been taken! This app will be part of the *system*.
2038 */
2039 cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail, &ust_cmd->node);
2040
2041 /*
2042 * Wake the registration queue futex. Implicit memory
2043 * barrier with the exchange in cds_wfcq_enqueue.
2044 */
2045 futex_nto1_wake(&ust_cmd_queue.futex);
2046 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2047 ERR("Register apps socket poll error");
2048 goto error;
2049 } else {
2050 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
2051 goto error;
2052 }
2053 }
2054 }
2055 }
2056
2057 exit:
2058 error:
2059 /* Notify that the registration thread is gone */
2060 notify_ust_apps(0);
2061
2062 if (apps_sock >= 0) {
2063 ret = close(apps_sock);
2064 if (ret) {
2065 PERROR("close");
2066 }
2067 }
2068 if (sock >= 0) {
2069 ret = close(sock);
2070 if (ret) {
2071 PERROR("close");
2072 }
2073 lttng_fd_put(LTTNG_FD_APPS, 1);
2074 }
2075 unlink(config.apps_unix_sock_path.value);
2076
2077 error_poll_add:
2078 lttng_poll_clean(&events);
2079 error_listen:
2080 error_create_poll:
2081 error_testpoint:
2082 DBG("UST Registration thread cleanup complete");
2083 if (err) {
2084 health_error();
2085 ERR("Health error occurred in %s", __func__);
2086 }
2087 health_unregister(health_sessiond);
2088
2089 return NULL;
2090 }
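
/*
 * For reference, a minimal sketch of the dequeue side of ust_cmd_queue,
 * as consumed by the dispatch thread (illustrative only; names other
 * than ust_cmd_queue, the futex_nto1_*() helpers and
 * cds_wfcq_dequeue_blocking() are hypothetical, and error handling is
 * elided):
 *
 *	for (;;) {
 *		struct cds_wfcq_node *node;
 *
 *		futex_nto1_prepare(&ust_cmd_queue.futex);
 *		node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head,
 *				&ust_cmd_queue.tail);
 *		if (!node) {
 *			// Queue empty: sleep until the registration
 *			// thread calls futex_nto1_wake() above.
 *			futex_nto1_wait(&ust_cmd_queue.futex);
 *			continue;
 *		}
 *		handle_registration(caa_container_of(node,
 *				struct ust_command, node));
 *	}
 */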
2091
2092 /*
2093 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
2094 * exec or it will fail.
2095 */
2096 static int spawn_consumer_thread(struct consumer_data *consumer_data)
2097 {
2098 int ret, clock_ret;
2099 struct timespec timeout;
2100
2101 /*
2102 * Make sure we set the readiness flag to 0 because we are NOT ready.
2103 * This access to consumer_thread_is_ready does not need to be
2104 * protected by consumer_data.cond_mutex (yet) since the consumer
2105 * management thread has not been started at this point.
2106 */
2107 consumer_data->consumer_thread_is_ready = 0;
2108
2109 /* Setup pthread condition */
2110 ret = pthread_condattr_init(&consumer_data->condattr);
2111 if (ret) {
2112 errno = ret;
2113 PERROR("pthread_condattr_init consumer data");
2114 goto error;
2115 }
2116
2117 /*
2118 * Set the monotonic clock in order to make sure we DO NOT jump in time
2119 * between the clock_gettime() call and the timedwait call. See bug #324
2120 * for more details and how we noticed it.
2121 */
2122 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
2123 if (ret) {
2124 errno = ret;
2125 PERROR("pthread_condattr_setclock consumer data");
2126 goto error;
2127 }
2128
2129 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
2130 if (ret) {
2131 errno = ret;
2132 PERROR("pthread_cond_init consumer data");
2133 goto error;
2134 }
2135
2136 ret = pthread_create(&consumer_data->thread, default_pthread_attr(),
2137 thread_manage_consumer, consumer_data);
2138 if (ret) {
2139 errno = ret;
2140 PERROR("pthread_create consumer");
2141 ret = -1;
2142 goto error;
2143 }
2144
2145 /* We are about to wait on a pthread condition */
2146 pthread_mutex_lock(&consumer_data->cond_mutex);
2147
2148 /* Get time for the pthread_cond_timedwait absolute timeout */
2149 clock_ret = lttng_clock_gettime(CLOCK_MONOTONIC, &timeout);
2150 /*
2151 * Set the timeout for the condition timed wait even if the clock_gettime()
2152 * call fails since we might loop on that call and we want to avoid
2153 * incrementing the timeout too many times.
2154 */
2155 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
2156
2157 /*
2158 * The following loop COULD be skipped in some conditions, which is why ret
2159 * is set to 0 here: it makes sure that at least one round of the loop is
2160 * done.
2161 */
2162 ret = 0;
2163
2164 /*
2165 * Loop until the condition is reached or a timeout occurs. Note that the
2166 * pthread_cond_timedwait(3p) man page specifies that EINTR can NOT be
2167 * returned, but pthread_cond(3), from the glibc documentation, says that
2168 * it is possible. This loop does not take any chances and works with both
2169 * behaviors.
2170 */
2171 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2172 if (clock_ret < 0) {
2173 PERROR("clock_gettime spawn consumer");
2174 /* Infinite wait for the consumerd thread to be ready */
2175 ret = pthread_cond_wait(&consumer_data->cond,
2176 &consumer_data->cond_mutex);
2177 } else {
2178 ret = pthread_cond_timedwait(&consumer_data->cond,
2179 &consumer_data->cond_mutex, &timeout);
2180 }
2181 }
2182
2183 /* Release the pthread condition */
2184 pthread_mutex_unlock(&consumer_data->cond_mutex);
2185
2186 if (ret != 0) {
2187 errno = ret;
2188 if (ret == ETIMEDOUT) {
2189 int pth_ret;
2190
2191 /*
2192 * The call has timed out, so we kill the consumer management thread
2193 * and return an error.
2194 */
2195 ERR("Condition timed out. The consumer thread was never ready."
2196 " Killing it");
2197 pth_ret = pthread_cancel(consumer_data->thread);
2198 if (pth_ret != 0) { /* pthread_cancel() returns an error number, never a negative value */
2199 PERROR("pthread_cancel consumer thread");
2200 }
2201 } else {
2202 PERROR("pthread_cond_wait failed consumer thread");
2203 }
2204 /* Caller is expecting a negative value on failure. */
2205 ret = -1;
2206 goto error;
2207 }
2208
2209 pthread_mutex_lock(&consumer_data->pid_mutex);
2210 if (consumer_data->pid == 0) {
2211 ERR("Consumerd did not start");
2212 pthread_mutex_unlock(&consumer_data->pid_mutex);
2213 goto error;
2214 }
2215 pthread_mutex_unlock(&consumer_data->pid_mutex);
2216
2217 return 0;
2218
2219 error:
2220 return ret;
2221 }
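
/*
 * The signalling half of the readiness handshake above lives in
 * thread_manage_consumer(). Once the consumer daemon has reported on its
 * error socket, it does roughly the following (illustrative sketch, not
 * the verbatim implementation):
 *
 *	pthread_mutex_lock(&consumer_data->cond_mutex);
 *	consumer_data->consumer_thread_is_ready = 1;
 *	pthread_cond_signal(&consumer_data->cond);
 *	pthread_mutex_unlock(&consumer_data->cond_mutex);
 *
 * Updating the flag while holding cond_mutex is what protects the
 * timedwait loop above against missed wake-ups.
 */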
2222
2223 /*
2224 * Join consumer thread
2225 */
2226 static int join_consumer_thread(struct consumer_data *consumer_data)
2227 {
2228 void *status;
2229
2230 /* Consumer pid must be a real one. */
2231 if (consumer_data->pid > 0) {
2232 int ret;
2233 ret = kill(consumer_data->pid, SIGTERM);
2234 if (ret) {
2235 PERROR("Error killing consumer daemon");
2236 return ret;
2237 }
2238 return pthread_join(consumer_data->thread, &status);
2239 } else {
2240 return 0;
2241 }
2242 }
2243
2244 /*
2245 * Fork and exec a consumer daemon (consumerd).
2246 *
2247 * Return the pid of the child process if successful, else a negative value.
2248 */
2249 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2250 {
2251 int ret;
2252 pid_t pid;
2253 const char *consumer_to_use;
2254 const char *verbosity;
2255 struct stat st;
2256
2257 DBG("Spawning consumerd");
2258
2259 pid = fork();
2260 if (pid == 0) {
2261 /*
2262 * Exec consumerd.
2263 */
2264 if (config.verbose_consumer) {
2265 verbosity = "--verbose";
2266 } else if (lttng_opt_quiet) {
2267 verbosity = "--quiet";
2268 } else {
2269 verbosity = "";
2270 }
2271
2272 switch (consumer_data->type) {
2273 case LTTNG_CONSUMER_KERNEL:
2274 /*
2275 * Find out which consumerd to execute. We will first try the
2276 * 64-bit path, then the sessiond's installation directory, and
2277 * fall back on the 32-bit one.
2278 */
2279 DBG3("Looking for a kernel consumer at these locations:");
2280 DBG3(" 1) %s", config.consumerd64_bin_path.value ? : "NULL");
2281 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, DEFAULT_CONSUMERD_FILE);
2282 DBG3(" 3) %s", config.consumerd32_bin_path.value ? : "NULL");
2283 if (config.consumerd64_bin_path.value && stat(config.consumerd64_bin_path.value, &st) == 0) {
2284 DBG3("Found location #1");
2285 consumer_to_use = config.consumerd64_bin_path.value;
2286 } else if (stat(INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE, &st) == 0) {
2287 DBG3("Found location #2");
2288 consumer_to_use = INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE;
2289 } else if (config.consumerd32_bin_path.value &&
2290 stat(config.consumerd32_bin_path.value, &st) == 0) {
2291 DBG3("Found location #3");
2292 consumer_to_use = config.consumerd32_bin_path.value;
2293 } else {
2294 DBG("Could not find any valid consumerd executable");
2295 ret = -EINVAL;
2296 goto error;
2297 }
2298 DBG("Using kernel consumer at: %s", consumer_to_use);
2299 (void) execl(consumer_to_use,
2300 "lttng-consumerd", verbosity, "-k",
2301 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2302 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2303 "--group", config.tracing_group_name.value,
2304 NULL);
2305 break;
2306 case LTTNG_CONSUMER64_UST:
2307 {
2308 if (config.consumerd64_lib_dir.value) {
2309 char *tmp;
2310 size_t tmplen;
2311 char *tmpnew;
2312
2313 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
2314 if (!tmp) {
2315 tmp = "";
2316 }
2317 tmplen = strlen(config.consumerd64_lib_dir.value) + 1 /* : */ + strlen(tmp);
2318 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2319 if (!tmpnew) {
2320 ret = -ENOMEM;
2321 goto error;
2322 }
2323 strcat(tmpnew, config.consumerd64_lib_dir.value);
2324 if (tmp[0] != '\0') {
2325 strcat(tmpnew, ":");
2326 strcat(tmpnew, tmp);
2327 }
2328 ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
2329 free(tmpnew);
2330 if (ret) {
2331 ret = -errno;
2332 goto error;
2333 }
2334 }
2335 DBG("Using 64-bit UST consumer at: %s", config.consumerd64_bin_path.value);
2336 (void) execl(config.consumerd64_bin_path.value, "lttng-consumerd", verbosity, "-u",
2337 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2338 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2339 "--group", config.tracing_group_name.value,
2340 NULL);
2341 break;
2342 }
2343 case LTTNG_CONSUMER32_UST:
2344 {
2345 if (config.consumerd32_lib_dir.value) {
2346 char *tmp;
2347 size_t tmplen;
2348 char *tmpnew;
2349
2350 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
2351 if (!tmp) {
2352 tmp = "";
2353 }
2354 tmplen = strlen(config.consumerd32_lib_dir.value) + 1 /* : */ + strlen(tmp);
2355 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2356 if (!tmpnew) {
2357 ret = -ENOMEM;
2358 goto error;
2359 }
2360 strcat(tmpnew, config.consumerd32_lib_dir.value);
2361 if (tmp[0] != '\0') {
2362 strcat(tmpnew, ":");
2363 strcat(tmpnew, tmp);
2364 }
2365 ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
2366 free(tmpnew);
2367 if (ret) {
2368 ret = -errno;
2369 goto error;
2370 }
2371 }
2372 DBG("Using 32-bit UST consumer at: %s", config.consumerd32_bin_path.value);
2373 (void) execl(config.consumerd32_bin_path.value, "lttng-consumerd", verbosity, "-u",
2374 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2375 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2376 "--group", config.tracing_group_name.value,
2377 NULL);
2378 break;
2379 }
2380 default:
2381 ERR("unknown consumer type");
2382 errno = 0;
2383 }
2384 if (errno != 0) {
2385 PERROR("Consumer execl()");
2386 }
2387 /* Reaching this point, we got a failure on our execl(). */
2388 exit(EXIT_FAILURE);
2389 } else if (pid > 0) {
2390 ret = pid;
2391 } else {
2392 PERROR("start consumer fork");
2393 ret = -errno;
2394 }
2395 error:
2396 return ret;
2397 }
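
/*
 * For illustration, with the default configuration the kernel case above
 * amounts to spawning something like the following (socket paths and the
 * tracing group are configuration-dependent; placeholders shown here):
 *
 *	lttng-consumerd --verbose -k \
 *		--consumerd-cmd-sock <cmd_unix_sock_path> \
 *		--consumerd-err-sock <err_unix_sock_path> \
 *		--group tracing
 */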
2398
2399 /*
2400 * Spawn the consumerd daemon and session daemon thread.
2401 */
2402 static int start_consumerd(struct consumer_data *consumer_data)
2403 {
2404 int ret;
2405
2406 /*
2407 * Set the listen() state on the socket since there is a possible race
2408 * between the exec() of the consumer daemon and this call if placed in the
2409 * consumer thread. See bug #366 for more details.
2410 */
2411 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2412 if (ret < 0) {
2413 goto error;
2414 }
2415
2416 pthread_mutex_lock(&consumer_data->pid_mutex);
2417 if (consumer_data->pid != 0) {
2418 pthread_mutex_unlock(&consumer_data->pid_mutex);
2419 goto end;
2420 }
2421
2422 ret = spawn_consumerd(consumer_data);
2423 if (ret < 0) {
2424 ERR("Spawning consumerd failed");
2425 pthread_mutex_unlock(&consumer_data->pid_mutex);
2426 goto error;
2427 }
2428
2429 /* Setting up the consumer_data pid */
2430 consumer_data->pid = ret;
2431 DBG2("Consumer pid %d", consumer_data->pid);
2432 pthread_mutex_unlock(&consumer_data->pid_mutex);
2433
2434 DBG2("Spawning consumer control thread");
2435 ret = spawn_consumer_thread(consumer_data);
2436 if (ret < 0) {
2437 ERR("Fatal error spawning consumer control thread");
2438 goto error;
2439 }
2440
2441 end:
2442 return 0;
2443
2444 error:
2445 /* Cleanup already created sockets on error. */
2446 if (consumer_data->err_sock >= 0) {
2447 int err;
2448
2449 err = close(consumer_data->err_sock);
2450 if (err < 0) {
2451 PERROR("close consumer data error socket");
2452 }
2453 }
2454 return ret;
2455 }
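
/*
 * Note on the locking above: re-checking consumer_data->pid under
 * pid_mutex before spawning makes start_consumerd() effectively
 * idempotent. If two client commands race to start tracing, only one
 * forks a consumerd; the other observes a non-zero pid and jumps to
 * "end" to reuse the existing daemon.
 */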
2456
2457 /*
2458 * Setup necessary data for kernel tracer action.
2459 */
2460 static int init_kernel_tracer(void)
2461 {
2462 int ret;
2463
2464 /* Modprobe lttng kernel modules */
2465 ret = modprobe_lttng_control();
2466 if (ret < 0) {
2467 goto error;
2468 }
2469
2470 /* Open debugfs lttng */
2471 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2472 if (kernel_tracer_fd < 0) {
2473 DBG("Failed to open %s", module_proc_lttng);
2474 goto error_open;
2475 }
2476
2477 /* Validate kernel version */
2478 ret = kernel_validate_version(kernel_tracer_fd, &kernel_tracer_version,
2479 &kernel_tracer_abi_version);
2480 if (ret < 0) {
2481 goto error_version;
2482 }
2483
2484 ret = modprobe_lttng_data();
2485 if (ret < 0) {
2486 goto error_modules;
2487 }
2488
2489 ret = kernel_supports_ring_buffer_snapshot_sample_positions(
2490 kernel_tracer_fd);
2491 if (ret < 0) {
2492 goto error_modules;
2493 }
2494
2495 if (ret < 1) {
2496 WARN("Kernel tracer does not support buffer monitoring. "
2497 "The monitoring timer of channels in the kernel domain "
2498 "will be set to 0 (disabled).");
2499 }
2500
2501 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2502 return 0;
2503
2504 error_version:
2505 modprobe_remove_lttng_control();
2506 ret = close(kernel_tracer_fd);
2507 if (ret) {
2508 PERROR("close");
2509 }
2510 kernel_tracer_fd = -1;
2511 return LTTNG_ERR_KERN_VERSION;
2512
2513 error_modules:
2514 ret = close(kernel_tracer_fd);
2515 if (ret) {
2516 PERROR("close");
2517 }
2518
2519 error_open:
2520 modprobe_remove_lttng_control();
2521
2522 error:
2523 WARN("No kernel tracer available");
2524 kernel_tracer_fd = -1;
2525 if (!is_root) {
2526 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2527 } else {
2528 return LTTNG_ERR_KERN_NA;
2529 }
2530 }
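
/*
 * Summary of the kernel tracer bring-up sequence implemented above
 * (descriptive only):
 *
 *	modprobe_lttng_control()  - load the lttng control modules
 *	open(module_proc_lttng)   - open the tracer command file,
 *	                            typically /proc/lttng
 *	kernel_validate_version() - check tracer version and ABI
 *	modprobe_lttng_data()     - load the probe/data modules
 *
 * Each failure unwinds the earlier steps through the error labels.
 */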
2531
2532
2533 /*
2534 * Copy consumer output from the tracing session to the domain session. The
2535 * function also applies the right domain-specific modification to the
2536 * trace files' destination directory.
2537 *
2538 * Should *NOT* be called with RCU read-side lock held.
2539 */
2540 static int copy_session_consumer(int domain, struct ltt_session *session)
2541 {
2542 int ret;
2543 const char *dir_name;
2544 struct consumer_output *consumer;
2545
2546 assert(session);
2547 assert(session->consumer);
2548
2549 switch (domain) {
2550 case LTTNG_DOMAIN_KERNEL:
2551 DBG3("Copying tracing session consumer output in kernel session");
2552 /*
2553 * XXX: We should audit the session creation and what this function
2554 * does "extra" in order to avoid a destroy since this function is used
2555 * in the domain session creation (kernel and ust) only. Same for UST
2556 * domain.
2557 */
2558 if (session->kernel_session->consumer) {
2559 consumer_output_put(session->kernel_session->consumer);
2560 }
2561 session->kernel_session->consumer =
2562 consumer_copy_output(session->consumer);
2563 /* Ease our life a bit for the next part */
2564 consumer = session->kernel_session->consumer;
2565 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2566 break;
2567 case LTTNG_DOMAIN_JUL:
2568 case LTTNG_DOMAIN_LOG4J:
2569 case LTTNG_DOMAIN_PYTHON:
2570 case LTTNG_DOMAIN_UST:
2571 DBG3("Copying tracing session consumer output in UST session");
2572 if (session->ust_session->consumer) {
2573 consumer_output_put(session->ust_session->consumer);
2574 }
2575 session->ust_session->consumer =
2576 consumer_copy_output(session->consumer);
2577 /* Ease our life a bit for the next part */
2578 consumer = session->ust_session->consumer;
2579 dir_name = DEFAULT_UST_TRACE_DIR;
2580 break;
2581 default:
2582 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2583 goto error;
2584 }
2585
2586 /* Append correct directory to subdir */
2587 strncat(consumer->subdir, dir_name,
2588 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2589 DBG3("Copy session consumer subdir %s", consumer->subdir);
2590
2591 ret = LTTNG_OK;
2592
2593 error:
2594 return ret;
2595 }
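
/*
 * Example of the resulting layout: for a session whose output directory
 * is .../mysession-<timestamp>, the kernel domain appends
 * DEFAULT_KERNEL_TRACE_DIR and the UST domain DEFAULT_UST_TRACE_DIR to
 * the consumer subdir, typically yielding
 * ".../mysession-<timestamp>/kernel" and ".../mysession-<timestamp>/ust"
 * (the exact values come from defaults.h).
 */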
2596
2597 /*
2598 * Create a UST session and add it to the session ust list.
2599 *
2600 * Should *NOT* be called with RCU read-side lock held.
2601 */
2602 static int create_ust_session(struct ltt_session *session,
2603 struct lttng_domain *domain)
2604 {
2605 int ret;
2606 struct ltt_ust_session *lus = NULL;
2607
2608 assert(session);
2609 assert(domain);
2610 assert(session->consumer);
2611
2612 switch (domain->type) {
2613 case LTTNG_DOMAIN_JUL:
2614 case LTTNG_DOMAIN_LOG4J:
2615 case LTTNG_DOMAIN_PYTHON:
2616 case LTTNG_DOMAIN_UST:
2617 break;
2618 default:
2619 ERR("Unknown UST domain on create session %d", domain->type);
2620 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2621 goto error;
2622 }
2623
2624 DBG("Creating UST session");
2625
2626 lus = trace_ust_create_session(session->id);
2627 if (lus == NULL) {
2628 ret = LTTNG_ERR_UST_SESS_FAIL;
2629 goto error;
2630 }
2631
2632 lus->uid = session->uid;
2633 lus->gid = session->gid;
2634 lus->output_traces = session->output_traces;
2635 lus->snapshot_mode = session->snapshot_mode;
2636 lus->live_timer_interval = session->live_timer;
2637 session->ust_session = lus;
2638 if (session->shm_path[0]) {
2639 strncpy(lus->root_shm_path, session->shm_path,
2640 sizeof(lus->root_shm_path));
2641 lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
2642 strncpy(lus->shm_path, session->shm_path,
2643 sizeof(lus->shm_path));
2644 lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
2645 strncat(lus->shm_path, "/ust",
2646 sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
2647 }
2648 /* Copy session output to the newly created UST session */
2649 ret = copy_session_consumer(domain->type, session);
2650 if (ret != LTTNG_OK) {
2651 goto error;
2652 }
2653
2654 return LTTNG_OK;
2655
2656 error:
2657 free(lus);
2658 session->ust_session = NULL;
2659 return ret;
2660 }
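
/*
 * Illustration of the shm_path handling above: assuming a session
 * shm_path of "/dev/shm/mysess", the new UST session ends up with
 *
 *	lus->root_shm_path = "/dev/shm/mysess"
 *	lus->shm_path      = "/dev/shm/mysess/ust"
 *
 * keeping the UST buffers in a domain-specific subdirectory of the
 * session's shared memory root.
 */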
2661
2662 /*
2663 * Create a kernel tracer session then create the default channel.
2664 */
2665 static int create_kernel_session(struct ltt_session *session)
2666 {
2667 int ret;
2668
2669 DBG("Creating kernel session");
2670
2671 ret = kernel_create_session(session, kernel_tracer_fd);
2672 if (ret < 0) {
2673 ret = LTTNG_ERR_KERN_SESS_FAIL;
2674 goto error;
2675 }
2676
2677 /* Code flow safety */
2678 assert(session->kernel_session);
2679
2680 /* Copy session output to the newly created Kernel session */
2681 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2682 if (ret != LTTNG_OK) {
2683 goto error;
2684 }
2685
2686 session->kernel_session->uid = session->uid;
2687 session->kernel_session->gid = session->gid;
2688 session->kernel_session->output_traces = session->output_traces;
2689 session->kernel_session->snapshot_mode = session->snapshot_mode;
2690
2691 return LTTNG_OK;
2692
2693 error:
2694 trace_kernel_destroy_session(session->kernel_session);
2695 session->kernel_session = NULL;
2696 return ret;
2697 }
2698
2699 /*
2700 * Count the number of sessions permitted by uid/gid.
2701 */
2702 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2703 {
2704 unsigned int i = 0;
2705 struct ltt_session *session;
2706 const struct ltt_session_list *session_list = session_get_list();
2707
2708 DBG("Counting number of available session for UID %d GID %d",
2709 uid, gid);
2710 cds_list_for_each_entry(session, &session_list->head, list) {
2711 if (!session_get(session)) {
2712 continue;
2713 }
2714 session_lock(session);
2715 /* Only count the sessions the user can control. */
2716 if (session_access_ok(session, uid, gid) &&
2717 !session->destroyed) {
2718 i++;
2719 }
2720 session_unlock(session);
2721 session_put(session);
2722 }
2723 return i;
2724 }
2725
2726 static int receive_userspace_probe(struct command_ctx *cmd_ctx, int sock,
2727 int *sock_error, struct lttng_event *event)
2728 {
2729 int fd, ret;
2730 struct lttng_userspace_probe_location *probe_location;
2731 const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
2732 struct lttng_dynamic_buffer probe_location_buffer;
2733 struct lttng_buffer_view buffer_view;
2734
2735 /*
2736 * Create a buffer to store the serialized version of the probe
2737 * location.
2738 */
2739 lttng_dynamic_buffer_init(&probe_location_buffer);
2740 ret = lttng_dynamic_buffer_set_size(&probe_location_buffer,
2741 cmd_ctx->lsm->u.enable.userspace_probe_location_len);
2742 if (ret) {
2743 ret = LTTNG_ERR_NOMEM;
2744 goto error;
2745 }
2746
2747 /*
2748 * Receive the probe location.
2749 */
2750 ret = lttcomm_recv_unix_sock(sock, probe_location_buffer.data,
2751 probe_location_buffer.size);
2752 if (ret <= 0) {
2753 DBG("Nothing recv() from client var len data... continuing");
2754 *sock_error = 1;
2755 lttng_dynamic_buffer_reset(&probe_location_buffer);
2756 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
2757 goto error;
2758 }
2759
2760 buffer_view = lttng_buffer_view_from_dynamic_buffer(
2761 &probe_location_buffer, 0, probe_location_buffer.size);
2762
2763 /*
2764 * Extract the probe location from the serialized version.
2765 */
2766 ret = lttng_userspace_probe_location_create_from_buffer(
2767 &buffer_view, &probe_location);
2768 if (ret < 0) {
2769 WARN("Failed to create a userspace probe location from the received buffer");
2770 lttng_dynamic_buffer_reset(&probe_location_buffer);
2771 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
2772 goto error;
2773 }
2774
2775 /*
2776 * Receive the file descriptor to the target binary from the client.
2777 */
2778 DBG("Receiving userspace probe target FD from client ...");
2779 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
2780 if (ret <= 0) {
2781 DBG("Nothing recv() from client userspace probe fd... continuing");
2782 *sock_error = 1;
2783 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
2784 goto error;
2785 }
2786
2787 /*
2788 * Set the file descriptor received from the client through the unix
2789 * socket in the probe location.
2790 */
2791 lookup = lttng_userspace_probe_location_get_lookup_method(probe_location);
2792 if (!lookup) {
2793 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
2794 goto error;
2795 }
2796
2797 /*
2798 * From the kernel tracer's perspective, all userspace probe event types
2799 * are the same: a file and an offset.
2800 */
2801 switch (lttng_userspace_probe_location_lookup_method_get_type(lookup)) {
2802 case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
2803 ret = lttng_userspace_probe_location_function_set_binary_fd(
2804 probe_location, fd);
2805 break;
2806 case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
2807 ret = lttng_userspace_probe_location_tracepoint_set_binary_fd(
2808 probe_location, fd);
2809 break;
2810 default:
2811 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
2812 goto error;
2813 }
2814
2815 if (ret) {
2816 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
2817 goto error;
2818 }
2819
2820 /* Attach the probe location to the event. */
2821 ret = lttng_event_set_userspace_probe_location(event, probe_location);
2822 if (ret) {
2823 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
2824 goto error;
2825 }
2826
2827 lttng_dynamic_buffer_reset(&probe_location_buffer);
2828 error:
2829 return ret;
2830 }
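
/*
 * For reference, the client sends the probe description in two steps, in
 * this order: first the serialized probe location
 * (userspace_probe_location_len bytes over the command socket), then the
 * target binary's file descriptor passed as SCM_RIGHTS ancillary data.
 * The length comes from the lsm header received earlier, which is why
 * the buffer can be sized before the recv() above.
 */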
2831
2832 /*
2833 * Check if the current kernel tracer supports the session rotation feature.
2834 * Return 1 if it does, 0 otherwise.
2835 */
2836 static int check_rotate_compatible(void)
2837 {
2838 int ret = 1;
2839
2840 if (kernel_tracer_version.major != 2 || kernel_tracer_version.minor < 11) {
2841 DBG("Kernel tracer version is not compatible with the rotation feature");
2842 ret = 0;
2843 }
2844
2845 return ret;
2846 }
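
/*
 * Examples of the check above: a 2.10 kernel tracer yields 0 (rotation
 * unsupported) while 2.11 or any later 2.x minor yields 1. A
 * hypothetical 3.x major would also yield 0 until this check is
 * revisited.
 */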
2847
2848 /*
2849 * Process the command requested by the lttng client within the command
2850 * context structure. This function makes sure that the return structure (llm)
2851 * is set and ready for transmission before returning.
2852 *
2853 * Return any error encountered or 0 for success.
2854 *
2855 * "sock" is only used for special-case var. len data.
2856 *
2857 * Should *NOT* be called with RCU read-side lock held.
2858 */
2859 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2860 int *sock_error)
2861 {
2862 int ret = LTTNG_OK;
2863 int need_tracing_session = 1;
2864 int need_domain;
2865
2866 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2867
2868 assert(!rcu_read_ongoing());
2869
2870 *sock_error = 0;
2871
2872 switch (cmd_ctx->lsm->cmd_type) {
2873 case LTTNG_CREATE_SESSION:
2874 case LTTNG_CREATE_SESSION_SNAPSHOT:
2875 case LTTNG_CREATE_SESSION_LIVE:
2876 case LTTNG_DESTROY_SESSION:
2877 case LTTNG_LIST_SESSIONS:
2878 case LTTNG_LIST_DOMAINS:
2879 case LTTNG_START_TRACE:
2880 case LTTNG_STOP_TRACE:
2881 case LTTNG_DATA_PENDING:
2882 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2883 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2884 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2885 case LTTNG_SNAPSHOT_RECORD:
2886 case LTTNG_SAVE_SESSION:
2887 case LTTNG_SET_SESSION_SHM_PATH:
2888 case LTTNG_REGENERATE_METADATA:
2889 case LTTNG_REGENERATE_STATEDUMP:
2890 case LTTNG_REGISTER_TRIGGER:
2891 case LTTNG_UNREGISTER_TRIGGER:
2892 case LTTNG_ROTATE_SESSION:
2893 case LTTNG_ROTATION_GET_INFO:
2894 case LTTNG_ROTATION_SET_SCHEDULE:
2895 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
2896 need_domain = 0;
2897 break;
2898 default:
2899 need_domain = 1;
2900 }
2901
2902 if (config.no_kernel && need_domain
2903 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2904 if (!is_root) {
2905 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2906 } else {
2907 ret = LTTNG_ERR_KERN_NA;
2908 }
2909 goto error;
2910 }
2911
2912 /* Deny register consumer if we already have a spawned consumer. */
2913 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2914 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2915 if (kconsumer_data.pid > 0) {
2916 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2917 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2918 goto error;
2919 }
2920 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2921 }
2922
2923 /*
2924 * Check for commands that don't need to allocate a returned payload. We do
2925 * this here so we don't have to make the call for no payload at each
2926 * command.
2927 */
2928 switch (cmd_ctx->lsm->cmd_type) {
2929 case LTTNG_LIST_SESSIONS:
2930 case LTTNG_LIST_TRACEPOINTS:
2931 case LTTNG_LIST_TRACEPOINT_FIELDS:
2932 case LTTNG_LIST_DOMAINS:
2933 case LTTNG_LIST_CHANNELS:
2934 case LTTNG_LIST_EVENTS:
2935 case LTTNG_LIST_SYSCALLS:
2936 case LTTNG_LIST_TRACKER_PIDS:
2937 case LTTNG_DATA_PENDING:
2938 case LTTNG_ROTATE_SESSION:
2939 case LTTNG_ROTATION_GET_INFO:
2940 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
2941 break;
2942 default:
2943 /* Setup lttng message with no payload */
2944 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
2945 if (ret < 0) {
2946 /* This label does not try to unlock the session */
2947 goto init_setup_error;
2948 }
2949 }
2950
2951 /* Commands that DO NOT need a session. */
2952 switch (cmd_ctx->lsm->cmd_type) {
2953 case LTTNG_CREATE_SESSION:
2954 case LTTNG_CREATE_SESSION_SNAPSHOT:
2955 case LTTNG_CREATE_SESSION_LIVE:
2956 case LTTNG_LIST_SESSIONS:
2957 case LTTNG_LIST_TRACEPOINTS:
2958 case LTTNG_LIST_SYSCALLS:
2959 case LTTNG_LIST_TRACEPOINT_FIELDS:
2960 case LTTNG_SAVE_SESSION:
2961 case LTTNG_REGISTER_TRIGGER:
2962 case LTTNG_UNREGISTER_TRIGGER:
2963 need_tracing_session = 0;
2964 break;
2965 default:
2966 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2967 /*
2968 * We keep the session list lock across _all_ commands
2969 * for now, because the per-session lock does not
2970 * handle teardown properly.
2971 */
2972 session_lock_list();
2973 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2974 if (cmd_ctx->session == NULL) {
2975 ret = LTTNG_ERR_SESS_NOT_FOUND;
2976 goto error;
2977 } else {
2978 /* Acquire lock for the session */
2979 session_lock(cmd_ctx->session);
2980 }
2981 break;
2982 }
2983
2984 /*
2985 * Commands that need a valid session but should NOT create one if none
2986 * exists. Instead of creating one and destroying it when the command is
2987 * handled, check it right here so we save a round trip through a useless
2988 * code path.
2989 */
2990 switch (cmd_ctx->lsm->cmd_type) {
2991 case LTTNG_DISABLE_CHANNEL:
2992 case LTTNG_DISABLE_EVENT:
2993 switch (cmd_ctx->lsm->domain.type) {
2994 case LTTNG_DOMAIN_KERNEL:
2995 if (!cmd_ctx->session->kernel_session) {
2996 ret = LTTNG_ERR_NO_CHANNEL;
2997 goto error;
2998 }
2999 break;
3000 case LTTNG_DOMAIN_JUL:
3001 case LTTNG_DOMAIN_LOG4J:
3002 case LTTNG_DOMAIN_PYTHON:
3003 case LTTNG_DOMAIN_UST:
3004 if (!cmd_ctx->session->ust_session) {
3005 ret = LTTNG_ERR_NO_CHANNEL;
3006 goto error;
3007 }
3008 break;
3009 default:
3010 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
3011 goto error;
3012 }
3013 default:
3014 break;
3015 }
3016
3017 if (!need_domain) {
3018 goto skip_domain;
3019 }
3020
3021 /*
3022 * Check domain type for specific "pre-action".
3023 */
3024 switch (cmd_ctx->lsm->domain.type) {
3025 case LTTNG_DOMAIN_KERNEL:
3026 if (!is_root) {
3027 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
3028 goto error;
3029 }
3030
3031 /* Kernel tracer check */
3032 if (kernel_tracer_fd == -1) {
3033 /* Basically, load kernel tracer modules */
3034 ret = init_kernel_tracer();
3035 if (ret != 0) {
3036 goto error;
3037 }
3038 }
3039
3040 /* Consumer is in an ERROR state. Report back to client */
3041 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
3042 ret = LTTNG_ERR_NO_KERNCONSUMERD;
3043 goto error;
3044 }
3045
3046 /* Need a session for kernel command */
3047 if (need_tracing_session) {
3048 if (cmd_ctx->session->kernel_session == NULL) {
3049 ret = create_kernel_session(cmd_ctx->session);
3050 if (ret < 0) {
3051 ret = LTTNG_ERR_KERN_SESS_FAIL;
3052 goto error;
3053 }
3054 }
3055
3056 /* Start the kernel consumer daemon */
3057 pthread_mutex_lock(&kconsumer_data.pid_mutex);
3058 if (kconsumer_data.pid == 0 &&
3059 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3060 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
3061 ret = start_consumerd(&kconsumer_data);
3062 if (ret < 0) {
3063 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
3064 goto error;
3065 }
3066 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
3067 } else {
3068 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
3069 }
3070
3071 /*
3072 * The consumer was just spawned so we need to add the socket to
3073 * the consumer output of the session, if it exists.
3074 */
3075 ret = consumer_create_socket(&kconsumer_data,
3076 cmd_ctx->session->kernel_session->consumer);
3077 if (ret < 0) {
3078 goto error;
3079 }
3080 }
3081
3082 break;
3083 case LTTNG_DOMAIN_JUL:
3084 case LTTNG_DOMAIN_LOG4J:
3085 case LTTNG_DOMAIN_PYTHON:
3086 case LTTNG_DOMAIN_UST:
3087 {
3088 if (!ust_app_supported()) {
3089 ret = LTTNG_ERR_NO_UST;
3090 goto error;
3091 }
3092 /* Consumer is in an ERROR state. Report back to client */
3093 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
3094 ret = LTTNG_ERR_NO_USTCONSUMERD;
3095 goto error;
3096 }
3097
3098 if (need_tracing_session) {
3099 /* Create UST session if none exist. */
3100 if (cmd_ctx->session->ust_session == NULL) {
3101 ret = create_ust_session(cmd_ctx->session,
3102 &cmd_ctx->lsm->domain);
3103 if (ret != LTTNG_OK) {
3104 goto error;
3105 }
3106 }
3107
3108 /* Start the UST consumer daemons */
3109 /* 64-bit */
3110 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
3111 if (config.consumerd64_bin_path.value &&
3112 ustconsumer64_data.pid == 0 &&
3113 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3114 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
3115 ret = start_consumerd(&ustconsumer64_data);
3116 if (ret < 0) {
3117 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
3118 uatomic_set(&ust_consumerd64_fd, -EINVAL);
3119 goto error;
3120 }
3121
3122 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
3123 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
3124 } else {
3125 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
3126 }
3127
3128 /*
3129 * Set up the socket for the 64-bit consumer. No need for atomic access
3130 * since it was set above and can ONLY be set in this thread.
3131 */
3132 ret = consumer_create_socket(&ustconsumer64_data,
3133 cmd_ctx->session->ust_session->consumer);
3134 if (ret < 0) {
3135 goto error;
3136 }
3137
3138 /* 32-bit */
3139 pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
3140 if (config.consumerd32_bin_path.value &&
3141 ustconsumer32_data.pid == 0 &&
3142 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
3143 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
3144 ret = start_consumerd(&ustconsumer32_data);
3145 if (ret < 0) {
3146 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
3147 uatomic_set(&ust_consumerd32_fd, -EINVAL);
3148 goto error;
3149 }
3150
3151 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
3152 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
3153 } else {
3154 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
3155 }
3156
3157 /*
3158 * Set up the socket for the 32-bit consumer. No need for atomic access
3159 * since it was set above and can ONLY be set in this thread.
3160 */
3161 ret = consumer_create_socket(&ustconsumer32_data,
3162 cmd_ctx->session->ust_session->consumer);
3163 if (ret < 0) {
3164 goto error;
3165 }
3166 }
3167 break;
3168 }
3169 default:
3170 break;
3171 }
3172 skip_domain:
3173
3174 /* Validate the consumer daemon state for the start/stop trace commands */
3175 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
3176 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
3177 switch (cmd_ctx->lsm->domain.type) {
3178 case LTTNG_DOMAIN_NONE:
3179 break;
3180 case LTTNG_DOMAIN_JUL:
3181 case LTTNG_DOMAIN_LOG4J:
3182 case LTTNG_DOMAIN_PYTHON:
3183 case LTTNG_DOMAIN_UST:
3184 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
3185 ret = LTTNG_ERR_NO_USTCONSUMERD;
3186 goto error;
3187 }
3188 break;
3189 case LTTNG_DOMAIN_KERNEL:
3190 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
3191 ret = LTTNG_ERR_NO_KERNCONSUMERD;
3192 goto error;
3193 }
3194 break;
3195 default:
3196 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
3197 goto error;
3198 }
3199 }
3200
3201 /*
3202 * Check that the UID or GID matches that of the tracing session.
3203 * The root user can interact with all sessions.
3204 */
3205 if (need_tracing_session) {
3206 if (!session_access_ok(cmd_ctx->session,
3207 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3208 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds)) ||
3209 cmd_ctx->session->destroyed) {
3210 ret = LTTNG_ERR_EPERM;
3211 goto error;
3212 }
3213 }
3214
3215 /*
3216 * Send relayd information to consumer as soon as we have a domain and a
3217 * session defined.
3218 */
3219 if (cmd_ctx->session && need_domain) {
3220 /*
3221 * Setup relayd if not done yet. If the relayd information was already
3222 * sent to the consumer, this call will gracefully return.
3223 */
3224 ret = cmd_setup_relayd(cmd_ctx->session);
3225 if (ret != LTTNG_OK) {
3226 goto error;
3227 }
3228 }
3229
3230 /* Process by command type */
3231 switch (cmd_ctx->lsm->cmd_type) {
3232 case LTTNG_ADD_CONTEXT:
3233 {
3234 /*
3235 * An LTTNG_ADD_CONTEXT command might have a supplementary
3236 * payload if the context being added is an application context.
3237 */
3238 if (cmd_ctx->lsm->u.context.ctx.ctx ==
3239 LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
3240 char *provider_name = NULL, *context_name = NULL;
3241 size_t provider_name_len =
3242 cmd_ctx->lsm->u.context.provider_name_len;
3243 size_t context_name_len =
3244 cmd_ctx->lsm->u.context.context_name_len;
3245
3246 if (provider_name_len == 0 || context_name_len == 0) {
3247 /*
3248 * Application provider and context names MUST
3249 * be provided.
3250 */
3251 ret = -LTTNG_ERR_INVALID;
3252 goto error;
3253 }
3254
3255 provider_name = zmalloc(provider_name_len + 1);
3256 if (!provider_name) {
3257 ret = -LTTNG_ERR_NOMEM;
3258 goto error;
3259 }
3260 cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name =
3261 provider_name;
3262
3263 context_name = zmalloc(context_name_len + 1);
3264 if (!context_name) {
3265 ret = -LTTNG_ERR_NOMEM;
3266 goto error_add_context;
3267 }
3268 cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name =
3269 context_name;
3270
3271 ret = lttcomm_recv_unix_sock(sock, provider_name,
3272 provider_name_len);
3273 if (ret < 0) {
3274 goto error_add_context;
3275 }
3276
3277 ret = lttcomm_recv_unix_sock(sock, context_name,
3278 context_name_len);
3279 if (ret < 0) {
3280 goto error_add_context;
3281 }
3282 }
3283
3284 /*
3285 * cmd_add_context assumes ownership of the provider and context
3286 * names.
3287 */
3288 ret = cmd_add_context(cmd_ctx->session,
3289 cmd_ctx->lsm->domain.type,
3290 cmd_ctx->lsm->u.context.channel_name,
3291 &cmd_ctx->lsm->u.context.ctx,
3292 kernel_poll_pipe[1]);
3293
3294 cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name = NULL;
3295 cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name = NULL;
3296 error_add_context:
3297 free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name);
3298 free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name);
3299 if (ret < 0) {
3300 goto error;
3301 }
3302 break;
3303 }
3304 case LTTNG_DISABLE_CHANNEL:
3305 {
3306 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3307 cmd_ctx->lsm->u.disable.channel_name);
3308 break;
3309 }
3310 case LTTNG_DISABLE_EVENT:
3311 {
3312
3313 /*
3314 * FIXME: handle filter; for now we just receive the filter's
3315 * bytecode along with the filter expression which are sent by
3316 * liblttng-ctl and discard them.
3317 *
3318 * This fixes an issue where the client may block while sending
3319 * the filter payload and encounter an error because the session
3320 * daemon closes the socket without ever handling this data.
3321 */
3322 size_t count = cmd_ctx->lsm->u.disable.expression_len +
3323 cmd_ctx->lsm->u.disable.bytecode_len;
3324
3325 if (count) {
3326 char data[LTTNG_FILTER_MAX_LEN];
3327
3328 DBG("Discarding disable event command payload of size %zu", count);
3329 while (count) {
3330 ret = lttcomm_recv_unix_sock(sock, data,
3331 count > sizeof(data) ? sizeof(data) : count);
3332 if (ret < 0) {
3333 goto error;
3334 }
3335
3336 count -= (size_t) ret;
3337 }
3338 }
3339 /* FIXME: passing packed structure to non-packed pointer */
3340 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3341 cmd_ctx->lsm->u.disable.channel_name,
3342 &cmd_ctx->lsm->u.disable.event);
3343 break;
3344 }
3345 case LTTNG_ENABLE_CHANNEL:
3346 {
3347 cmd_ctx->lsm->u.channel.chan.attr.extended.ptr =
3348 (struct lttng_channel_extended *) &cmd_ctx->lsm->u.channel.extended;
3349 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
3350 &cmd_ctx->lsm->u.channel.chan,
3351 kernel_poll_pipe[1]);
3352 break;
3353 }
3354 case LTTNG_TRACK_PID:
3355 {
3356 ret = cmd_track_pid(cmd_ctx->session,
3357 cmd_ctx->lsm->domain.type,
3358 cmd_ctx->lsm->u.pid_tracker.pid);
3359 break;
3360 }
3361 case LTTNG_UNTRACK_PID:
3362 {
3363 ret = cmd_untrack_pid(cmd_ctx->session,
3364 cmd_ctx->lsm->domain.type,
3365 cmd_ctx->lsm->u.pid_tracker.pid);
3366 break;
3367 }
3368 case LTTNG_ENABLE_EVENT:
3369 {
3370 struct lttng_event *ev = NULL;
3371 struct lttng_event_exclusion *exclusion = NULL;
3372 struct lttng_filter_bytecode *bytecode = NULL;
3373 char *filter_expression = NULL;
3374
3375 /* Handle exclusion events and receive it from the client. */
3376 if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
3377 size_t count = cmd_ctx->lsm->u.enable.exclusion_count;
3378
3379 exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
3380 (count * LTTNG_SYMBOL_NAME_LEN));
3381 if (!exclusion) {
3382 ret = LTTNG_ERR_EXCLUSION_NOMEM;
3383 goto error;
3384 }
3385
3386 DBG("Receiving var len exclusion event list from client ...");
3387 exclusion->count = count;
3388 ret = lttcomm_recv_unix_sock(sock, exclusion->names,
3389 count * LTTNG_SYMBOL_NAME_LEN);
3390 if (ret <= 0) {
3391 DBG("Nothing recv() from client var len data... continuing");
3392 *sock_error = 1;
3393 free(exclusion);
3394 ret = LTTNG_ERR_EXCLUSION_INVAL;
3395 goto error;
3396 }
3397 }
3398
3399 /* Get filter expression from client. */
3400 if (cmd_ctx->lsm->u.enable.expression_len > 0) {
3401 size_t expression_len =
3402 cmd_ctx->lsm->u.enable.expression_len;
3403
3404 if (expression_len > LTTNG_FILTER_MAX_LEN) {
3405 ret = LTTNG_ERR_FILTER_INVAL;
3406 free(exclusion);
3407 goto error;
3408 }
3409
3410 filter_expression = zmalloc(expression_len);
3411 if (!filter_expression) {
3412 free(exclusion);
3413 ret = LTTNG_ERR_FILTER_NOMEM;
3414 goto error;
3415 }
3416
3417 /* Receive var. len. data */
3418 DBG("Receiving var len filter's expression from client ...");
3419 ret = lttcomm_recv_unix_sock(sock, filter_expression,
3420 expression_len);
3421 if (ret <= 0) {
3422 DBG("Nothing recv() from client var len data... continuing");
3423 *sock_error = 1;
3424 free(filter_expression);
3425 free(exclusion);
3426 ret = LTTNG_ERR_FILTER_INVAL;
3427 goto error;
3428 }
3429 }
3430
3431 /* Handle filter and get bytecode from client. */
3432 if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
3433 size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
3434
3435 if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
3436 ret = LTTNG_ERR_FILTER_INVAL;
3437 free(filter_expression);
3438 free(exclusion);
3439 goto error;
3440 }
3441
3442 bytecode = zmalloc(bytecode_len);
3443 if (!bytecode) {
3444 free(filter_expression);
3445 free(exclusion);
3446 ret = LTTNG_ERR_FILTER_NOMEM;
3447 goto error;
3448 }
3449
3450 /* Receive var. len. data */
3451 DBG("Receiving var len filter's bytecode from client ...");
3452 ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
3453 if (ret <= 0) {
3454 DBG("Nothing recv() from client var len data... continuing");
3455 *sock_error = 1;
3456 free(filter_expression);
3457 free(bytecode);
3458 free(exclusion);
3459 ret = LTTNG_ERR_FILTER_INVAL;
3460 goto error;
3461 }
3462
3463 if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
3464 free(filter_expression);
3465 free(bytecode);
3466 free(exclusion);
3467 ret = LTTNG_ERR_FILTER_INVAL;
3468 goto error;
3469 }
3470 }
3471
3472 ev = lttng_event_copy(&cmd_ctx->lsm->u.enable.event);
3473 if (!ev) {
3474 DBG("Failed to copy event: %s",
3475 cmd_ctx->lsm->u.enable.event.name);
3476 free(filter_expression);
3477 free(bytecode);
3478 free(exclusion);
3479 ret = LTTNG_ERR_NOMEM;
3480 goto error;
3481 }
3482
3483
3484 if (cmd_ctx->lsm->u.enable.userspace_probe_location_len > 0) {
3485 /* Expect a userspace probe description. */
3486 ret = receive_userspace_probe(cmd_ctx, sock, sock_error, ev);
3487 if (ret) {
3488 free(filter_expression);
3489 free(bytecode);
3490 free(exclusion);
3491 lttng_event_destroy(ev);
3492 goto error;
3493 }
3494 }
3495
3496 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3497 cmd_ctx->lsm->u.enable.channel_name,
3498 ev,
3499 filter_expression, bytecode, exclusion,
3500 kernel_poll_pipe[1]);
3501 lttng_event_destroy(ev);
3502 break;
3503 }
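/*
 * Recap of the variable-length payloads consumed by the enable-event
 * case above, in wire order: exclusion names (exclusion_count *
 * LTTNG_SYMBOL_NAME_LEN bytes), the filter expression (expression_len
 * bytes), the filter bytecode (bytecode_len bytes) and, optionally, a
 * userspace probe location. Every length comes from the fixed-size lsm
 * header received beforehand.
 */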
3504 case LTTNG_LIST_TRACEPOINTS:
3505 {
3506 struct lttng_event *events;
3507 ssize_t nb_events;
3508
3509 session_lock_list();
3510 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3511 session_unlock_list();
3512 if (nb_events < 0) {
3513 /* Return value is a negative lttng_error_code. */
3514 ret = -nb_events;
3515 goto error;
3516 }
3517
3518 /*
3519 * Setup lttng message with payload size set to the event list size in
3520 * bytes and then copy list into the llm payload.
3521 */
3522 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
3523 sizeof(struct lttng_event) * nb_events);
3524 free(events);
3525
3526 if (ret < 0) {
3527 goto setup_error;
3528 }
3529
3530 ret = LTTNG_OK;
3531 break;
3532 }
3533 case LTTNG_LIST_TRACEPOINT_FIELDS:
3534 {
3535 struct lttng_event_field *fields;
3536 ssize_t nb_fields;
3537
3538 session_lock_list();
3539 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
3540 &fields);
3541 session_unlock_list();
3542 if (nb_fields < 0) {
3543 /* Return value is a negative lttng_error_code. */
3544 ret = -nb_fields;
3545 goto error;
3546 }
3547
3548 /*
3549 * Setup lttng message with payload size set to the field list size in
3550 * bytes and then copy list into the llm payload.
3551 */
3552 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, fields,
3553 sizeof(struct lttng_event_field) * nb_fields);
3554 free(fields);
3555
3556 if (ret < 0) {
3557 goto setup_error;
3558 }
3559
3560 ret = LTTNG_OK;
3561 break;
3562 }
3563 case LTTNG_LIST_SYSCALLS:
3564 {
3565 struct lttng_event *events;
3566 ssize_t nb_events;
3567
3568 nb_events = cmd_list_syscalls(&events);
3569 if (nb_events < 0) {
3570 /* Return value is a negative lttng_error_code. */
3571 ret = -nb_events;
3572 goto error;
3573 }
3574
3575 /*
3576 * Setup lttng message with payload size set to the event list size in
3577 * bytes and then copy list into the llm payload.
3578 */
3579 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
3580 sizeof(struct lttng_event) * nb_events);
3581 free(events);
3582
3583 if (ret < 0) {
3584 goto setup_error;
3585 }
3586
3587 ret = LTTNG_OK;
3588 break;
3589 }
3590 case LTTNG_LIST_TRACKER_PIDS:
3591 {
3592 int32_t *pids = NULL;
3593 ssize_t nr_pids;
3594
3595 nr_pids = cmd_list_tracker_pids(cmd_ctx->session,
3596 cmd_ctx->lsm->domain.type, &pids);
3597 if (nr_pids < 0) {
3598 /* Return value is a negative lttng_error_code. */
3599 ret = -nr_pids;
3600 goto error;
3601 }
3602
3603 /*
3604 * Setup lttng message with payload size set to the pid list size in
3605 * bytes and then copy list into the llm payload.
3606 */
3607 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, pids,
3608 sizeof(int32_t) * nr_pids);
3609 free(pids);
3610
3611 if (ret < 0) {
3612 goto setup_error;
3613 }
3614
3615 ret = LTTNG_OK;
3616 break;
3617 }
3618 case LTTNG_SET_CONSUMER_URI:
3619 {
3620 size_t nb_uri, len;
3621 struct lttng_uri *uris;
3622
3623 nb_uri = cmd_ctx->lsm->u.uri.size;
3624 len = nb_uri * sizeof(struct lttng_uri);
3625
3626 if (nb_uri == 0) {
3627 ret = LTTNG_ERR_INVALID;
3628 goto error;
3629 }
3630
3631 uris = zmalloc(len);
3632 if (uris == NULL) {
3633 ret = LTTNG_ERR_FATAL;
3634 goto error;
3635 }
3636
3637 /* Receive variable len data */
3638 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3639 ret = lttcomm_recv_unix_sock(sock, uris, len);
3640 if (ret <= 0) {
3641 DBG("No URIs received from client... continuing");
3642 *sock_error = 1;
3643 ret = LTTNG_ERR_SESSION_FAIL;
3644 free(uris);
3645 goto error;
3646 }
3647
3648 ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
3649 free(uris);
3650 if (ret != LTTNG_OK) {
3651 goto error;
3652 }
3653
3655 break;
3656 }
3657 case LTTNG_START_TRACE:
3658 {
3659 /*
3660 * On the first start, if we have a kernel session and we have
3661 * enabled time or size-based rotations, we have to make sure
3662 * the kernel tracer supports it.
3663 */
3664 if (!cmd_ctx->session->has_been_started &&
3665 cmd_ctx->session->kernel_session &&
3666 (cmd_ctx->session->rotate_timer_period ||
3667 cmd_ctx->session->rotate_size) &&
3668 !check_rotate_compatible()) {
3669 DBG("Kernel tracer version is not compatible with the rotation feature");
3670 ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
3671 goto error;
3672 }
3673 ret = cmd_start_trace(cmd_ctx->session);
3674 break;
3675 }
3676 case LTTNG_STOP_TRACE:
3677 {
3678 ret = cmd_stop_trace(cmd_ctx->session);
3679 break;
3680 }
3681 case LTTNG_CREATE_SESSION:
3682 {
3683 size_t nb_uri, len;
3684 struct lttng_uri *uris = NULL;
3685
3686 nb_uri = cmd_ctx->lsm->u.uri.size;
3687 len = nb_uri * sizeof(struct lttng_uri);
3688
3689 if (nb_uri > 0) {
3690 uris = zmalloc(len);
3691 if (uris == NULL) {
3692 ret = LTTNG_ERR_FATAL;
3693 goto error;
3694 }
3695
3696 /* Receive variable len data */
3697 DBG("Waiting for %zu URIs from client ...", nb_uri);
3698 ret = lttcomm_recv_unix_sock(sock, uris, len);
3699 if (ret <= 0) {
3700 DBG("No URIs received from client... continuing");
3701 *sock_error = 1;
3702 ret = LTTNG_ERR_SESSION_FAIL;
3703 free(uris);
3704 goto error;
3705 }
3706
3707 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3708