lttng-tools.git: src/bin/lttng-relayd/main.c (commit a2904eb08ae044faa2109338f4af5fe1d0c764b2)
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <inttypes.h>
38 #include <urcu/futex.h>
39 #include <urcu/uatomic.h>
40 #include <urcu/rculist.h>
41 #include <unistd.h>
42 #include <fcntl.h>
43
44 #include <lttng/lttng.h>
45 #include <common/common.h>
46 #include <common/compat/poll.h>
47 #include <common/compat/socket.h>
48 #include <common/compat/endian.h>
49 #include <common/compat/getenv.h>
50 #include <common/defaults.h>
51 #include <common/daemonize.h>
52 #include <common/futex.h>
53 #include <common/sessiond-comm/sessiond-comm.h>
54 #include <common/sessiond-comm/inet.h>
55 #include <common/sessiond-comm/relayd.h>
56 #include <common/uri.h>
57 #include <common/utils.h>
58 #include <common/align.h>
59 #include <common/config/session-config.h>
60 #include <common/dynamic-buffer.h>
61 #include <common/buffer-view.h>
62 #include <common/string-utils/format.h>
63
64 #include "cmd.h"
65 #include "ctf-trace.h"
66 #include "index.h"
67 #include "utils.h"
68 #include "lttng-relayd.h"
69 #include "live.h"
70 #include "health-relayd.h"
71 #include "testpoint.h"
72 #include "viewer-stream.h"
73 #include "session.h"
74 #include "stream.h"
75 #include "connection.h"
76 #include "tracefile-array.h"
77 #include "tcp_keep_alive.h"
78 #include "sessiond-trace-chunks.h"
79
80 static const char *help_msg =
81 #ifdef LTTNG_EMBED_HELP
82 #include <lttng-relayd.8.h>
83 #else
84 NULL
85 #endif
86 ;
87
88 enum relay_connection_status {
89 RELAY_CONNECTION_STATUS_OK,
90 /* An error occurred while processing an event on the connection. */
91 RELAY_CONNECTION_STATUS_ERROR,
92 /* Connection closed/shutdown cleanly. */
93 RELAY_CONNECTION_STATUS_CLOSED,
94 };
95
96 /* command line options */
97 char *opt_output_path;
98 static int opt_daemon, opt_background;
99
100 /*
101 * The listener, live listener and health check threads must all report
102 * in before the daemon signals its readiness to the parent process.
103 */
104 #define NR_LTTNG_RELAY_READY 3
105 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
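/*
 * Each of those threads calls lttng_relay_notify_ready() once it is
 * operational; that call decrements the counter above and, when it
 * reaches zero and the relayd runs daemonized, signals the parent
 * process with SIGUSR1.
 */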
106
107 /* Size of receive buffer. */
108 #define RECV_DATA_BUFFER_SIZE 65536
109
110 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
111 static pid_t child_ppid; /* Internal parent PID used with daemonize. */
112
113 static struct lttng_uri *control_uri;
114 static struct lttng_uri *data_uri;
115 static struct lttng_uri *live_uri;
116
117 const char *progname;
118
119 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
120 static int tracing_group_name_override;
121
122 const char * const config_section_name = "relayd";
123
124 /*
125 * Quit pipe for all threads. This permits a single cancellation point
126 * for all threads when receiving an event on the pipe.
127 */
128 int thread_quit_pipe[2] = { -1, -1 };
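/*
 * Every thread in this file adds thread_quit_pipe[0] to its poll set (see
 * create_thread_poll_set()); writing a single byte to thread_quit_pipe[1]
 * in lttng_relay_stop_threads() therefore wakes those poll loops so they
 * can exit.
 */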
129
130 /*
131 * This pipe is used to inform the worker thread that a command is queued and
132 * ready to be processed.
133 */
134 static int relay_conn_pipe[2] = { -1, -1 };
135
136 /* Shared between threads */
137 static int dispatch_thread_exit;
138
139 static pthread_t listener_thread;
140 static pthread_t dispatcher_thread;
141 static pthread_t worker_thread;
142 static pthread_t health_thread;
143
144 /*
145 * last_relay_stream_id_lock protects last_relay_stream_id increment
146 * atomicity on 32-bit architectures.
147 */
148 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
149 static uint64_t last_relay_stream_id;
150
151 /*
152 * Relay command queue.
153 *
154 * The relay_thread_listener and relay_thread_dispatcher communicate with this
155 * queue.
156 */
157 static struct relay_conn_queue relay_conn_queue;
158
159 /* Global relay stream hash table. */
160 struct lttng_ht *relay_streams_ht;
161
162 /* Global relay viewer stream hash table. */
163 struct lttng_ht *viewer_streams_ht;
164
165 /* Global relay sessions hash table. */
166 struct lttng_ht *sessions_ht;
167
168 /* Relayd health monitoring */
169 struct health_app *health_relayd;
170
171 struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
172
173 static struct option long_options[] = {
174 { "control-port", 1, 0, 'C', },
175 { "data-port", 1, 0, 'D', },
176 { "live-port", 1, 0, 'L', },
177 { "daemonize", 0, 0, 'd', },
178 { "background", 0, 0, 'b', },
179 { "group", 1, 0, 'g', },
180 { "help", 0, 0, 'h', },
181 { "output", 1, 0, 'o', },
182 { "verbose", 0, 0, 'v', },
183 { "config", 1, 0, 'f' },
184 { "version", 0, 0, 'V' },
185 { NULL, 0, 0, 0, },
186 };
187
188 static const char *config_ignore_options[] = { "help", "config", "version" };
189
190 /*
191 * Take an option from the getopt output and set it in the right variable to be
192 * used later.
193 *
194 * Return 0 on success else a negative value.
195 */
196 static int set_option(int opt, const char *arg, const char *optname)
197 {
198 int ret;
199
200 switch (opt) {
201 case 0:
202 fprintf(stderr, "option %s", optname);
203 if (arg) {
204 fprintf(stderr, " with arg %s\n", arg);
205 }
206 break;
207 case 'C':
208 if (lttng_is_setuid_setgid()) {
209 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
210 "-C, --control-port");
211 } else {
212 ret = uri_parse(arg, &control_uri);
213 if (ret < 0) {
214 ERR("Invalid control URI specified");
215 goto end;
216 }
217 if (control_uri->port == 0) {
218 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
219 }
220 }
221 break;
222 case 'D':
223 if (lttng_is_setuid_setgid()) {
224 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
225 "-D, --data-port");
226 } else {
227 ret = uri_parse(arg, &data_uri);
228 if (ret < 0) {
229 ERR("Invalid data URI specified");
230 goto end;
231 }
232 if (data_uri->port == 0) {
233 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
234 }
235 }
236 break;
237 case 'L':
238 if (lttng_is_setuid_setgid()) {
239 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
240 "-L, --live-port");
241 } else {
242 ret = uri_parse(arg, &live_uri);
243 if (ret < 0) {
244 ERR("Invalid live URI specified");
245 goto end;
246 }
247 if (live_uri->port == 0) {
248 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
249 }
250 }
251 break;
252 case 'd':
253 opt_daemon = 1;
254 break;
255 case 'b':
256 opt_background = 1;
257 break;
258 case 'g':
259 if (lttng_is_setuid_setgid()) {
260 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
261 "-g, --group");
262 } else {
263 tracing_group_name = strdup(arg);
264 if (tracing_group_name == NULL) {
265 ret = -errno;
266 PERROR("strdup");
267 goto end;
268 }
269 tracing_group_name_override = 1;
270 }
271 break;
272 case 'h':
273 ret = utils_show_help(8, "lttng-relayd", help_msg);
274 if (ret) {
275 ERR("Cannot show --help for `lttng-relayd`");
276 perror("exec");
277 }
278 exit(EXIT_FAILURE);
279 case 'V':
280 fprintf(stdout, "%s\n", VERSION);
281 exit(EXIT_SUCCESS);
282 case 'o':
283 if (lttng_is_setuid_setgid()) {
284 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
285 "-o, --output");
286 } else {
287 ret = asprintf(&opt_output_path, "%s", arg);
288 if (ret < 0) {
289 ret = -errno;
290 PERROR("asprintf opt_output_path");
291 goto end;
292 }
293 }
294 break;
295 case 'v':
296 /* Verbose level can increase using multiple -v */
297 if (arg) {
298 lttng_opt_verbose = config_parse_value(arg);
299 } else {
300 /* Only 3 levels of verbosity (-vvv). */
301 if (lttng_opt_verbose < 3) {
302 lttng_opt_verbose += 1;
303 }
304 }
305 break;
306 default:
307 /* Unknown option or other error.
308 * Error is printed by getopt, just return */
309 ret = -1;
310 goto end;
311 }
312
313 /* All good. */
314 ret = 0;
315
316 end:
317 return ret;
318 }
319
320 /*
321 * config_entry_handler_cb used to handle options read from a config file.
322 * See config_entry_handler_cb comment in common/config/session-config.h for the
323 * return value conventions.
324 */
325 static int config_entry_handler(const struct config_entry *entry, void *unused)
326 {
327 int ret = 0, i;
328
329 if (!entry || !entry->name || !entry->value) {
330 ret = -EINVAL;
331 goto end;
332 }
333
334 /* Check if the option is to be ignored */
335 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
336 if (!strcmp(entry->name, config_ignore_options[i])) {
337 goto end;
338 }
339 }
340
341 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
342 /* Ignore if entry name is not fully matched. */
343 if (strcmp(entry->name, long_options[i].name)) {
344 continue;
345 }
346
347 /*
348 * If the option takes no argument on the command line,
349 * we have to check if the value is "true". We support
350 * non-zero numeric values, true, on and yes.
351 */
352 if (!long_options[i].has_arg) {
353 ret = config_parse_value(entry->value);
354 if (ret <= 0) {
355 if (ret) {
356 WARN("Invalid configuration value \"%s\" for option %s",
357 entry->value, entry->name);
358 }
359 /* False, skip boolean config option. */
360 goto end;
361 }
362 }
363
364 ret = set_option(long_options[i].val, entry->value, entry->name);
365 goto end;
366 }
367
368 WARN("Unrecognized option \"%s\" in daemon configuration file.",
369 entry->name);
370
371 end:
372 return ret;
373 }
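/*
 * For illustration (section and keys as accepted above, values purely
 * hypothetical), a daemon configuration file could contain:
 *
 *   [relayd]
 *   daemonize=yes
 *   output=/var/lib/lttng-relayd/traces
 *   control-port=tcp://0.0.0.0:5342
 *
 * Boolean keys are validated through config_parse_value() while valued
 * keys are forwarded to set_option() exactly as if they had been given
 * on the command line.
 */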
374
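/*
 * Option parsing below is done in two passes: the first getopt_long()
 * loop only looks for -f/--config so the configuration file can be
 * applied, then getopt's global state is reset and the command line is
 * parsed in full, which lets explicit command-line arguments take
 * precedence over configuration file values.
 */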
375 static int set_options(int argc, char **argv)
376 {
377 int c, ret = 0, option_index = 0, retval = 0;
378 int orig_optopt = optopt, orig_optind = optind;
379 char *default_address, *optstring;
380 const char *config_path = NULL;
381
382 optstring = utils_generate_optstring(long_options,
383 sizeof(long_options) / sizeof(struct option));
384 if (!optstring) {
385 retval = -ENOMEM;
386 goto exit;
387 }
388
389 /* Check for the --config option */
390
391 while ((c = getopt_long(argc, argv, optstring, long_options,
392 &option_index)) != -1) {
393 if (c == '?') {
394 retval = -EINVAL;
395 goto exit;
396 } else if (c != 'f') {
397 continue;
398 }
399
400 if (lttng_is_setuid_setgid()) {
401 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
402 "-f, --config");
403 } else {
404 config_path = utils_expand_path(optarg);
405 if (!config_path) {
406 ERR("Failed to resolve path: %s", optarg);
407 }
408 }
409 }
410
411 ret = config_get_section_entries(config_path, config_section_name,
412 config_entry_handler, NULL);
413 if (ret) {
414 if (ret > 0) {
415 ERR("Invalid configuration option at line %i", ret);
416 }
417 retval = -1;
418 goto exit;
419 }
420
421 /* Reset getopt's global state */
422 optopt = orig_optopt;
423 optind = orig_optind;
424 while (1) {
425 c = getopt_long(argc, argv, optstring, long_options, &option_index);
426 if (c == -1) {
427 break;
428 }
429
430 ret = set_option(c, optarg, long_options[option_index].name);
431 if (ret < 0) {
432 retval = -1;
433 goto exit;
434 }
435 }
436
437 /* assign default values */
438 if (control_uri == NULL) {
439 ret = asprintf(&default_address,
440 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
441 DEFAULT_NETWORK_CONTROL_PORT);
442 if (ret < 0) {
443 PERROR("asprintf default control address");
444 retval = -1;
445 goto exit;
446 }
447
448 ret = uri_parse(default_address, &control_uri);
449 free(default_address);
450 if (ret < 0) {
451 ERR("Invalid control URI specified");
452 retval = -1;
453 goto exit;
454 }
455 }
456 if (data_uri == NULL) {
457 ret = asprintf(&default_address,
458 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
459 DEFAULT_NETWORK_DATA_PORT);
460 if (ret < 0) {
461 PERROR("asprintf default data address");
462 retval = -1;
463 goto exit;
464 }
465
466 ret = uri_parse(default_address, &data_uri);
467 free(default_address);
468 if (ret < 0) {
469 ERR("Invalid data URI specified");
470 retval = -1;
471 goto exit;
472 }
473 }
474 if (live_uri == NULL) {
475 ret = asprintf(&default_address,
476 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
477 DEFAULT_NETWORK_VIEWER_PORT);
478 if (ret < 0) {
479 PERROR("asprintf default viewer control address");
480 retval = -1;
481 goto exit;
482 }
483
484 ret = uri_parse(default_address, &live_uri);
485 free(default_address);
486 if (ret < 0) {
487 ERR("Invalid viewer control URI specified");
488 retval = -1;
489 goto exit;
490 }
491 }
492
493 exit:
494 free(optstring);
495 return retval;
496 }
497
498 static void print_global_objects(void)
499 {
500 rcu_register_thread();
501
502 print_viewer_streams();
503 print_relay_streams();
504 print_sessions();
505
506 rcu_unregister_thread();
507 }
508
509 /*
510 * Cleanup the daemon
511 */
512 static void relayd_cleanup(void)
513 {
514 print_global_objects();
515
516 DBG("Cleaning up");
517
518 if (viewer_streams_ht)
519 lttng_ht_destroy(viewer_streams_ht);
520 if (relay_streams_ht)
521 lttng_ht_destroy(relay_streams_ht);
522 if (sessions_ht)
523 lttng_ht_destroy(sessions_ht);
524
525 /* free the dynamically allocated opt_output_path */
526 free(opt_output_path);
527
528 /* Close thread quit pipes */
529 utils_close_pipe(thread_quit_pipe);
530
531 uri_free(control_uri);
532 uri_free(data_uri);
533 /* Live URI is freed in the live thread. */
534
535 if (tracing_group_name_override) {
536 free((void *) tracing_group_name);
537 }
538 }
539
540 /*
541 * Write to writable pipe used to notify a thread.
542 */
543 static int notify_thread_pipe(int wpipe)
544 {
545 ssize_t ret;
546
547 ret = lttng_write(wpipe, "!", 1);
548 if (ret < 1) {
549 PERROR("write poll pipe");
550 goto end;
551 }
552 ret = 0;
553 end:
554 return ret;
555 }
556
557 static int notify_health_quit_pipe(int *pipe)
558 {
559 ssize_t ret;
560
561 ret = lttng_write(pipe[1], "4", 1);
562 if (ret < 1) {
563 PERROR("write relay health quit");
564 goto end;
565 }
566 ret = 0;
567 end:
568 return ret;
569 }
570
571 /*
572 * Stop all relayd and relayd-live threads.
573 */
574 int lttng_relay_stop_threads(void)
575 {
576 int retval = 0;
577
578 /* Stopping all threads */
579 DBG("Terminating all threads");
580 if (notify_thread_pipe(thread_quit_pipe[1])) {
581 ERR("write error on thread quit pipe");
582 retval = -1;
583 }
584
585 if (notify_health_quit_pipe(health_quit_pipe)) {
586 ERR("write error on health quit pipe");
587 }
588
589 /* Dispatch thread */
590 CMM_STORE_SHARED(dispatch_thread_exit, 1);
591 futex_nto1_wake(&relay_conn_queue.futex);
592
593 if (relayd_live_stop()) {
594 ERR("Error stopping live threads");
595 retval = -1;
596 }
597 return retval;
598 }
599
600 /*
601 * Signal handler for the daemon
602 *
603 * Simply stop all worker threads, letting main() return gracefully after
604 * joining all threads and calling cleanup().
605 */
606 static void sighandler(int sig)
607 {
608 switch (sig) {
609 case SIGINT:
610 DBG("SIGINT caught");
611 if (lttng_relay_stop_threads()) {
612 ERR("Error stopping threads");
613 }
614 break;
615 case SIGTERM:
616 DBG("SIGTERM caught");
617 if (lttng_relay_stop_threads()) {
618 ERR("Error stopping threads");
619 }
620 break;
621 case SIGUSR1:
622 CMM_STORE_SHARED(recv_child_signal, 1);
623 break;
624 default:
625 break;
626 }
627 }
628
629 /*
630 * Set up the signal handlers for:
631 * SIGINT, SIGTERM, SIGUSR1, SIGPIPE
632 */
633 static int set_signal_handler(void)
634 {
635 int ret = 0;
636 struct sigaction sa;
637 sigset_t sigset;
638
639 if ((ret = sigemptyset(&sigset)) < 0) {
640 PERROR("sigemptyset");
641 return ret;
642 }
643
644 sa.sa_mask = sigset;
645 sa.sa_flags = 0;
646
647 sa.sa_handler = sighandler;
648 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
649 PERROR("sigaction");
650 return ret;
651 }
652
653 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
654 PERROR("sigaction");
655 return ret;
656 }
657
658 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
659 PERROR("sigaction");
660 return ret;
661 }
662
663 sa.sa_handler = SIG_IGN;
664 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
665 PERROR("sigaction");
666 return ret;
667 }
668
669 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
670
671 return ret;
672 }
673
674 void lttng_relay_notify_ready(void)
675 {
676 /* Notify the parent of the fork() process that we are ready. */
677 if (opt_daemon || opt_background) {
678 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
679 kill(child_ppid, SIGUSR1);
680 }
681 }
682 }
683
684 /*
685 * Init thread quit pipe.
686 *
687 * Return -1 on error or 0 if all pipes are created.
688 */
689 static int init_thread_quit_pipe(void)
690 {
691 int ret;
692
693 ret = utils_create_pipe_cloexec(thread_quit_pipe);
694
695 return ret;
696 }
697
698 /*
699 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
700 */
701 static int create_thread_poll_set(struct lttng_poll_event *events, int size)
702 {
703 int ret;
704
705 if (events == NULL || size == 0) {
706 ret = -1;
707 goto error;
708 }
709
710 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
711 if (ret < 0) {
712 goto error;
713 }
714
715 /* Add quit pipe */
716 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
717 if (ret < 0) {
718 goto error;
719 }
720
721 return 0;
722
723 error:
724 return ret;
725 }
726
727 /*
728 * Check if the thread quit pipe was triggered.
729 *
730 * Return 1 if it was triggered, else 0.
731 */
732 static int check_thread_quit_pipe(int fd, uint32_t events)
733 {
734 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
735 return 1;
736 }
737
738 return 0;
739 }
740
741 /*
742 * Create and init socket from uri.
743 */
744 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
745 {
746 int ret;
747 struct lttcomm_sock *sock = NULL;
748
749 sock = lttcomm_alloc_sock_from_uri(uri);
750 if (sock == NULL) {
751 ERR("Allocating socket");
752 goto error;
753 }
754
755 ret = lttcomm_create_sock(sock);
756 if (ret < 0) {
757 goto error;
758 }
759 DBG("Listening on sock %d", sock->fd);
760
761 ret = sock->ops->bind(sock);
762 if (ret < 0) {
763 PERROR("Failed to bind socket");
764 goto error;
765 }
766
767 ret = sock->ops->listen(sock, -1);
768 if (ret < 0) {
769 goto error;
770
771 }
772
773 return sock;
774
775 error:
776 if (sock) {
777 lttcomm_destroy_sock(sock);
778 }
779 return NULL;
780 }
781
782 /*
783 * This thread manages the listening for new connections on the network
784 */
785 static void *relay_thread_listener(void *data)
786 {
787 int i, ret, pollfd, err = -1;
788 uint32_t revents, nb_fd;
789 struct lttng_poll_event events;
790 struct lttcomm_sock *control_sock, *data_sock;
791
792 DBG("[thread] Relay listener started");
793
794 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
795
796 health_code_update();
797
798 control_sock = relay_socket_create(control_uri);
799 if (!control_sock) {
800 goto error_sock_control;
801 }
802
803 data_sock = relay_socket_create(data_uri);
804 if (!data_sock) {
805 goto error_sock_relay;
806 }
807
808 /*
809 * Pass 3 as size here for the thread quit pipe, control and
810 * data socket.
811 */
812 ret = create_thread_poll_set(&events, 3);
813 if (ret < 0) {
814 goto error_create_poll;
815 }
816
817 /* Add the control socket */
818 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
819 if (ret < 0) {
820 goto error_poll_add;
821 }
822
823 /* Add the data socket */
824 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
825 if (ret < 0) {
826 goto error_poll_add;
827 }
828
829 lttng_relay_notify_ready();
830
831 if (testpoint(relayd_thread_listener)) {
832 goto error_testpoint;
833 }
834
835 while (1) {
836 health_code_update();
837
838 DBG("Listener accepting connections");
839
840 restart:
841 health_poll_entry();
842 ret = lttng_poll_wait(&events, -1);
843 health_poll_exit();
844 if (ret < 0) {
845 /*
846 * Restart interrupted system call.
847 */
848 if (errno == EINTR) {
849 goto restart;
850 }
851 goto error;
852 }
853
854 nb_fd = ret;
855
856 DBG("Relay new connection received");
857 for (i = 0; i < nb_fd; i++) {
858 health_code_update();
859
860 /* Fetch once the poll data */
861 revents = LTTNG_POLL_GETEV(&events, i);
862 pollfd = LTTNG_POLL_GETFD(&events, i);
863
864 /* Thread quit pipe has been closed. Killing thread. */
865 ret = check_thread_quit_pipe(pollfd, revents);
866 if (ret) {
867 err = 0;
868 goto exit;
869 }
870
871 if (revents & LPOLLIN) {
872 /*
873 * A new connection is requested, therefore a
874 * sessiond/consumerd connection is allocated in
875 * this thread, enqueued to a global queue and
876 * dequeued (and freed) in the worker thread.
877 */
878 int val = 1;
879 struct relay_connection *new_conn;
880 struct lttcomm_sock *newsock;
881 enum connection_type type;
882
883 if (pollfd == data_sock->fd) {
884 type = RELAY_DATA;
885 newsock = data_sock->ops->accept(data_sock);
886 } else {
887 assert(pollfd == control_sock->fd);
888 type = RELAY_CONTROL;
889 newsock = control_sock->ops->accept(control_sock);
890 }
891 if (!newsock) {
892 PERROR("accepting sock");
893 goto error;
894 }
895 /* Only log the socket once accept() is known to have succeeded. */
896 DBG("Relay %s connection accepted, socket %d",
897 type == RELAY_DATA ? "data" : "control",
898 newsock->fd);
899
900 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
901 sizeof(val));
902 if (ret < 0) {
903 PERROR("setsockopt inet");
904 lttcomm_destroy_sock(newsock);
905 goto error;
906 }
907
908 ret = socket_apply_keep_alive_config(newsock->fd);
909 if (ret < 0) {
910 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
911 newsock->fd);
912 lttcomm_destroy_sock(newsock);
913 goto error;
914 }
915
916 new_conn = connection_create(newsock, type);
917 if (!new_conn) {
918 lttcomm_destroy_sock(newsock);
919 goto error;
920 }
921
922 /* Enqueue request for the dispatcher thread. */
923 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
924 &new_conn->qnode);
925
926 /*
927 * Wake the dispatch queue futex.
928 * Implicit memory barrier with the
929 * exchange in cds_wfcq_enqueue.
930 */
931 futex_nto1_wake(&relay_conn_queue.futex);
932 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
933 ERR("socket poll error");
934 goto error;
935 } else {
936 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
937 goto error;
938 }
939 }
940 }
941
942 exit:
943 error:
944 error_poll_add:
945 error_testpoint:
946 lttng_poll_clean(&events);
947 error_create_poll:
948 if (data_sock->fd >= 0) {
949 ret = data_sock->ops->close(data_sock);
950 if (ret) {
951 PERROR("close");
952 }
953 }
954 lttcomm_destroy_sock(data_sock);
955 error_sock_relay:
956 if (control_sock->fd >= 0) {
957 ret = control_sock->ops->close(control_sock);
958 if (ret) {
959 PERROR("close");
960 }
961 }
962 lttcomm_destroy_sock(control_sock);
963 error_sock_control:
964 if (err) {
965 health_error();
966 ERR("Health error occurred in %s", __func__);
967 }
968 health_unregister(health_relayd);
969 DBG("Relay listener thread cleanup complete");
970 lttng_relay_stop_threads();
971 return NULL;
972 }
973
974 /*
975 * This thread manages the dispatching of the requests to worker threads
976 */
977 static void *relay_thread_dispatcher(void *data)
978 {
979 int err = -1;
980 ssize_t ret;
981 struct cds_wfcq_node *node;
982 struct relay_connection *new_conn = NULL;
983
984 DBG("[thread] Relay dispatcher started");
985
986 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
987
988 if (testpoint(relayd_thread_dispatcher)) {
989 goto error_testpoint;
990 }
991
992 health_code_update();
993
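/*
 * Wake-up protocol with the listener thread: the futex is "prepared"
 * before the queue is checked, and futex_nto1_wait() below only blocks
 * if no wake-up happened since then. The listener calls
 * futex_nto1_wake() after every cds_wfcq_enqueue(), whose implicit
 * memory barrier orders the enqueue before the wake-up.
 */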
994 for (;;) {
995 health_code_update();
996
997 /* Atomically prepare the queue futex */
998 futex_nto1_prepare(&relay_conn_queue.futex);
999
1000 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1001 break;
1002 }
1003
1004 do {
1005 health_code_update();
1006
1007 /* Dequeue commands */
1008 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1009 &relay_conn_queue.tail);
1010 if (node == NULL) {
1011 DBG("Woken up but nothing in the relay command queue");
1012 /* Continue thread execution */
1013 break;
1014 }
1015 new_conn = caa_container_of(node, struct relay_connection, qnode);
1016
1017 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1018
1019 /*
1020 * Inform worker thread of the new request. This
1021 * call is blocking so we can be assured that
1022 * the data will be read at some point in time
1023 * or wait until the end of the world :)
1024 */
1025 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1026 if (ret < 0) {
1027 PERROR("write connection pipe");
1028 connection_put(new_conn);
1029 goto error;
1030 }
1031 } while (node != NULL);
1032
1033 /* Futex wait on queue. Blocking call on futex() */
1034 health_poll_entry();
1035 futex_nto1_wait(&relay_conn_queue.futex);
1036 health_poll_exit();
1037 }
1038
1039 /* Normal exit, no error */
1040 err = 0;
1041
1042 error:
1043 error_testpoint:
1044 if (err) {
1045 health_error();
1046 ERR("Health error occurred in %s", __func__);
1047 }
1048 health_unregister(health_relayd);
1049 DBG("Dispatch thread dying");
1050 lttng_relay_stop_threads();
1051 return NULL;
1052 }
1053
1054 static bool session_streams_have_index(const struct relay_session *session)
1055 {
1056 return session->minor >= 4 && !session->snapshot;
1057 }
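/*
 * Stream indexes only exist for peers speaking protocol 2.4 or newer,
 * and snapshot sessions never produce them, hence the two checks above.
 */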
1058
1059 /*
1060 * Handle the RELAYD_CREATE_SESSION command.
1061 *
1062 * On success, send back the session id or else return a negative value.
1063 */
1064 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1065 struct relay_connection *conn,
1066 const struct lttng_buffer_view *payload)
1067 {
1068 int ret = 0;
1069 ssize_t send_ret;
1070 struct relay_session *session = NULL;
1071 struct lttcomm_relayd_status_session reply = {};
1072 char session_name[LTTNG_NAME_MAX] = {};
1073 char hostname[LTTNG_HOST_NAME_MAX] = {};
1074 uint32_t live_timer = 0;
1075 bool snapshot = false;
1076 bool session_name_contains_creation_timestamp = false;
1077 /* Left nil for peers < 2.11. */
1078 char base_path[LTTNG_PATH_MAX] = {};
1079 lttng_uuid sessiond_uuid = {};
1080 LTTNG_OPTIONAL(uint64_t) id_sessiond = {};
1081 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1082 LTTNG_OPTIONAL(time_t) creation_time = {};
1083
1084 if (conn->minor < 4) {
1085 /* From 2.1 to 2.3 */
1086 ret = 0;
1087 } else if (conn->minor >= 4 && conn->minor < 11) {
1088 /* From 2.4 to 2.10 */
1089 ret = cmd_create_session_2_4(payload, session_name,
1090 hostname, &live_timer, &snapshot);
1091 } else {
1092 bool has_current_chunk;
1093 uint64_t current_chunk_id_value;
1094 time_t creation_time_value;
1095 uint64_t id_sessiond_value;
1096
1097 /* From 2.11 to ... */
1098 ret = cmd_create_session_2_11(payload, session_name, hostname,
1099 base_path, &live_timer, &snapshot, &id_sessiond_value,
1100 sessiond_uuid, &has_current_chunk,
1101 &current_chunk_id_value, &creation_time_value,
1102 &session_name_contains_creation_timestamp);
1103 if (lttng_uuid_is_nil(sessiond_uuid)) {
1104 /* The nil UUID is reserved for pre-2.11 clients. */
1105 ERR("Illegal nil UUID announced by peer in create session command");
1106 ret = -1;
1107 goto send_reply;
1108 }
1109 LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value);
1110 LTTNG_OPTIONAL_SET(&creation_time, creation_time_value);
1111 if (has_current_chunk) {
1112 LTTNG_OPTIONAL_SET(&current_chunk_id,
1113 current_chunk_id_value);
1114 }
1115 }
1116
1117 if (ret < 0) {
1118 goto send_reply;
1119 }
1120
1121 session = session_create(session_name, hostname, base_path, live_timer,
1122 snapshot, sessiond_uuid,
1123 id_sessiond.is_set ? &id_sessiond.value : NULL,
1124 current_chunk_id.is_set ? &current_chunk_id.value : NULL,
1125 creation_time.is_set ? &creation_time.value : NULL,
1126 conn->major, conn->minor,
1127 session_name_contains_creation_timestamp);
1128 if (!session) {
1129 ret = -1;
1130 goto send_reply;
1131 }
1132 assert(!conn->session);
1133 conn->session = session;
1134 DBG("Created session %" PRIu64, session->id);
1135
1136 reply.session_id = htobe64(session->id);
1137
1138 send_reply:
1139 if (ret < 0) {
1140 reply.ret_code = htobe32(LTTNG_ERR_FATAL);
1141 } else {
1142 reply.ret_code = htobe32(LTTNG_OK);
1143 }
1144
1145 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1146 if (send_ret < (ssize_t) sizeof(reply)) {
1147 ERR("Failed to send \"create session\" command reply (ret = %zd)",
1148 send_ret);
1149 ret = -1;
1150 }
1151 if (ret < 0 && session) {
1152 session_put(session);
1153 }
1154 return ret;
1155 }
1156
1157 /*
1158 * When we have received all the streams and the metadata for a channel,
1159 * we make them visible to the viewer threads.
1160 */
1161 static void publish_connection_local_streams(struct relay_connection *conn)
1162 {
1163 struct relay_stream *stream;
1164 struct relay_session *session = conn->session;
1165
1166 /*
1167 * We publish all streams belonging to a session atomically wrt
1168 * session lock.
1169 */
1170 pthread_mutex_lock(&session->lock);
1171 rcu_read_lock();
1172 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1173 recv_node) {
1174 stream_publish(stream);
1175 }
1176 rcu_read_unlock();
1177
1178 /*
1179 * Inform the viewer that there are new streams in the session.
1180 */
1181 if (session->viewer_attached) {
1182 uatomic_set(&session->new_streams, 1);
1183 }
1184 pthread_mutex_unlock(&session->lock);
1185 }
1186
1187 static int conform_channel_path(char *channel_path)
1188 {
1189 int ret = 0;
1190
1191 if (strstr(channel_path, "../")) {
1192 ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"",
1193 channel_path);
1194 ret = -1;
1195 goto end;
1196 }
1197
1198 if (*channel_path == '/') {
1199 const size_t len = strlen(channel_path);
1200
1201 /*
1202 * Channel paths from peers prior to 2.11 are expressed as an
1203 * absolute path that is, in reality, relative to the relay
1204 * daemon's output directory. Remove the leading slash so it
1205 * is correctly interpreted as a relative path later on.
1206 *
1207 * len (and not len - 1) is used to copy the trailing NULL.
1208 */
1209 memmove(channel_path, channel_path + 1, len);
1210 }
1211 end:
1212 return ret;
1213 }
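/*
 * For example (hypothetical input), a pre-2.11 peer announcing the
 * channel path "/myhost/mysession/ust/uid/1000/64-bit" is stored as
 * "myhost/mysession/ust/uid/1000/64-bit", relative to the relay
 * daemon's output directory.
 */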
1214
1215 /*
1216 * relay_add_stream: allocate a new stream for a session
1217 */
1218 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1219 struct relay_connection *conn,
1220 const struct lttng_buffer_view *payload)
1221 {
1222 int ret;
1223 ssize_t send_ret;
1224 struct relay_session *session = conn->session;
1225 struct relay_stream *stream = NULL;
1226 struct lttcomm_relayd_status_stream reply;
1227 struct ctf_trace *trace = NULL;
1228 uint64_t stream_handle = -1ULL;
1229 char *path_name = NULL, *channel_name = NULL;
1230 uint64_t tracefile_size = 0, tracefile_count = 0;
1231 LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {};
1232
1233 if (!session || !conn->version_check_done) {
1234 ERR("Trying to add a stream before version check");
1235 ret = -1;
1236 goto end_no_session;
1237 }
1238
1239 if (session->minor == 1) {
1240 /* For 2.1 */
1241 ret = cmd_recv_stream_2_1(payload, &path_name,
1242 &channel_name);
1243 } else if (session->minor > 1 && session->minor < 11) {
1244 /* From 2.2 to 2.10 */
1245 ret = cmd_recv_stream_2_2(payload, &path_name,
1246 &channel_name, &tracefile_size, &tracefile_count);
1247 } else {
1248 /* From 2.11 to ... */
1249 ret = cmd_recv_stream_2_11(payload, &path_name,
1250 &channel_name, &tracefile_size, &tracefile_count,
1251 &stream_chunk_id.value);
1252 stream_chunk_id.is_set = true;
1253 }
1254
1255 if (ret < 0) {
1256 goto send_reply;
1257 }
1258
1259 if (conform_channel_path(path_name)) {
1260 goto send_reply;
1261 }
1262
1263 trace = ctf_trace_get_by_path_or_create(session, path_name);
1264 if (!trace) {
1265 goto send_reply;
1266 }
1267 /* The stream created below will own this reference on the trace. */
1268
1269 pthread_mutex_lock(&last_relay_stream_id_lock);
1270 stream_handle = ++last_relay_stream_id;
1271 pthread_mutex_unlock(&last_relay_stream_id_lock);
1272
1273 /* We pass ownership of path_name and channel_name. */
1274 stream = stream_create(trace, stream_handle, path_name,
1275 channel_name, tracefile_size, tracefile_count);
1276 path_name = NULL;
1277 channel_name = NULL;
1278
1279 /*
1280 * Streams are the owners of their trace. Reference to trace is
1281 * kept within stream_create().
1282 */
1283 ctf_trace_put(trace);
1284
1285 send_reply:
1286 memset(&reply, 0, sizeof(reply));
1287 reply.handle = htobe64(stream_handle);
1288 if (!stream) {
1289 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1290 } else {
1291 reply.ret_code = htobe32(LTTNG_OK);
1292 }
1293
1294 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1295 sizeof(struct lttcomm_relayd_status_stream), 0);
1296 if (send_ret < (ssize_t) sizeof(reply)) {
1297 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1298 send_ret);
1299 ret = -1;
1300 }
1301
1302 end_no_session:
1303 free(path_name);
1304 free(channel_name);
1305 return ret;
1306 }
1307
1308 /*
1309 * relay_close_stream: close a specific stream
1310 */
1311 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1312 struct relay_connection *conn,
1313 const struct lttng_buffer_view *payload)
1314 {
1315 int ret;
1316 ssize_t send_ret;
1317 struct relay_session *session = conn->session;
1318 struct lttcomm_relayd_close_stream stream_info;
1319 struct lttcomm_relayd_generic_reply reply;
1320 struct relay_stream *stream;
1321
1322 DBG("Close stream received");
1323
1324 if (!session || !conn->version_check_done) {
1325 ERR("Trying to close a stream before version check");
1326 ret = -1;
1327 goto end_no_session;
1328 }
1329
1330 if (payload->size < sizeof(stream_info)) {
1331 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1332 sizeof(stream_info), payload->size);
1333 ret = -1;
1334 goto end_no_session;
1335 }
1336 memcpy(&stream_info, payload->data, sizeof(stream_info));
1337 stream_info.stream_id = be64toh(stream_info.stream_id);
1338 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1339
1340 stream = stream_get_by_id(stream_info.stream_id);
1341 if (!stream) {
1342 ret = -1;
1343 goto end;
1344 }
1345
1346 /*
1347 * Set last_net_seq_num before the close flag. Required by data
1348 * pending check.
1349 */
1350 pthread_mutex_lock(&stream->lock);
1351 stream->last_net_seq_num = stream_info.last_net_seq_num;
1352 pthread_mutex_unlock(&stream->lock);
1353
1354 /*
1355 * This is one of the conditions which may trigger a stream close
1356 * with the others being:
1357 * 1) A close command is received for a stream
1358 * 2) The control connection owning the stream is closed
1359 * 3) We have received all of the stream's data _after_ a close
1360 * request.
1361 */
1362 try_stream_close(stream);
1363 if (stream->is_metadata) {
1364 struct relay_viewer_stream *vstream;
1365
1366 vstream = viewer_stream_get_by_id(stream->stream_handle);
1367 if (vstream) {
1368 if (vstream->metadata_sent == stream->metadata_received) {
1369 /*
1370 * Since all the metadata has been sent to the
1371 * viewer and that we have a request to close
1372 * its stream, we can safely teardown the
1373 * corresponding metadata viewer stream.
1374 */
1375 viewer_stream_put(vstream);
1376 }
1377 /* Put local reference. */
1378 viewer_stream_put(vstream);
1379 }
1380 }
1381 stream_put(stream);
1382 ret = 0;
1383
1384 end:
1385 memset(&reply, 0, sizeof(reply));
1386 if (ret < 0) {
1387 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1388 } else {
1389 reply.ret_code = htobe32(LTTNG_OK);
1390 }
1391 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1392 sizeof(struct lttcomm_relayd_generic_reply), 0);
1393 if (send_ret < (ssize_t) sizeof(reply)) {
1394 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1395 send_ret);
1396 ret = -1;
1397 }
1398
1399 end_no_session:
1400 return ret;
1401 }
1402
1403 /*
1404 * relay_reset_metadata: reset a metadata stream
1405 */
1406 static
1407 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1408 struct relay_connection *conn,
1409 const struct lttng_buffer_view *payload)
1410 {
1411 int ret;
1412 ssize_t send_ret;
1413 struct relay_session *session = conn->session;
1414 struct lttcomm_relayd_reset_metadata stream_info;
1415 struct lttcomm_relayd_generic_reply reply;
1416 struct relay_stream *stream;
1417
1418 DBG("Reset metadata received");
1419
1420 if (!session || !conn->version_check_done) {
1421 ERR("Trying to reset a metadata stream before version check");
1422 ret = -1;
1423 goto end_no_session;
1424 }
1425
1426 if (payload->size < sizeof(stream_info)) {
1427 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1428 sizeof(stream_info), payload->size);
1429 ret = -1;
1430 goto end_no_session;
1431 }
1432 memcpy(&stream_info, payload->data, sizeof(stream_info));
1433 stream_info.stream_id = be64toh(stream_info.stream_id);
1434 stream_info.version = be64toh(stream_info.version);
1435
1436 DBG("Update metadata to version %" PRIu64, stream_info.version);
1437
1438 /* Unsupported for live sessions for now. */
1439 if (session->live_timer != 0) {
1440 ret = -1;
1441 goto end;
1442 }
1443
1444 stream = stream_get_by_id(stream_info.stream_id);
1445 if (!stream) {
1446 ret = -1;
1447 goto end;
1448 }
1449 pthread_mutex_lock(&stream->lock);
1450 if (!stream->is_metadata) {
1451 ret = -1;
1452 goto end_unlock;
1453 }
1454
1455 ret = stream_reset_file(stream);
1456 if (ret < 0) {
1457 ERR("Failed to reset metadata stream %" PRIu64
1458 ": stream_path = %s, channel = %s",
1459 stream->stream_handle, stream->path_name,
1460 stream->channel_name);
1461 goto end_unlock;
1462 }
1463 end_unlock:
1464 pthread_mutex_unlock(&stream->lock);
1465 stream_put(stream);
1466
1467 end:
1468 memset(&reply, 0, sizeof(reply));
1469 if (ret < 0) {
1470 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1471 } else {
1472 reply.ret_code = htobe32(LTTNG_OK);
1473 }
1474 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1475 sizeof(struct lttcomm_relayd_generic_reply), 0);
1476 if (send_ret < (ssize_t) sizeof(reply)) {
1477 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1478 send_ret);
1479 ret = -1;
1480 }
1481
1482 end_no_session:
1483 return ret;
1484 }
1485
1486 /*
1487 * relay_unknown_command: send -1 if received unknown command
1488 */
1489 static void relay_unknown_command(struct relay_connection *conn)
1490 {
1491 struct lttcomm_relayd_generic_reply reply;
1492 ssize_t send_ret;
1493
1494 memset(&reply, 0, sizeof(reply));
1495 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1496 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1497 if (send_ret < (ssize_t) sizeof(reply)) {
1498 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1499 }
1500 }
1501
1502 /*
1503 * relay_start: send an acknowledgment to the client to tell if we are
1504 * ready to receive data. We are ready if a session is established.
1505 */
1506 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1507 struct relay_connection *conn,
1508 const struct lttng_buffer_view *payload)
1509 {
1510 int ret = 0;
1511 ssize_t send_ret;
1512 struct lttcomm_relayd_generic_reply reply;
1513 struct relay_session *session = conn->session;
1514
1515 if (!session) {
1516 DBG("Trying to start the streaming without a session established");
1517 ret = -1;
1518 }
1519 
1520 memset(&reply, 0, sizeof(reply));
1521 reply.ret_code = ret < 0 ? htobe32(LTTNG_ERR_UNK) : htobe32(LTTNG_OK);
1522 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1523 sizeof(reply), 0);
1524 if (send_ret < (ssize_t) sizeof(reply)) {
1525 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1526 send_ret);
1527 ret = -1;
1528 }
1529
1530 return ret;
1531 }
1532
1533 /*
1534 * relay_recv_metadata: receive the metadata for the session.
1535 */
1536 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1537 struct relay_connection *conn,
1538 const struct lttng_buffer_view *payload)
1539 {
1540 int ret = 0;
1541 struct relay_session *session = conn->session;
1542 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1543 struct relay_stream *metadata_stream;
1544 uint64_t metadata_payload_size;
1545 struct lttng_buffer_view packet_view;
1546
1547 if (!session) {
1548 ERR("Metadata sent before version check");
1549 ret = -1;
1550 goto end;
1551 }
1552
1553 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1554 ERR("Incorrect data size");
1555 ret = -1;
1556 goto end;
1557 }
1558 metadata_payload_size = recv_hdr->data_size -
1559 sizeof(struct lttcomm_relayd_metadata_payload);
1560
1561 memcpy(&metadata_payload_header, payload->data,
1562 sizeof(metadata_payload_header));
1563 metadata_payload_header.stream_id = be64toh(
1564 metadata_payload_header.stream_id);
1565 metadata_payload_header.padding_size = be32toh(
1566 metadata_payload_header.padding_size);
1567
1568 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1569 if (!metadata_stream) {
1570 ret = -1;
1571 goto end;
1572 }
1573
1574 packet_view = lttng_buffer_view_from_view(payload,
1575 sizeof(metadata_payload_header), metadata_payload_size);
1576 if (!packet_view.data) {
1577 ERR("Invalid metadata packet length announced by header");
1578 ret = -1;
1579 goto end_put;
1580 }
1581
1582 pthread_mutex_lock(&metadata_stream->lock);
1583 ret = stream_write(metadata_stream, &packet_view,
1584 metadata_payload_header.padding_size);
1585 pthread_mutex_unlock(&metadata_stream->lock);
1586 if (ret) {
1587 ret = -1;
1588 goto end_put;
1589 }
1590 end_put:
1591 stream_put(metadata_stream);
1592 end:
1593 return ret;
1594 }
1595
1596 /*
1597 * relay_send_version: send relayd version number
1598 */
1599 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1600 struct relay_connection *conn,
1601 const struct lttng_buffer_view *payload)
1602 {
1603 int ret;
1604 ssize_t send_ret;
1605 struct lttcomm_relayd_version reply, msg;
1606 bool compatible = true;
1607
1608 conn->version_check_done = true;
1609
1610 /* Get version from the other side. */
1611 if (payload->size < sizeof(msg)) {
1612 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1613 sizeof(msg), payload->size);
1614 ret = -1;
1615 goto end;
1616 }
1617
1618 memcpy(&msg, payload->data, sizeof(msg));
1619 msg.major = be32toh(msg.major);
1620 msg.minor = be32toh(msg.minor);
1621
1622 memset(&reply, 0, sizeof(reply));
1623 reply.major = RELAYD_VERSION_COMM_MAJOR;
1624 reply.minor = RELAYD_VERSION_COMM_MINOR;
1625
1626 /* Major versions must be the same */
1627 if (reply.major != msg.major) {
1628 DBG("Incompatible major versions (%u vs %u), deleting session",
1629 reply.major, msg.major);
1630 compatible = false;
1631 }
1632
1633 conn->major = reply.major;
1634 /* We adapt to the lowest compatible version */
1635 if (reply.minor <= msg.minor) {
1636 conn->minor = reply.minor;
1637 } else {
1638 conn->minor = msg.minor;
1639 }
1640
1641 reply.major = htobe32(reply.major);
1642 reply.minor = htobe32(reply.minor);
1643 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1644 sizeof(reply), 0);
1645 if (send_ret < (ssize_t) sizeof(reply)) {
1646 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1647 send_ret);
1648 ret = -1;
1649 goto end;
1650 } else {
1651 ret = 0;
1652 }
1653
1654 if (!compatible) {
1655 ret = -1;
1656 goto end;
1657 }
1658
1659 DBG("Version check done using protocol %u.%u", conn->major,
1660 conn->minor);
1661
1662 end:
1663 return ret;
1664 }
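/*
 * Concretely, a peer announcing the same major but a newer minor than
 * ours is negotiated down to our minor, a peer with an older minor
 * drives the connection down to its minor, and a differing major makes
 * this command fail right after the version reply is sent.
 */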
1665
1666 /*
1667 * Check for data pending for a given stream id from the session daemon.
1668 */
1669 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1670 struct relay_connection *conn,
1671 const struct lttng_buffer_view *payload)
1672 {
1673 struct relay_session *session = conn->session;
1674 struct lttcomm_relayd_data_pending msg;
1675 struct lttcomm_relayd_generic_reply reply;
1676 struct relay_stream *stream;
1677 ssize_t send_ret;
1678 int ret;
1679 uint64_t stream_seq;
1680
1681 DBG("Data pending command received");
1682
1683 if (!session || !conn->version_check_done) {
1684 ERR("Trying to check for data before version check");
1685 ret = -1;
1686 goto end_no_session;
1687 }
1688
1689 if (payload->size < sizeof(msg)) {
1690 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
1691 sizeof(msg), payload->size);
1692 ret = -1;
1693 goto end_no_session;
1694 }
1695 memcpy(&msg, payload->data, sizeof(msg));
1696 msg.stream_id = be64toh(msg.stream_id);
1697 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
1698
1699 stream = stream_get_by_id(msg.stream_id);
1700 if (stream == NULL) {
1701 ret = -1;
1702 goto end;
1703 }
1704
1705 pthread_mutex_lock(&stream->lock);
1706
1707 if (session_streams_have_index(session)) {
1708 /*
1709 * Ensure that both the index and stream data have been
1710 * flushed up to the requested point.
1711 */
1712 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
1713 } else {
1714 stream_seq = stream->prev_data_seq;
1715 }
1716 DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
1717 ", prev_index_seq %" PRIu64
1718 ", and last_seq %" PRIu64, msg.stream_id,
1719 stream->prev_data_seq, stream->prev_index_seq,
1720 msg.last_net_seq_num);
1721
1722 /* Avoid wrapping issue */
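/*
 * The difference is evaluated as a signed 64-bit value so the test
 * still gives the right answer if the sequence counters ever wrap
 * around (serial number arithmetic, in the spirit of RFC 1982).
 */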
1723 if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
1724 /* Data has in fact been written and is NOT pending */
1725 ret = 0;
1726 } else {
1727 /* Data still being streamed thus pending */
1728 ret = 1;
1729 }
1730
1731 stream->data_pending_check_done = true;
1732 pthread_mutex_unlock(&stream->lock);
1733
1734 stream_put(stream);
1735 end:
1736
1737 memset(&reply, 0, sizeof(reply));
1738 reply.ret_code = htobe32(ret);
1739 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1740 if (send_ret < (ssize_t) sizeof(reply)) {
1741 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
1742 send_ret);
1743 ret = -1;
1744 }
1745
1746 end_no_session:
1747 return ret;
1748 }
1749
1750 /*
1751 * Wait for the control socket to reach a quiescent state.
1752 *
1753 * Note that for now, when receiving this command from the session
1754 * daemon, this means that every subsequent commands or data received on
1755 * the control socket has been handled. So, this is why we simply return
1756 * OK here.
1757 */
1758 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
1759 struct relay_connection *conn,
1760 const struct lttng_buffer_view *payload)
1761 {
1762 int ret;
1763 ssize_t send_ret;
1764 struct relay_stream *stream;
1765 struct lttcomm_relayd_quiescent_control msg;
1766 struct lttcomm_relayd_generic_reply reply;
1767
1768 DBG("Checking quiescent state on control socket");
1769
1770 if (!conn->session || !conn->version_check_done) {
1771 ERR("Trying to check for data before version check");
1772 ret = -1;
1773 goto end_no_session;
1774 }
1775
1776 if (payload->size < sizeof(msg)) {
1777 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
1778 sizeof(msg), payload->size);
1779 ret = -1;
1780 goto end_no_session;
1781 }
1782 memcpy(&msg, payload->data, sizeof(msg));
1783 msg.stream_id = be64toh(msg.stream_id);
1784
1785 stream = stream_get_by_id(msg.stream_id);
1786 if (!stream) {
1787 goto reply;
1788 }
1789 pthread_mutex_lock(&stream->lock);
1790 stream->data_pending_check_done = true;
1791 pthread_mutex_unlock(&stream->lock);
1792
1793 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
1794 stream_put(stream);
1795 reply:
1796 memset(&reply, 0, sizeof(reply));
1797 reply.ret_code = htobe32(LTTNG_OK);
1798 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1799 if (send_ret < (ssize_t) sizeof(reply)) {
1800 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
1801 send_ret);
1802 ret = -1;
1803 } else {
1804 ret = 0;
1805 }
1806
1807 end_no_session:
1808 return ret;
1809 }
1810
1811 /*
1812 * Initialize a data pending command. This means that a consumer is about
1813 * to ask for data pending for each stream it holds. Simply iterate over
1814 * all streams of a session and set the data_pending_check_done flag.
1815 *
1816 * This command returns to the client a LTTNG_OK code.
1817 */
1818 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1819 struct relay_connection *conn,
1820 const struct lttng_buffer_view *payload)
1821 {
1822 int ret;
1823 ssize_t send_ret;
1824 struct lttng_ht_iter iter;
1825 struct lttcomm_relayd_begin_data_pending msg;
1826 struct lttcomm_relayd_generic_reply reply;
1827 struct relay_stream *stream;
1828
1829 assert(recv_hdr);
1830 assert(conn);
1831
1832 DBG("Init streams for data pending");
1833
1834 if (!conn->session || !conn->version_check_done) {
1835 ERR("Trying to check for data before version check");
1836 ret = -1;
1837 goto end_no_session;
1838 }
1839
1840 if (payload->size < sizeof(msg)) {
1841 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
1842 sizeof(msg), payload->size);
1843 ret = -1;
1844 goto end_no_session;
1845 }
1846 memcpy(&msg, payload->data, sizeof(msg));
1847 msg.session_id = be64toh(msg.session_id);
1848
1849 /*
1850 * Iterate over all streams to set the begin data pending flag.
1851 * For now, the streams are indexed by stream handle so we have
1852 * to iterate over all streams to find the one associated with
1853 * the right session_id.
1854 */
1855 rcu_read_lock();
1856 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1857 node.node) {
1858 if (!stream_get(stream)) {
1859 continue;
1860 }
1861 if (stream->trace->session->id == msg.session_id) {
1862 pthread_mutex_lock(&stream->lock);
1863 stream->data_pending_check_done = false;
1864 pthread_mutex_unlock(&stream->lock);
1865 DBG("Set begin data pending flag to stream %" PRIu64,
1866 stream->stream_handle);
1867 }
1868 stream_put(stream);
1869 }
1870 rcu_read_unlock();
1871
1872 memset(&reply, 0, sizeof(reply));
1873 /* All good, send back reply. */
1874 reply.ret_code = htobe32(LTTNG_OK);
1875
1876 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1877 if (send_ret < (ssize_t) sizeof(reply)) {
1878 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
1879 send_ret);
1880 ret = -1;
1881 } else {
1882 ret = 0;
1883 }
1884
1885 end_no_session:
1886 return ret;
1887 }
1888
1889 /*
1890 * End data pending command. This will check, for a given session id, if
1891 * each stream associated with it has its data_pending_check_done flag
1892 * set. If not, this means that the client lost track of the stream but
1893 * the data is still being streamed on our side. In this case, we inform
1894 * the client that data is in flight.
1895 *
1896 * Return to the client if there is data in flight or not with a ret_code.
1897 */
1898 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1899 struct relay_connection *conn,
1900 const struct lttng_buffer_view *payload)
1901 {
1902 int ret;
1903 ssize_t send_ret;
1904 struct lttng_ht_iter iter;
1905 struct lttcomm_relayd_end_data_pending msg;
1906 struct lttcomm_relayd_generic_reply reply;
1907 struct relay_stream *stream;
1908 uint32_t is_data_inflight = 0;
1909
1910 DBG("End data pending command");
1911
1912 if (!conn->session || !conn->version_check_done) {
1913 ERR("Trying to check for data before version check");
1914 ret = -1;
1915 goto end_no_session;
1916 }
1917
1918 if (payload->size < sizeof(msg)) {
1919 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
1920 sizeof(msg), payload->size);
1921 ret = -1;
1922 goto end_no_session;
1923 }
1924 memcpy(&msg, payload->data, sizeof(msg));
1925 msg.session_id = be64toh(msg.session_id);
1926
1927 /*
1928 * Iterate over all streams to see if the begin data pending
1929 * flag is set.
1930 */
1931 rcu_read_lock();
1932 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1933 node.node) {
1934 if (!stream_get(stream)) {
1935 continue;
1936 }
1937 if (stream->trace->session->id != msg.session_id) {
1938 stream_put(stream);
1939 continue;
1940 }
1941 pthread_mutex_lock(&stream->lock);
1942 if (!stream->data_pending_check_done) {
1943 uint64_t stream_seq;
1944
1945 if (session_streams_have_index(conn->session)) {
1946 /*
1947 * Ensure that both the index and stream data have been
1948 * flushed up to the requested point.
1949 */
1950 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
1951 } else {
1952 stream_seq = stream->prev_data_seq;
1953 }
1954 if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
1955 is_data_inflight = 1;
1956 DBG("Data is still in flight for stream %" PRIu64,
1957 stream->stream_handle);
1958 pthread_mutex_unlock(&stream->lock);
1959 stream_put(stream);
1960 break;
1961 }
1962 }
1963 pthread_mutex_unlock(&stream->lock);
1964 stream_put(stream);
1965 }
1966 rcu_read_unlock();
1967
1968 memset(&reply, 0, sizeof(reply));
1969 /* All good, send back reply. */
1970 reply.ret_code = htobe32(is_data_inflight);
1971
1972 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1973 if (send_ret < (ssize_t) sizeof(reply)) {
1974 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
1975 send_ret);
1976 ret = -1;
1977 } else {
1978 ret = 0;
1979 }
1980
1981 end_no_session:
1982 return ret;
1983 }
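/*
 * Taken together, the begin/check/end data pending commands let the
 * consumer daemon poll this relay daemon, typically while a tracing
 * session is being stopped, until every stream of the session has been
 * flushed up to the last sequence number announced for it.
 */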
1984
1985 /*
1986 * Receive an index for a specific stream.
1987 *
1988 * Return 0 on success else a negative value.
1989 */
1990 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
1991 struct relay_connection *conn,
1992 const struct lttng_buffer_view *payload)
1993 {
1994 int ret;
1995 ssize_t send_ret;
1996 struct relay_session *session = conn->session;
1997 struct lttcomm_relayd_index index_info;
1998 struct lttcomm_relayd_generic_reply reply;
1999 struct relay_stream *stream;
2000 size_t msg_len;
2001
2002 assert(conn);
2003
2004 DBG("Relay receiving index");
2005
2006 if (!session || !conn->version_check_done) {
2007 ERR("Trying to receive an index before version check");
2008 ret = -1;
2009 goto end_no_session;
2010 }
2011
2012 msg_len = lttcomm_relayd_index_len(
2013 lttng_to_index_major(conn->major, conn->minor),
2014 lttng_to_index_minor(conn->major, conn->minor));
2015 if (payload->size < msg_len) {
2016 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2017 msg_len, payload->size);
2018 ret = -1;
2019 goto end_no_session;
2020 }
2021 memcpy(&index_info, payload->data, msg_len);
2022 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2023 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2024 index_info.packet_size = be64toh(index_info.packet_size);
2025 index_info.content_size = be64toh(index_info.content_size);
2026 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2027 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2028 index_info.events_discarded = be64toh(index_info.events_discarded);
2029 index_info.stream_id = be64toh(index_info.stream_id);
2030
2031 if (conn->minor >= 8) {
2032 index_info.stream_instance_id =
2033 be64toh(index_info.stream_instance_id);
2034 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2035 }
2036
2037 stream = stream_get_by_id(index_info.relay_stream_id);
2038 if (!stream) {
2039 ERR("stream_get_by_id not found");
2040 ret = -1;
2041 goto end;
2042 }
2043
2044 pthread_mutex_lock(&stream->lock);
2045 ret = stream_add_index(stream, &index_info);
2046 pthread_mutex_unlock(&stream->lock);
2047 if (ret) {
2048 goto end_stream_put;
2049 }
2050
2051 end_stream_put:
2052 stream_put(stream);
2053 end:
2054 memset(&reply, 0, sizeof(reply));
2055 if (ret < 0) {
2056 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2057 } else {
2058 reply.ret_code = htobe32(LTTNG_OK);
2059 }
2060 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2061 if (send_ret < (ssize_t) sizeof(reply)) {
2062 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2063 ret = -1;
2064 }
2065
2066 end_no_session:
2067 return ret;
2068 }
2069
2070 /*
2071 * Receive the streams_sent message.
2072 *
2073 * Return 0 on success else a negative value.
2074 */
2075 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2076 struct relay_connection *conn,
2077 const struct lttng_buffer_view *payload)
2078 {
2079 int ret;
2080 ssize_t send_ret;
2081 struct lttcomm_relayd_generic_reply reply;
2082
2083 assert(conn);
2084
2085 DBG("Relay receiving streams_sent");
2086
2087 if (!conn->session || !conn->version_check_done) {
2088 ERR("Trying to send streams before version check");
2089 ret = -1;
2090 goto end_no_session;
2091 }
2092
2093 /*
2094 * Publish every pending stream in the connection recv list which are
2095 * now ready to be used by the viewer.
2096 */
2097 publish_connection_local_streams(conn);
2098
2099 memset(&reply, 0, sizeof(reply));
2100 reply.ret_code = htobe32(LTTNG_OK);
2101 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2102 if (send_ret < (ssize_t) sizeof(reply)) {
2103 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2104 send_ret);
2105 ret = -1;
2106 } else {
2107 /* Success. */
2108 ret = 0;
2109 }
2110
2111 end_no_session:
2112 return ret;
2113 }
2114
2115 /*
2116 * relay_rotate_session_streams: rotate the streams of a session to a new
2117 * trace chunk (session rotation feature, not the tracefile rotation feature).
2118 */
2119 static int relay_rotate_session_streams(
2120 const struct lttcomm_relayd_hdr *recv_hdr,
2121 struct relay_connection *conn,
2122 const struct lttng_buffer_view *payload)
2123 {
2124 int ret = 0;
2125 uint32_t i;
2126 ssize_t send_ret;
2127 enum lttng_error_code reply_code = LTTNG_ERR_UNK;
2128 struct relay_session *session = conn->session;
2129 struct lttcomm_relayd_rotate_streams rotate_streams;
2130 struct lttcomm_relayd_generic_reply reply = {};
2131 struct relay_stream *stream = NULL;
2132 const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams);
2133 struct lttng_trace_chunk *next_trace_chunk = NULL;
2134 struct lttng_buffer_view stream_positions;
2135 char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
2136 const char *chunk_id_str = "none";
2137
2138 if (!session || !conn->version_check_done) {
2139 ERR("Trying to rotate a stream before version check");
2140 ret = -1;
2141 goto end_no_reply;
2142 }
2143
2144 if (session->major == 2 && session->minor < 11) {
2145 ERR("Stream rotation command is unsupported before 2.11");
2146 ret = -1;
2147 goto end_no_reply;
2148 }
2149
2150 if (payload->size < header_len) {
2151 ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
2152 header_len, payload->size);
2153 ret = -1;
2154 goto end_no_reply;
2155 }
2156
2157 memcpy(&rotate_streams, payload->data, header_len);
2158
2159 /* Convert header to host endianness. */
2160 rotate_streams = (typeof(rotate_streams)) {
2161 .stream_count = be32toh(rotate_streams.stream_count),
2162 .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) {
2163 .is_set = !!rotate_streams.new_chunk_id.is_set,
2164 .value = be64toh(rotate_streams.new_chunk_id.value),
2165 }
2166 };
2167
2168 if (rotate_streams.new_chunk_id.is_set) {
2169 /*
2170 * Retrieve the trace chunk the stream must transition to. As
2171 * per the protocol, this chunk should have been created
2172 * before this command is received.
2173 */
2174 next_trace_chunk = sessiond_trace_chunk_registry_get_chunk(
2175 sessiond_trace_chunk_registry,
2176 session->sessiond_uuid, session->id,
2177 rotate_streams.new_chunk_id.value);
2178 if (!next_trace_chunk) {
2179 char uuid_str[UUID_STR_LEN];
2180
2181 lttng_uuid_to_str(session->sessiond_uuid, uuid_str);
2182 ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64
2183 ", trace_chunk_id = %" PRIu64,
2184 uuid_str, session->id,
2185 rotate_streams.new_chunk_id.value);
2186 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2187 ret = -1;
2188 goto end;
2189 }
2190
2191 ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64,
2192 rotate_streams.new_chunk_id.value);
2193 if (ret < 0 || ret >= sizeof(chunk_id_buf)) {
2194 chunk_id_str = "formatting error";
2195 } else {
2196 chunk_id_str = chunk_id_buf;
2197 }
2198 }
2199
2200 DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"",
2201 rotate_streams.stream_count, session->session_name,
2202 chunk_id_str);
2203
2204 stream_positions = lttng_buffer_view_from_view(payload,
2205 sizeof(rotate_streams), -1);
2206 if (!stream_positions.data ||
2207 stream_positions.size <
2208 (rotate_streams.stream_count *
2209 sizeof(struct lttcomm_relayd_stream_rotation_position))) {
2210 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2211 ret = -1;
2212 goto end;
2213 }
2214
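/*
 * The fixed-size header is followed by stream_count
 * lttcomm_relayd_stream_rotation_position entries, each encoded in
 * big-endian byte order.
 */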
2215 for (i = 0; i < rotate_streams.stream_count; i++) {
2216 struct lttcomm_relayd_stream_rotation_position *position_comm =
2217 &((typeof(position_comm)) stream_positions.data)[i];
2218 const struct lttcomm_relayd_stream_rotation_position pos = {
2219 .stream_id = be64toh(position_comm->stream_id),
2220 .rotate_at_seq_num = be64toh(
2221 position_comm->rotate_at_seq_num),
2222 };
2223
2224 stream = stream_get_by_id(pos.stream_id);
2225 if (!stream) {
2226 reply_code = LTTNG_ERR_INVALID;
2227 ret = -1;
2228 goto end;
2229 }
2230
2231 pthread_mutex_lock(&stream->lock);
2232 ret = stream_set_pending_rotation(stream, next_trace_chunk,
2233 pos.rotate_at_seq_num);
2234 pthread_mutex_unlock(&stream->lock);
2235 if (ret) {
2236 reply_code = LTTNG_ERR_FILE_CREATION_ERROR;
2237 goto end;
2238 }
2239
2240 stream_put(stream);
2241 stream = NULL;
2242 }
2243
2244 reply_code = LTTNG_OK;
2245 end:
2246 if (stream) {
2247 stream_put(stream);
2248 }
2249
2250 reply.ret_code = htobe32((uint32_t) reply_code);
2251 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2252 sizeof(struct lttcomm_relayd_generic_reply), 0);
2253 if (send_ret < (ssize_t) sizeof(reply)) {
2254 ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
2255 send_ret);
2256 ret = -1;
2257 } else {
2258 ret = 0;
2259 }
2260 end_no_reply:
2261 lttng_trace_chunk_put(next_trace_chunk);
2262 return ret;
2263 }
2264
2265 static int init_session_output_directory_handle(struct relay_session *session,
2266 struct lttng_directory_handle *handle)
2267 {
2268 int ret;
2269 /*
2270 * session_directory:
2271 *
2272 * if base_path is '\0'
2273 * hostname/session_name
2274 * else
2275 * hostname/base_path
2276 */
2277 char *session_directory = NULL;
2278 /*
2279 * relayd_output_path/session_directory
2280 * e.g. /home/user/lttng-traces/hostname/session_name
2281 */
2282 char *full_session_path = NULL;
2283
2284 /*
2285 * If base path is set, it overrides the session name for the
2286 * session relative base path. No timestamp is appended if the
2287 * base path is overridden.
2288 *
2289 * If the session name already contains the creation time (e.g.
2290 * auto-<timestamp>), don't append yet another timestamp after
2291 * the session name in the generated path.
2292 *
2293 * Otherwise, generate the path with session_name-<timestamp>.
2294 */
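/*
 * For example (hypothetical values): with base_path "my-base" this
 * yields "myhost/my-base"; with an auto-generated session name it
 * yields "myhost/auto-20200101-120000"; otherwise it yields
 * "myhost/my-session-20200101-120000".
 */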
2295 if (session->base_path[0] != '\0') {
2296 pthread_mutex_lock(&session->lock);
2297 ret = asprintf(&session_directory, "%s/%s", session->hostname,
2298 session->base_path);
2299 pthread_mutex_unlock(&session->lock);
2300 } else if (session->session_name_contains_creation_time) {
2301 pthread_mutex_lock(&session->lock);
2302 ret = asprintf(&session_directory, "%s/%s", session->hostname,
2303 session->session_name);
2304 pthread_mutex_unlock(&session->lock);
2305 } else {
2306 char session_creation_datetime[16];
2307 size_t strftime_ret;
2308 struct tm *timeinfo;
2309 time_t creation_time;
2310
2311 /*
2312 * The 2.11+ protocol guarantees that a creation time
2313 * is provided for a session. Its absence indicates a
2314 * protocol error or an improper use of this utility.
2315 */
2316 if (!session->creation_time.is_set) {
2317 ERR("Creation time missing for session \"%s\" (protocol error)",
2318 session->session_name);
2319 ret = -1;
2320 goto end;
2321 }
2322 creation_time = LTTNG_OPTIONAL_GET(session->creation_time);
2323
2324 timeinfo = localtime(&creation_time);
2325 if (!timeinfo) {
2326 ERR("Failed to get timeinfo while initializing session output directory handle");
2327 ret = -1;
2328 goto end;
2329 }
2330 strftime_ret = strftime(session_creation_datetime,
2331 sizeof(session_creation_datetime),
2332 "%Y%m%d-%H%M%S", timeinfo);
2333 if (strftime_ret == 0) {
2334 ERR("Failed to format session creation timestamp while initializing session output directory handle");
2335 ret = -1;
2336 goto end;
2337 }
2338 pthread_mutex_lock(&session->lock);
2339 ret = asprintf(&session_directory, "%s/%s-%s",
2340 session->hostname, session->session_name,
2341 session_creation_datetime);
2342 pthread_mutex_unlock(&session->lock);
2343 }
2344 if (ret < 0) {
2345 PERROR("Failed to format session directory name");
2346 goto end;
2347 }
2348
2349 full_session_path = create_output_path(session_directory);
2350 if (!full_session_path) {
2351 ret = -1;
2352 goto end;
2353 }
2354
2355 ret = utils_mkdir_recursive(
2356 full_session_path, S_IRWXU | S_IRWXG, -1, -1);
2357 if (ret) {
2358 ERR("Failed to create session output path \"%s\"",
2359 full_session_path);
2360 goto end;
2361 }
2362
2363 ret = lttng_directory_handle_init(handle, full_session_path);
2364 if (ret) {
2365 goto end;
2366 }
2367 end:
2368 free(session_directory);
2369 free(full_session_path);
2370 return ret;
2371 }
2372
2373 /*
2374 * relay_create_trace_chunk: create a new trace chunk
2375 */
2376 static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2377 struct relay_connection *conn,
2378 const struct lttng_buffer_view *payload)
2379 {
2380 int ret = 0;
2381 ssize_t send_ret;
2382 struct relay_session *session = conn->session;
2383 struct lttcomm_relayd_create_trace_chunk *msg;
2384 struct lttcomm_relayd_generic_reply reply = {};
2385 struct lttng_buffer_view header_view;
2386 struct lttng_buffer_view chunk_name_view;
2387 struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL;
2388 enum lttng_error_code reply_code = LTTNG_OK;
2389 enum lttng_trace_chunk_status chunk_status;
2390 struct lttng_directory_handle session_output;
bool session_output_init = false;
2391
2392 if (!session || !conn->version_check_done) {
2393 ERR("Trying to create a trace chunk before version check");
2394 ret = -1;
2395 goto end_no_reply;
2396 }
2397
2398 if (session->major == 2 && session->minor < 11) {
2399 ERR("Chunk creation command is unsupported before 2.11");
2400 ret = -1;
2401 goto end_no_reply;
2402 }
2403
2404 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2405 if (!header_view.data) {
2406 ERR("Failed to receive payload of chunk creation command");
2407 ret = -1;
2408 goto end_no_reply;
2409 }
2410
2411 /* Convert to host endianness. */
2412 msg = (typeof(msg)) header_view.data;
2413 msg->chunk_id = be64toh(msg->chunk_id);
2414 msg->creation_timestamp = be64toh(msg->creation_timestamp);
2415 msg->override_name_length = be32toh(msg->override_name_length);
2416
2417 chunk = lttng_trace_chunk_create(
2418 msg->chunk_id, msg->creation_timestamp);
2419 if (!chunk) {
2420 ERR("Failed to create trace chunk in trace chunk creation command");
2421 ret = -1;
2422 reply_code = LTTNG_ERR_NOMEM;
2423 goto end;
2424 }
2425
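/*
 * The optional chunk name immediately follows the header; its
 * advertised length includes the null terminator, which the check on
 * its last byte below enforces.
 */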
2426 if (msg->override_name_length) {
2427 const char *name;
2428
2429 chunk_name_view = lttng_buffer_view_from_view(payload,
2430 sizeof(*msg),
2431 msg->override_name_length);
2432 name = chunk_name_view.data;
2433 if (!name || name[msg->override_name_length - 1]) {
2434 ERR("Failed to receive payload of chunk creation command");
2435 ret = -1;
2436 reply_code = LTTNG_ERR_INVALID;
2437 goto end;
2438 }
2439
2440 chunk_status = lttng_trace_chunk_override_name(
2441 chunk, chunk_name_view.data);
2442 switch (chunk_status) {
2443 case LTTNG_TRACE_CHUNK_STATUS_OK:
2444 break;
2445 case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT:
2446 ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)");
2447 reply_code = LTTNG_ERR_INVALID;
2448 ret = -1;
2449 goto end;
2450 default:
2451 ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)");
2452 reply_code = LTTNG_ERR_UNK;
2453 ret = -1;
2454 goto end;
2455 }
2456 }
2457
2458 ret = init_session_output_directory_handle(
2459 conn->session, &session_output);
2460 if (ret) {
2461 reply_code = LTTNG_ERR_CREATE_DIR_FAIL;
2462 goto end;
2463 }
session_output_init = true;
2464
2465 chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk);
2466 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2467 reply_code = LTTNG_ERR_UNK;
2468 ret = -1;
2469 goto end;
2470 }
2471
2472 chunk_status = lttng_trace_chunk_set_as_owner(chunk, &session_output);
2473 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2474 reply_code = LTTNG_ERR_UNK;
2475 ret = -1;
2476 goto end;
2477 }
2478
2479 published_chunk = sessiond_trace_chunk_registry_publish_chunk(
2480 sessiond_trace_chunk_registry,
2481 conn->session->sessiond_uuid,
2482 conn->session->id,
2483 chunk);
2484 if (!published_chunk) {
2485 char uuid_str[UUID_STR_LEN];
2486
2487 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2488 ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2489 uuid_str,
2490 conn->session->id,
2491 msg->chunk_id);
2492 ret = -1;
2493 reply_code = LTTNG_ERR_NOMEM;
2494 goto end;
2495 }
2496
2497 pthread_mutex_lock(&conn->session->lock);
2498 if (conn->session->pending_closure_trace_chunk) {
2499 /*
2500 * Invalid; this means a second create_trace_chunk command was
2501 * received before a close_trace_chunk.
2502 */
2503 ERR("Invalid trace chunk creation command received; a trace chunk is already awaiting a trace chunk close command");
2504 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2505 ret = -1;
2506 goto end_unlock_session;
2507 }
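/*
 * The previous current trace chunk becomes "pending closure": it must
 * be closed by a CLOSE_TRACE_CHUNK command before another chunk
 * creation is accepted (see the check above).
 */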
2508 conn->session->pending_closure_trace_chunk =
2509 conn->session->current_trace_chunk;
2510 conn->session->current_trace_chunk = published_chunk;
2511 published_chunk = NULL;
2512 end_unlock_session:
2513 pthread_mutex_unlock(&conn->session->lock);
2514 end:
2515 reply.ret_code = htobe32((uint32_t) reply_code);
2516 send_ret = conn->sock->ops->sendmsg(conn->sock,
2517 &reply,
2518 sizeof(struct lttcomm_relayd_generic_reply),
2519 0);
2520 if (send_ret < (ssize_t) sizeof(reply)) {
2521 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2522 send_ret);
2523 ret = -1;
2524 }
2525 end_no_reply:
2526 lttng_trace_chunk_put(chunk);
2527 lttng_trace_chunk_put(published_chunk);
2528 if (session_output_init) {
lttng_directory_handle_fini(&session_output);
}
2529 return ret;
2530 }
2531
2532 /*
2533 * relay_close_trace_chunk: close a trace chunk
2534 */
2535 static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2536 struct relay_connection *conn,
2537 const struct lttng_buffer_view *payload)
2538 {
2539 int ret = 0;
2540 ssize_t send_ret;
2541 struct relay_session *session = conn->session;
2542 struct lttcomm_relayd_close_trace_chunk *msg;
2543 struct lttcomm_relayd_generic_reply reply = {};
2544 struct lttng_buffer_view header_view;
2545 struct lttng_trace_chunk *chunk = NULL;
2546 enum lttng_error_code reply_code = LTTNG_OK;
2547 enum lttng_trace_chunk_status chunk_status;
2548 uint64_t chunk_id;
2549 LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {};
2550 time_t close_timestamp;
2551
2552 if (!session || !conn->version_check_done) {
2553 ERR("Trying to close a trace chunk before version check");
2554 ret = -1;
2555 goto end_no_reply;
2556 }
2557
2558 if (session->major == 2 && session->minor < 11) {
2559 ERR("Chunk close command is unsupported before 2.11");
2560 ret = -1;
2561 goto end_no_reply;
2562 }
2563
2564 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2565 if (!header_view.data) {
2566 ERR("Failed to receive payload of chunk close command");
2567 ret = -1;
2568 goto end_no_reply;
2569 }
2570
2571 /* Convert to host endianness. */
2572 msg = (typeof(msg)) header_view.data;
2573 chunk_id = be64toh(msg->chunk_id);
2574 close_timestamp = (time_t) be64toh(msg->close_timestamp);
2575 close_command = (typeof(close_command)){
2576 .value = be32toh(msg->close_command.value),
2577 .is_set = msg->close_command.is_set,
2578 };
2579
2580 chunk = sessiond_trace_chunk_registry_get_chunk(
2581 sessiond_trace_chunk_registry,
2582 conn->session->sessiond_uuid,
2583 conn->session->id,
2584 chunk_id);
2585 if (!chunk) {
2586 char uuid_str[UUID_STR_LEN];
2587
2588 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2589 ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2590 uuid_str,
2591 conn->session->id,
2592 msg->chunk_id);
2593 ret = -1;
2594 reply_code = LTTNG_ERR_NOMEM;
2595 goto end;
2596 }
2597
2598 pthread_mutex_lock(&session->lock);
2599 if (session->pending_closure_trace_chunk &&
2600 session->pending_closure_trace_chunk != chunk) {
2601 ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure",
2602 session->session_name);
2603 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2604 ret = -1;
2605 goto end_unlock_session;
2606 }
2607
2608 chunk_status = lttng_trace_chunk_set_close_timestamp(
2609 chunk, close_timestamp);
2610 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2611 ERR("Failed to set trace chunk close timestamp");
2612 ret = -1;
2613 reply_code = LTTNG_ERR_UNK;
2614 goto end_unlock_session;
2615 }
2616
2617 if (close_command.is_set) {
2618 chunk_status = lttng_trace_chunk_set_close_command(
2619 chunk, close_command.value);
2620 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2621 ret = -1;
2622 reply_code = LTTNG_ERR_INVALID;
2623 goto end_unlock_session;
2624 }
2625 }
2626
2627 if (session->current_trace_chunk == chunk) {
2628 /*
2629 * After a trace chunk close command, no new streams
2630 * referencing the chunk may be created. Hence, on the
2631 * event that no new trace chunk have been created for
2632 * the session, the reference to the current trace chunk
2633 * is released in order to allow it to be reclaimed when
2634 * the last stream releases its reference to it.
2635 */
2636 lttng_trace_chunk_put(session->current_trace_chunk);
2637 session->current_trace_chunk = NULL;
2638 }
2639 lttng_trace_chunk_put(session->pending_closure_trace_chunk);
2640 session->pending_closure_trace_chunk = NULL;
2641 end_unlock_session:
2642 pthread_mutex_unlock(&session->lock);
2643
2644 end:
2645 reply.ret_code = htobe32((uint32_t) reply_code);
2646 send_ret = conn->sock->ops->sendmsg(conn->sock,
2647 &reply,
2648 sizeof(struct lttcomm_relayd_generic_reply),
2649 0);
2650 if (send_ret < (ssize_t) sizeof(reply)) {
2651 ERR("Failed to send \"close trace chunk\" command reply (ret = %zd)",
2652 send_ret);
2653 ret = -1;
2654 }
2655 end_no_reply:
2656 lttng_trace_chunk_put(chunk);
2657 return ret;
2658 }
2659
2660 /*
2661 * relay_trace_chunk_exists: check if a trace chunk exists
2662 */
2663 static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr,
2664 struct relay_connection *conn,
2665 const struct lttng_buffer_view *payload)
2666 {
2667 int ret = 0;
2668 ssize_t send_ret;
2669 struct relay_session *session = conn->session;
2670 struct lttcomm_relayd_trace_chunk_exists *msg;
2671 struct lttcomm_relayd_trace_chunk_exists_reply reply = {};
2672 struct lttng_buffer_view header_view;
2673 struct lttng_trace_chunk *chunk = NULL;
2674 uint64_t chunk_id;
2675
2676 if (!session || !conn->version_check_done) {
2677 ERR("Trying to check the existence of a trace chunk before version check");
2678 ret = -1;
2679 goto end_no_reply;
2680 }
2681
2682 if (session->major == 2 && session->minor < 11) {
2683 ERR("Chunk exists command is unsupported before 2.11");
2684 ret = -1;
2685 goto end_no_reply;
2686 }
2687
2688 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2689 if (!header_view.data) {
2690 ERR("Failed to receive payload of chunk exists command");
2691 ret = -1;
2692 goto end_no_reply;
2693 }
2694
2695 /* Convert to host endianness. */
2696 msg = (typeof(msg)) header_view.data;
2697 chunk_id = be64toh(msg->chunk_id);
2698
2699 chunk = sessiond_trace_chunk_registry_get_chunk(
2700 sessiond_trace_chunk_registry,
2701 conn->session->sessiond_uuid,
2702 conn->session->id,
2703 chunk_id);
2704
2705 reply = (typeof(reply)) {
2706 .generic.ret_code = htobe32((uint32_t) LTTNG_OK),
2707 .trace_chunk_exists = !!chunk,
2708 };
2709 send_ret = conn->sock->ops->sendmsg(conn->sock,
2710 &reply, sizeof(reply), 0);
2711 if (send_ret < (ssize_t) sizeof(reply)) {
2712 ERR("Failed to send \"trace chunk exists\" command reply (ret = %zd)",
2713 send_ret);
2714 ret = -1;
2715 }
2716 end_no_reply:
2717 lttng_trace_chunk_put(chunk);
2718 return ret;
2719 }
2720
2721 #define DBG_CMD(cmd_name, conn) \
2722 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
2723
2724 static int relay_process_control_command(struct relay_connection *conn,
2725 const struct lttcomm_relayd_hdr *header,
2726 const struct lttng_buffer_view *payload)
2727 {
2728 int ret = 0;
2729
2730 switch (header->cmd) {
2731 case RELAYD_CREATE_SESSION:
2732 DBG_CMD("RELAYD_CREATE_SESSION", conn);
2733 ret = relay_create_session(header, conn, payload);
2734 break;
2735 case RELAYD_ADD_STREAM:
2736 DBG_CMD("RELAYD_ADD_STREAM", conn);
2737 ret = relay_add_stream(header, conn, payload);
2738 break;
2739 case RELAYD_START_DATA:
2740 DBG_CMD("RELAYD_START_DATA", conn);
2741 ret = relay_start(header, conn, payload);
2742 break;
2743 case RELAYD_SEND_METADATA:
2744 DBG_CMD("RELAYD_SEND_METADATA", conn);
2745 ret = relay_recv_metadata(header, conn, payload);
2746 break;
2747 case RELAYD_VERSION:
2748 DBG_CMD("RELAYD_VERSION", conn);
2749 ret = relay_send_version(header, conn, payload);
2750 break;
2751 case RELAYD_CLOSE_STREAM:
2752 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
2753 ret = relay_close_stream(header, conn, payload);
2754 break;
2755 case RELAYD_DATA_PENDING:
2756 DBG_CMD("RELAYD_DATA_PENDING", conn);
2757 ret = relay_data_pending(header, conn, payload);
2758 break;
2759 case RELAYD_QUIESCENT_CONTROL:
2760 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
2761 ret = relay_quiescent_control(header, conn, payload);
2762 break;
2763 case RELAYD_BEGIN_DATA_PENDING:
2764 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
2765 ret = relay_begin_data_pending(header, conn, payload);
2766 break;
2767 case RELAYD_END_DATA_PENDING:
2768 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
2769 ret = relay_end_data_pending(header, conn, payload);
2770 break;
2771 case RELAYD_SEND_INDEX:
2772 DBG_CMD("RELAYD_SEND_INDEX", conn);
2773 ret = relay_recv_index(header, conn, payload);
2774 break;
2775 case RELAYD_STREAMS_SENT:
2776 DBG_CMD("RELAYD_STREAMS_SENT", conn);
2777 ret = relay_streams_sent(header, conn, payload);
2778 break;
2779 case RELAYD_RESET_METADATA:
2780 DBG_CMD("RELAYD_RESET_METADATA", conn);
2781 ret = relay_reset_metadata(header, conn, payload);
2782 break;
2783 case RELAYD_ROTATE_STREAMS:
2784 DBG_CMD("RELAYD_ROTATE_STREAMS", conn);
2785 ret = relay_rotate_session_streams(header, conn, payload);
2786 break;
2787 case RELAYD_CREATE_TRACE_CHUNK:
2788 DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn);
2789 ret = relay_create_trace_chunk(header, conn, payload);
2790 break;
2791 case RELAYD_CLOSE_TRACE_CHUNK:
2792 DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn);
2793 ret = relay_close_trace_chunk(header, conn, payload);
2794 break;
2795 case RELAYD_TRACE_CHUNK_EXISTS:
2796 DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn);
2797 ret = relay_trace_chunk_exists(header, conn, payload);
2798 break;
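/* RELAYD_UPDATE_SYNC_INFO is not handled; fall through to the unknown command path. */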
2799 case RELAYD_UPDATE_SYNC_INFO:
2800 default:
2801 ERR("Received unknown command (%u)", header->cmd);
2802 relay_unknown_command(conn);
2803 ret = -1;
2804 goto end;
2805 }
2806
2807 end:
2808 return ret;
2809 }
2810
2811 static enum relay_connection_status relay_process_control_receive_payload(
2812 struct relay_connection *conn)
2813 {
2814 int ret = 0;
2815 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2816 struct lttng_dynamic_buffer *reception_buffer =
2817 &conn->protocol.ctrl.reception_buffer;
2818 struct ctrl_connection_state_receive_payload *state =
2819 &conn->protocol.ctrl.state.receive_payload;
2820 struct lttng_buffer_view payload_view;
2821
2822 if (state->left_to_receive == 0) {
2823 /* Short-circuit for payload-less commands. */
2824 goto reception_complete;
2825 }
2826
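/*
 * Non-blocking read: consume whatever part of the payload is currently
 * available. A partial read simply leaves the state in place until the
 * poll loop reports more data.
 */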
2827 ret = conn->sock->ops->recvmsg(conn->sock,
2828 reception_buffer->data + state->received,
2829 state->left_to_receive, MSG_DONTWAIT);
2830 if (ret < 0) {
2831 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2832 PERROR("Unable to receive command payload on sock %d",
2833 conn->sock->fd);
2834 status = RELAY_CONNECTION_STATUS_ERROR;
2835 }
2836 goto end;
2837 } else if (ret == 0) {
2838 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2839 status = RELAY_CONNECTION_STATUS_CLOSED;
2840 goto end;
2841 }
2842
2843 assert(ret > 0);
2844 assert(ret <= state->left_to_receive);
2845
2846 state->left_to_receive -= ret;
2847 state->received += ret;
2848
2849 if (state->left_to_receive > 0) {
2850 /*
2851 * Can't transition to the protocol's next state, wait to
2852 * receive the rest of the payload.
2853 */
2854 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2855 state->received, state->left_to_receive,
2856 conn->sock->fd);
2857 goto end;
2858 }
2859
2860 reception_complete:
2861 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
2862 conn->sock->fd, state->received);
2863 /*
2864 * The payload required to process the command has been received.
2865 * A view to the reception buffer is forwarded to the various
2866 * commands and the state of the control is reset on success.
2867 *
2868 * Commands are responsible for sending their reply to the peer.
2869 */
2870 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
2871 0, -1);
2872 ret = relay_process_control_command(conn,
2873 &state->header, &payload_view);
2874 if (ret < 0) {
2875 status = RELAY_CONNECTION_STATUS_ERROR;
2876 goto end;
2877 }
2878
2879 ret = connection_reset_protocol_state(conn);
2880 if (ret) {
2881 status = RELAY_CONNECTION_STATUS_ERROR;
2882 }
2883 end:
2884 return status;
2885 }
2886
2887 static enum relay_connection_status relay_process_control_receive_header(
2888 struct relay_connection *conn)
2889 {
2890 int ret = 0;
2891 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2892 struct lttcomm_relayd_hdr header;
2893 struct lttng_dynamic_buffer *reception_buffer =
2894 &conn->protocol.ctrl.reception_buffer;
2895 struct ctrl_connection_state_receive_header *state =
2896 &conn->protocol.ctrl.state.receive_header;
2897
2898 assert(state->left_to_receive != 0);
2899
2900 ret = conn->sock->ops->recvmsg(conn->sock,
2901 reception_buffer->data + state->received,
2902 state->left_to_receive, MSG_DONTWAIT);
2903 if (ret < 0) {
2904 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2905 PERROR("Unable to receive control command header on sock %d",
2906 conn->sock->fd);
2907 status = RELAY_CONNECTION_STATUS_ERROR;
2908 }
2909 goto end;
2910 } else if (ret == 0) {
2911 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2912 status = RELAY_CONNECTION_STATUS_CLOSED;
2913 goto end;
2914 }
2915
2916 assert(ret > 0);
2917 assert(ret <= state->left_to_receive);
2918
2919 state->left_to_receive -= ret;
2920 state->received += ret;
2921
2922 if (state->left_to_receive > 0) {
2923 /*
2924 * Can't transition to the protocol's next state, wait to
2925 * receive the rest of the header.
2926 */
2927 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2928 state->received, state->left_to_receive,
2929 conn->sock->fd);
2930 goto end;
2931 }
2932
2933 /* Transition to next state: receiving the command's payload. */
2934 conn->protocol.ctrl.state_id =
2935 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
2936 memcpy(&header, reception_buffer->data, sizeof(header));
2937 header.circuit_id = be64toh(header.circuit_id);
2938 header.data_size = be64toh(header.data_size);
2939 header.cmd = be32toh(header.cmd);
2940 header.cmd_version = be32toh(header.cmd_version);
2941 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
2942 &header, sizeof(header));
2943
2944 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
2945 conn->sock->fd, header.cmd, header.cmd_version,
2946 header.data_size);
2947
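/*
 * Bound the size of the reception buffer that will be allocated for
 * the payload; a peer announcing an over-sized payload is treated as
 * a protocol error.
 */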
2948 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
2949 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
2950 header.data_size);
2951 status = RELAY_CONNECTION_STATUS_ERROR;
2952 goto end;
2953 }
2954
2955 conn->protocol.ctrl.state.receive_payload.left_to_receive =
2956 header.data_size;
2957 conn->protocol.ctrl.state.receive_payload.received = 0;
2958 ret = lttng_dynamic_buffer_set_size(reception_buffer,
2959 header.data_size);
2960 if (ret) {
2961 status = RELAY_CONNECTION_STATUS_ERROR;
2962 goto end;
2963 }
2964
2965 if (header.data_size == 0) {
2966 /*
2967 * Manually invoke the next state as the poll loop
2968 * will not wake-up to allow us to proceed further.
2969 */
2970 status = relay_process_control_receive_payload(conn);
2971 }
2972 end:
2973 return status;
2974 }
2975
2976 /*
2977 * Process the commands received on the control socket
2978 */
2979 static enum relay_connection_status relay_process_control(
2980 struct relay_connection *conn)
2981 {
2982 enum relay_connection_status status;
2983
2984 switch (conn->protocol.ctrl.state_id) {
2985 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
2986 status = relay_process_control_receive_header(conn);
2987 break;
2988 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
2989 status = relay_process_control_receive_payload(conn);
2990 break;
2991 default:
2992 ERR("Unknown control connection protocol state encountered.");
2993 abort();
2994 }
2995
2996 return status;
2997 }
2998
2999 static enum relay_connection_status relay_process_data_receive_header(
3000 struct relay_connection *conn)
3001 {
3002 int ret;
3003 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3004 struct data_connection_state_receive_header *state =
3005 &conn->protocol.data.state.receive_header;
3006 struct lttcomm_relayd_data_hdr header;
3007 struct relay_stream *stream;
3008
3009 assert(state->left_to_receive != 0);
3010
3011 ret = conn->sock->ops->recvmsg(conn->sock,
3012 state->header_reception_buffer + state->received,
3013 state->left_to_receive, MSG_DONTWAIT);
3014 if (ret < 0) {
3015 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3016 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
3017 status = RELAY_CONNECTION_STATUS_ERROR;
3018 }
3019 goto end;
3020 } else if (ret == 0) {
3021 /* Orderly shutdown. Not necessary to print an error. */
3022 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3023 status = RELAY_CONNECTION_STATUS_CLOSED;
3024 goto end;
3025 }
3026
3027 assert(ret > 0);
3028 assert(ret <= state->left_to_receive);
3029
3030 state->left_to_receive -= ret;
3031 state->received += ret;
3032
3033 if (state->left_to_receive > 0) {
3034 /*
3035 * Can't transition to the protocol's next state, wait to
3036 * receive the rest of the header.
3037 */
3038 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3039 state->received, state->left_to_receive,
3040 conn->sock->fd);
3041 goto end;
3042 }
3043
3044 /* Transition to next state: receiving the payload. */
3045 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
3046
3047 memcpy(&header, state->header_reception_buffer, sizeof(header));
3048 header.circuit_id = be64toh(header.circuit_id);
3049 header.stream_id = be64toh(header.stream_id);
3050 header.data_size = be32toh(header.data_size);
3051 header.net_seq_num = be64toh(header.net_seq_num);
3052 header.padding_size = be32toh(header.padding_size);
3053 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
3054
3055 conn->protocol.data.state.receive_payload.left_to_receive =
3056 header.data_size;
3057 conn->protocol.data.state.receive_payload.received = 0;
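/*
 * rotate_index is filled in by stream_init_packet() below and consumed
 * later when the payload handler updates the stream's index.
 */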
3058 conn->protocol.data.state.receive_payload.rotate_index = false;
3059
3060 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
3061 conn->sock->fd, header.circuit_id,
3062 header.stream_id, header.data_size,
3063 header.net_seq_num, header.padding_size);
3064
3065 stream = stream_get_by_id(header.stream_id);
3066 if (!stream) {
3067 DBG("relay_process_data_receive_header: cannot find stream %" PRIu64,
3068 header.stream_id);
3069 /* Protocol error. */
3070 status = RELAY_CONNECTION_STATUS_ERROR;
3071 goto end;
3072 }
3073
3074 pthread_mutex_lock(&stream->lock);
3075 /* Prepare stream for the reception of a new packet. */
3076 ret = stream_init_packet(stream, header.data_size,
3077 &conn->protocol.data.state.receive_payload.rotate_index);
3078 pthread_mutex_unlock(&stream->lock);
3079 if (ret) {
3080 ERR("Failed to rotate stream output file");
3081 status = RELAY_CONNECTION_STATUS_ERROR;
3082 goto end_stream_unlock;
3083 }
3084
3085 end_stream_unlock:
3086 stream_put(stream);
3087 end:
3088 return status;
3089 }
3090
3091 static enum relay_connection_status relay_process_data_receive_payload(
3092 struct relay_connection *conn)
3093 {
3094 int ret;
3095 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3096 struct relay_stream *stream;
3097 struct data_connection_state_receive_payload *state =
3098 &conn->protocol.data.state.receive_payload;
3099 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
3100 char data_buffer[chunk_size];
3101 bool partial_recv = false;
3102 bool new_stream = false, close_requested = false, index_flushed = false;
3103 uint64_t left_to_receive = state->left_to_receive;
3104 struct relay_session *session;
3105
3106 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
3107 state->header.stream_id, state->header.net_seq_num,
3108 state->received, left_to_receive);
3109
3110 stream = stream_get_by_id(state->header.stream_id);
3111 if (!stream) {
3112 /* Protocol error. */
3113 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
3114 state->header.stream_id);
3115 status = RELAY_CONNECTION_STATUS_ERROR;
3116 goto end;
3117 }
3118
3119 pthread_mutex_lock(&stream->lock);
3120 session = stream->trace->session;
3121 if (!conn->session) {
3122 ret = connection_set_session(conn, session);
3123 if (ret) {
3124 status = RELAY_CONNECTION_STATUS_ERROR;
3125 goto end_stream_unlock;
3126 }
3127 }
3128
3129 /*
3130 * The size of the "chunk" received on any iteration is bounded by:
3131 * - the data left to receive,
3132 * - the data immediately available on the socket,
3133 * - the on-stack data buffer
3134 */
3135 while (left_to_receive > 0 && !partial_recv) {
3136 size_t recv_size = min(left_to_receive, chunk_size);
3137 struct lttng_buffer_view packet_chunk;
3138
3139 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
3140 recv_size, MSG_DONTWAIT);
3141 if (ret < 0) {
3142 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3143 PERROR("Socket %d error", conn->sock->fd);
3144 status = RELAY_CONNECTION_STATUS_ERROR;
3145 }
3146 goto end_stream_unlock;
3147 } else if (ret == 0) {
3148 /* No more data ready to be consumed on socket. */
3149 DBG3("No more data ready for consumption on data socket of stream id %" PRIu64,
3150 state->header.stream_id);
3151 status = RELAY_CONNECTION_STATUS_CLOSED;
3152 break;
3153 } else if (ret < (int) recv_size) {
3154 /*
3155 * All the data available on the socket has been
3156 * consumed.
3157 */
3158 partial_recv = true;
3159 recv_size = ret;
3160 }
3161
3162 packet_chunk = lttng_buffer_view_init(data_buffer,
3163 0, recv_size);
3164 assert(packet_chunk.data);
3165
3166 ret = stream_write(stream, &packet_chunk, 0);
3167 if (ret) {
3168 ERR("Relay error writing data to file");
3169 status = RELAY_CONNECTION_STATUS_ERROR;
3170 goto end_stream_unlock;
3171 }
3172
3173 left_to_receive -= recv_size;
3174 state->received += recv_size;
3175 state->left_to_receive = left_to_receive;
3176 }
3177
3178 if (state->left_to_receive > 0) {
3179 /*
3180 * Did not receive all the data expected, wait for more data to
3181 * become available on the socket.
3182 */
3183 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
3184 state->header.stream_id, state->received,
3185 state->left_to_receive);
3186 goto end_stream_unlock;
3187 }
3188
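/* Append the packet's trailing padding, if any, to the stream. */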
3189 ret = stream_write(stream, NULL, state->header.padding_size);
3190 if (ret) {
3191 status = RELAY_CONNECTION_STATUS_ERROR;
3192 goto end_stream_unlock;
3193 }
3194
3195 if (session_streams_have_index(session)) {
3196 ret = stream_update_index(stream, state->header.net_seq_num,
3197 state->rotate_index, &index_flushed,
3198 state->header.data_size + state->header.padding_size);
3199 if (ret < 0) {
3200 ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3201 stream->stream_handle,
3202 state->header.net_seq_num, ret);
3203 status = RELAY_CONNECTION_STATUS_ERROR;
3204 goto end_stream_unlock;
3205 }
3206 }
3207
3208 if (stream->prev_data_seq == -1ULL) {
3209 new_stream = true;
3210 }
3211
3212 ret = stream_complete_packet(stream, state->header.data_size +
3213 state->header.padding_size, state->header.net_seq_num,
3214 index_flushed);
3215 if (ret) {
3216 status = RELAY_CONNECTION_STATUS_ERROR;
3217 goto end_stream_unlock;
3218 }
3219
3220 /*
3221 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
3222 * contents of *state which are aliased (union) to the same location as
3223 * the new state. Don't use it beyond this point.
3224 */
3225 connection_reset_protocol_state(conn);
3226 state = NULL;
3227
3228 end_stream_unlock:
3229 close_requested = stream->close_requested;
3230 pthread_mutex_unlock(&stream->lock);
3231 if (close_requested && left_to_receive == 0) {
3232 try_stream_close(stream);
3233 }
3234
3235 if (new_stream) {
3236 pthread_mutex_lock(&session->lock);
3237 uatomic_set(&session->new_streams, 1);
3238 pthread_mutex_unlock(&session->lock);
3239 }
3240
3241 stream_put(stream);
3242 end:
3243 return status;
3244 }
3245
3246 /*
3247 * relay_process_data: Process the data received on the data socket
3248 */
3249 static enum relay_connection_status relay_process_data(
3250 struct relay_connection *conn)
3251 {
3252 enum relay_connection_status status;
3253
3254 switch (conn->protocol.data.state_id) {
3255 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
3256 status = relay_process_data_receive_header(conn);
3257 break;
3258 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
3259 status = relay_process_data_receive_payload(conn);
3260 break;
3261 default:
3262 ERR("Unexpected data connection communication state.");
3263 abort();
3264 }
3265
3266 return status;
3267 }
3268
3269 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
3270 {
3271 int ret;
3272
3273 (void) lttng_poll_del(events, pollfd);
3274
3275 ret = close(pollfd);
3276 if (ret < 0) {
3277 ERR("Failed to close pollfd %d", pollfd);
3278 }
3279 }
3280
3281 static void relay_thread_close_connection(struct lttng_poll_event *events,
3282 int pollfd, struct relay_connection *conn)
3283 {
3284 const char *type_str;
3285
3286 switch (conn->type) {
3287 case RELAY_DATA:
3288 type_str = "Data";
3289 break;
3290 case RELAY_CONTROL:
3291 type_str = "Control";
3292 break;
3293 case RELAY_VIEWER_COMMAND:
3294 type_str = "Viewer Command";
3295 break;
3296 case RELAY_VIEWER_NOTIFICATION:
3297 type_str = "Viewer Notification";
3298 break;
3299 default:
3300 type_str = "Unknown";
3301 }
3302 cleanup_connection_pollfd(events, pollfd);
3303 connection_put(conn);
3304 DBG("%s connection closed with %d", type_str, pollfd);
3305 }
3306
3307 /*
3308 * This thread does the actual work
3309 */
3310 static void *relay_thread_worker(void *data)
3311 {
3312 int ret, err = -1, last_seen_data_fd = -1;
3313 uint32_t nb_fd;
3314 struct lttng_poll_event events;
3315 struct lttng_ht *relay_connections_ht;
3316 struct lttng_ht_iter iter;
3317 struct relay_connection *destroy_conn = NULL;
3318
3319 DBG("[thread] Relay worker started");
3320
3321 rcu_register_thread();
3322
3323 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
3324
3325 if (testpoint(relayd_thread_worker)) {
3326 goto error_testpoint;
3327 }
3328
3329 health_code_update();
3330
3331 /* table of connections indexed on socket */
3332 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3333 if (!relay_connections_ht) {
3334 goto relay_connections_ht_error;
3335 }
3336
3337 ret = create_thread_poll_set(&events, 2);
3338 if (ret < 0) {
3339 goto error_poll_create;
3340 }
3341
3342 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
3343 if (ret < 0) {
3344 goto error;
3345 }
3346
3347 restart:
3348 while (1) {
3349 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
3350
3351 health_code_update();
3352
3353 /* Infinite blocking call, waiting for transmission */
3354 DBG3("Relayd worker thread polling...");
3355 health_poll_entry();
3356 ret = lttng_poll_wait(&events, -1);
3357 health_poll_exit();
3358 if (ret < 0) {
3359 /*
3360 * Restart interrupted system call.
3361 */
3362 if (errno == EINTR) {
3363 goto restart;
3364 }
3365 goto error;
3366 }
3367
3368 nb_fd = ret;
3369
3370 /*
3371 * Process control. The control connection is
3372 * prioritized so we don't starve it with high
3373 * throughput tracing data on the data connection.
3374 */
3375 for (i = 0; i < nb_fd; i++) {
3376 /* Fetch once the poll data */
3377 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3378 int pollfd = LTTNG_POLL_GETFD(&events, i);
3379
3380 health_code_update();
3381
3382 /* Thread quit pipe has been closed. Killing thread. */
3383 ret = check_thread_quit_pipe(pollfd, revents);
3384 if (ret) {
3385 err = 0;
3386 goto exit;
3387 }
3388
3389 /* Inspect the relay conn pipe for new connection */
3390 if (pollfd == relay_conn_pipe[0]) {
3391 if (revents & LPOLLIN) {
3392 struct relay_connection *conn;
3393
3394 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3395 if (ret < 0) {
3396 goto error;
3397 }
3398 lttng_poll_add(&events, conn->sock->fd,
3399 LPOLLIN | LPOLLRDHUP);
3400 connection_ht_add(relay_connections_ht, conn);
3401 DBG("Connection socket %d added", conn->sock->fd);
3402 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3403 ERR("Relay connection pipe error");
3404 goto error;
3405 } else {
3406 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3407 goto error;
3408 }
3409 } else {
3410 struct relay_connection *ctrl_conn;
3411
3412 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3413 /* If not found, there is a synchronization issue. */
3414 assert(ctrl_conn);
3415
3416 if (ctrl_conn->type == RELAY_DATA) {
3417 if (revents & LPOLLIN) {
3418 /*
3419 * Flag the last seen data fd not deleted. It will be
3420 * used as the last seen fd if any fd gets deleted in
3421 * this first loop.
3422 */
3423 last_notdel_data_fd = pollfd;
3424 }
3425 goto put_ctrl_connection;
3426 }
3427 assert(ctrl_conn->type == RELAY_CONTROL);
3428
3429 if (revents & LPOLLIN) {
3430 enum relay_connection_status status;
3431
3432 status = relay_process_control(ctrl_conn);
3433 if (status != RELAY_CONNECTION_STATUS_OK) {
3434 /*
3435 * On socket error flag the session as aborted to force
3436 * the cleanup of its stream otherwise it can leak
3437 * during the lifetime of the relayd.
3438 *
3439 * This prevents situations in which streams can be
3440 * left open because an index was received, the
3441 * control connection is closed, and the data
3442 * connection is closed (uncleanly) before the packet's
3443 * data is received.
3444 *
3445 * Since the control connection encountered an error,
3446 * it is okay to be conservative and close the
3447 * session right now as we can't rely on the protocol
3448 * being respected anymore.
3449 */
3450 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3451 session_abort(ctrl_conn->session);
3452 }
3453
3454 /* Clear the connection on error or close. */
3455 relay_thread_close_connection(&events,
3456 pollfd,
3457 ctrl_conn);
3458 }
3459 seen_control = 1;
3460 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3461 relay_thread_close_connection(&events,
3462 pollfd, ctrl_conn);
3463 if (last_seen_data_fd == pollfd) {
3464 last_seen_data_fd = last_notdel_data_fd;
3465 }
3466 } else {
3467 ERR("Unexpected poll events %u for control sock %d",
3468 revents, pollfd);
3469 connection_put(ctrl_conn);
3470 goto error;
3471 }
3472 put_ctrl_connection:
3473 connection_put(ctrl_conn);
3474 }
3475 }
3476
3477 /*
3478 * The last loop handled a control request, go back to poll to make
3479 * sure we prioritise the control socket.
3480 */
3481 if (seen_control) {
3482 continue;
3483 }
3484
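/*
 * Resume the data-processing loop right after the last data fd handled
 * on the previous iteration so that a single busy data connection does
 * not starve the others.
 */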
3485 if (last_seen_data_fd >= 0) {
3486 for (i = 0; i < nb_fd; i++) {
3487 int pollfd = LTTNG_POLL_GETFD(&events, i);
3488
3489 health_code_update();
3490
3491 if (last_seen_data_fd == pollfd) {
3492 idx = i;
3493 break;
3494 }
3495 }
3496 }
3497
3498 /* Process data connection. */
3499 for (i = idx + 1; i < nb_fd; i++) {
3500 /* Fetch the poll data. */
3501 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3502 int pollfd = LTTNG_POLL_GETFD(&events, i);
3503 struct relay_connection *data_conn;
3504
3505 health_code_update();
3506
3507 if (!revents) {
3508 /* No activity for this FD (poll implementation). */
3509 continue;
3510 }
3511
3512 /* Skip the command pipe. It's handled in the first loop. */
3513 if (pollfd == relay_conn_pipe[0]) {
3514 continue;
3515 }
3516
3517 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3518 if (!data_conn) {
3519 /* Skip it. Might be removed before. */
3520 continue;
3521 }
3522 if (data_conn->type == RELAY_CONTROL) {
3523 goto put_data_connection;
3524 }
3525 assert(data_conn->type == RELAY_DATA);
3526
3527 if (revents & LPOLLIN) {
3528 enum relay_connection_status status;
3529
3530 status = relay_process_data(data_conn);
3531 /* Connection closed or error. */
3532 if (status != RELAY_CONNECTION_STATUS_OK) {
3533 /*
3534 * On socket error flag the session as aborted to force
3535 * the cleanup of its stream otherwise it can leak
3536 * during the lifetime of the relayd.
3537 *
3538 * This prevents situations in which streams can be
3539 * left open because an index was received, the
3540 * control connection is closed, and the data
3541 * connection is closed (uncleanly) before the packet's
3542 * data is received.
3543 *
3544 * Since the data connection encountered an error,
3545 * it is okay to be conservative and close the
3546 * session right now as we can't rely on the protocol
3547 * being respected anymore.
3548 */
3549 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3550 session_abort(data_conn->session);
3551 }
3552 relay_thread_close_connection(&events, pollfd,
3553 data_conn);
3554 /*
3555 * Every goto restart call sets the last seen fd where
3556 * here we don't really care since we gracefully
3557 * continue the loop after the connection is deleted.
3558 */
3559 } else {
3560 /* Keep last seen data fd. */
3561 last_seen_data_fd = pollfd;
3562 connection_put(data_conn);
3563 goto restart;
3564 }
3565 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3566 relay_thread_close_connection(&events, pollfd,
3567 data_conn);
3568 } else {
3569 ERR("Unknown poll events %u for data sock %d",
3570 revents, pollfd);
3571 }
3572 put_data_connection:
3573 connection_put(data_conn);
3574 }
3575 last_seen_data_fd = -1;
3576 }
3577
3578 /* Normal exit, no error */
3579 ret = 0;
3580
3581 exit:
3582 error:
3583 /* Cleanup remaining connection object. */
3584 rcu_read_lock();
3585 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3586 destroy_conn,
3587 sock_n.node) {
3588 health_code_update();
3589
3590 session_abort(destroy_conn->session);
3591
3592 /*
3593 * No need to grab another ref, because we own
3594 * destroy_conn.
3595 */
3596 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3597 destroy_conn);
3598 }
3599 rcu_read_unlock();
3600
3601 lttng_poll_clean(&events);
3602 error_poll_create:
3603 lttng_ht_destroy(relay_connections_ht);
3604 relay_connections_ht_error:
3605 /* Close relay conn pipes */
3606 utils_close_pipe(relay_conn_pipe);
3607 if (err) {
3608 DBG("Thread exited with error");
3609 }
3610 DBG("Worker thread cleanup complete");
3611 error_testpoint:
3612 if (err) {
3613 health_error();
3614 ERR("Health error occurred in %s", __func__);
3615 }
3616 health_unregister(health_relayd);
3617 rcu_unregister_thread();
3618 lttng_relay_stop_threads();
3619 return NULL;
3620 }
3621
3622 /*
3623 * Create the relay connection pipe used to hand new connections to the worker thread.
3624 * Closed in cleanup().
3625 */
3626 static int create_relay_conn_pipe(void)
3627 {
3628 int ret;
3629
3630 ret = utils_create_pipe_cloexec(relay_conn_pipe);
3631
3632 return ret;
3633 }
3634
3635 /*
3636 * main
3637 */
3638 int main(int argc, char **argv)
3639 {
3640 int ret = 0, retval = 0;
3641 void *status;
3642
3643 /* Parse arguments */
3644 progname = argv[0];
3645 if (set_options(argc, argv)) {
3646 retval = -1;
3647 goto exit_options;
3648 }
3649
3650 if (set_signal_handler()) {
3651 retval = -1;
3652 goto exit_options;
3653 }
3654
3655 /* Try to create directory if -o, --output is specified. */
3656 if (opt_output_path) {
3657 if (*opt_output_path != '/') {
3658 ERR("Please specify an absolute path for -o, --output PATH");
3659 retval = -1;
3660 goto exit_options;
3661 }
3662
3663 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
3664 -1, -1);
3665 if (ret < 0) {
3666 ERR("Unable to create %s", opt_output_path);
3667 retval = -1;
3668 goto exit_options;
3669 }
3670 }
3671
3672 /* Daemonize */
3673 if (opt_daemon || opt_background) {
3674 int i;
3675
3676 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
3677 !opt_background);
3678 if (ret < 0) {
3679 retval = -1;
3680 goto exit_options;
3681 }
3682
3683 /*
3684 * We are in the child. Make sure all other file
3685 * descriptors are closed, in case we are called with
3686 * more opened file descriptors than the standard ones.
3687 */
3688 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
3689 (void) close(i);
3690 }
3691 }
3692
3693 sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create();
3694 if (!sessiond_trace_chunk_registry) {
3695 ERR("Failed to initialize session daemon trace chunk registry");
3696 retval = -1;
3697 goto exit_sessiond_trace_chunk_registry;
3698 }
3699
3700 /* Initialize thread health monitoring */
3701 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
3702 if (!health_relayd) {
3703 PERROR("health_app_create error");
3704 retval = -1;
3705 goto exit_health_app_create;
3706 }
3707
3708 /* Create thread quit pipe */
3709 if (init_thread_quit_pipe()) {
3710 retval = -1;
3711 goto exit_init_data;
3712 }
3713
3714 /* Setup the thread apps communication pipe. */
3715 if (create_relay_conn_pipe()) {
3716 retval = -1;
3717 goto exit_init_data;
3718 }
3719
3720 /* Init relay command queue. */
3721 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
3722
3723 /* Initialize communication library */
3724 lttcomm_init();
3725 lttcomm_inet_init();
3726
3727 /* tables of sessions indexed by session ID */
3728 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3729 if (!sessions_ht) {
3730 retval = -1;
3731 goto exit_init_data;
3732 }
3733
3734 /* tables of streams indexed by stream ID */
3735 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3736 if (!relay_streams_ht) {
3737 retval = -1;
3738 goto exit_init_data;
3739 }
3740
3741 /* tables of streams indexed by stream ID */
3742 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3743 if (!viewer_streams_ht) {
3744 retval = -1;
3745 goto exit_init_data;
3746 }
3747
3748 ret = utils_create_pipe(health_quit_pipe);
3749 if (ret) {
3750 retval = -1;
3751 goto exit_health_quit_pipe;
3752 }
3753
3754 /* Create the health management thread. */
3755 ret = pthread_create(&health_thread, default_pthread_attr(),
3756 thread_manage_health, (void *) NULL);
3757 if (ret) {
3758 errno = ret;
3759 PERROR("pthread_create health");
3760 retval = -1;
3761 goto exit_health_thread;
3762 }
3763
3764 /* Setup the dispatcher thread */
3765 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
3766 relay_thread_dispatcher, (void *) NULL);
3767 if (ret) {
3768 errno = ret;
3769 PERROR("pthread_create dispatcher");
3770 retval = -1;
3771 goto exit_dispatcher_thread;
3772 }
3773
3774 /* Setup the worker thread */
3775 ret = pthread_create(&worker_thread, default_pthread_attr(),
3776 relay_thread_worker, NULL);
3777 if (ret) {
3778 errno = ret;
3779 PERROR("pthread_create worker");
3780 retval = -1;
3781 goto exit_worker_thread;
3782 }
3783
3784 /* Setup the listener thread */
3785 ret = pthread_create(&listener_thread, default_pthread_attr(),
3786 relay_thread_listener, (void *) NULL);
3787 if (ret) {
3788 errno = ret;
3789 PERROR("pthread_create listener");
3790 retval = -1;
3791 goto exit_listener_thread;
3792 }
3793
3794 ret = relayd_live_create(live_uri);
3795 if (ret) {
3796 ERR("Failed to start live viewer threads");
3797 retval = -1;
3798 goto exit_live;
3799 }
3800
3801 /*
3802 * This is where we start awaiting program completion (e.g. through
3803 * signal that asks threads to teardown).
3804 */
3805
3806 ret = relayd_live_join();
3807 if (ret) {
3808 retval = -1;
3809 }
3810 exit_live:
3811
3812 ret = pthread_join(listener_thread, &status);
3813 if (ret) {
3814 errno = ret;
3815 PERROR("pthread_join listener_thread");
3816 retval = -1;
3817 }
3818
3819 exit_listener_thread:
3820 ret = pthread_join(worker_thread, &status);
3821 if (ret) {
3822 errno = ret;
3823 PERROR("pthread_join worker_thread");
3824 retval = -1;
3825 }
3826
3827 exit_worker_thread:
3828 ret = pthread_join(dispatcher_thread, &status);
3829 if (ret) {
3830 errno = ret;
3831 PERROR("pthread_join dispatcher_thread");
3832 retval = -1;
3833 }
3834 exit_dispatcher_thread:
3835
3836 ret = pthread_join(health_thread, &status);
3837 if (ret) {
3838 errno = ret;
3839 PERROR("pthread_join health_thread");
3840 retval = -1;
3841 }
3842 exit_health_thread:
3843
3844 utils_close_pipe(health_quit_pipe);
3845 exit_health_quit_pipe:
3846
3847 exit_init_data:
3848 health_app_destroy(health_relayd);
3849 sessiond_trace_chunk_registry_destroy(sessiond_trace_chunk_registry);
3850 exit_health_app_create:
3851 exit_sessiond_trace_chunk_registry:
3852 exit_options:
3853 /*
3854 * Wait for all pending call_rcu work to complete before tearing
3855 * down data structures. call_rcu worker may be trying to
3856 * perform lookups in those structures.
3857 */
3858 rcu_barrier();
3859 relayd_cleanup();
3860
3861 /* Ensure all prior call_rcu are done. */
3862 rcu_barrier();
3863
3864 if (!retval) {
3865 exit(EXIT_SUCCESS);
3866 } else {
3867 exit(EXIT_FAILURE);
3868 }
3869 }