Tests: add an lttng-relayd working directory test
[lttng-tools.git] / src / bin / lttng-relayd / main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <inttypes.h>
38 #include <urcu/futex.h>
39 #include <urcu/uatomic.h>
40 #include <urcu/rculist.h>
41 #include <unistd.h>
42 #include <fcntl.h>
43 #include <strings.h>
44
45 #include <lttng/lttng.h>
46 #include <common/common.h>
47 #include <common/compat/poll.h>
48 #include <common/compat/socket.h>
49 #include <common/compat/endian.h>
50 #include <common/compat/getenv.h>
51 #include <common/defaults.h>
52 #include <common/daemonize.h>
53 #include <common/futex.h>
54 #include <common/sessiond-comm/sessiond-comm.h>
55 #include <common/sessiond-comm/inet.h>
56 #include <common/sessiond-comm/relayd.h>
57 #include <common/uri.h>
58 #include <common/utils.h>
59 #include <common/align.h>
60 #include <common/config/session-config.h>
61 #include <common/dynamic-buffer.h>
62 #include <common/buffer-view.h>
63 #include <common/string-utils/format.h>
64
65 #include "version.h"
66 #include "cmd.h"
67 #include "ctf-trace.h"
68 #include "index.h"
69 #include "utils.h"
70 #include "lttng-relayd.h"
71 #include "live.h"
72 #include "health-relayd.h"
73 #include "testpoint.h"
74 #include "viewer-stream.h"
75 #include "session.h"
76 #include "stream.h"
77 #include "connection.h"
78 #include "tracefile-array.h"
79 #include "tcp_keep_alive.h"
80 #include "sessiond-trace-chunks.h"
81
82 static const char *help_msg =
83 #ifdef LTTNG_EMBED_HELP
84 #include <lttng-relayd.8.h>
85 #else
86 NULL
87 #endif
88 ;
89
90 enum relay_connection_status {
91 RELAY_CONNECTION_STATUS_OK,
92 /* An error occurred while processing an event on the connection. */
93 RELAY_CONNECTION_STATUS_ERROR,
94 /* Connection closed/shutdown cleanly. */
95 RELAY_CONNECTION_STATUS_CLOSED,
96 };
97
98 /* command line options */
99 char *opt_output_path, *opt_working_directory;
100 static int opt_daemon, opt_background, opt_print_version;
101
102 /*
103  * We need to wait for the listener, live listener and health-check
104  * threads to start before signalling readiness to the parent process.
105 */
106 #define NR_LTTNG_RELAY_READY 3
107 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
108
109 /* Size of receive buffer. */
110 #define RECV_DATA_BUFFER_SIZE 65536
111
112 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
113 static pid_t child_ppid; /* Internal parent PID used with daemonize. */
114
115 static struct lttng_uri *control_uri;
116 static struct lttng_uri *data_uri;
117 static struct lttng_uri *live_uri;
118
119 const char *progname;
120
121 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
122 static int tracing_group_name_override;
123
124 const char * const config_section_name = "relayd";
125
126 /*
127 * Quit pipe for all threads. This permits a single cancellation point
128 * for all threads when receiving an event on the pipe.
129 */
130 int thread_quit_pipe[2] = { -1, -1 };
131
132 /*
133 * This pipe is used to inform the worker thread that a command is queued and
134 * ready to be processed.
135 */
136 static int relay_conn_pipe[2] = { -1, -1 };
137
138 /* Shared between threads */
139 static int dispatch_thread_exit;
140
141 static pthread_t listener_thread;
142 static pthread_t dispatcher_thread;
143 static pthread_t worker_thread;
144 static pthread_t health_thread;
145
146 /*
147 * last_relay_stream_id_lock protects last_relay_stream_id increment
148 * atomicity on 32-bit architectures.
149 */
150 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
151 static uint64_t last_relay_stream_id;
152
153 /*
154 * Relay command queue.
155 *
156 * The relay_thread_listener and relay_thread_dispatcher communicate with this
157 * queue.
158 */
159 static struct relay_conn_queue relay_conn_queue;
160
161 /* Global relay stream hash table. */
162 struct lttng_ht *relay_streams_ht;
163
164 /* Global relay viewer stream hash table. */
165 struct lttng_ht *viewer_streams_ht;
166
167 /* Global relay sessions hash table. */
168 struct lttng_ht *sessions_ht;
169
170 /* Relayd health monitoring */
171 struct health_app *health_relayd;
172
173 struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
174
175 static struct option long_options[] = {
176 { "control-port", 1, 0, 'C', },
177 { "data-port", 1, 0, 'D', },
178 { "live-port", 1, 0, 'L', },
179 { "daemonize", 0, 0, 'd', },
180 { "background", 0, 0, 'b', },
181 { "group", 1, 0, 'g', },
182 { "help", 0, 0, 'h', },
183 { "output", 1, 0, 'o', },
184 { "verbose", 0, 0, 'v', },
185 { "config", 1, 0, 'f' },
186 { "version", 0, 0, 'V' },
187 { "working-directory", 1, 0, 'w', },
188 { NULL, 0, 0, 0, },
189 };
190
191 static const char *config_ignore_options[] = { "help", "config", "version" };
192
193 static void print_version(void) {
194 fprintf(stdout, "%s\n", VERSION);
195 }
196
197 static void relayd_config_log(void)
198 {
199 DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s",
200 GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION,
201 EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME);
202 if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
203 DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n");
204 }
205 if (EXTRA_VERSION_PATCHES[0] != '\0') {
206 DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n");
207 }
208 }
209
210 /*
211 * Take an option from the getopt output and set it in the right variable to be
212 * used later.
213 *
214 * Return 0 on success else a negative value.
215 */
216 static int set_option(int opt, const char *arg, const char *optname)
217 {
218 int ret;
219
220 switch (opt) {
221 case 0:
222 fprintf(stderr, "option %s", optname);
223 if (arg) {
224 fprintf(stderr, " with arg %s\n", arg);
225 }
226 break;
227 case 'C':
228 if (lttng_is_setuid_setgid()) {
229 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
230 "-C, --control-port");
231 } else {
232 ret = uri_parse(arg, &control_uri);
233 if (ret < 0) {
234 ERR("Invalid control URI specified");
235 goto end;
236 }
237 if (control_uri->port == 0) {
238 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
239 }
240 }
241 break;
242 case 'D':
243 if (lttng_is_setuid_setgid()) {
244 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
245 "-D, -data-port");
246 } else {
247 ret = uri_parse(arg, &data_uri);
248 if (ret < 0) {
249 ERR("Invalid data URI specified");
250 goto end;
251 }
252 if (data_uri->port == 0) {
253 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
254 }
255 }
256 break;
257 case 'L':
258 if (lttng_is_setuid_setgid()) {
259 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
260 "-L, -live-port");
261 } else {
262 ret = uri_parse(arg, &live_uri);
263 if (ret < 0) {
264 ERR("Invalid live URI specified");
265 goto end;
266 }
267 if (live_uri->port == 0) {
268 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
269 }
270 }
271 break;
272 case 'd':
273 opt_daemon = 1;
274 break;
275 case 'b':
276 opt_background = 1;
277 break;
278 case 'g':
279 if (lttng_is_setuid_setgid()) {
280 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
281 "-g, --group");
282 } else {
283 tracing_group_name = strdup(arg);
284 if (tracing_group_name == NULL) {
285 ret = -errno;
286 PERROR("strdup");
287 goto end;
288 }
289 tracing_group_name_override = 1;
290 }
291 break;
292 case 'h':
293 ret = utils_show_help(8, "lttng-relayd", help_msg);
294 if (ret) {
295 ERR("Cannot show --help for `lttng-relayd`");
296 perror("exec");
297 }
298 exit(EXIT_FAILURE);
299 case 'V':
300 opt_print_version = 1;
301 break;
302 case 'o':
303 if (lttng_is_setuid_setgid()) {
304 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
305 "-o, --output");
306 } else {
307 ret = asprintf(&opt_output_path, "%s", arg);
308 if (ret < 0) {
309 ret = -errno;
310 PERROR("asprintf opt_output_path");
311 goto end;
312 }
313 }
314 break;
315 case 'w':
316 if (lttng_is_setuid_setgid()) {
317 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
318 "-w, --working-directory");
319 } else {
320 ret = asprintf(&opt_working_directory, "%s", arg);
321 if (ret < 0) {
322 ret = -errno;
323 PERROR("asprintf opt_working_directory");
324 goto end;
325 }
326 }
327 break;
328
329 case 'v':
330 /* The verbosity level can be increased by passing -v multiple times. */
331 if (arg) {
332 lttng_opt_verbose = config_parse_value(arg);
333 } else {
334 /* Only 3 levels of verbosity (-vvv). */
335 if (lttng_opt_verbose < 3) {
336 lttng_opt_verbose += 1;
337 }
338 }
339 break;
340 default:
341 /* Unknown option or other error.
342 * Error is printed by getopt, just return */
343 ret = -1;
344 goto end;
345 }
346
347 /* All good. */
348 ret = 0;
349
350 end:
351 return ret;
352 }
353
354 /*
355 * config_entry_handler_cb used to handle options read from a config file.
356 * See config_entry_handler_cb comment in common/config/session-config.h for the
357 * return value conventions.
358 */
359 static int config_entry_handler(const struct config_entry *entry, void *unused)
360 {
361 int ret = 0, i;
362
363 if (!entry || !entry->name || !entry->value) {
364 ret = -EINVAL;
365 goto end;
366 }
367
368 /* Check if the option is to be ignored */
369 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
370 if (!strcmp(entry->name, config_ignore_options[i])) {
371 goto end;
372 }
373 }
374
375 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
376 /* Ignore if entry name is not fully matched. */
377 if (strcmp(entry->name, long_options[i].name)) {
378 continue;
379 }
380
381 /*
382 * If the option takes no argument on the command line,
383 * we have to check if the value is "true". We support
384 * non-zero numeric values, true, on and yes.
385 */
386 if (!long_options[i].has_arg) {
387 ret = config_parse_value(entry->value);
388 if (ret <= 0) {
389 if (ret) {
390 WARN("Invalid configuration value \"%s\" for option %s",
391 entry->value, entry->name);
392 }
393 /* False, skip boolean config option. */
394 goto end;
395 }
396 }
397
398 ret = set_option(long_options[i].val, entry->value, entry->name);
399 goto end;
400 }
401
402 WARN("Unrecognized option \"%s\" in daemon configuration file.",
403 entry->name);
404
405 end:
406 return ret;
407 }
408
409 static int set_options(int argc, char **argv)
410 {
411 int c, ret = 0, option_index = 0, retval = 0;
412 int orig_optopt = optopt, orig_optind = optind;
413 char *default_address, *optstring;
414 const char *config_path = NULL;
415
416 optstring = utils_generate_optstring(long_options,
417 sizeof(long_options) / sizeof(struct option));
418 if (!optstring) {
419 retval = -ENOMEM;
420 goto exit;
421 }
422
423 /* Check for the --config option */
424
425 while ((c = getopt_long(argc, argv, optstring, long_options,
426 &option_index)) != -1) {
427 if (c == '?') {
428 retval = -EINVAL;
429 goto exit;
430 } else if (c != 'f') {
431 continue;
432 }
433
434 if (lttng_is_setuid_setgid()) {
435 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
436 "-f, --config");
437 } else {
438 config_path = utils_expand_path(optarg);
439 if (!config_path) {
440 ERR("Failed to resolve path: %s", optarg);
441 }
442 }
443 }
444
445 ret = config_get_section_entries(config_path, config_section_name,
446 config_entry_handler, NULL);
447 if (ret) {
448 if (ret > 0) {
449 ERR("Invalid configuration option at line %i", ret);
450 }
451 retval = -1;
452 goto exit;
453 }
454
455 /* Reset getopt's global state */
456 optopt = orig_optopt;
457 optind = orig_optind;
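/*
 * Note: the command line is re-parsed after the configuration file has
 * been processed, so options passed on the command line take precedence
 * over values read from the file.
 */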
458 while (1) {
459 c = getopt_long(argc, argv, optstring, long_options, &option_index);
460 if (c == -1) {
461 break;
462 }
463
464 ret = set_option(c, optarg, long_options[option_index].name);
465 if (ret < 0) {
466 retval = -1;
467 goto exit;
468 }
469 }
470
471 /* assign default values */
472 if (control_uri == NULL) {
473 ret = asprintf(&default_address,
474 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
475 DEFAULT_NETWORK_CONTROL_PORT);
476 if (ret < 0) {
477 PERROR("asprintf default control address");
478 retval = -1;
479 goto exit;
480 }
481
482 ret = uri_parse(default_address, &control_uri);
483 free(default_address);
484 if (ret < 0) {
485 ERR("Invalid control URI specified");
486 retval = -1;
487 goto exit;
488 }
489 }
490 if (data_uri == NULL) {
491 ret = asprintf(&default_address,
492 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
493 DEFAULT_NETWORK_DATA_PORT);
494 if (ret < 0) {
495 PERROR("asprintf default data address");
496 retval = -1;
497 goto exit;
498 }
499
500 ret = uri_parse(default_address, &data_uri);
501 free(default_address);
502 if (ret < 0) {
503 ERR("Invalid data URI specified");
504 retval = -1;
505 goto exit;
506 }
507 }
508 if (live_uri == NULL) {
509 ret = asprintf(&default_address,
510 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
511 DEFAULT_NETWORK_VIEWER_PORT);
512 if (ret < 0) {
513 PERROR("asprintf default viewer control address");
514 retval = -1;
515 goto exit;
516 }
517
518 ret = uri_parse(default_address, &live_uri);
519 free(default_address);
520 if (ret < 0) {
521 ERR("Invalid viewer control URI specified");
522 retval = -1;
523 goto exit;
524 }
525 }
526
527 exit:
528 free(optstring);
529 return retval;
530 }
531
532 static void print_global_objects(void)
533 {
534 rcu_register_thread();
535
536 print_viewer_streams();
537 print_relay_streams();
538 print_sessions();
539
540 rcu_unregister_thread();
541 }
542
543 /*
544 * Cleanup the daemon
545 */
546 static void relayd_cleanup(void)
547 {
548 print_global_objects();
549
550 DBG("Cleaning up");
551
552 if (viewer_streams_ht)
553 lttng_ht_destroy(viewer_streams_ht);
554 if (relay_streams_ht)
555 lttng_ht_destroy(relay_streams_ht);
556 if (sessions_ht)
557 lttng_ht_destroy(sessions_ht);
558
559 free(opt_output_path);
560 free(opt_working_directory);
561
562 /* Close thread quit pipes */
563 utils_close_pipe(thread_quit_pipe);
564
565 uri_free(control_uri);
566 uri_free(data_uri);
567 /* Live URI is freed in the live thread. */
568
569 if (tracing_group_name_override) {
570 free((void *) tracing_group_name);
571 }
572 }
573
574 /*
575 * Write to writable pipe used to notify a thread.
576 */
577 static int notify_thread_pipe(int wpipe)
578 {
579 ssize_t ret;
580
581 ret = lttng_write(wpipe, "!", 1);
582 if (ret < 1) {
583 PERROR("write poll pipe");
584 goto end;
585 }
586 ret = 0;
587 end:
588 return ret;
589 }
590
591 static int notify_health_quit_pipe(int *pipe)
592 {
593 ssize_t ret;
594
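/* The byte written is arbitrary; the reader only waits for the pipe to become readable. */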
595 ret = lttng_write(pipe[1], "4", 1);
596 if (ret < 1) {
597 PERROR("write relay health quit");
598 goto end;
599 }
600 ret = 0;
601 end:
602 return ret;
603 }
604
605 /*
606 * Stop all relayd and relayd-live threads.
607 */
608 int lttng_relay_stop_threads(void)
609 {
610 int retval = 0;
611
612 /* Stopping all threads */
613 DBG("Terminating all threads");
614 if (notify_thread_pipe(thread_quit_pipe[1])) {
615 ERR("write error on thread quit pipe");
616 retval = -1;
617 }
618
619 if (notify_health_quit_pipe(health_quit_pipe)) {
620 ERR("write error on health quit pipe");
621 }
622
623 /* Dispatch thread */
624 CMM_STORE_SHARED(dispatch_thread_exit, 1);
625 futex_nto1_wake(&relay_conn_queue.futex);
626
627 if (relayd_live_stop()) {
628 ERR("Error stopping live threads");
629 retval = -1;
630 }
631 return retval;
632 }
633
634 /*
635 * Signal handler for the daemon
636 *
637  * Simply stop all worker threads, letting main() return gracefully after
638  * joining all threads and calling cleanup().
639 */
640 static void sighandler(int sig)
641 {
642 switch (sig) {
643 case SIGINT:
644 DBG("SIGINT caught");
645 if (lttng_relay_stop_threads()) {
646 ERR("Error stopping threads");
647 }
648 break;
649 case SIGTERM:
650 DBG("SIGTERM caught");
651 if (lttng_relay_stop_threads()) {
652 ERR("Error stopping threads");
653 }
654 break;
655 case SIGUSR1:
656 CMM_STORE_SHARED(recv_child_signal, 1);
657 break;
658 default:
659 break;
660 }
661 }
662
663 /*
664  * Set up signal handlers for:
665  * SIGINT, SIGTERM, SIGUSR1, SIGPIPE
666 */
667 static int set_signal_handler(void)
668 {
669 int ret = 0;
670 struct sigaction sa;
671 sigset_t sigset;
672
673 if ((ret = sigemptyset(&sigset)) < 0) {
674 PERROR("sigemptyset");
675 return ret;
676 }
677
678 sa.sa_mask = sigset;
679 sa.sa_flags = 0;
680
681 sa.sa_handler = sighandler;
682 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
683 PERROR("sigaction");
684 return ret;
685 }
686
687 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
688 PERROR("sigaction");
689 return ret;
690 }
691
692 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
693 PERROR("sigaction");
694 return ret;
695 }
696
697 sa.sa_handler = SIG_IGN;
698 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
699 PERROR("sigaction");
700 return ret;
701 }
702
703 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
704
705 return ret;
706 }
707
708 void lttng_relay_notify_ready(void)
709 {
710 /* Notify the parent process of the fork() that we are ready. */
711 if (opt_daemon || opt_background) {
712 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
713 kill(child_ppid, SIGUSR1);
714 }
715 }
716 }
717
718 /*
719 * Init thread quit pipe.
720 *
721 * Return -1 on error or 0 if all pipes are created.
722 */
723 static int init_thread_quit_pipe(void)
724 {
725 int ret;
726
727 ret = utils_create_pipe_cloexec(thread_quit_pipe);
728
729 return ret;
730 }
731
732 /*
733 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
734 */
735 static int create_thread_poll_set(struct lttng_poll_event *events, int size)
736 {
737 int ret;
738
739 if (events == NULL || size == 0) {
740 ret = -1;
741 goto error;
742 }
743
744 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
745 if (ret < 0) {
746 goto error;
747 }
748
749 /* Add quit pipe */
750 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
751 if (ret < 0) {
752 goto error;
753 }
754
755 return 0;
756
757 error:
758 return ret;
759 }
760
761 /*
762 * Check if the thread quit pipe was triggered.
763 *
764  * Return 1 if it was triggered, else 0.
765 */
766 static int check_thread_quit_pipe(int fd, uint32_t events)
767 {
768 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
769 return 1;
770 }
771
772 return 0;
773 }
774
775 /*
776 * Create and init socket from uri.
777 */
778 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
779 {
780 int ret;
781 struct lttcomm_sock *sock = NULL;
782
783 sock = lttcomm_alloc_sock_from_uri(uri);
784 if (sock == NULL) {
785 ERR("Allocating socket");
786 goto error;
787 }
788
789 ret = lttcomm_create_sock(sock);
790 if (ret < 0) {
791 goto error;
792 }
793 DBG("Listening on sock %d", sock->fd);
794
795 ret = sock->ops->bind(sock);
796 if (ret < 0) {
797 PERROR("Failed to bind socket");
798 goto error;
799 }
800
801 ret = sock->ops->listen(sock, -1);
802 if (ret < 0) {
803 goto error;
804
805 }
806
807 return sock;
808
809 error:
810 if (sock) {
811 lttcomm_destroy_sock(sock);
812 }
813 return NULL;
814 }
815
816 /*
817 * This thread manages the listening for new connections on the network
818 */
819 static void *relay_thread_listener(void *data)
820 {
821 int i, ret, pollfd, err = -1;
822 uint32_t revents, nb_fd;
823 struct lttng_poll_event events;
824 struct lttcomm_sock *control_sock, *data_sock;
825
826 DBG("[thread] Relay listener started");
827
828 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
829
830 health_code_update();
831
832 control_sock = relay_socket_create(control_uri);
833 if (!control_sock) {
834 goto error_sock_control;
835 }
836
837 data_sock = relay_socket_create(data_uri);
838 if (!data_sock) {
839 goto error_sock_relay;
840 }
841
842 /*
843 * Pass 3 as size here for the thread quit pipe, control and
844 * data socket.
845 */
846 ret = create_thread_poll_set(&events, 3);
847 if (ret < 0) {
848 goto error_create_poll;
849 }
850
851 /* Add the control socket */
852 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
853 if (ret < 0) {
854 goto error_poll_add;
855 }
856
857 /* Add the data socket */
858 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
859 if (ret < 0) {
860 goto error_poll_add;
861 }
862
863 lttng_relay_notify_ready();
864
865 if (testpoint(relayd_thread_listener)) {
866 goto error_testpoint;
867 }
868
869 while (1) {
870 health_code_update();
871
872 DBG("Listener accepting connections");
873
874 restart:
875 health_poll_entry();
876 ret = lttng_poll_wait(&events, -1);
877 health_poll_exit();
878 if (ret < 0) {
879 /*
880 * Restart interrupted system call.
881 */
882 if (errno == EINTR) {
883 goto restart;
884 }
885 goto error;
886 }
887
888 nb_fd = ret;
889
890 DBG("Relay new connection received");
891 for (i = 0; i < nb_fd; i++) {
892 health_code_update();
893
894 /* Fetch once the poll data */
895 revents = LTTNG_POLL_GETEV(&events, i);
896 pollfd = LTTNG_POLL_GETFD(&events, i);
897
898 /* Thread quit pipe has been closed. Killing thread. */
899 ret = check_thread_quit_pipe(pollfd, revents);
900 if (ret) {
901 err = 0;
902 goto exit;
903 }
904
905 if (revents & LPOLLIN) {
906 /*
907 * A new connection is requested, therefore a
908 * sessiond/consumerd connection is allocated in
909 * this thread, enqueued to a global queue and
910 * dequeued (and freed) in the worker thread.
911 */
912 int val = 1;
913 struct relay_connection *new_conn;
914 struct lttcomm_sock *newsock;
915 enum connection_type type;
916
917 if (pollfd == data_sock->fd) {
918 type = RELAY_DATA;
919 newsock = data_sock->ops->accept(data_sock);
920 } else {
921 assert(pollfd == control_sock->fd);
922 type = RELAY_CONTROL;
923 newsock = control_sock->ops->accept(control_sock);
924 }
925 if (!newsock) {
926 PERROR("accepting sock");
927 goto error;
928 }
929 /* Only dereference the socket once accept() is known to have succeeded. */
930 DBG("Relay %s connection accepted, socket %d",
931 type == RELAY_DATA ? "data" : "control",
932 newsock->fd);
933
934 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
935 sizeof(val));
936 if (ret < 0) {
937 PERROR("setsockopt inet");
938 lttcomm_destroy_sock(newsock);
939 goto error;
940 }
941
942 ret = socket_apply_keep_alive_config(newsock->fd);
943 if (ret < 0) {
944 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
945 newsock->fd);
946 lttcomm_destroy_sock(newsock);
947 goto error;
948 }
949
950 new_conn = connection_create(newsock, type);
951 if (!new_conn) {
952 lttcomm_destroy_sock(newsock);
953 goto error;
954 }
955
956 /* Enqueue request for the dispatcher thread. */
957 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
958 &new_conn->qnode);
959
960 /*
961 * Wake the dispatch queue futex.
962 * Implicit memory barrier with the
963 * exchange in cds_wfcq_enqueue.
964 */
965 futex_nto1_wake(&relay_conn_queue.futex);
966 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
967 ERR("socket poll error");
968 goto error;
969 } else {
970 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
971 goto error;
972 }
973 }
974 }
975
976 exit:
977 error:
978 error_poll_add:
979 error_testpoint:
980 lttng_poll_clean(&events);
981 error_create_poll:
982 if (data_sock->fd >= 0) {
983 ret = data_sock->ops->close(data_sock);
984 if (ret) {
985 PERROR("close");
986 }
987 }
988 lttcomm_destroy_sock(data_sock);
989 error_sock_relay:
990 if (control_sock->fd >= 0) {
991 ret = control_sock->ops->close(control_sock);
992 if (ret) {
993 PERROR("close");
994 }
995 }
996 lttcomm_destroy_sock(control_sock);
997 error_sock_control:
998 if (err) {
999 health_error();
1000 ERR("Health error occurred in %s", __func__);
1001 }
1002 health_unregister(health_relayd);
1003 DBG("Relay listener thread cleanup complete");
1004 lttng_relay_stop_threads();
1005 return NULL;
1006 }
1007
1008 /*
1009 * This thread manages the dispatching of the requests to worker threads
1010 */
1011 static void *relay_thread_dispatcher(void *data)
1012 {
1013 int err = -1;
1014 ssize_t ret;
1015 struct cds_wfcq_node *node;
1016 struct relay_connection *new_conn = NULL;
1017
1018 DBG("[thread] Relay dispatcher started");
1019
1020 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
1021
1022 if (testpoint(relayd_thread_dispatcher)) {
1023 goto error_testpoint;
1024 }
1025
1026 health_code_update();
1027
1028 for (;;) {
1029 health_code_update();
1030
1031 /* Atomically prepare the queue futex */
1032 futex_nto1_prepare(&relay_conn_queue.futex);
1033
1034 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1035 break;
1036 }
1037
1038 do {
1039 health_code_update();
1040
1041 /* Dequeue commands */
1042 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1043 &relay_conn_queue.tail);
1044 if (node == NULL) {
1045 DBG("Woken up but nothing in the relay command queue");
1046 /* Continue thread execution */
1047 break;
1048 }
1049 new_conn = caa_container_of(node, struct relay_connection, qnode);
1050
1051 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1052
1053 /*
1054 * Inform worker thread of the new request. This
1055 * call is blocking so we can be assured that
1056 * the data will be read at some point in time
1057 * or wait to the end of the world :)
1058 */
1059 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1060 if (ret < 0) {
1061 PERROR("write connection pipe");
1062 connection_put(new_conn);
1063 goto error;
1064 }
1065 } while (node != NULL);
1066
1067 /* Futex wait on queue. Blocking call on futex() */
1068 health_poll_entry();
1069 futex_nto1_wait(&relay_conn_queue.futex);
1070 health_poll_exit();
1071 }
1072
1073 /* Normal exit, no error */
1074 err = 0;
1075
1076 error:
1077 error_testpoint:
1078 if (err) {
1079 health_error();
1080 ERR("Health error occurred in %s", __func__);
1081 }
1082 health_unregister(health_relayd);
1083 DBG("Dispatch thread dying");
1084 lttng_relay_stop_threads();
1085 return NULL;
1086 }
1087
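/*
 * Streams carry indexes only for peers >= 2.4 and only for non-snapshot
 * sessions; snapshot sessions do not emit indexes.
 */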
1088 static bool session_streams_have_index(const struct relay_session *session)
1089 {
1090 return session->minor >= 4 && !session->snapshot;
1091 }
1092
1093 /*
1094 * Handle the RELAYD_CREATE_SESSION command.
1095 *
1096 * On success, send back the session id or else return a negative value.
1097 */
1098 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1099 struct relay_connection *conn,
1100 const struct lttng_buffer_view *payload)
1101 {
1102 int ret = 0;
1103 ssize_t send_ret;
1104 struct relay_session *session = NULL;
1105 struct lttcomm_relayd_create_session_reply_2_11 reply = {};
1106 char session_name[LTTNG_NAME_MAX] = {};
1107 char hostname[LTTNG_HOST_NAME_MAX] = {};
1108 uint32_t live_timer = 0;
1109 bool snapshot = false;
1110 bool session_name_contains_creation_timestamp = false;
1111 /* Left nil for peers < 2.11. */
1112 char base_path[LTTNG_PATH_MAX] = {};
1113 lttng_uuid sessiond_uuid = {};
1114 LTTNG_OPTIONAL(uint64_t) id_sessiond = {};
1115 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1116 LTTNG_OPTIONAL(time_t) creation_time = {};
1117 struct lttng_dynamic_buffer reply_payload;
1118
1119 lttng_dynamic_buffer_init(&reply_payload);
1120
1121 if (conn->minor < 4) {
1122 /* From 2.1 to 2.3 */
1123 ret = 0;
1124 } else if (conn->minor >= 4 && conn->minor < 11) {
1125 /* From 2.4 to 2.10 */
1126 ret = cmd_create_session_2_4(payload, session_name,
1127 hostname, &live_timer, &snapshot);
1128 } else {
1129 bool has_current_chunk;
1130 uint64_t current_chunk_id_value;
1131 time_t creation_time_value;
1132 uint64_t id_sessiond_value;
1133
1134 /* From 2.11 to ... */
1135 ret = cmd_create_session_2_11(payload, session_name, hostname,
1136 base_path, &live_timer, &snapshot, &id_sessiond_value,
1137 sessiond_uuid, &has_current_chunk,
1138 &current_chunk_id_value, &creation_time_value,
1139 &session_name_contains_creation_timestamp);
1140 if (lttng_uuid_is_nil(sessiond_uuid)) {
1141 /* The nil UUID is reserved for pre-2.11 clients. */
1142 ERR("Illegal nil UUID announced by peer in create session command");
1143 ret = -1;
1144 goto send_reply;
1145 }
1146 LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value);
1147 LTTNG_OPTIONAL_SET(&creation_time, creation_time_value);
1148 if (has_current_chunk) {
1149 LTTNG_OPTIONAL_SET(&current_chunk_id,
1150 current_chunk_id_value);
1151 }
1152 }
1153
1154 if (ret < 0) {
1155 goto send_reply;
1156 }
1157
1158 session = session_create(session_name, hostname, base_path, live_timer,
1159 snapshot, sessiond_uuid,
1160 id_sessiond.is_set ? &id_sessiond.value : NULL,
1161 current_chunk_id.is_set ? &current_chunk_id.value : NULL,
1162 creation_time.is_set ? &creation_time.value : NULL,
1163 conn->major, conn->minor,
1164 session_name_contains_creation_timestamp);
1165 if (!session) {
1166 ret = -1;
1167 goto send_reply;
1168 }
1169 assert(!conn->session);
1170 conn->session = session;
1171 DBG("Created session %" PRIu64, session->id);
1172
1173 reply.generic.session_id = htobe64(session->id);
1174
1175 send_reply:
1176 if (ret < 0) {
1177 reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL);
1178 } else {
1179 reply.generic.ret_code = htobe32(LTTNG_OK);
1180 }
1181
1182 if (conn->minor < 11) {
1183 /* From 2.1 to 2.10 */
1184 ret = lttng_dynamic_buffer_append(&reply_payload,
1185 &reply.generic, sizeof(reply.generic));
1186 if (ret) {
1187 ERR("Failed to append \"create session\" command reply header to payload buffer");
1188 ret = -1;
1189 goto end;
1190 }
1191 } else {
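/*
 * From 2.11 onwards, the reply also carries the session's output path:
 * the generic reply header, the path length, then the path bytes.
 */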
1192 const uint32_t output_path_length =
1193 session ? strlen(session->output_path) + 1 : 0;
1194
1195 reply.output_path_length = htobe32(output_path_length);
1196 ret = lttng_dynamic_buffer_append(
1197 &reply_payload, &reply, sizeof(reply));
1198 if (ret) {
1199 ERR("Failed to append \"create session\" command reply header to payload buffer");
1200 goto end;
1201 }
1202
1203 if (output_path_length) {
1204 ret = lttng_dynamic_buffer_append(&reply_payload,
1205 session->output_path,
1206 output_path_length);
1207 if (ret) {
1208 ERR("Failed to append \"create session\" command reply path to payload buffer");
1209 goto end;
1210 }
1211 }
1212 }
1213
1214 send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data,
1215 reply_payload.size, 0);
1216 if (send_ret < (ssize_t) reply_payload.size) {
1217 ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)",
1218 reply_payload.size, send_ret);
1219 ret = -1;
1220 }
1221 end:
1222 if (ret < 0 && session) {
1223 session_put(session);
1224 }
1225 lttng_dynamic_buffer_reset(&reply_payload);
1226 return ret;
1227 }
1228
1229 /*
1230 * When we have received all the streams and the metadata for a channel,
1231 * we make them visible to the viewer threads.
1232 */
1233 static void publish_connection_local_streams(struct relay_connection *conn)
1234 {
1235 struct relay_stream *stream;
1236 struct relay_session *session = conn->session;
1237
1238 /*
1239 * We publish all streams belonging to a session atomically wrt
1240 * session lock.
1241 */
1242 pthread_mutex_lock(&session->lock);
1243 rcu_read_lock();
1244 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1245 recv_node) {
1246 stream_publish(stream);
1247 }
1248 rcu_read_unlock();
1249
1250 /*
1251 * Inform the viewer that there are new streams in the session.
1252 */
1253 if (session->viewer_attached) {
1254 uatomic_set(&session->new_streams, 1);
1255 }
1256 pthread_mutex_unlock(&session->lock);
1257 }
1258
1259 static int conform_channel_path(char *channel_path)
1260 {
1261 int ret = 0;
1262
1263 if (strstr(channel_path, "../")) {
1264 ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"",
1265 channel_path);
1266 ret = -1;
1267 goto end;
1268 }
1269
1270 if (*channel_path == '/') {
1271 const size_t len = strlen(channel_path);
1272
1273 /*
1274 * Channel paths from peers prior to 2.11 are expressed as an
1275 * absolute path that is, in reality, relative to the relay
1276 * daemon's output directory. Remove the leading slash so it
1277 * is correctly interpreted as a relative path later on.
1278 *
1279 * len (and not len - 1) is used to copy the trailing NULL.
1280 */
1281 bcopy(channel_path + 1, channel_path, len);
1282 }
1283 end:
1284 return ret;
1285 }
1286
1287 /*
1288 * relay_add_stream: allocate a new stream for a session
1289 */
1290 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1291 struct relay_connection *conn,
1292 const struct lttng_buffer_view *payload)
1293 {
1294 int ret;
1295 ssize_t send_ret;
1296 struct relay_session *session = conn->session;
1297 struct relay_stream *stream = NULL;
1298 struct lttcomm_relayd_status_stream reply;
1299 struct ctf_trace *trace = NULL;
1300 uint64_t stream_handle = -1ULL;
1301 char *path_name = NULL, *channel_name = NULL;
1302 uint64_t tracefile_size = 0, tracefile_count = 0;
1303 LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {};
1304
1305 if (!session || !conn->version_check_done) {
1306 ERR("Trying to add a stream before version check");
1307 ret = -1;
1308 goto end_no_session;
1309 }
1310
1311 if (session->minor == 1) {
1312 /* For 2.1 */
1313 ret = cmd_recv_stream_2_1(payload, &path_name,
1314 &channel_name);
1315 } else if (session->minor > 1 && session->minor < 11) {
1316 /* From 2.2 to 2.10 */
1317 ret = cmd_recv_stream_2_2(payload, &path_name,
1318 &channel_name, &tracefile_size, &tracefile_count);
1319 } else {
1320 /* From 2.11 to ... */
1321 ret = cmd_recv_stream_2_11(payload, &path_name,
1322 &channel_name, &tracefile_size, &tracefile_count,
1323 &stream_chunk_id.value);
1324 stream_chunk_id.is_set = true;
1325 }
1326
1327 if (ret < 0) {
1328 goto send_reply;
1329 }
1330
1331 if (conform_channel_path(path_name)) {
1332 goto send_reply;
1333 }
1334
1335 trace = ctf_trace_get_by_path_or_create(session, path_name);
1336 if (!trace) {
1337 goto send_reply;
1338 }
1339 /* This stream here has one reference on the trace. */
1340
1341 pthread_mutex_lock(&last_relay_stream_id_lock);
1342 stream_handle = ++last_relay_stream_id;
1343 pthread_mutex_unlock(&last_relay_stream_id_lock);
1344
1345 /* We pass ownership of path_name and channel_name. */
1346 stream = stream_create(trace, stream_handle, path_name,
1347 channel_name, tracefile_size, tracefile_count);
1348 path_name = NULL;
1349 channel_name = NULL;
1350
1351 /*
1352 * Streams are the owners of their trace. Reference to trace is
1353 * kept within stream_create().
1354 */
1355 ctf_trace_put(trace);
1356
1357 send_reply:
1358 memset(&reply, 0, sizeof(reply));
1359 reply.handle = htobe64(stream_handle);
1360 if (!stream) {
1361 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1362 } else {
1363 reply.ret_code = htobe32(LTTNG_OK);
1364 }
1365
1366 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1367 sizeof(struct lttcomm_relayd_status_stream), 0);
1368 if (send_ret < (ssize_t) sizeof(reply)) {
1369 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1370 send_ret);
1371 ret = -1;
1372 }
1373
1374 end_no_session:
1375 free(path_name);
1376 free(channel_name);
1377 return ret;
1378 }
1379
1380 /*
1381 * relay_close_stream: close a specific stream
1382 */
1383 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1384 struct relay_connection *conn,
1385 const struct lttng_buffer_view *payload)
1386 {
1387 int ret;
1388 ssize_t send_ret;
1389 struct relay_session *session = conn->session;
1390 struct lttcomm_relayd_close_stream stream_info;
1391 struct lttcomm_relayd_generic_reply reply;
1392 struct relay_stream *stream;
1393
1394 DBG("Close stream received");
1395
1396 if (!session || !conn->version_check_done) {
1397 ERR("Trying to close a stream before version check");
1398 ret = -1;
1399 goto end_no_session;
1400 }
1401
1402 if (payload->size < sizeof(stream_info)) {
1403 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1404 sizeof(stream_info), payload->size);
1405 ret = -1;
1406 goto end_no_session;
1407 }
1408 memcpy(&stream_info, payload->data, sizeof(stream_info));
1409 stream_info.stream_id = be64toh(stream_info.stream_id);
1410 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1411
1412 stream = stream_get_by_id(stream_info.stream_id);
1413 if (!stream) {
1414 ret = -1;
1415 goto end;
1416 }
1417
1418 /*
1419 * Set last_net_seq_num before the close flag. Required by data
1420 * pending check.
1421 */
1422 pthread_mutex_lock(&stream->lock);
1423 stream->last_net_seq_num = stream_info.last_net_seq_num;
1424 pthread_mutex_unlock(&stream->lock);
1425
1426 /*
1427 * This is one of the conditions which may trigger a stream close
1428 * with the others being:
1429 * 1) A close command is received for a stream
1430 * 2) The control connection owning the stream is closed
1431 * 3) We have received all of the stream's data _after_ a close
1432 * request.
1433 */
1434 try_stream_close(stream);
1435 if (stream->is_metadata) {
1436 struct relay_viewer_stream *vstream;
1437
1438 vstream = viewer_stream_get_by_id(stream->stream_handle);
1439 if (vstream) {
1440 if (vstream->metadata_sent == stream->metadata_received) {
1441 /*
1442 * Since all the metadata has been sent to the
1443 * viewer and that we have a request to close
1444 * its stream, we can safely teardown the
1445 * corresponding metadata viewer stream.
1446 */
1447 viewer_stream_put(vstream);
1448 }
1449 /* Put local reference. */
1450 viewer_stream_put(vstream);
1451 }
1452 }
1453 stream_put(stream);
1454 ret = 0;
1455
1456 end:
1457 memset(&reply, 0, sizeof(reply));
1458 if (ret < 0) {
1459 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1460 } else {
1461 reply.ret_code = htobe32(LTTNG_OK);
1462 }
1463 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1464 sizeof(struct lttcomm_relayd_generic_reply), 0);
1465 if (send_ret < (ssize_t) sizeof(reply)) {
1466 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1467 send_ret);
1468 ret = -1;
1469 }
1470
1471 end_no_session:
1472 return ret;
1473 }
1474
1475 /*
1476 * relay_reset_metadata: reset a metadata stream
1477 */
1478 static
1479 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1480 struct relay_connection *conn,
1481 const struct lttng_buffer_view *payload)
1482 {
1483 int ret;
1484 ssize_t send_ret;
1485 struct relay_session *session = conn->session;
1486 struct lttcomm_relayd_reset_metadata stream_info;
1487 struct lttcomm_relayd_generic_reply reply;
1488 struct relay_stream *stream;
1489
1490 DBG("Reset metadata received");
1491
1492 if (!session || !conn->version_check_done) {
1493 ERR("Trying to reset a metadata stream before version check");
1494 ret = -1;
1495 goto end_no_session;
1496 }
1497
1498 if (payload->size < sizeof(stream_info)) {
1499 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1500 sizeof(stream_info), payload->size);
1501 ret = -1;
1502 goto end_no_session;
1503 }
1504 memcpy(&stream_info, payload->data, sizeof(stream_info));
1505 stream_info.stream_id = be64toh(stream_info.stream_id);
1506 stream_info.version = be64toh(stream_info.version);
1507
1508 DBG("Update metadata to version %" PRIu64, stream_info.version);
1509
1510 /* Unsupported for live sessions for now. */
1511 if (session->live_timer != 0) {
1512 ret = -1;
1513 goto end;
1514 }
1515
1516 stream = stream_get_by_id(stream_info.stream_id);
1517 if (!stream) {
1518 ret = -1;
1519 goto end;
1520 }
1521 pthread_mutex_lock(&stream->lock);
1522 if (!stream->is_metadata) {
1523 ret = -1;
1524 goto end_unlock;
1525 }
1526
1527 ret = stream_reset_file(stream);
1528 if (ret < 0) {
1529 ERR("Failed to reset metadata stream %" PRIu64
1530 ": stream_path = %s, channel = %s",
1531 stream->stream_handle, stream->path_name,
1532 stream->channel_name);
1533 goto end_unlock;
1534 }
1535 end_unlock:
1536 pthread_mutex_unlock(&stream->lock);
1537 stream_put(stream);
1538
1539 end:
1540 memset(&reply, 0, sizeof(reply));
1541 if (ret < 0) {
1542 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1543 } else {
1544 reply.ret_code = htobe32(LTTNG_OK);
1545 }
1546 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1547 sizeof(struct lttcomm_relayd_generic_reply), 0);
1548 if (send_ret < (ssize_t) sizeof(reply)) {
1549 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1550 send_ret);
1551 ret = -1;
1552 }
1553
1554 end_no_session:
1555 return ret;
1556 }
1557
1558 /*
1559 * relay_unknown_command: send -1 if received unknown command
1560 */
1561 static void relay_unknown_command(struct relay_connection *conn)
1562 {
1563 struct lttcomm_relayd_generic_reply reply;
1564 ssize_t send_ret;
1565
1566 memset(&reply, 0, sizeof(reply));
1567 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1568 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1569 if (send_ret < (ssize_t) sizeof(reply)) {
1570 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1571 }
1572 }
1573
1574 /*
1575 * relay_start: send an acknowledgment to the client to tell if we are
1576 * ready to receive data. We are ready if a session is established.
1577 */
1578 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1579 struct relay_connection *conn,
1580 const struct lttng_buffer_view *payload)
1581 {
1582 int ret = 0;
1583 ssize_t send_ret;
1584 struct lttcomm_relayd_generic_reply reply;
1585 struct relay_session *session = conn->session;
1586
1587 if (!session) {
1588 DBG("Trying to start the streaming without a session established");
1589 ret = htobe32(LTTNG_ERR_UNK);
1590 }
1591
1592 memset(&reply, 0, sizeof(reply));
1593 reply.ret_code = htobe32(LTTNG_OK);
1594 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1595 sizeof(reply), 0);
1596 if (send_ret < (ssize_t) sizeof(reply)) {
1597 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1598 send_ret);
1599 ret = -1;
1600 }
1601
1602 return ret;
1603 }
1604
1605 /*
1606 * relay_recv_metadata: receive the metadata for the session.
1607 */
1608 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1609 struct relay_connection *conn,
1610 const struct lttng_buffer_view *payload)
1611 {
1612 int ret = 0;
1613 struct relay_session *session = conn->session;
1614 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1615 struct relay_stream *metadata_stream;
1616 uint64_t metadata_payload_size;
1617 struct lttng_buffer_view packet_view;
1618
1619 if (!session) {
1620 ERR("Metadata sent before version check");
1621 ret = -1;
1622 goto end;
1623 }
1624
1625 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1626 ERR("Incorrect data size");
1627 ret = -1;
1628 goto end;
1629 }
1630 metadata_payload_size = recv_hdr->data_size -
1631 sizeof(struct lttcomm_relayd_metadata_payload);
1632
1633 memcpy(&metadata_payload_header, payload->data,
1634 sizeof(metadata_payload_header));
1635 metadata_payload_header.stream_id = be64toh(
1636 metadata_payload_header.stream_id);
1637 metadata_payload_header.padding_size = be32toh(
1638 metadata_payload_header.padding_size);
1639
1640 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1641 if (!metadata_stream) {
1642 ret = -1;
1643 goto end;
1644 }
1645
1646 packet_view = lttng_buffer_view_from_view(payload,
1647 sizeof(metadata_payload_header), metadata_payload_size);
1648 if (!packet_view.data) {
1649 ERR("Invalid metadata packet length announced by header");
1650 ret = -1;
1651 goto end_put;
1652 }
1653
1654 pthread_mutex_lock(&metadata_stream->lock);
1655 ret = stream_write(metadata_stream, &packet_view,
1656 metadata_payload_header.padding_size);
1657 pthread_mutex_unlock(&metadata_stream->lock);
1658 if (ret) {
1659 ret = -1;
1660 goto end_put;
1661 }
1662 end_put:
1663 stream_put(metadata_stream);
1664 end:
1665 return ret;
1666 }
1667
1668 /*
1669 * relay_send_version: send relayd version number
1670 */
1671 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1672 struct relay_connection *conn,
1673 const struct lttng_buffer_view *payload)
1674 {
1675 int ret;
1676 ssize_t send_ret;
1677 struct lttcomm_relayd_version reply, msg;
1678 bool compatible = true;
1679
1680 conn->version_check_done = true;
1681
1682 /* Get version from the other side. */
1683 if (payload->size < sizeof(msg)) {
1684 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1685 sizeof(msg), payload->size);
1686 ret = -1;
1687 goto end;
1688 }
1689
1690 memcpy(&msg, payload->data, sizeof(msg));
1691 msg.major = be32toh(msg.major);
1692 msg.minor = be32toh(msg.minor);
1693
1694 memset(&reply, 0, sizeof(reply));
1695 reply.major = RELAYD_VERSION_COMM_MAJOR;
1696 reply.minor = RELAYD_VERSION_COMM_MINOR;
1697
1698 /* Major versions must be the same */
1699 if (reply.major != msg.major) {
1700 DBG("Incompatible major versions (%u vs %u), deleting session",
1701 reply.major, msg.major);
1702 compatible = false;
1703 }
1704
1705 conn->major = reply.major;
1706 /* We adapt to the lowest compatible version */
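/* (i.e. use the lower of the relay daemon's and the peer's minor versions) */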
1707 if (reply.minor <= msg.minor) {
1708 conn->minor = reply.minor;
1709 } else {
1710 conn->minor = msg.minor;
1711 }
1712
1713 reply.major = htobe32(reply.major);
1714 reply.minor = htobe32(reply.minor);
1715 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1716 sizeof(reply), 0);
1717 if (send_ret < (ssize_t) sizeof(reply)) {
1718 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1719 send_ret);
1720 ret = -1;
1721 goto end;
1722 } else {
1723 ret = 0;
1724 }
1725
1726 if (!compatible) {
1727 ret = -1;
1728 goto end;
1729 }
1730
1731 DBG("Version check done using protocol %u.%u", conn->major,
1732 conn->minor);
1733
1734 end:
1735 return ret;
1736 }
1737
1738 /*
1739 * Check for data pending for a given stream id from the session daemon.
1740 */
1741 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1742 struct relay_connection *conn,
1743 const struct lttng_buffer_view *payload)
1744 {
1745 struct relay_session *session = conn->session;
1746 struct lttcomm_relayd_data_pending msg;
1747 struct lttcomm_relayd_generic_reply reply;
1748 struct relay_stream *stream;
1749 ssize_t send_ret;
1750 int ret;
1751 uint64_t stream_seq;
1752
1753 DBG("Data pending command received");
1754
1755 if (!session || !conn->version_check_done) {
1756 ERR("Trying to check for data before version check");
1757 ret = -1;
1758 goto end_no_session;
1759 }
1760
1761 if (payload->size < sizeof(msg)) {
1762 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
1763 sizeof(msg), payload->size);
1764 ret = -1;
1765 goto end_no_session;
1766 }
1767 memcpy(&msg, payload->data, sizeof(msg));
1768 msg.stream_id = be64toh(msg.stream_id);
1769 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
1770
1771 stream = stream_get_by_id(msg.stream_id);
1772 if (stream == NULL) {
1773 ret = -1;
1774 goto end;
1775 }
1776
1777 pthread_mutex_lock(&stream->lock);
1778
1779 if (session_streams_have_index(session)) {
1780 /*
1781 * Ensure that both the index and stream data have been
1782 * flushed up to the requested point.
1783 */
1784 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
1785 } else {
1786 stream_seq = stream->prev_data_seq;
1787 }
1788 DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
1789 ", prev_index_seq %" PRIu64
1790 ", and last_seq %" PRIu64, msg.stream_id,
1791 stream->prev_data_seq, stream->prev_index_seq,
1792 msg.last_net_seq_num);
1793
1794 /* Avoid wrapping issue */
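/*
 * The unsigned subtraction followed by a signed cast keeps the comparison
 * valid even if the sequence counter has wrapped around.
 */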
1795 if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
1796 /* Data has in fact been written and is NOT pending */
1797 ret = 0;
1798 } else {
1799 /* Data still being streamed thus pending */
1800 ret = 1;
1801 }
1802
1803 stream->data_pending_check_done = true;
1804 pthread_mutex_unlock(&stream->lock);
1805
1806 stream_put(stream);
1807 end:
1808
1809 memset(&reply, 0, sizeof(reply));
1810 reply.ret_code = htobe32(ret);
1811 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1812 if (send_ret < (ssize_t) sizeof(reply)) {
1813 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
1814 send_ret);
1815 ret = -1;
1816 }
1817
1818 end_no_session:
1819 return ret;
1820 }
1821
1822 /*
1823 * Wait for the control socket to reach a quiescent state.
1824 *
1825 * Note that for now, when receiving this command from the session
1826 * daemon, this means that every subsequent commands or data received on
1827 * the control socket has been handled. So, this is why we simply return
1828 * OK here.
1829 */
1830 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
1831 struct relay_connection *conn,
1832 const struct lttng_buffer_view *payload)
1833 {
1834 int ret;
1835 ssize_t send_ret;
1836 struct relay_stream *stream;
1837 struct lttcomm_relayd_quiescent_control msg;
1838 struct lttcomm_relayd_generic_reply reply;
1839
1840 DBG("Checking quiescent state on control socket");
1841
1842 if (!conn->session || !conn->version_check_done) {
1843 ERR("Trying to check for data before version check");
1844 ret = -1;
1845 goto end_no_session;
1846 }
1847
1848 if (payload->size < sizeof(msg)) {
1849 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
1850 sizeof(msg), payload->size);
1851 ret = -1;
1852 goto end_no_session;
1853 }
1854 memcpy(&msg, payload->data, sizeof(msg));
1855 msg.stream_id = be64toh(msg.stream_id);
1856
1857 stream = stream_get_by_id(msg.stream_id);
1858 if (!stream) {
1859 goto reply;
1860 }
1861 pthread_mutex_lock(&stream->lock);
1862 stream->data_pending_check_done = true;
1863 pthread_mutex_unlock(&stream->lock);
1864
1865 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
1866 stream_put(stream);
1867 reply:
1868 memset(&reply, 0, sizeof(reply));
1869 reply.ret_code = htobe32(LTTNG_OK);
1870 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1871 if (send_ret < (ssize_t) sizeof(reply)) {
1872 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
1873 send_ret);
1874 ret = -1;
1875 } else {
1876 ret = 0;
1877 }
1878
1879 end_no_session:
1880 return ret;
1881 }
1882
1883 /*
1884 * Initialize a data pending command. This means that a consumer is about
1885 * to ask for data pending for each stream it holds. Simply iterate over
1886 * all streams of a session and set the data_pending_check_done flag.
1887 *
1888 * This command returns to the client a LTTNG_OK code.
1889 */
1890 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1891 struct relay_connection *conn,
1892 const struct lttng_buffer_view *payload)
1893 {
1894 int ret;
1895 ssize_t send_ret;
1896 struct lttng_ht_iter iter;
1897 struct lttcomm_relayd_begin_data_pending msg;
1898 struct lttcomm_relayd_generic_reply reply;
1899 struct relay_stream *stream;
1900
1901 assert(recv_hdr);
1902 assert(conn);
1903
1904 DBG("Init streams for data pending");
1905
1906 if (!conn->session || !conn->version_check_done) {
1907 ERR("Trying to check for data before version check");
1908 ret = -1;
1909 goto end_no_session;
1910 }
1911
1912 if (payload->size < sizeof(msg)) {
1913 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
1914 sizeof(msg), payload->size);
1915 ret = -1;
1916 goto end_no_session;
1917 }
1918 memcpy(&msg, payload->data, sizeof(msg));
1919 msg.session_id = be64toh(msg.session_id);
1920
1921 /*
1922 * Iterate over all streams to set the begin data pending flag.
1923 * For now, the streams are indexed by stream handle so we have
1924 * to iterate over all streams to find the one associated with
1925 * the right session_id.
1926 */
1927 rcu_read_lock();
1928 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1929 node.node) {
1930 if (!stream_get(stream)) {
1931 continue;
1932 }
1933 if (stream->trace->session->id == msg.session_id) {
1934 pthread_mutex_lock(&stream->lock);
1935 stream->data_pending_check_done = false;
1936 pthread_mutex_unlock(&stream->lock);
1937 DBG("Set begin data pending flag to stream %" PRIu64,
1938 stream->stream_handle);
1939 }
1940 stream_put(stream);
1941 }
1942 rcu_read_unlock();
1943
1944 memset(&reply, 0, sizeof(reply));
1945 /* All good, send back reply. */
1946 reply.ret_code = htobe32(LTTNG_OK);
1947
1948 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1949 if (send_ret < (ssize_t) sizeof(reply)) {
1950 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
1951 send_ret);
1952 ret = -1;
1953 } else {
1954 ret = 0;
1955 }
1956
1957 end_no_session:
1958 return ret;
1959 }
1960
1961 /*
1962 * End data pending command. This will check, for a given session id, if
1963 * each stream associated with it has its data_pending_check_done flag
1964 * set. If not, this means that the client lost track of the stream but
1965 * the data is still being streamed on our side. In this case, we inform
1966 * the client that data is in flight.
1967 *
1968 * Return to the client if there is data in flight or not with a ret_code.
1969 */
1970 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1971 struct relay_connection *conn,
1972 const struct lttng_buffer_view *payload)
1973 {
1974 int ret;
1975 ssize_t send_ret;
1976 struct lttng_ht_iter iter;
1977 struct lttcomm_relayd_end_data_pending msg;
1978 struct lttcomm_relayd_generic_reply reply;
1979 struct relay_stream *stream;
1980 uint32_t is_data_inflight = 0;
1981
1982 DBG("End data pending command");
1983
1984 if (!conn->session || !conn->version_check_done) {
1985 ERR("Trying to check for data before version check");
1986 ret = -1;
1987 goto end_no_session;
1988 }
1989
1990 if (payload->size < sizeof(msg)) {
1991 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
1992 sizeof(msg), payload->size);
1993 ret = -1;
1994 goto end_no_session;
1995 }
1996 memcpy(&msg, payload->data, sizeof(msg));
1997 msg.session_id = be64toh(msg.session_id);
1998
1999 /*
2000 * Iterate over all streams to see if the begin data pending
2001 * flag is set.
2002 */
2003 rcu_read_lock();
2004 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2005 node.node) {
2006 if (!stream_get(stream)) {
2007 continue;
2008 }
2009 if (stream->trace->session->id != msg.session_id) {
2010 stream_put(stream);
2011 continue;
2012 }
2013 pthread_mutex_lock(&stream->lock);
2014 if (!stream->data_pending_check_done) {
2015 uint64_t stream_seq;
2016
2017 if (session_streams_have_index(conn->session)) {
2018 /*
2019 * Ensure that both the index and stream data have been
2020 * flushed up to the requested point.
2021 */
2022 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
2023 } else {
2024 stream_seq = stream->prev_data_seq;
2025 }
2026 if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
2027 is_data_inflight = 1;
2028 DBG("Data is still in flight for stream %" PRIu64,
2029 stream->stream_handle);
2030 pthread_mutex_unlock(&stream->lock);
2031 stream_put(stream);
2032 break;
2033 }
2034 }
2035 pthread_mutex_unlock(&stream->lock);
2036 stream_put(stream);
2037 }
2038 rcu_read_unlock();
2039
2040 memset(&reply, 0, sizeof(reply));
2041 /* All good, send back reply. */
2042 reply.ret_code = htobe32(is_data_inflight);
2043
2044 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2045 if (send_ret < (ssize_t) sizeof(reply)) {
2046 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
2047 send_ret);
2048 ret = -1;
2049 } else {
2050 ret = 0;
2051 }
2052
2053 end_no_session:
2054 return ret;
2055 }
2056
2057 /*
2058 * Receive an index for a specific stream.
2059 *
2060 * Return 0 on success else a negative value.
2061 */
2062 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2063 struct relay_connection *conn,
2064 const struct lttng_buffer_view *payload)
2065 {
2066 int ret;
2067 ssize_t send_ret;
2068 struct relay_session *session = conn->session;
2069 struct lttcomm_relayd_index index_info;
2070 struct lttcomm_relayd_generic_reply reply;
2071 struct relay_stream *stream;
2072 size_t msg_len;
2073
2074 assert(conn);
2075
2076 DBG("Relay receiving index");
2077
2078 if (!session || !conn->version_check_done) {
2079 ERR("Trying to close a stream before version check");
2080 ret = -1;
2081 goto end_no_session;
2082 }
2083
2084 msg_len = lttcomm_relayd_index_len(
2085 lttng_to_index_major(conn->major, conn->minor),
2086 lttng_to_index_minor(conn->major, conn->minor));
2087 if (payload->size < msg_len) {
2088 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2089 msg_len, payload->size);
2090 ret = -1;
2091 goto end_no_session;
2092 }
2093 memcpy(&index_info, payload->data, msg_len);
2094 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2095 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2096 index_info.packet_size = be64toh(index_info.packet_size);
2097 index_info.content_size = be64toh(index_info.content_size);
2098 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2099 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2100 index_info.events_discarded = be64toh(index_info.events_discarded);
2101 index_info.stream_id = be64toh(index_info.stream_id);
2102
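/*
 * Descriptive note (added): stream_instance_id and packet_seq_num only
 * exist in the index format transmitted by 2.8 and newer peers, so they
 * are byte-swapped only when the advertised protocol provides them.
 */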
2103 if (conn->minor >= 8) {
2104 index_info.stream_instance_id =
2105 be64toh(index_info.stream_instance_id);
2106 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2107 }
2108
2109 stream = stream_get_by_id(index_info.relay_stream_id);
2110 if (!stream) {
2111 ERR("stream_get_by_id not found");
2112 ret = -1;
2113 goto end;
2114 }
2115
2116 pthread_mutex_lock(&stream->lock);
2117 ret = stream_add_index(stream, &index_info);
2118 pthread_mutex_unlock(&stream->lock);
2119 if (ret) {
2120 goto end_stream_put;
2121 }
2122
2123 end_stream_put:
2124 stream_put(stream);
2125 end:
2126 memset(&reply, 0, sizeof(reply));
2127 if (ret < 0) {
2128 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2129 } else {
2130 reply.ret_code = htobe32(LTTNG_OK);
2131 }
2132 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2133 if (send_ret < (ssize_t) sizeof(reply)) {
2134 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2135 ret = -1;
2136 }
2137
2138 end_no_session:
2139 return ret;
2140 }
2141
2142 /*
2143 * Receive the streams_sent message.
2144 *
2145 * Return 0 on success else a negative value.
2146 */
2147 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2148 struct relay_connection *conn,
2149 const struct lttng_buffer_view *payload)
2150 {
2151 int ret;
2152 ssize_t send_ret;
2153 struct lttcomm_relayd_generic_reply reply;
2154
2155 assert(conn);
2156
2157 DBG("Relay receiving streams_sent");
2158
2159 if (!conn->session || !conn->version_check_done) {
2160 ERR("Trying to close a stream before version check");
2161 ret = -1;
2162 goto end_no_session;
2163 }
2164
2165 /*
2166 * Publish every pending stream in the connection's recv list; they are
2167 * now ready to be used by the viewer.
2168 */
2169 publish_connection_local_streams(conn);
2170
2171 memset(&reply, 0, sizeof(reply));
2172 reply.ret_code = htobe32(LTTNG_OK);
2173 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2174 if (send_ret < (ssize_t) sizeof(reply)) {
2175 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2176 send_ret);
2177 ret = -1;
2178 } else {
2179 /* Success. */
2180 ret = 0;
2181 }
2182
2183 end_no_session:
2184 return ret;
2185 }
2186
2187 /*
2188 * relay_rotate_session_streams: rotate a set of streams as part of the
2189 * session rotation feature (not the tracefile rotation feature).
2190 */
2191 static int relay_rotate_session_streams(
2192 const struct lttcomm_relayd_hdr *recv_hdr,
2193 struct relay_connection *conn,
2194 const struct lttng_buffer_view *payload)
2195 {
2196 int ret = 0;
2197 uint32_t i;
2198 ssize_t send_ret;
2199 enum lttng_error_code reply_code = LTTNG_ERR_UNK;
2200 struct relay_session *session = conn->session;
2201 struct lttcomm_relayd_rotate_streams rotate_streams;
2202 struct lttcomm_relayd_generic_reply reply = {};
2203 struct relay_stream *stream = NULL;
2204 const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams);
2205 struct lttng_trace_chunk *next_trace_chunk = NULL;
2206 struct lttng_buffer_view stream_positions;
2207 char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
2208 const char *chunk_id_str = "none";
2209
2210 if (!session || !conn->version_check_done) {
2211 ERR("Trying to rotate a stream before version check");
2212 ret = -1;
2213 goto end_no_reply;
2214 }
2215
2216 if (session->major == 2 && session->minor < 11) {
2217 ERR("Unsupported feature before 2.11");
2218 ret = -1;
2219 goto end_no_reply;
2220 }
2221
2222 if (payload->size < header_len) {
2223 ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
2224 header_len, payload->size);
2225 ret = -1;
2226 goto end_no_reply;
2227 }
2228
2229 memcpy(&rotate_streams, payload->data, header_len);
2230
2231 /* Convert header to host endianness. */
2232 rotate_streams = (typeof(rotate_streams)) {
2233 .stream_count = be32toh(rotate_streams.stream_count),
2234 .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) {
2235 .is_set = !!rotate_streams.new_chunk_id.is_set,
2236 .value = be64toh(rotate_streams.new_chunk_id.value),
2237 }
2238 };
2239
2240 if (rotate_streams.new_chunk_id.is_set) {
2241 /*
2242 * Retrieve the trace chunk the stream must transition to. As
2243 * per the protocol, this chunk should have been created
2244 * before this command is received.
2245 */
2246 next_trace_chunk = sessiond_trace_chunk_registry_get_chunk(
2247 sessiond_trace_chunk_registry,
2248 session->sessiond_uuid, session->id,
2249 rotate_streams.new_chunk_id.value);
2250 if (!next_trace_chunk) {
2251 char uuid_str[UUID_STR_LEN];
2252
2253 lttng_uuid_to_str(session->sessiond_uuid, uuid_str);
2254 ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64
2255 ", trace_chunk_id = %" PRIu64,
2256 uuid_str, session->id,
2257 rotate_streams.new_chunk_id.value);
2258 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2259 ret = -1;
2260 goto end;
2261 }
2262
2263 ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64,
2264 rotate_streams.new_chunk_id.value);
2265 if (ret < 0 || ret >= sizeof(chunk_id_buf)) {
2266 chunk_id_str = "formatting error";
2267 } else {
2268 chunk_id_str = chunk_id_buf;
2269 }
2270 session->has_rotated = true;
2271 }
2272
2273 DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"",
2274 rotate_streams.stream_count, session->session_name,
2275 chunk_id_str);
2276
2277 stream_positions = lttng_buffer_view_from_view(payload,
2278 sizeof(rotate_streams), -1);
2279 if (!stream_positions.data ||
2280 stream_positions.size <
2281 (rotate_streams.stream_count *
2282 sizeof(struct lttcomm_relayd_stream_rotation_position))) {
2283 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2284 ret = -1;
2285 goto end;
2286 }
2287
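/*
 * Descriptive note (added): the rotation positions follow the fixed-size
 * header, one lttcomm_relayd_stream_rotation_position entry per stream;
 * each entry is converted from network byte order before use.
 */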
2288 for (i = 0; i < rotate_streams.stream_count; i++) {
2289 struct lttcomm_relayd_stream_rotation_position *position_comm =
2290 &((typeof(position_comm)) stream_positions.data)[i];
2291 const struct lttcomm_relayd_stream_rotation_position pos = {
2292 .stream_id = be64toh(position_comm->stream_id),
2293 .rotate_at_seq_num = be64toh(
2294 position_comm->rotate_at_seq_num),
2295 };
2296
2297 stream = stream_get_by_id(pos.stream_id);
2298 if (!stream) {
2299 reply_code = LTTNG_ERR_INVALID;
2300 ret = -1;
2301 goto end;
2302 }
2303
2304 pthread_mutex_lock(&stream->lock);
2305 ret = stream_set_pending_rotation(stream, next_trace_chunk,
2306 pos.rotate_at_seq_num);
2307 pthread_mutex_unlock(&stream->lock);
2308 if (ret) {
2309 reply_code = LTTNG_ERR_FILE_CREATION_ERROR;
2310 goto end;
2311 }
2312
2313 stream_put(stream);
2314 stream = NULL;
2315 }
2316
2317 reply_code = LTTNG_OK;
2318 ret = 0;
2319 end:
2320 if (stream) {
2321 stream_put(stream);
2322 }
2323
2324 reply.ret_code = htobe32((uint32_t) reply_code);
2325 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2326 sizeof(struct lttcomm_relayd_generic_reply), 0);
2327 if (send_ret < (ssize_t) sizeof(reply)) {
2328 ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
2329 send_ret);
2330 ret = -1;
2331 }
2332 end_no_reply:
2333 lttng_trace_chunk_put(next_trace_chunk);
2334 return ret;
2335 }
2336
2337
2339 /*
2340 * relay_create_trace_chunk: create a new trace chunk
2341 */
2342 static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2343 struct relay_connection *conn,
2344 const struct lttng_buffer_view *payload)
2345 {
2346 int ret = 0;
2347 ssize_t send_ret;
2348 struct relay_session *session = conn->session;
2349 struct lttcomm_relayd_create_trace_chunk *msg;
2350 struct lttcomm_relayd_generic_reply reply = {};
2351 struct lttng_buffer_view header_view;
2352 struct lttng_buffer_view chunk_name_view;
2353 struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL;
2354 enum lttng_error_code reply_code = LTTNG_OK;
2355 enum lttng_trace_chunk_status chunk_status;
2356 struct lttng_directory_handle session_output;
2357
2358 if (!session || !conn->version_check_done) {
2359 ERR("Trying to create a trace chunk before version check");
2360 ret = -1;
2361 goto end_no_reply;
2362 }
2363
2364 if (session->major == 2 && session->minor < 11) {
2365 ERR("Chunk creation command is unsupported before 2.11");
2366 ret = -1;
2367 goto end_no_reply;
2368 }
2369
2370 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2371 if (!header_view.data) {
2372 ERR("Failed to receive payload of chunk creation command");
2373 ret = -1;
2374 goto end_no_reply;
2375 }
2376
2377 /* Convert to host endianness. */
2378 msg = (typeof(msg)) header_view.data;
2379 msg->chunk_id = be64toh(msg->chunk_id);
2380 msg->creation_timestamp = be64toh(msg->creation_timestamp);
2381 msg->override_name_length = be32toh(msg->override_name_length);
2382
2383 chunk = lttng_trace_chunk_create(
2384 msg->chunk_id, msg->creation_timestamp);
2385 if (!chunk) {
2386 ERR("Failed to create trace chunk in trace chunk creation command");
2387 ret = -1;
2388 reply_code = LTTNG_ERR_NOMEM;
2389 goto end;
2390 }
2391
2392 if (msg->override_name_length) {
2393 const char *name;
2394
2395 chunk_name_view = lttng_buffer_view_from_view(payload,
2396 sizeof(*msg),
2397 msg->override_name_length);
2398 name = chunk_name_view.data;
2399 if (!name || name[msg->override_name_length - 1]) {
2400 ERR("Failed to receive payload of chunk creation command");
2401 ret = -1;
2402 reply_code = LTTNG_ERR_INVALID;
2403 goto end;
2404 }
2405
2406 chunk_status = lttng_trace_chunk_override_name(
2407 chunk, chunk_name_view.data);
2408 switch (chunk_status) {
2409 case LTTNG_TRACE_CHUNK_STATUS_OK:
2410 break;
2411 case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT:
2412 ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)");
2413 reply_code = LTTNG_ERR_INVALID;
2414 ret = -1;
2415 goto end;
2416 default:
2417 ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)");
2418 reply_code = LTTNG_ERR_UNK;
2419 ret = -1;
2420 goto end;
2421 }
2422 }
2423
2424 chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk);
2425 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2426 reply_code = LTTNG_ERR_UNK;
2427 ret = -1;
2428 goto end;
2429 }
2430
2431 ret = session_init_output_directory_handle(
2432 conn->session, &session_output);
2433 if (ret) {
2434 reply_code = LTTNG_ERR_CREATE_DIR_FAIL;
2435 goto end;
2436 }
2437 chunk_status = lttng_trace_chunk_set_as_owner(chunk, &session_output);
2438 lttng_directory_handle_fini(&session_output);
2439 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2440 reply_code = LTTNG_ERR_UNK;
2441 ret = -1;
2442 goto end;
2443 }
2444
2445 published_chunk = sessiond_trace_chunk_registry_publish_chunk(
2446 sessiond_trace_chunk_registry,
2447 conn->session->sessiond_uuid,
2448 conn->session->id,
2449 chunk);
2450 if (!published_chunk) {
2451 char uuid_str[UUID_STR_LEN];
2452
2453 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2454 ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2455 uuid_str,
2456 conn->session->id,
2457 msg->chunk_id);
2458 ret = -1;
2459 reply_code = LTTNG_ERR_NOMEM;
2460 goto end;
2461 }
2462
2463 pthread_mutex_lock(&conn->session->lock);
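/*
 * Descriptive note (added): the new chunk becomes the session's current
 * trace chunk; the previous current chunk, if any, becomes the chunk
 * pending closure until a matching close command is received.
 */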
2464 if (conn->session->pending_closure_trace_chunk) {
2465 /*
2466 * Invalid; this means a second create_trace_chunk command was
2467 * received before a close_trace_chunk.
2468 */
2469 ERR("Invalid trace chunk close command received; a trace chunk is already waiting for a trace chunk close command");
2470 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2471 ret = -1;
2472 goto end_unlock_session;
2473 }
2474 conn->session->pending_closure_trace_chunk =
2475 conn->session->current_trace_chunk;
2476 conn->session->current_trace_chunk = published_chunk;
2477 published_chunk = NULL;
2478 end_unlock_session:
2479 pthread_mutex_unlock(&conn->session->lock);
2480 end:
2481 reply.ret_code = htobe32((uint32_t) reply_code);
2482 send_ret = conn->sock->ops->sendmsg(conn->sock,
2483 &reply,
2484 sizeof(struct lttcomm_relayd_generic_reply),
2485 0);
2486 if (send_ret < (ssize_t) sizeof(reply)) {
2487 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2488 send_ret);
2489 ret = -1;
2490 }
2491 end_no_reply:
2492 lttng_trace_chunk_put(chunk);
2493 lttng_trace_chunk_put(published_chunk);
2494 return ret;
2495 }
2496
2497 /*
2498 * relay_close_trace_chunk: close a trace chunk
2499 */
2500 static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2501 struct relay_connection *conn,
2502 const struct lttng_buffer_view *payload)
2503 {
2504 int ret = 0, buf_ret;
2505 ssize_t send_ret;
2506 struct relay_session *session = conn->session;
2507 struct lttcomm_relayd_close_trace_chunk *msg;
2508 struct lttcomm_relayd_close_trace_chunk_reply reply = {};
2509 struct lttng_buffer_view header_view;
2510 struct lttng_trace_chunk *chunk = NULL;
2511 enum lttng_error_code reply_code = LTTNG_OK;
2512 enum lttng_trace_chunk_status chunk_status;
2513 uint64_t chunk_id;
2514 LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {};
2515 time_t close_timestamp;
2516 char closed_trace_chunk_path[LTTNG_PATH_MAX];
2517 size_t path_length = 0;
2518 const char *chunk_name = NULL;
2519 struct lttng_dynamic_buffer reply_payload;
2520
2521 lttng_dynamic_buffer_init(&reply_payload);
2522
2523 if (!session || !conn->version_check_done) {
2524 ERR("Trying to close a trace chunk before version check");
2525 ret = -1;
2526 goto end_no_reply;
2527 }
2528
2529 if (session->major == 2 && session->minor < 11) {
2530 ERR("Chunk close command is unsupported before 2.11");
2531 ret = -1;
2532 goto end_no_reply;
2533 }
2534
2535 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2536 if (!header_view.data) {
2537 ERR("Failed to receive payload of chunk close command");
2538 ret = -1;
2539 goto end_no_reply;
2540 }
2541
2542 /* Convert to host endianness. */
2543 msg = (typeof(msg)) header_view.data;
2544 chunk_id = be64toh(msg->chunk_id);
2545 close_timestamp = (time_t) be64toh(msg->close_timestamp);
2546 close_command = (typeof(close_command)){
2547 .value = be32toh(msg->close_command.value),
2548 .is_set = msg->close_command.is_set,
2549 };
2550
2551 chunk = sessiond_trace_chunk_registry_get_chunk(
2552 sessiond_trace_chunk_registry,
2553 conn->session->sessiond_uuid,
2554 conn->session->id,
2555 chunk_id);
2556 if (!chunk) {
2557 char uuid_str[UUID_STR_LEN];
2558
2559 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2560 ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2561 uuid_str,
2562 conn->session->id,
2563 msg->chunk_id);
2564 ret = -1;
2565 reply_code = LTTNG_ERR_NOMEM;
2566 goto end;
2567 }
2568
2569 pthread_mutex_lock(&session->lock);
2570 if (session->pending_closure_trace_chunk &&
2571 session->pending_closure_trace_chunk != chunk) {
2572 ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure",
2573 session->session_name);
2574 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2575 ret = -1;
2576 goto end_unlock_session;
2577 }
2578
2579 chunk_status = lttng_trace_chunk_set_close_timestamp(
2580 chunk, close_timestamp);
2581 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2582 ERR("Failed to set trace chunk close timestamp");
2583 ret = -1;
2584 reply_code = LTTNG_ERR_UNK;
2585 goto end_unlock_session;
2586 }
2587
2588 if (close_command.is_set) {
2589 chunk_status = lttng_trace_chunk_set_close_command(
2590 chunk, close_command.value);
2591 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2592 ret = -1;
2593 reply_code = LTTNG_ERR_INVALID;
2594 goto end_unlock_session;
2595 }
2596 }
2597 chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL);
2598 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2599 ERR("Failed to get chunk name");
2600 ret = -1;
2601 reply_code = LTTNG_ERR_UNK;
2602 goto end_unlock_session;
2603 }
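/*
 * Descriptive note (added): the chunk path reported back to the session
 * daemon is the session output directory itself when the session never
 * rotated and is not a snapshot session, the chunk's own directory for
 * snapshot sessions, and the archives directory otherwise.
 */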
2604 if (!session->has_rotated && !session->snapshot) {
2605 ret = lttng_strncpy(closed_trace_chunk_path,
2606 session->output_path,
2607 sizeof(closed_trace_chunk_path));
2608 if (ret) {
2609 ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes",
2610 strlen(session->output_path),
2611 sizeof(closed_trace_chunk_path));
2612 reply_code = LTTNG_ERR_NOMEM;
2613 ret = -1;
2614 goto end_unlock_session;
2615 }
2616 } else {
2617 if (session->snapshot) {
2618 ret = snprintf(closed_trace_chunk_path,
2619 sizeof(closed_trace_chunk_path),
2620 "%s/%s", session->output_path,
2621 chunk_name);
2622 } else {
2623 ret = snprintf(closed_trace_chunk_path,
2624 sizeof(closed_trace_chunk_path),
2625 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY
2626 "/%s",
2627 session->output_path, chunk_name);
2628 }
2629 if (ret < 0 || ret >= sizeof(closed_trace_chunk_path)) {
2630 ERR("Failed to format closed trace chunk resulting path");
2631 reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM;
2632 ret = -1;
2633 goto end_unlock_session;
2634 }
2635 }
2636 DBG("Reply chunk path on close: %s", closed_trace_chunk_path);
2637 path_length = strlen(closed_trace_chunk_path) + 1;
2638 if (path_length > UINT32_MAX) {
2639 ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol");
2640 ret = -1;
2641 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2642 goto end_unlock_session;
2643 }
2644
2645 if (session->current_trace_chunk == chunk) {
2646 /*
2647 * After a trace chunk close command, no new streams
2648 * referencing the chunk may be created. Hence, in the
2649 * event that no new trace chunk has been created for
2650 * the session, the reference to the current trace chunk
2651 * is released in order to allow it to be reclaimed when
2652 * the last stream releases its reference to it.
2653 */
2654 lttng_trace_chunk_put(session->current_trace_chunk);
2655 session->current_trace_chunk = NULL;
2656 }
2657 lttng_trace_chunk_put(session->pending_closure_trace_chunk);
2658 session->pending_closure_trace_chunk = NULL;
2659 end_unlock_session:
2660 pthread_mutex_unlock(&session->lock);
2661
2662 end:
2663 reply.generic.ret_code = htobe32((uint32_t) reply_code);
2664 reply.path_length = htobe32((uint32_t) path_length);
2665 buf_ret = lttng_dynamic_buffer_append(
2666 &reply_payload, &reply, sizeof(reply));
2667 if (buf_ret) {
2668 ERR("Failed to append \"close trace chunk\" command reply header to payload buffer");
2669 goto end_no_reply;
2670 }
2671
2672 if (reply_code == LTTNG_OK) {
2673 buf_ret = lttng_dynamic_buffer_append(&reply_payload,
2674 closed_trace_chunk_path, path_length);
2675 if (buf_ret) {
2676 ERR("Failed to append \"close trace chunk\" command reply path to payload buffer");
2677 goto end_no_reply;
2678 }
2679 }
2680
2681 send_ret = conn->sock->ops->sendmsg(conn->sock,
2682 reply_payload.data,
2683 reply_payload.size,
2684 0);
2685 if (send_ret < (ssize_t) reply_payload.size) {
2686 ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)",
2687 reply_payload.size, send_ret);
2688 ret = -1;
2689 goto end_no_reply;
2690 }
2691 end_no_reply:
2692 lttng_trace_chunk_put(chunk);
2693 lttng_dynamic_buffer_reset(&reply_payload);
2694 return ret;
2695 }
2696
2697 /*
2698 * relay_trace_chunk_exists: check if a trace chunk exists
2699 */
2700 static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr,
2701 struct relay_connection *conn,
2702 const struct lttng_buffer_view *payload)
2703 {
2704 int ret = 0;
2705 ssize_t send_ret;
2706 struct relay_session *session = conn->session;
2707 struct lttcomm_relayd_trace_chunk_exists *msg;
2708 struct lttcomm_relayd_trace_chunk_exists_reply reply = {};
2709 struct lttng_buffer_view header_view;
2710 uint64_t chunk_id;
2711 bool chunk_exists;
2712
2713 if (!session || !conn->version_check_done) {
2714 ERR("Trying to close a trace chunk before version check");
2715 ret = -1;
2716 goto end_no_reply;
2717 }
2718
2719 if (session->major == 2 && session->minor < 11) {
2720 ERR("Chunk close command is unsupported before 2.11");
2721 ret = -1;
2722 goto end_no_reply;
2723 }
2724
2725 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2726 if (!header_view.data) {
2727 ERR("Failed to receive payload of chunk close command");
2728 ret = -1;
2729 goto end_no_reply;
2730 }
2731
2732 /* Convert to host endianness. */
2733 msg = (typeof(msg)) header_view.data;
2734 chunk_id = be64toh(msg->chunk_id);
2735
2736 ret = sessiond_trace_chunk_registry_chunk_exists(
2737 sessiond_trace_chunk_registry,
2738 conn->session->sessiond_uuid,
2739 conn->session->id,
2740 chunk_id, &chunk_exists);
2741 /*
2742 * If ret is not 0, send the reply and report the error to the caller.
2743 * It is a protocol (or internal) error and the session/connection
2744 * should be torn down.
2745 */
2746 reply = (typeof(reply)){
2747 .generic.ret_code = htobe32((uint32_t)
2748 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
2749 .trace_chunk_exists = ret == 0 ? chunk_exists : 0,
2750 };
2751 send_ret = conn->sock->ops->sendmsg(
2752 conn->sock, &reply, sizeof(reply), 0);
2753 if (send_ret < (ssize_t) sizeof(reply)) {
2754 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2755 send_ret);
2756 ret = -1;
2757 }
2758 end_no_reply:
2759 return ret;
2760 }
2761
2762 #define DBG_CMD(cmd_name, conn) \
2763 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
2764
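/*
 * Descriptive note (added): dispatch a fully-received control command to
 * its handler according to the command id carried in the header. Each
 * handler is responsible for sending its own reply on the control socket.
 */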
2765 static int relay_process_control_command(struct relay_connection *conn,
2766 const struct lttcomm_relayd_hdr *header,
2767 const struct lttng_buffer_view *payload)
2768 {
2769 int ret = 0;
2770
2771 switch (header->cmd) {
2772 case RELAYD_CREATE_SESSION:
2773 DBG_CMD("RELAYD_CREATE_SESSION", conn);
2774 ret = relay_create_session(header, conn, payload);
2775 break;
2776 case RELAYD_ADD_STREAM:
2777 DBG_CMD("RELAYD_ADD_STREAM", conn);
2778 ret = relay_add_stream(header, conn, payload);
2779 break;
2780 case RELAYD_START_DATA:
2781 DBG_CMD("RELAYD_START_DATA", conn);
2782 ret = relay_start(header, conn, payload);
2783 break;
2784 case RELAYD_SEND_METADATA:
2785 DBG_CMD("RELAYD_SEND_METADATA", conn);
2786 ret = relay_recv_metadata(header, conn, payload);
2787 break;
2788 case RELAYD_VERSION:
2789 DBG_CMD("RELAYD_VERSION", conn);
2790 ret = relay_send_version(header, conn, payload);
2791 break;
2792 case RELAYD_CLOSE_STREAM:
2793 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
2794 ret = relay_close_stream(header, conn, payload);
2795 break;
2796 case RELAYD_DATA_PENDING:
2797 DBG_CMD("RELAYD_DATA_PENDING", conn);
2798 ret = relay_data_pending(header, conn, payload);
2799 break;
2800 case RELAYD_QUIESCENT_CONTROL:
2801 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
2802 ret = relay_quiescent_control(header, conn, payload);
2803 break;
2804 case RELAYD_BEGIN_DATA_PENDING:
2805 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
2806 ret = relay_begin_data_pending(header, conn, payload);
2807 break;
2808 case RELAYD_END_DATA_PENDING:
2809 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
2810 ret = relay_end_data_pending(header, conn, payload);
2811 break;
2812 case RELAYD_SEND_INDEX:
2813 DBG_CMD("RELAYD_SEND_INDEX", conn);
2814 ret = relay_recv_index(header, conn, payload);
2815 break;
2816 case RELAYD_STREAMS_SENT:
2817 DBG_CMD("RELAYD_STREAMS_SENT", conn);
2818 ret = relay_streams_sent(header, conn, payload);
2819 break;
2820 case RELAYD_RESET_METADATA:
2821 DBG_CMD("RELAYD_RESET_METADATA", conn);
2822 ret = relay_reset_metadata(header, conn, payload);
2823 break;
2824 case RELAYD_ROTATE_STREAMS:
2825 DBG_CMD("RELAYD_ROTATE_STREAMS", conn);
2826 ret = relay_rotate_session_streams(header, conn, payload);
2827 break;
2828 case RELAYD_CREATE_TRACE_CHUNK:
2829 DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn);
2830 ret = relay_create_trace_chunk(header, conn, payload);
2831 break;
2832 case RELAYD_CLOSE_TRACE_CHUNK:
2833 DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn);
2834 ret = relay_close_trace_chunk(header, conn, payload);
2835 break;
2836 case RELAYD_TRACE_CHUNK_EXISTS:
2837 DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn);
2838 ret = relay_trace_chunk_exists(header, conn, payload);
2839 break;
2840 case RELAYD_UPDATE_SYNC_INFO:
2841 default:
2842 ERR("Received unknown command (%u)", header->cmd);
2843 relay_unknown_command(conn);
2844 ret = -1;
2845 goto end;
2846 }
2847
2848 end:
2849 return ret;
2850 }
2851
2852 static enum relay_connection_status relay_process_control_receive_payload(
2853 struct relay_connection *conn)
2854 {
2855 int ret = 0;
2856 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2857 struct lttng_dynamic_buffer *reception_buffer =
2858 &conn->protocol.ctrl.reception_buffer;
2859 struct ctrl_connection_state_receive_payload *state =
2860 &conn->protocol.ctrl.state.receive_payload;
2861 struct lttng_buffer_view payload_view;
2862
2863 if (state->left_to_receive == 0) {
2864 /* Short-circuit for payload-less commands. */
2865 goto reception_complete;
2866 }
2867
2868 ret = conn->sock->ops->recvmsg(conn->sock,
2869 reception_buffer->data + state->received,
2870 state->left_to_receive, MSG_DONTWAIT);
2871 if (ret < 0) {
2872 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2873 PERROR("Unable to receive command payload on sock %d",
2874 conn->sock->fd);
2875 status = RELAY_CONNECTION_STATUS_ERROR;
2876 }
2877 goto end;
2878 } else if (ret == 0) {
2879 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2880 status = RELAY_CONNECTION_STATUS_CLOSED;
2881 goto end;
2882 }
2883
2884 assert(ret > 0);
2885 assert(ret <= state->left_to_receive);
2886
2887 state->left_to_receive -= ret;
2888 state->received += ret;
2889
2890 if (state->left_to_receive > 0) {
2891 /*
2892 * Can't transition to the protocol's next state, wait to
2893 * receive the rest of the payload.
2894 */
2895 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2896 state->received, state->left_to_receive,
2897 conn->sock->fd);
2898 goto end;
2899 }
2900
2901 reception_complete:
2902 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
2903 conn->sock->fd, state->received);
2904 /*
2905 * The payload required to process the command has been received.
2906 * A view to the reception buffer is forwarded to the various
2907 * commands and the state of the control is reset on success.
2908 *
2909 * Commands are responsible for sending their reply to the peer.
2910 */
2911 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
2912 0, -1);
2913 ret = relay_process_control_command(conn,
2914 &state->header, &payload_view);
2915 if (ret < 0) {
2916 status = RELAY_CONNECTION_STATUS_ERROR;
2917 goto end;
2918 }
2919
2920 ret = connection_reset_protocol_state(conn);
2921 if (ret) {
2922 status = RELAY_CONNECTION_STATUS_ERROR;
2923 }
2924 end:
2925 return status;
2926 }
2927
2928 static enum relay_connection_status relay_process_control_receive_header(
2929 struct relay_connection *conn)
2930 {
2931 int ret = 0;
2932 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2933 struct lttcomm_relayd_hdr header;
2934 struct lttng_dynamic_buffer *reception_buffer =
2935 &conn->protocol.ctrl.reception_buffer;
2936 struct ctrl_connection_state_receive_header *state =
2937 &conn->protocol.ctrl.state.receive_header;
2938
2939 assert(state->left_to_receive != 0);
2940
2941 ret = conn->sock->ops->recvmsg(conn->sock,
2942 reception_buffer->data + state->received,
2943 state->left_to_receive, MSG_DONTWAIT);
2944 if (ret < 0) {
2945 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2946 PERROR("Unable to receive control command header on sock %d",
2947 conn->sock->fd);
2948 status = RELAY_CONNECTION_STATUS_ERROR;
2949 }
2950 goto end;
2951 } else if (ret == 0) {
2952 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2953 status = RELAY_CONNECTION_STATUS_CLOSED;
2954 goto end;
2955 }
2956
2957 assert(ret > 0);
2958 assert(ret <= state->left_to_receive);
2959
2960 state->left_to_receive -= ret;
2961 state->received += ret;
2962
2963 if (state->left_to_receive > 0) {
2964 /*
2965 * Can't transition to the protocol's next state, wait to
2966 * receive the rest of the header.
2967 */
2968 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2969 state->received, state->left_to_receive,
2970 conn->sock->fd);
2971 goto end;
2972 }
2973
2974 /* Transition to next state: receiving the command's payload. */
2975 conn->protocol.ctrl.state_id =
2976 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
2977 memcpy(&header, reception_buffer->data, sizeof(header));
2978 header.circuit_id = be64toh(header.circuit_id);
2979 header.data_size = be64toh(header.data_size);
2980 header.cmd = be32toh(header.cmd);
2981 header.cmd_version = be32toh(header.cmd_version);
2982 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
2983 &header, sizeof(header));
2984
2985 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
2986 conn->sock->fd, header.cmd, header.cmd_version,
2987 header.data_size);
2988
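/*
 * Descriptive note (added): reject unreasonable payload sizes before
 * resizing the reception buffer; this bounds the amount of memory a
 * misbehaving peer can make the relay daemon allocate.
 */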
2989 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
2990 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
2991 header.data_size);
2992 status = RELAY_CONNECTION_STATUS_ERROR;
2993 goto end;
2994 }
2995
2996 conn->protocol.ctrl.state.receive_payload.left_to_receive =
2997 header.data_size;
2998 conn->protocol.ctrl.state.receive_payload.received = 0;
2999 ret = lttng_dynamic_buffer_set_size(reception_buffer,
3000 header.data_size);
3001 if (ret) {
3002 status = RELAY_CONNECTION_STATUS_ERROR;
3003 goto end;
3004 }
3005
3006 if (header.data_size == 0) {
3007 /*
3008 * Manually invoke the next state as the poll loop
3009 * will not wake-up to allow us to proceed further.
3010 */
3011 status = relay_process_control_receive_payload(conn);
3012 }
3013 end:
3014 return status;
3015 }
3016
3017 /*
3018 * Process the commands received on the control socket
3019 */
3020 static enum relay_connection_status relay_process_control(
3021 struct relay_connection *conn)
3022 {
3023 enum relay_connection_status status;
3024
3025 switch (conn->protocol.ctrl.state_id) {
3026 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
3027 status = relay_process_control_receive_header(conn);
3028 break;
3029 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
3030 status = relay_process_control_receive_payload(conn);
3031 break;
3032 default:
3033 ERR("Unknown control connection protocol state encountered.");
3034 abort();
3035 }
3036
3037 return status;
3038 }
3039
3040 static enum relay_connection_status relay_process_data_receive_header(
3041 struct relay_connection *conn)
3042 {
3043 int ret;
3044 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3045 struct data_connection_state_receive_header *state =
3046 &conn->protocol.data.state.receive_header;
3047 struct lttcomm_relayd_data_hdr header;
3048 struct relay_stream *stream;
3049
3050 assert(state->left_to_receive != 0);
3051
3052 ret = conn->sock->ops->recvmsg(conn->sock,
3053 state->header_reception_buffer + state->received,
3054 state->left_to_receive, MSG_DONTWAIT);
3055 if (ret < 0) {
3056 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3057 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
3058 status = RELAY_CONNECTION_STATUS_ERROR;
3059 }
3060 goto end;
3061 } else if (ret == 0) {
3062 /* Orderly shutdown. Not necessary to print an error. */
3063 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3064 status = RELAY_CONNECTION_STATUS_CLOSED;
3065 goto end;
3066 }
3067
3068 assert(ret > 0);
3069 assert(ret <= state->left_to_receive);
3070
3071 state->left_to_receive -= ret;
3072 state->received += ret;
3073
3074 if (state->left_to_receive > 0) {
3075 /*
3076 * Can't transition to the protocol's next state, wait to
3077 * receive the rest of the header.
3078 */
3079 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3080 state->received, state->left_to_receive,
3081 conn->sock->fd);
3082 goto end;
3083 }
3084
3085 /* Transition to next state: receiving the payload. */
3086 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
3087
3088 memcpy(&header, state->header_reception_buffer, sizeof(header));
3089 header.circuit_id = be64toh(header.circuit_id);
3090 header.stream_id = be64toh(header.stream_id);
3091 header.data_size = be32toh(header.data_size);
3092 header.net_seq_num = be64toh(header.net_seq_num);
3093 header.padding_size = be32toh(header.padding_size);
3094 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
3095
3096 conn->protocol.data.state.receive_payload.left_to_receive =
3097 header.data_size;
3098 conn->protocol.data.state.receive_payload.received = 0;
3099 conn->protocol.data.state.receive_payload.rotate_index = false;
3100
3101 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
3102 conn->sock->fd, header.circuit_id,
3103 header.stream_id, header.data_size,
3104 header.net_seq_num, header.padding_size);
3105
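/*
 * Descriptive note (added): look up the stream targeted by this packet and
 * prepare it for the reception of new data, which may require switching to
 * a new output file as signalled by the rotate_index flag.
 */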
3106 stream = stream_get_by_id(header.stream_id);
3107 if (!stream) {
3108 DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64,
3109 header.stream_id);
3110 /* Protocol error. */
3111 status = RELAY_CONNECTION_STATUS_ERROR;
3112 goto end;
3113 }
3114
3115 pthread_mutex_lock(&stream->lock);
3116 /* Prepare stream for the reception of a new packet. */
3117 ret = stream_init_packet(stream, header.data_size,
3118 &conn->protocol.data.state.receive_payload.rotate_index);
3119 pthread_mutex_unlock(&stream->lock);
3120 if (ret) {
3121 ERR("Failed to rotate stream output file");
3122 status = RELAY_CONNECTION_STATUS_ERROR;
3123 goto end_stream_unlock;
3124 }
3125
3126 end_stream_unlock:
3127 stream_put(stream);
3128 end:
3129 return status;
3130 }
3131
3132 static enum relay_connection_status relay_process_data_receive_payload(
3133 struct relay_connection *conn)
3134 {
3135 int ret;
3136 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3137 struct relay_stream *stream;
3138 struct data_connection_state_receive_payload *state =
3139 &conn->protocol.data.state.receive_payload;
3140 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
3141 char data_buffer[chunk_size];
3142 bool partial_recv = false;
3143 bool new_stream = false, close_requested = false, index_flushed = false;
3144 uint64_t left_to_receive = state->left_to_receive;
3145 struct relay_session *session;
3146
3147 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
3148 state->header.stream_id, state->header.net_seq_num,
3149 state->received, left_to_receive);
3150
3151 stream = stream_get_by_id(state->header.stream_id);
3152 if (!stream) {
3153 /* Protocol error. */
3154 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
3155 state->header.stream_id);
3156 status = RELAY_CONNECTION_STATUS_ERROR;
3157 goto end;
3158 }
3159
3160 pthread_mutex_lock(&stream->lock);
3161 session = stream->trace->session;
3162 if (!conn->session) {
3163 ret = connection_set_session(conn, session);
3164 if (ret) {
3165 status = RELAY_CONNECTION_STATUS_ERROR;
3166 goto end_stream_unlock;
3167 }
3168 }
3169
3170 /*
3171 * The size of the "chunk" received on any iteration is bounded by:
3172 * - the data left to receive,
3173 * - the data immediately available on the socket,
3174 * - the on-stack data buffer
3175 */
3176 while (left_to_receive > 0 && !partial_recv) {
3177 size_t recv_size = min(left_to_receive, chunk_size);
3178 struct lttng_buffer_view packet_chunk;
3179
3180 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
3181 recv_size, MSG_DONTWAIT);
3182 if (ret < 0) {
3183 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3184 PERROR("Socket %d error", conn->sock->fd);
3185 status = RELAY_CONNECTION_STATUS_ERROR;
3186 }
3187 goto end_stream_unlock;
3188 } else if (ret == 0) {
3189 /* No more data ready to be consumed on socket. */
3190 DBG3("No more data ready for consumption on data socket of stream id %" PRIu64,
3191 state->header.stream_id);
3192 status = RELAY_CONNECTION_STATUS_CLOSED;
3193 break;
3194 } else if (ret < (int) recv_size) {
3195 /*
3196 * All the data available on the socket has been
3197 * consumed.
3198 */
3199 partial_recv = true;
3200 recv_size = ret;
3201 }
3202
3203 packet_chunk = lttng_buffer_view_init(data_buffer,
3204 0, recv_size);
3205 assert(packet_chunk.data);
3206
3207 ret = stream_write(stream, &packet_chunk, 0);
3208 if (ret) {
3209 ERR("Relay error writing data to file");
3210 status = RELAY_CONNECTION_STATUS_ERROR;
3211 goto end_stream_unlock;
3212 }
3213
3214 left_to_receive -= recv_size;
3215 state->received += recv_size;
3216 state->left_to_receive = left_to_receive;
3217 }
3218
3219 if (state->left_to_receive > 0) {
3220 /*
3221 * Did not receive all the data expected, wait for more data to
3222 * become available on the socket.
3223 */
3224 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
3225 state->header.stream_id, state->received,
3226 state->left_to_receive);
3227 goto end_stream_unlock;
3228 }
3229
3230 ret = stream_write(stream, NULL, state->header.padding_size);
3231 if (ret) {
3232 status = RELAY_CONNECTION_STATUS_ERROR;
3233 goto end_stream_unlock;
3234 }
3235
3236 if (session_streams_have_index(session)) {
3237 ret = stream_update_index(stream, state->header.net_seq_num,
3238 state->rotate_index, &index_flushed,
3239 state->header.data_size + state->header.padding_size);
3240 if (ret < 0) {
3241 ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3242 stream->stream_handle,
3243 state->header.net_seq_num, ret);
3244 status = RELAY_CONNECTION_STATUS_ERROR;
3245 goto end_stream_unlock;
3246 }
3247 }
3248
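/*
 * Descriptive note (added): a previous data sequence number of -1ULL means
 * no data had been received for this stream yet; remember to flag the
 * session as having new streams once the packet is completed.
 */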
3249 if (stream->prev_data_seq == -1ULL) {
3250 new_stream = true;
3251 }
3252
3253 ret = stream_complete_packet(stream, state->header.data_size +
3254 state->header.padding_size, state->header.net_seq_num,
3255 index_flushed);
3256 if (ret) {
3257 status = RELAY_CONNECTION_STATUS_ERROR;
3258 goto end_stream_unlock;
3259 }
3260
3261 /*
3262 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
3263 * contents of *state which are aliased (union) to the same location as
3264 * the new state. Don't use it beyond this point.
3265 */
3266 connection_reset_protocol_state(conn);
3267 state = NULL;
3268
3269 end_stream_unlock:
3270 close_requested = stream->close_requested;
3271 pthread_mutex_unlock(&stream->lock);
3272 if (close_requested && left_to_receive == 0) {
3273 try_stream_close(stream);
3274 }
3275
3276 if (new_stream) {
3277 pthread_mutex_lock(&session->lock);
3278 uatomic_set(&session->new_streams, 1);
3279 pthread_mutex_unlock(&session->lock);
3280 }
3281
3282 stream_put(stream);
3283 end:
3284 return status;
3285 }
3286
3287 /*
3288 * relay_process_data: Process the data received on the data socket
3289 */
3290 static enum relay_connection_status relay_process_data(
3291 struct relay_connection *conn)
3292 {
3293 enum relay_connection_status status;
3294
3295 switch (conn->protocol.data.state_id) {
3296 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
3297 status = relay_process_data_receive_header(conn);
3298 break;
3299 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
3300 status = relay_process_data_receive_payload(conn);
3301 break;
3302 default:
3303 ERR("Unexpected data connection communication state.");
3304 abort();
3305 }
3306
3307 return status;
3308 }
3309
3310 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
3311 {
3312 int ret;
3313
3314 (void) lttng_poll_del(events, pollfd);
3315
3316 ret = close(pollfd);
3317 if (ret < 0) {
3318 ERR("Closing pollfd %d", pollfd);
3319 }
3320 }
3321
3322 static void relay_thread_close_connection(struct lttng_poll_event *events,
3323 int pollfd, struct relay_connection *conn)
3324 {
3325 const char *type_str;
3326
3327 switch (conn->type) {
3328 case RELAY_DATA:
3329 type_str = "Data";
3330 break;
3331 case RELAY_CONTROL:
3332 type_str = "Control";
3333 break;
3334 case RELAY_VIEWER_COMMAND:
3335 type_str = "Viewer Command";
3336 break;
3337 case RELAY_VIEWER_NOTIFICATION:
3338 type_str = "Viewer Notification";
3339 break;
3340 default:
3341 type_str = "Unknown";
3342 }
3343 cleanup_connection_pollfd(events, pollfd);
3344 connection_put(conn);
3345 DBG("%s connection closed with %d", type_str, pollfd);
3346 }
3347
3348 /*
3349 * This thread does the actual work
3350 */
3351 static void *relay_thread_worker(void *data)
3352 {
3353 int ret, err = -1, last_seen_data_fd = -1;
3354 uint32_t nb_fd;
3355 struct lttng_poll_event events;
3356 struct lttng_ht *relay_connections_ht;
3357 struct lttng_ht_iter iter;
3358 struct relay_connection *destroy_conn = NULL;
3359
3360 DBG("[thread] Relay worker started");
3361
3362 rcu_register_thread();
3363
3364 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
3365
3366 if (testpoint(relayd_thread_worker)) {
3367 goto error_testpoint;
3368 }
3369
3370 health_code_update();
3371
3372 /* table of connections indexed on socket */
3373 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3374 if (!relay_connections_ht) {
3375 goto relay_connections_ht_error;
3376 }
3377
3378 ret = create_thread_poll_set(&events, 2);
3379 if (ret < 0) {
3380 goto error_poll_create;
3381 }
3382
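/*
 * Descriptive note (added): watch the internal pipe through which new
 * control and data connections are handed over to this worker thread.
 */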
3383 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
3384 if (ret < 0) {
3385 goto error;
3386 }
3387
3388 restart:
3389 while (1) {
3390 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
3391
3392 health_code_update();
3393
3394 /* Infinite blocking call, waiting for transmission */
3395 DBG3("Relayd worker thread polling...");
3396 health_poll_entry();
3397 ret = lttng_poll_wait(&events, -1);
3398 health_poll_exit();
3399 if (ret < 0) {
3400 /*
3401 * Restart interrupted system call.
3402 */
3403 if (errno == EINTR) {
3404 goto restart;
3405 }
3406 goto error;
3407 }
3408
3409 nb_fd = ret;
3410
3411 /*
3412 * Process control. The control connection is
3413 * prioritized so we don't starve it with high
3414 * throughput tracing data on the data connection.
3415 */
3416 for (i = 0; i < nb_fd; i++) {
3417 /* Fetch once the poll data */
3418 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3419 int pollfd = LTTNG_POLL_GETFD(&events, i);
3420
3421 health_code_update();
3422
3423 /* Thread quit pipe has been closed. Killing thread. */
3424 ret = check_thread_quit_pipe(pollfd, revents);
3425 if (ret) {
3426 err = 0;
3427 goto exit;
3428 }
3429
3430 /* Inspect the relay conn pipe for new connection */
3431 if (pollfd == relay_conn_pipe[0]) {
3432 if (revents & LPOLLIN) {
3433 struct relay_connection *conn;
3434
3435 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3436 if (ret < 0) {
3437 goto error;
3438 }
3439 ret = lttng_poll_add(&events,
3440 conn->sock->fd,
3441 LPOLLIN | LPOLLRDHUP);
3442 if (ret) {
3443 ERR("Failed to add new connection file descriptor to poll set");
3444 goto error;
3445 }
3446 connection_ht_add(relay_connections_ht, conn);
3447 DBG("Connection socket %d added", conn->sock->fd);
3448 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3449 ERR("Relay connection pipe error");
3450 goto error;
3451 } else {
3452 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3453 goto error;
3454 }
3455 } else {
3456 struct relay_connection *ctrl_conn;
3457
3458 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3459 /* If not found, there is a synchronization issue. */
3460 assert(ctrl_conn);
3461
3462 if (ctrl_conn->type == RELAY_DATA) {
3463 if (revents & LPOLLIN) {
3464 /*
3465 * Flag the last seen data fd not deleted. It will be
3466 * used as the last seen fd if any fd gets deleted in
3467 * this first loop.
3468 */
3469 last_notdel_data_fd = pollfd;
3470 }
3471 goto put_ctrl_connection;
3472 }
3473 assert(ctrl_conn->type == RELAY_CONTROL);
3474
3475 if (revents & LPOLLIN) {
3476 enum relay_connection_status status;
3477
3478 status = relay_process_control(ctrl_conn);
3479 if (status != RELAY_CONNECTION_STATUS_OK) {
3480 /*
3481 * On socket error flag the session as aborted to force
3482 * the cleanup of its stream otherwise it can leak
3483 * during the lifetime of the relayd.
3484 *
3485 * This prevents situations in which streams can be
3486 * left open because an index was received, the
3487 * control connection is closed, and the data
3488 * connection is closed (uncleanly) before the packet's
3489 * data was provided.
3490 *
3491 * Since the control connection encountered an error,
3492 * it is okay to be conservative and close the
3493 * session right now as we can't rely on the protocol
3494 * being respected anymore.
3495 */
3496 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3497 session_abort(ctrl_conn->session);
3498 }
3499
3500 /* Clear the connection on error or close. */
3501 relay_thread_close_connection(&events,
3502 pollfd,
3503 ctrl_conn);
3504 }
3505 seen_control = 1;
3506 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3507 relay_thread_close_connection(&events,
3508 pollfd, ctrl_conn);
3509 if (last_seen_data_fd == pollfd) {
3510 last_seen_data_fd = last_notdel_data_fd;
3511 }
3512 } else {
3513 ERR("Unexpected poll events %u for control sock %d",
3514 revents, pollfd);
3515 connection_put(ctrl_conn);
3516 goto error;
3517 }
3518 put_ctrl_connection:
3519 connection_put(ctrl_conn);
3520 }
3521 }
3522
3523 /*
3524 * The last loop handled a control request, go back to poll to make
3525 * sure we prioritise the control socket.
3526 */
3527 if (seen_control) {
3528 continue;
3529 }
3530
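/*
 * Descriptive note (added): resume data connection processing right after
 * the last data fd that was serviced so that a single high-throughput
 * connection cannot starve the others.
 */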
3531 if (last_seen_data_fd >= 0) {
3532 for (i = 0; i < nb_fd; i++) {
3533 int pollfd = LTTNG_POLL_GETFD(&events, i);
3534
3535 health_code_update();
3536
3537 if (last_seen_data_fd == pollfd) {
3538 idx = i;
3539 break;
3540 }
3541 }
3542 }
3543
3544 /* Process data connection. */
3545 for (i = idx + 1; i < nb_fd; i++) {
3546 /* Fetch the poll data. */
3547 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3548 int pollfd = LTTNG_POLL_GETFD(&events, i);
3549 struct relay_connection *data_conn;
3550
3551 health_code_update();
3552
3553 if (!revents) {
3554 /* No activity for this FD (poll implementation). */
3555 continue;
3556 }
3557
3558 /* Skip the command pipe. It's handled in the first loop. */
3559 if (pollfd == relay_conn_pipe[0]) {
3560 continue;
3561 }
3562
3563 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3564 if (!data_conn) {
3565 /* Skip it. It might have been removed already. */
3566 continue;
3567 }
3568 if (data_conn->type == RELAY_CONTROL) {
3569 goto put_data_connection;
3570 }
3571 assert(data_conn->type == RELAY_DATA);
3572
3573 if (revents & LPOLLIN) {
3574 enum relay_connection_status status;
3575
3576 status = relay_process_data(data_conn);
3577 /* Connection closed or error. */
3578 if (status != RELAY_CONNECTION_STATUS_OK) {
3579 /*
3580 * On socket error flag the session as aborted to force
3581 * the cleanup of its stream otherwise it can leak
3582 * during the lifetime of the relayd.
3583 *
3584 * This prevents situations in which streams can be
3585 * left open because an index was received, the
3586 * control connection is closed, and the data
3587 * connection is closed (uncleanly) before the packet's
3588 * data was provided.
3589 *
3590 * Since the data connection encountered an error,
3591 * it is okay to be conservative and close the
3592 * session right now as we can't rely on the protocol
3593 * being respected anymore.
3594 */
3595 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3596 session_abort(data_conn->session);
3597 }
3598 relay_thread_close_connection(&events, pollfd,
3599 data_conn);
3600 /*
3601 * Every "goto restart" sets the last seen fd, but here we
3602 * don't really care since we gracefully continue the loop
3603 * after the connection is deleted.
3604 */
3605 } else {
3606 /* Keep last seen fd. */
3607 last_seen_data_fd = pollfd;
3608 connection_put(data_conn);
3609 goto restart;
3610 }
3611 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3612 relay_thread_close_connection(&events, pollfd,
3613 data_conn);
3614 } else {
3615 ERR("Unknown poll events %u for data sock %d",
3616 revents, pollfd);
3617 }
3618 put_data_connection:
3619 connection_put(data_conn);
3620 }
3621 last_seen_data_fd = -1;
3622 }
3623
3624 /* Normal exit, no error */
3625 ret = 0;
3626
3627 exit:
3628 error:
3629 /* Cleanup remaining connection object. */
3630 rcu_read_lock();
3631 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3632 destroy_conn,
3633 sock_n.node) {
3634 health_code_update();
3635
3636 session_abort(destroy_conn->session);
3637
3638 /*
3639 * No need to grab another ref, because we own
3640 * destroy_conn.
3641 */
3642 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3643 destroy_conn);
3644 }
3645 rcu_read_unlock();
3646
3647 lttng_poll_clean(&events);
3648 error_poll_create:
3649 lttng_ht_destroy(relay_connections_ht);
3650 relay_connections_ht_error:
3651 /* Close relay conn pipes */
3652 utils_close_pipe(relay_conn_pipe);
3653 if (err) {
3654 DBG("Thread exited with error");
3655 }
3656 DBG("Worker thread cleanup complete");
3657 error_testpoint:
3658 if (err) {
3659 health_error();
3660 ERR("Health error occurred in %s", __func__);
3661 }
3662 health_unregister(health_relayd);
3663 rcu_unregister_thread();
3664 lttng_relay_stop_threads();
3665 return NULL;
3666 }
3667
3668 /*
3669 * Create the relay connection pipe used to wake up the worker thread.
3670 * Closed in cleanup().
3671 */
3672 static int create_relay_conn_pipe(void)
3673 {
3674 int ret;
3675
3676 ret = utils_create_pipe_cloexec(relay_conn_pipe);
3677
3678 return ret;
3679 }
3680
3681 /*
3682 * main
3683 */
3684 int main(int argc, char **argv)
3685 {
3686 int ret = 0, retval = 0;
3687 void *status;
3688
3689 /* Parse arguments */
3690 progname = argv[0];
3691 if (set_options(argc, argv)) {
3692 retval = -1;
3693 goto exit_options;
3694 }
3695
3696 if (set_signal_handler()) {
3697 retval = -1;
3698 goto exit_options;
3699 }
3700
3701 relayd_config_log();
3702
3703 if (opt_print_version) {
3704 print_version();
3705 retval = 0;
3706 goto exit_options;
3707 }
3708
3709 /* Try to create directory if -o, --output is specified. */
3710 if (opt_output_path) {
3711 if (*opt_output_path != '/') {
3712 ERR("Please specify an absolute path for -o, --output PATH");
3713 retval = -1;
3714 goto exit_options;
3715 }
3716
3717 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
3718 -1, -1);
3719 if (ret < 0) {
3720 ERR("Unable to create %s", opt_output_path);
3721 retval = -1;
3722 goto exit_options;
3723 }
3724 }
3725
3726 /* Daemonize */
3727 if (opt_daemon || opt_background) {
3728 int i;
3729
3730 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
3731 !opt_background);
3732 if (ret < 0) {
3733 retval = -1;
3734 goto exit_options;
3735 }
3736
3737 /*
3738 * We are in the child. Make sure all other file
3739 * descriptors are closed, in case we are called with
3740 * more opened file descriptors than the standard ones.
3741 */
3742 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
3743 (void) close(i);
3744 }
3745 }
3746
3747 if (opt_working_directory) {
3748 ret = utils_change_working_directory(opt_working_directory);
3749 if (ret) {
3750 /* All errors are already logged. */
3751 goto exit_options;
3752 }
3753 }
3754
3755 sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create();
3756 if (!sessiond_trace_chunk_registry) {
3757 ERR("Failed to initialize session daemon trace chunk registry");
3758 retval = -1;
3759 goto exit_sessiond_trace_chunk_registry;
3760 }
3761
3762 /* Initialize thread health monitoring */
3763 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
3764 if (!health_relayd) {
3765 PERROR("health_app_create error");
3766 retval = -1;
3767 goto exit_health_app_create;
3768 }
3769
3770 /* Create thread quit pipe */
3771 if (init_thread_quit_pipe()) {
3772 retval = -1;
3773 goto exit_init_data;
3774 }
3775
3776 /* Setup the thread apps communication pipe. */
3777 if (create_relay_conn_pipe()) {
3778 retval = -1;
3779 goto exit_init_data;
3780 }
3781
3782 /* Init relay command queue. */
3783 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
3784
3785 /* Initialize communication library */
3786 lttcomm_init();
3787 lttcomm_inet_init();
3788
3789 /* table of sessions indexed by session ID */
3790 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3791 if (!sessions_ht) {
3792 retval = -1;
3793 goto exit_init_data;
3794 }
3795
3796 /* table of relay streams indexed by stream ID */
3797 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3798 if (!relay_streams_ht) {
3799 retval = -1;
3800 goto exit_init_data;
3801 }
3802
3803 /* table of viewer streams indexed by stream ID */
3804 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3805 if (!viewer_streams_ht) {
3806 retval = -1;
3807 goto exit_init_data;
3808 }
3809
3810 ret = utils_create_pipe(health_quit_pipe);
3811 if (ret) {
3812 retval = -1;
3813 goto exit_health_quit_pipe;
3814 }
3815
3816 /* Create the thread that manages the health check socket. */
3817 ret = pthread_create(&health_thread, default_pthread_attr(),
3818 thread_manage_health, (void *) NULL);
3819 if (ret) {
3820 errno = ret;
3821 PERROR("pthread_create health");
3822 retval = -1;
3823 goto exit_health_thread;
3824 }
3825
3826 /* Setup the dispatcher thread */
3827 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
3828 relay_thread_dispatcher, (void *) NULL);
3829 if (ret) {
3830 errno = ret;
3831 PERROR("pthread_create dispatcher");
3832 retval = -1;
3833 goto exit_dispatcher_thread;
3834 }
3835
3836 /* Setup the worker thread */
3837 ret = pthread_create(&worker_thread, default_pthread_attr(),
3838 relay_thread_worker, NULL);
3839 if (ret) {
3840 errno = ret;
3841 PERROR("pthread_create worker");
3842 retval = -1;
3843 goto exit_worker_thread;
3844 }
3845
3846 /* Setup the listener thread */
3847 ret = pthread_create(&listener_thread, default_pthread_attr(),
3848 relay_thread_listener, (void *) NULL);
3849 if (ret) {
3850 errno = ret;
3851 PERROR("pthread_create listener");
3852 retval = -1;
3853 goto exit_listener_thread;
3854 }
3855
3856 ret = relayd_live_create(live_uri);
3857 if (ret) {
3858 ERR("Starting live viewer threads");
3859 retval = -1;
3860 goto exit_live;
3861 }
3862
3863 /*
3864 * This is where we start awaiting program completion (e.g. through
3865 * signal that asks threads to teardown).
3866 */
3867
3868 ret = relayd_live_join();
3869 if (ret) {
3870 retval = -1;
3871 }
3872 exit_live:
3873
3874 ret = pthread_join(listener_thread, &status);
3875 if (ret) {
3876 errno = ret;
3877 PERROR("pthread_join listener_thread");
3878 retval = -1;
3879 }
3880
3881 exit_listener_thread:
3882 ret = pthread_join(worker_thread, &status);
3883 if (ret) {
3884 errno = ret;
3885 PERROR("pthread_join worker_thread");
3886 retval = -1;
3887 }
3888
3889 exit_worker_thread:
3890 ret = pthread_join(dispatcher_thread, &status);
3891 if (ret) {
3892 errno = ret;
3893 PERROR("pthread_join dispatcher_thread");
3894 retval = -1;
3895 }
3896 exit_dispatcher_thread:
3897
3898 ret = pthread_join(health_thread, &status);
3899 if (ret) {
3900 errno = ret;
3901 PERROR("pthread_join health_thread");
3902 retval = -1;
3903 }
3904 exit_health_thread:
3905
3906 utils_close_pipe(health_quit_pipe);
3907 exit_health_quit_pipe:
3908
3909 exit_init_data:
3910 health_app_destroy(health_relayd);
3911 sessiond_trace_chunk_registry_destroy(sessiond_trace_chunk_registry);
3912 exit_health_app_create:
3913 exit_sessiond_trace_chunk_registry:
3914 exit_options:
3915 /*
3916 * Wait for all pending call_rcu work to complete before tearing
3917 * down data structures. call_rcu worker may be trying to
3918 * perform lookups in those structures.
3919 */
3920 rcu_barrier();
3921 relayd_cleanup();
3922
3923 /* Ensure all prior call_rcu are done. */
3924 rcu_barrier();
3925
3926 if (!retval) {
3927 exit(EXIT_SUCCESS);
3928 } else {
3929 exit(EXIT_FAILURE);
3930 }
3931 }