5b32341cd9ff4cb2647caa3557536326188dcf47
[lttng-tools.git] / src / bin / lttng-relayd / main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <inttypes.h>
38 #include <urcu/futex.h>
39 #include <urcu/uatomic.h>
40 #include <urcu/rculist.h>
41 #include <unistd.h>
42 #include <fcntl.h>
43 #include <strings.h>
44
45 #include <lttng/lttng.h>
46 #include <common/common.h>
47 #include <common/compat/poll.h>
48 #include <common/compat/socket.h>
49 #include <common/compat/endian.h>
50 #include <common/compat/getenv.h>
51 #include <common/defaults.h>
52 #include <common/daemonize.h>
53 #include <common/futex.h>
54 #include <common/sessiond-comm/sessiond-comm.h>
55 #include <common/sessiond-comm/inet.h>
56 #include <common/sessiond-comm/relayd.h>
57 #include <common/uri.h>
58 #include <common/utils.h>
59 #include <common/align.h>
60 #include <common/config/session-config.h>
61 #include <common/dynamic-buffer.h>
62 #include <common/buffer-view.h>
63 #include <common/string-utils/format.h>
64
65 #include "cmd.h"
66 #include "ctf-trace.h"
67 #include "index.h"
68 #include "utils.h"
69 #include "lttng-relayd.h"
70 #include "live.h"
71 #include "health-relayd.h"
72 #include "testpoint.h"
73 #include "viewer-stream.h"
74 #include "session.h"
75 #include "stream.h"
76 #include "connection.h"
77 #include "tracefile-array.h"
78 #include "tcp_keep_alive.h"
79 #include "sessiond-trace-chunks.h"
80
81 static const char *help_msg =
82 #ifdef LTTNG_EMBED_HELP
83 #include <lttng-relayd.8.h>
84 #else
85 NULL
86 #endif
87 ;
88
89 enum relay_connection_status {
90 RELAY_CONNECTION_STATUS_OK,
91 /* An error occurred while processing an event on the connection. */
92 RELAY_CONNECTION_STATUS_ERROR,
93 /* Connection closed/shutdown cleanly. */
94 RELAY_CONNECTION_STATUS_CLOSED,
95 };
96
97 /* command line options */
98 char *opt_output_path;
99 static int opt_daemon, opt_background;
100
101 /*
102 * We need to wait for the listener, live listener and health check
103 * threads before signaling readiness.
104 */
105 #define NR_LTTNG_RELAY_READY 3
106 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
107
108 /* Size of receive buffer. */
109 #define RECV_DATA_BUFFER_SIZE 65536
110
111 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
112 static pid_t child_ppid; /* Internal parent PID used with daemonize. */
113
114 static struct lttng_uri *control_uri;
115 static struct lttng_uri *data_uri;
116 static struct lttng_uri *live_uri;
117
118 const char *progname;
119
120 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
121 static int tracing_group_name_override;
122
123 const char * const config_section_name = "relayd";
124
125 /*
126 * Quit pipe for all threads. This permits a single cancellation point
127 * for all threads when receiving an event on the pipe.
128 */
129 int thread_quit_pipe[2] = { -1, -1 };
130
131 /*
132 * This pipe is used to inform the worker thread that a new connection is
133 * queued and ready to be processed.
134 */
135 static int relay_conn_pipe[2] = { -1, -1 };
136
137 /* Shared between threads */
138 static int dispatch_thread_exit;
139
140 static pthread_t listener_thread;
141 static pthread_t dispatcher_thread;
142 static pthread_t worker_thread;
143 static pthread_t health_thread;
144
145 /*
146 * last_relay_stream_id_lock protects last_relay_stream_id increment
147 * atomicity on 32-bit architectures.
148 */
149 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
150 static uint64_t last_relay_stream_id;
151
152 /*
153 * Relay command queue.
154 *
155 * The relay_thread_listener and relay_thread_dispatcher communicate through
156 * this queue.
157 */
158 static struct relay_conn_queue relay_conn_queue;
159
160 /* Global relay stream hash table. */
161 struct lttng_ht *relay_streams_ht;
162
163 /* Global relay viewer stream hash table. */
164 struct lttng_ht *viewer_streams_ht;
165
166 /* Global relay sessions hash table. */
167 struct lttng_ht *sessions_ht;
168
169 /* Relayd health monitoring */
170 struct health_app *health_relayd;
171
172 struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
173
174 static struct option long_options[] = {
175 { "control-port", 1, 0, 'C', },
176 { "data-port", 1, 0, 'D', },
177 { "live-port", 1, 0, 'L', },
178 { "daemonize", 0, 0, 'd', },
179 { "background", 0, 0, 'b', },
180 { "group", 1, 0, 'g', },
181 { "help", 0, 0, 'h', },
182 { "output", 1, 0, 'o', },
183 { "verbose", 0, 0, 'v', },
184 { "config", 1, 0, 'f' },
185 { "version", 0, 0, 'V' },
186 { NULL, 0, 0, 0, },
187 };
188
189 static const char *config_ignore_options[] = { "help", "config", "version" };
190
191 /*
192 * Take an option from the getopt output and set it in the right variable to be
193 * used later.
194 *
195 * Return 0 on success else a negative value.
196 */
197 static int set_option(int opt, const char *arg, const char *optname)
198 {
199 int ret;
200
201 switch (opt) {
202 case 0:
203 fprintf(stderr, "option %s", optname);
204 if (arg) {
205 fprintf(stderr, " with arg %s\n", arg);
206 }
207 break;
208 case 'C':
209 if (lttng_is_setuid_setgid()) {
210 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
211 "-C, --control-port");
212 } else {
213 ret = uri_parse(arg, &control_uri);
214 if (ret < 0) {
215 ERR("Invalid control URI specified");
216 goto end;
217 }
218 if (control_uri->port == 0) {
219 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
220 }
221 }
222 break;
223 case 'D':
224 if (lttng_is_setuid_setgid()) {
225 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
226 "-D, -data-port");
227 } else {
228 ret = uri_parse(arg, &data_uri);
229 if (ret < 0) {
230 ERR("Invalid data URI specified");
231 goto end;
232 }
233 if (data_uri->port == 0) {
234 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
235 }
236 }
237 break;
238 case 'L':
239 if (lttng_is_setuid_setgid()) {
240 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
241 "-L, -live-port");
242 } else {
243 ret = uri_parse(arg, &live_uri);
244 if (ret < 0) {
245 ERR("Invalid live URI specified");
246 goto end;
247 }
248 if (live_uri->port == 0) {
249 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
250 }
251 }
252 break;
253 case 'd':
254 opt_daemon = 1;
255 break;
256 case 'b':
257 opt_background = 1;
258 break;
259 case 'g':
260 if (lttng_is_setuid_setgid()) {
261 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
262 "-g, --group");
263 } else {
264 tracing_group_name = strdup(arg);
265 if (tracing_group_name == NULL) {
266 ret = -errno;
267 PERROR("strdup");
268 goto end;
269 }
270 tracing_group_name_override = 1;
271 }
272 break;
273 case 'h':
274 ret = utils_show_help(8, "lttng-relayd", help_msg);
275 if (ret) {
276 ERR("Cannot show --help for `lttng-relayd`");
277 perror("exec");
278 }
279 exit(EXIT_FAILURE);
280 case 'V':
281 fprintf(stdout, "%s\n", VERSION);
282 exit(EXIT_SUCCESS);
283 case 'o':
284 if (lttng_is_setuid_setgid()) {
285 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
286 "-o, --output");
287 } else {
288 ret = asprintf(&opt_output_path, "%s", arg);
289 if (ret < 0) {
290 ret = -errno;
291 PERROR("asprintf opt_output_path");
292 goto end;
293 }
294 }
295 break;
296 case 'v':
297 /* Verbose level can increase using multiple -v */
298 if (arg) {
299 lttng_opt_verbose = config_parse_value(arg);
300 } else {
301 /* Only 3 levels of verbosity (-vvv). */
302 if (lttng_opt_verbose < 3) {
303 lttng_opt_verbose += 1;
304 }
305 }
306 break;
307 default:
308 /* Unknown option or other error.
309 * Error is printed by getopt, just return */
310 ret = -1;
311 goto end;
312 }
313
314 /* All good. */
315 ret = 0;
316
317 end:
318 return ret;
319 }
320
321 /*
322 * config_entry_handler_cb used to handle options read from a config file.
323 * See config_entry_handler_cb comment in common/config/session-config.h for the
324 * return value conventions.
325 */
326 static int config_entry_handler(const struct config_entry *entry, void *unused)
327 {
328 int ret = 0, i;
329
330 if (!entry || !entry->name || !entry->value) {
331 ret = -EINVAL;
332 goto end;
333 }
334
335 /* Check if the option is to be ignored */
336 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
337 if (!strcmp(entry->name, config_ignore_options[i])) {
338 goto end;
339 }
340 }
341
342 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
343 /* Ignore if entry name is not fully matched. */
344 if (strcmp(entry->name, long_options[i].name)) {
345 continue;
346 }
347
348 /*
349 * If the option takes no argument on the command line,
350 * we have to check if the value is "true". We support
351 * non-zero numeric values, true, on and yes.
352 */
353 if (!long_options[i].has_arg) {
354 ret = config_parse_value(entry->value);
355 if (ret <= 0) {
356 if (ret) {
357 WARN("Invalid configuration value \"%s\" for option %s",
358 entry->value, entry->name);
359 }
360 /* False, skip boolean config option. */
361 goto end;
362 }
363 }
364
365 ret = set_option(long_options[i].val, entry->value, entry->name);
366 goto end;
367 }
368
369 WARN("Unrecognized option \"%s\" in daemon configuration file.",
370 entry->name);
371
372 end:
373 return ret;
374 }
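/*
 * Illustrative example (not part of the original source): a daemon
 * configuration file parsed by the handler above might contain a "relayd"
 * section such as the following. Option names mirror long_options[]; the
 * paths and port values shown are assumed defaults, given only for
 * illustration.
 *
 *     [relayd]
 *     daemonize=yes
 *     output=/var/lib/lttng-relayd/traces
 *     control-port=tcp://0.0.0.0:5342
 *     data-port=tcp://0.0.0.0:5343
 *     live-port=tcp://0.0.0.0:5344
 *
 * Boolean options (daemonize, background, verbose) accept the values
 * recognized by config_parse_value(): non-zero numbers, "true", "on" or
 * "yes".
 */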
375
376 static int set_options(int argc, char **argv)
377 {
378 int c, ret = 0, option_index = 0, retval = 0;
379 int orig_optopt = optopt, orig_optind = optind;
380 char *default_address, *optstring;
381 const char *config_path = NULL;
382
383 optstring = utils_generate_optstring(long_options,
384 sizeof(long_options) / sizeof(struct option));
385 if (!optstring) {
386 retval = -ENOMEM;
387 goto exit;
388 }
389
390 /* Check for the --config option */
391
392 while ((c = getopt_long(argc, argv, optstring, long_options,
393 &option_index)) != -1) {
394 if (c == '?') {
395 retval = -EINVAL;
396 goto exit;
397 } else if (c != 'f') {
398 continue;
399 }
400
401 if (lttng_is_setuid_setgid()) {
402 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
403 "-f, --config");
404 } else {
405 config_path = utils_expand_path(optarg);
406 if (!config_path) {
407 ERR("Failed to resolve path: %s", optarg);
408 }
409 }
410 }
411
412 ret = config_get_section_entries(config_path, config_section_name,
413 config_entry_handler, NULL);
414 if (ret) {
415 if (ret > 0) {
416 ERR("Invalid configuration option at line %i", ret);
417 }
418 retval = -1;
419 goto exit;
420 }
421
422 /* Reset getopt's global state */
423 optopt = orig_optopt;
424 optind = orig_optind;
425 while (1) {
426 c = getopt_long(argc, argv, optstring, long_options, &option_index);
427 if (c == -1) {
428 break;
429 }
430
431 ret = set_option(c, optarg, long_options[option_index].name);
432 if (ret < 0) {
433 retval = -1;
434 goto exit;
435 }
436 }
437
438 /* assign default values */
439 if (control_uri == NULL) {
440 ret = asprintf(&default_address,
441 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
442 DEFAULT_NETWORK_CONTROL_PORT);
443 if (ret < 0) {
444 PERROR("asprintf default data address");
445 retval = -1;
446 goto exit;
447 }
448
449 ret = uri_parse(default_address, &control_uri);
450 free(default_address);
451 if (ret < 0) {
452 ERR("Invalid control URI specified");
453 retval = -1;
454 goto exit;
455 }
456 }
457 if (data_uri == NULL) {
458 ret = asprintf(&default_address,
459 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
460 DEFAULT_NETWORK_DATA_PORT);
461 if (ret < 0) {
462 PERROR("asprintf default data address");
463 retval = -1;
464 goto exit;
465 }
466
467 ret = uri_parse(default_address, &data_uri);
468 free(default_address);
469 if (ret < 0) {
470 ERR("Invalid data URI specified");
471 retval = -1;
472 goto exit;
473 }
474 }
475 if (live_uri == NULL) {
476 ret = asprintf(&default_address,
477 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
478 DEFAULT_NETWORK_VIEWER_PORT);
479 if (ret < 0) {
480 PERROR("asprintf default viewer control address");
481 retval = -1;
482 goto exit;
483 }
484
485 ret = uri_parse(default_address, &live_uri);
486 free(default_address);
487 if (ret < 0) {
488 ERR("Invalid viewer control URI specified");
489 retval = -1;
490 goto exit;
491 }
492 }
493
494 exit:
495 free(optstring);
496 return retval;
497 }
498
499 static void print_global_objects(void)
500 {
501 rcu_register_thread();
502
503 print_viewer_streams();
504 print_relay_streams();
505 print_sessions();
506
507 rcu_unregister_thread();
508 }
509
510 /*
511 * Cleanup the daemon
512 */
513 static void relayd_cleanup(void)
514 {
515 print_global_objects();
516
517 DBG("Cleaning up");
518
519 if (viewer_streams_ht)
520 lttng_ht_destroy(viewer_streams_ht);
521 if (relay_streams_ht)
522 lttng_ht_destroy(relay_streams_ht);
523 if (sessions_ht)
524 lttng_ht_destroy(sessions_ht);
525
526 /* free the dynamically allocated opt_output_path */
527 free(opt_output_path);
528
529 /* Close thread quit pipes */
530 utils_close_pipe(thread_quit_pipe);
531
532 uri_free(control_uri);
533 uri_free(data_uri);
534 /* Live URI is freed in the live thread. */
535
536 if (tracing_group_name_override) {
537 free((void *) tracing_group_name);
538 }
539 }
540
541 /*
542 * Write to writable pipe used to notify a thread.
543 */
544 static int notify_thread_pipe(int wpipe)
545 {
546 ssize_t ret;
547
548 ret = lttng_write(wpipe, "!", 1);
549 if (ret < 1) {
550 PERROR("write poll pipe");
551 goto end;
552 }
553 ret = 0;
554 end:
555 return ret;
556 }
557
558 static int notify_health_quit_pipe(int *pipe)
559 {
560 ssize_t ret;
561
562 ret = lttng_write(pipe[1], "4", 1);
563 if (ret < 1) {
564 PERROR("write relay health quit");
565 goto end;
566 }
567 ret = 0;
568 end:
569 return ret;
570 }
571
572 /*
573 * Stop all relayd and relayd-live threads.
574 */
575 int lttng_relay_stop_threads(void)
576 {
577 int retval = 0;
578
579 /* Stopping all threads */
580 DBG("Terminating all threads");
581 if (notify_thread_pipe(thread_quit_pipe[1])) {
582 ERR("write error on thread quit pipe");
583 retval = -1;
584 }
585
586 if (notify_health_quit_pipe(health_quit_pipe)) {
587 ERR("write error on health quit pipe");
588 }
589
590 /* Dispatch thread */
591 CMM_STORE_SHARED(dispatch_thread_exit, 1);
592 futex_nto1_wake(&relay_conn_queue.futex);
593
594 if (relayd_live_stop()) {
595 ERR("Error stopping live threads");
596 retval = -1;
597 }
598 return retval;
599 }
600
601 /*
602 * Signal handler for the daemon
603 *
604 * Simply stop all worker threads, letting main() return gracefully after
605 * joining all threads and calling cleanup().
606 */
607 static void sighandler(int sig)
608 {
609 switch (sig) {
610 case SIGINT:
611 DBG("SIGINT caught");
612 if (lttng_relay_stop_threads()) {
613 ERR("Error stopping threads");
614 }
615 break;
616 case SIGTERM:
617 DBG("SIGTERM caught");
618 if (lttng_relay_stop_threads()) {
619 ERR("Error stopping threads");
620 }
621 break;
622 case SIGUSR1:
623 CMM_STORE_SHARED(recv_child_signal, 1);
624 break;
625 default:
626 break;
627 }
628 }
629
630 /*
631 * Setup the signal handlers for:
632 * SIGINT, SIGTERM, SIGUSR1, SIGPIPE
633 */
634 static int set_signal_handler(void)
635 {
636 int ret = 0;
637 struct sigaction sa;
638 sigset_t sigset;
639
640 if ((ret = sigemptyset(&sigset)) < 0) {
641 PERROR("sigemptyset");
642 return ret;
643 }
644
645 sa.sa_mask = sigset;
646 sa.sa_flags = 0;
647
648 sa.sa_handler = sighandler;
649 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
650 PERROR("sigaction");
651 return ret;
652 }
653
654 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
655 PERROR("sigaction");
656 return ret;
657 }
658
659 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
660 PERROR("sigaction");
661 return ret;
662 }
663
664 sa.sa_handler = SIG_IGN;
665 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
666 PERROR("sigaction");
667 return ret;
668 }
669
670 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
671
672 return ret;
673 }
674
675 void lttng_relay_notify_ready(void)
676 {
677 /* Notify the parent of the fork()'d process that we are ready. */
678 if (opt_daemon || opt_background) {
679 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
680 kill(child_ppid, SIGUSR1);
681 }
682 }
683 }
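/*
 * lttng_relay_ready starts at NR_LTTNG_RELAY_READY (3): the listener, live
 * listener and health check threads each call lttng_relay_notify_ready()
 * once, and uatomic_sub_return() only reaches 0 for the last caller, so the
 * daemonize parent receives SIGUSR1 exactly once, when the daemon is fully
 * ready.
 */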
684
685 /*
686 * Init thread quit pipe.
687 *
688 * Return -1 on error or 0 if all pipes are created.
689 */
690 static int init_thread_quit_pipe(void)
691 {
692 int ret;
693
694 ret = utils_create_pipe_cloexec(thread_quit_pipe);
695
696 return ret;
697 }
698
699 /*
700 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
701 */
702 static int create_thread_poll_set(struct lttng_poll_event *events, int size)
703 {
704 int ret;
705
706 if (events == NULL || size == 0) {
707 ret = -1;
708 goto error;
709 }
710
711 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
712 if (ret < 0) {
713 goto error;
714 }
715
716 /* Add quit pipe */
717 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
718 if (ret < 0) {
719 goto error;
720 }
721
722 return 0;
723
724 error:
725 return ret;
726 }
727
728 /*
729 * Check if the thread quit pipe was triggered.
730 *
731 * Return 1 if it was triggered, else 0.
732 */
733 static int check_thread_quit_pipe(int fd, uint32_t events)
734 {
735 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
736 return 1;
737 }
738
739 return 0;
740 }
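/*
 * Minimal usage sketch (illustration only, kept out of the build): how a
 * relayd thread typically combines create_thread_poll_set() and
 * check_thread_quit_pipe(). The real pattern, with health instrumentation
 * and full error handling, is used by relay_thread_listener() below.
 */
#if 0
static void *example_thread(void *data)
{
	int i, ret;
	struct lttng_poll_event events;

	/* Room for the thread quit pipe plus one monitored fd. */
	if (create_thread_poll_set(&events, 2) < 0) {
		return NULL;
	}

	for (;;) {
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			break;
		}
		for (i = 0; i < ret; i++) {
			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
			int pollfd = LTTNG_POLL_GETFD(&events, i);

			if (check_thread_quit_pipe(pollfd, revents)) {
				goto quit;
			}
			/* Handle the other monitored fds here. */
		}
	}
quit:
	lttng_poll_clean(&events);
	return NULL;
}
#endif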
741
742 /*
743 * Create and init socket from uri.
744 */
745 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
746 {
747 int ret;
748 struct lttcomm_sock *sock = NULL;
749
750 sock = lttcomm_alloc_sock_from_uri(uri);
751 if (sock == NULL) {
752 ERR("Allocating socket");
753 goto error;
754 }
755
756 ret = lttcomm_create_sock(sock);
757 if (ret < 0) {
758 goto error;
759 }
760 DBG("Listening on sock %d", sock->fd);
761
762 ret = sock->ops->bind(sock);
763 if (ret < 0) {
764 PERROR("Failed to bind socket");
765 goto error;
766 }
767
768 ret = sock->ops->listen(sock, -1);
769 if (ret < 0) {
770 goto error;
771 }
773
774 return sock;
775
776 error:
777 if (sock) {
778 lttcomm_destroy_sock(sock);
779 }
780 return NULL;
781 }
782
783 /*
784 * This thread manages listening for new connections on the network.
785 */
786 static void *relay_thread_listener(void *data)
787 {
788 int i, ret, pollfd, err = -1;
789 uint32_t revents, nb_fd;
790 struct lttng_poll_event events;
791 struct lttcomm_sock *control_sock, *data_sock;
792
793 DBG("[thread] Relay listener started");
794
795 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
796
797 health_code_update();
798
799 control_sock = relay_socket_create(control_uri);
800 if (!control_sock) {
801 goto error_sock_control;
802 }
803
804 data_sock = relay_socket_create(data_uri);
805 if (!data_sock) {
806 goto error_sock_relay;
807 }
808
809 /*
810 * Pass 3 as size here for the thread quit pipe, control and
811 * data socket.
812 */
813 ret = create_thread_poll_set(&events, 3);
814 if (ret < 0) {
815 goto error_create_poll;
816 }
817
818 /* Add the control socket */
819 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
820 if (ret < 0) {
821 goto error_poll_add;
822 }
823
824 /* Add the data socket */
825 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
826 if (ret < 0) {
827 goto error_poll_add;
828 }
829
830 lttng_relay_notify_ready();
831
832 if (testpoint(relayd_thread_listener)) {
833 goto error_testpoint;
834 }
835
836 while (1) {
837 health_code_update();
838
839 DBG("Listener accepting connections");
840
841 restart:
842 health_poll_entry();
843 ret = lttng_poll_wait(&events, -1);
844 health_poll_exit();
845 if (ret < 0) {
846 /*
847 * Restart interrupted system call.
848 */
849 if (errno == EINTR) {
850 goto restart;
851 }
852 goto error;
853 }
854
855 nb_fd = ret;
856
857 DBG("Relay new connection received");
858 for (i = 0; i < nb_fd; i++) {
859 health_code_update();
860
861 /* Fetch once the poll data */
862 revents = LTTNG_POLL_GETEV(&events, i);
863 pollfd = LTTNG_POLL_GETFD(&events, i);
864
865 /* Thread quit pipe has been triggered. Killing thread. */
866 ret = check_thread_quit_pipe(pollfd, revents);
867 if (ret) {
868 err = 0;
869 goto exit;
870 }
871
872 if (revents & LPOLLIN) {
873 /*
874 * A new connection is requested, therefore a
875 * sessiond/consumerd connection is allocated in
876 * this thread, enqueued to a global queue and
877 * dequeued (and freed) in the worker thread.
878 */
879 int val = 1;
880 struct relay_connection *new_conn;
881 struct lttcomm_sock *newsock;
882 enum connection_type type;
883
884 if (pollfd == data_sock->fd) {
885 type = RELAY_DATA;
886 newsock = data_sock->ops->accept(data_sock);
887 } else {
888 assert(pollfd == control_sock->fd);
889 type = RELAY_CONTROL;
890 newsock = control_sock->ops->accept(control_sock);
891 }
892 if (!newsock) {
893 PERROR("accepting sock");
894 goto error;
895 }
896 /* Only dereference newsock once it is known to be valid. */
897 DBG("Relay %s connection accepted, socket %d",
898 type == RELAY_DATA ? "data" : "control",
899 newsock->fd);
900
901 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
902 sizeof(val));
903 if (ret < 0) {
904 PERROR("setsockopt inet");
905 lttcomm_destroy_sock(newsock);
906 goto error;
907 }
908
909 ret = socket_apply_keep_alive_config(newsock->fd);
910 if (ret < 0) {
911 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
912 newsock->fd);
913 lttcomm_destroy_sock(newsock);
914 goto error;
915 }
916
917 new_conn = connection_create(newsock, type);
918 if (!new_conn) {
919 lttcomm_destroy_sock(newsock);
920 goto error;
921 }
922
923 /* Enqueue request for the dispatcher thread. */
924 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
925 &new_conn->qnode);
926
927 /*
928 * Wake the dispatch queue futex.
929 * Implicit memory barrier with the
930 * exchange in cds_wfcq_enqueue.
931 */
932 futex_nto1_wake(&relay_conn_queue.futex);
933 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
934 ERR("socket poll error");
935 goto error;
936 } else {
937 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
938 goto error;
939 }
940 }
941 }
942
943 exit:
944 error:
945 error_poll_add:
946 error_testpoint:
947 lttng_poll_clean(&events);
948 error_create_poll:
949 if (data_sock->fd >= 0) {
950 ret = data_sock->ops->close(data_sock);
951 if (ret) {
952 PERROR("close");
953 }
954 }
955 lttcomm_destroy_sock(data_sock);
956 error_sock_relay:
957 if (control_sock->fd >= 0) {
958 ret = control_sock->ops->close(control_sock);
959 if (ret) {
960 PERROR("close");
961 }
962 }
963 lttcomm_destroy_sock(control_sock);
964 error_sock_control:
965 if (err) {
966 health_error();
967 ERR("Health error occurred in %s", __func__);
968 }
969 health_unregister(health_relayd);
970 DBG("Relay listener thread cleanup complete");
971 lttng_relay_stop_threads();
972 return NULL;
973 }
974
975 /*
976 * This thread manages the dispatching of requests to the worker thread.
977 */
978 static void *relay_thread_dispatcher(void *data)
979 {
980 int err = -1;
981 ssize_t ret;
982 struct cds_wfcq_node *node;
983 struct relay_connection *new_conn = NULL;
984
985 DBG("[thread] Relay dispatcher started");
986
987 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
988
989 if (testpoint(relayd_thread_dispatcher)) {
990 goto error_testpoint;
991 }
992
993 health_code_update();
994
995 for (;;) {
996 health_code_update();
997
998 /* Atomically prepare the queue futex */
999 futex_nto1_prepare(&relay_conn_queue.futex);
1000
1001 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1002 break;
1003 }
1004
1005 do {
1006 health_code_update();
1007
1008 /* Dequeue commands */
1009 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1010 &relay_conn_queue.tail);
1011 if (node == NULL) {
1012 DBG("Woken up but nothing in the relay command queue");
1013 /* Continue thread execution */
1014 break;
1015 }
1016 new_conn = caa_container_of(node, struct relay_connection, qnode);
1017
1018 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1019
1020 /*
1021 * Inform worker thread of the new request. This
1022 * call is blocking so we can be assured that
1023 * the data will be read at some point in time
1024 * or wait to the end of the world :)
1025 */
1026 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1027 if (ret < 0) {
1028 PERROR("write connection pipe");
1029 connection_put(new_conn);
1030 goto error;
1031 }
1032 } while (node != NULL);
1033
1034 /* Futex wait on queue. Blocking call on futex() */
1035 health_poll_entry();
1036 futex_nto1_wait(&relay_conn_queue.futex);
1037 health_poll_exit();
1038 }
1039
1040 /* Normal exit, no error */
1041 err = 0;
1042
1043 error:
1044 error_testpoint:
1045 if (err) {
1046 health_error();
1047 ERR("Health error occurred in %s", __func__);
1048 }
1049 health_unregister(health_relayd);
1050 DBG("Dispatch thread dying");
1051 lttng_relay_stop_threads();
1052 return NULL;
1053 }
1054
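/*
 * Relay indexes are only produced for peers speaking protocol >= 2.4 and are
 * never produced for snapshot sessions, hence the two-part check below.
 */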
1055 static bool session_streams_have_index(const struct relay_session *session)
1056 {
1057 return session->minor >= 4 && !session->snapshot;
1058 }
1059
1060 /*
1061 * Handle the RELAYD_CREATE_SESSION command.
1062 *
1063 * On success, send back the session id or else return a negative value.
1064 */
1065 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1066 struct relay_connection *conn,
1067 const struct lttng_buffer_view *payload)
1068 {
1069 int ret = 0;
1070 ssize_t send_ret;
1071 struct relay_session *session = NULL;
1072 struct lttcomm_relayd_create_session_reply_2_11 reply = {};
1073 char session_name[LTTNG_NAME_MAX] = {};
1074 char hostname[LTTNG_HOST_NAME_MAX] = {};
1075 uint32_t live_timer = 0;
1076 bool snapshot = false;
1077 bool session_name_contains_creation_timestamp = false;
1078 /* Left nil for peers < 2.11. */
1079 char base_path[LTTNG_PATH_MAX] = {};
1080 lttng_uuid sessiond_uuid = {};
1081 LTTNG_OPTIONAL(uint64_t) id_sessiond = {};
1082 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1083 LTTNG_OPTIONAL(time_t) creation_time = {};
1084 struct lttng_dynamic_buffer reply_payload;
1085
1086 lttng_dynamic_buffer_init(&reply_payload);
1087
1088 if (conn->minor < 4) {
1089 /* From 2.1 to 2.3 */
1090 ret = 0;
1091 } else if (conn->minor >= 4 && conn->minor < 11) {
1092 /* From 2.4 to 2.10 */
1093 ret = cmd_create_session_2_4(payload, session_name,
1094 hostname, &live_timer, &snapshot);
1095 } else {
1096 bool has_current_chunk;
1097 uint64_t current_chunk_id_value;
1098 time_t creation_time_value;
1099 uint64_t id_sessiond_value;
1100
1101 /* From 2.11 to ... */
1102 ret = cmd_create_session_2_11(payload, session_name, hostname,
1103 base_path, &live_timer, &snapshot, &id_sessiond_value,
1104 sessiond_uuid, &has_current_chunk,
1105 &current_chunk_id_value, &creation_time_value,
1106 &session_name_contains_creation_timestamp);
1107 if (lttng_uuid_is_nil(sessiond_uuid)) {
1108 /* The nil UUID is reserved for pre-2.11 clients. */
1109 ERR("Illegal nil UUID announced by peer in create session command");
1110 ret = -1;
1111 goto send_reply;
1112 }
1113 LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value);
1114 LTTNG_OPTIONAL_SET(&creation_time, creation_time_value);
1115 if (has_current_chunk) {
1116 LTTNG_OPTIONAL_SET(&current_chunk_id,
1117 current_chunk_id_value);
1118 }
1119 }
1120
1121 if (ret < 0) {
1122 goto send_reply;
1123 }
1124
1125 session = session_create(session_name, hostname, base_path, live_timer,
1126 snapshot, sessiond_uuid,
1127 id_sessiond.is_set ? &id_sessiond.value : NULL,
1128 current_chunk_id.is_set ? &current_chunk_id.value : NULL,
1129 creation_time.is_set ? &creation_time.value : NULL,
1130 conn->major, conn->minor,
1131 session_name_contains_creation_timestamp);
1132 if (!session) {
1133 ret = -1;
1134 goto send_reply;
1135 }
1136 assert(!conn->session);
1137 conn->session = session;
1138 DBG("Created session %" PRIu64, session->id);
1139
1140 reply.generic.session_id = htobe64(session->id);
1141
1142 send_reply:
1143 if (ret < 0) {
1144 reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL);
1145 } else {
1146 reply.generic.ret_code = htobe32(LTTNG_OK);
1147 }
1148
1149 if (conn->minor < 11) {
1150 /* From 2.1 to 2.10 */
1151 ret = lttng_dynamic_buffer_append(&reply_payload,
1152 &reply.generic, sizeof(reply.generic));
1153 if (ret) {
1154 ERR("Failed to append \"create session\" command reply header to payload buffer");
1155 ret = -1;
1156 goto end;
1157 }
1158 } else {
1159 const uint32_t output_path_length =
1160 session ? strlen(session->output_path) + 1 : 0;
1161
1162 reply.output_path_length = htobe32(output_path_length);
1163 ret = lttng_dynamic_buffer_append(
1164 &reply_payload, &reply, sizeof(reply));
1165 if (ret) {
1166 ERR("Failed to append \"create session\" command reply header to payload buffer");
1167 goto end;
1168 }
1169
1170 if (output_path_length) {
1171 ret = lttng_dynamic_buffer_append(&reply_payload,
1172 session->output_path,
1173 output_path_length);
1174 if (ret) {
1175 ERR("Failed to append \"create session\" command reply path to payload buffer");
1176 goto end;
1177 }
1178 }
1179 }
1180
1181 send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data,
1182 reply_payload.size, 0);
1183 if (send_ret < (ssize_t) reply_payload.size) {
1184 ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)",
1185 reply_payload.size, send_ret);
1186 ret = -1;
1187 }
1188 end:
1189 if (ret < 0 && session) {
1190 session_put(session);
1191 }
1192 lttng_dynamic_buffer_reset(&reply_payload);
1193 return ret;
1194 }
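/*
 * Reply layout note for relay_create_session(): peers older than 2.11 only
 * receive the fixed-size generic reply. For 2.11+ peers the reply is the
 * lttcomm_relayd_create_session_reply_2_11 header, whose output_path_length
 * field announces the size (including the terminating NUL) of the session
 * output path that immediately follows it in the same message.
 */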
1195
1196 /*
1197 * When we have received all the streams and the metadata for a channel,
1198 * we make them visible to the viewer threads.
1199 */
1200 static void publish_connection_local_streams(struct relay_connection *conn)
1201 {
1202 struct relay_stream *stream;
1203 struct relay_session *session = conn->session;
1204
1205 /*
1206 * We publish all streams belonging to a session atomically wrt
1207 * session lock.
1208 */
1209 pthread_mutex_lock(&session->lock);
1210 rcu_read_lock();
1211 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1212 recv_node) {
1213 stream_publish(stream);
1214 }
1215 rcu_read_unlock();
1216
1217 /*
1218 * Inform the viewer that there are new streams in the session.
1219 */
1220 if (session->viewer_attached) {
1221 uatomic_set(&session->new_streams, 1);
1222 }
1223 pthread_mutex_unlock(&session->lock);
1224 }
1225
1226 static int conform_channel_path(char *channel_path)
1227 {
1228 int ret = 0;
1229
1230 if (strstr(channel_path, "../")) {
1231 ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"",
1232 channel_path);
1233 ret = -1;
1234 goto end;
1235 }
1236
1237 if (*channel_path == '/') {
1238 const size_t len = strlen(channel_path);
1239
1240 /*
1241 * Channel paths from peers prior to 2.11 are expressed as an
1242 * absolute path that is, in reality, relative to the relay
1243 * daemon's output directory. Remove the leading slash so it
1244 * is correctly interpreted as a relative path later on.
1245 *
1246 * len (and not len - 1) is used to copy the trailing NULL.
1247 */
1248 memmove(channel_path, channel_path + 1, len);
1249 }
1250 end:
1251 return ret;
1252 }
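/*
 * conform_channel_path() examples (hypothetical paths, for illustration):
 * a pre-2.11 peer announcing "/myhost/my-session/ust" has its channel path
 * rewritten in place to "myhost/my-session/ust", while a path containing
 * "../" such as "../../etc" is rejected outright.
 */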
1253
1254 /*
1255 * relay_add_stream: allocate a new stream for a session
1256 */
1257 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1258 struct relay_connection *conn,
1259 const struct lttng_buffer_view *payload)
1260 {
1261 int ret;
1262 ssize_t send_ret;
1263 struct relay_session *session = conn->session;
1264 struct relay_stream *stream = NULL;
1265 struct lttcomm_relayd_status_stream reply;
1266 struct ctf_trace *trace = NULL;
1267 uint64_t stream_handle = -1ULL;
1268 char *path_name = NULL, *channel_name = NULL;
1269 uint64_t tracefile_size = 0, tracefile_count = 0;
1270 LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {};
1271
1272 if (!session || !conn->version_check_done) {
1273 ERR("Trying to add a stream before version check");
1274 ret = -1;
1275 goto end_no_session;
1276 }
1277
1278 if (session->minor == 1) {
1279 /* For 2.1 */
1280 ret = cmd_recv_stream_2_1(payload, &path_name,
1281 &channel_name);
1282 } else if (session->minor > 1 && session->minor < 11) {
1283 /* From 2.2 to 2.10 */
1284 ret = cmd_recv_stream_2_2(payload, &path_name,
1285 &channel_name, &tracefile_size, &tracefile_count);
1286 } else {
1287 /* From 2.11 to ... */
1288 ret = cmd_recv_stream_2_11(payload, &path_name,
1289 &channel_name, &tracefile_size, &tracefile_count,
1290 &stream_chunk_id.value);
1291 stream_chunk_id.is_set = true;
1292 }
1293
1294 if (ret < 0) {
1295 goto send_reply;
1296 }
1297
1298 if (conform_channel_path(path_name)) {
1299 goto send_reply;
1300 }
1301
1302 trace = ctf_trace_get_by_path_or_create(session, path_name);
1303 if (!trace) {
1304 goto send_reply;
1305 }
1306 /* This stream here has one reference on the trace. */
1307
1308 pthread_mutex_lock(&last_relay_stream_id_lock);
1309 stream_handle = ++last_relay_stream_id;
1310 pthread_mutex_unlock(&last_relay_stream_id_lock);
1311
1312 /* We pass ownership of path_name and channel_name. */
1313 stream = stream_create(trace, stream_handle, path_name,
1314 channel_name, tracefile_size, tracefile_count);
1315 path_name = NULL;
1316 channel_name = NULL;
1317
1318 /*
1319 * Streams are the owners of their trace. Reference to trace is
1320 * kept within stream_create().
1321 */
1322 ctf_trace_put(trace);
1323
1324 send_reply:
1325 memset(&reply, 0, sizeof(reply));
1326 reply.handle = htobe64(stream_handle);
1327 if (!stream) {
1328 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1329 } else {
1330 reply.ret_code = htobe32(LTTNG_OK);
1331 }
1332
1333 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1334 sizeof(struct lttcomm_relayd_status_stream), 0);
1335 if (send_ret < (ssize_t) sizeof(reply)) {
1336 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1337 send_ret);
1338 ret = -1;
1339 }
1340
1341 end_no_session:
1342 free(path_name);
1343 free(channel_name);
1344 return ret;
1345 }
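/*
 * The handle returned in the reply above is the relay daemon's identifier
 * for the new stream: the peer refers to the stream by this id in later
 * control commands (close stream, index, data pending checks) and when
 * streaming its data, which is how stream_get_by_id() locates it again.
 */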
1346
1347 /*
1348 * relay_close_stream: close a specific stream
1349 */
1350 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1351 struct relay_connection *conn,
1352 const struct lttng_buffer_view *payload)
1353 {
1354 int ret;
1355 ssize_t send_ret;
1356 struct relay_session *session = conn->session;
1357 struct lttcomm_relayd_close_stream stream_info;
1358 struct lttcomm_relayd_generic_reply reply;
1359 struct relay_stream *stream;
1360
1361 DBG("Close stream received");
1362
1363 if (!session || !conn->version_check_done) {
1364 ERR("Trying to close a stream before version check");
1365 ret = -1;
1366 goto end_no_session;
1367 }
1368
1369 if (payload->size < sizeof(stream_info)) {
1370 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1371 sizeof(stream_info), payload->size);
1372 ret = -1;
1373 goto end_no_session;
1374 }
1375 memcpy(&stream_info, payload->data, sizeof(stream_info));
1376 stream_info.stream_id = be64toh(stream_info.stream_id);
1377 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1378
1379 stream = stream_get_by_id(stream_info.stream_id);
1380 if (!stream) {
1381 ret = -1;
1382 goto end;
1383 }
1384
1385 /*
1386 * Set last_net_seq_num before the close flag. Required by data
1387 * pending check.
1388 */
1389 pthread_mutex_lock(&stream->lock);
1390 stream->last_net_seq_num = stream_info.last_net_seq_num;
1391 pthread_mutex_unlock(&stream->lock);
1392
1393 /*
1394 * This is one of the conditions which may trigger a stream close
1395 * with the others being:
1396 * 1) A close command is received for a stream
1397 * 2) The control connection owning the stream is closed
1398 * 3) We have received all of the stream's data _after_ a close
1399 * request.
1400 */
1401 try_stream_close(stream);
1402 if (stream->is_metadata) {
1403 struct relay_viewer_stream *vstream;
1404
1405 vstream = viewer_stream_get_by_id(stream->stream_handle);
1406 if (vstream) {
1407 if (vstream->metadata_sent == stream->metadata_received) {
1408 /*
1409 * Since all the metadata has been sent to the
1410 * viewer and that we have a request to close
1411 * its stream, we can safely teardown the
1412 * corresponding metadata viewer stream.
1413 */
1414 viewer_stream_put(vstream);
1415 }
1416 /* Put local reference. */
1417 viewer_stream_put(vstream);
1418 }
1419 }
1420 stream_put(stream);
1421 ret = 0;
1422
1423 end:
1424 memset(&reply, 0, sizeof(reply));
1425 if (ret < 0) {
1426 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1427 } else {
1428 reply.ret_code = htobe32(LTTNG_OK);
1429 }
1430 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1431 sizeof(struct lttcomm_relayd_generic_reply), 0);
1432 if (send_ret < (ssize_t) sizeof(reply)) {
1433 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1434 send_ret);
1435 ret = -1;
1436 }
1437
1438 end_no_session:
1439 return ret;
1440 }
1441
1442 /*
1443 * relay_reset_metadata: reset a metadata stream
1444 */
1445 static
1446 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1447 struct relay_connection *conn,
1448 const struct lttng_buffer_view *payload)
1449 {
1450 int ret;
1451 ssize_t send_ret;
1452 struct relay_session *session = conn->session;
1453 struct lttcomm_relayd_reset_metadata stream_info;
1454 struct lttcomm_relayd_generic_reply reply;
1455 struct relay_stream *stream;
1456
1457 DBG("Reset metadata received");
1458
1459 if (!session || !conn->version_check_done) {
1460 ERR("Trying to reset a metadata stream before version check");
1461 ret = -1;
1462 goto end_no_session;
1463 }
1464
1465 if (payload->size < sizeof(stream_info)) {
1466 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1467 sizeof(stream_info), payload->size);
1468 ret = -1;
1469 goto end_no_session;
1470 }
1471 memcpy(&stream_info, payload->data, sizeof(stream_info));
1472 stream_info.stream_id = be64toh(stream_info.stream_id);
1473 stream_info.version = be64toh(stream_info.version);
1474
1475 DBG("Update metadata to version %" PRIu64, stream_info.version);
1476
1477 /* Unsupported for live sessions for now. */
1478 if (session->live_timer != 0) {
1479 ret = -1;
1480 goto end;
1481 }
1482
1483 stream = stream_get_by_id(stream_info.stream_id);
1484 if (!stream) {
1485 ret = -1;
1486 goto end;
1487 }
1488 pthread_mutex_lock(&stream->lock);
1489 if (!stream->is_metadata) {
1490 ret = -1;
1491 goto end_unlock;
1492 }
1493
1494 ret = stream_reset_file(stream);
1495 if (ret < 0) {
1496 ERR("Failed to reset metadata stream %" PRIu64
1497 ": stream_path = %s, channel = %s",
1498 stream->stream_handle, stream->path_name,
1499 stream->channel_name);
1500 goto end_unlock;
1501 }
1502 end_unlock:
1503 pthread_mutex_unlock(&stream->lock);
1504 stream_put(stream);
1505
1506 end:
1507 memset(&reply, 0, sizeof(reply));
1508 if (ret < 0) {
1509 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1510 } else {
1511 reply.ret_code = htobe32(LTTNG_OK);
1512 }
1513 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1514 sizeof(struct lttcomm_relayd_generic_reply), 0);
1515 if (send_ret < (ssize_t) sizeof(reply)) {
1516 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1517 send_ret);
1518 ret = -1;
1519 }
1520
1521 end_no_session:
1522 return ret;
1523 }
1524
1525 /*
1526 * relay_unknown_command: reply with LTTNG_ERR_UNK to an unknown command
1527 */
1528 static void relay_unknown_command(struct relay_connection *conn)
1529 {
1530 struct lttcomm_relayd_generic_reply reply;
1531 ssize_t send_ret;
1532
1533 memset(&reply, 0, sizeof(reply));
1534 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1535 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1536 if (send_ret < (ssize_t) sizeof(reply)) {
1537 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1538 }
1539 }
1540
1541 /*
1542 * relay_start: send an acknowledgment to the client to tell if we are
1543 * ready to receive data. We are ready if a session is established.
1544 */
1545 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1546 struct relay_connection *conn,
1547 const struct lttng_buffer_view *payload)
1548 {
1549 int ret = 0;
1550 ssize_t send_ret;
1551 struct lttcomm_relayd_generic_reply reply;
1552 struct relay_session *session = conn->session;
1553
1554 if (!session) {
1555 DBG("Trying to start the streaming without a session established");
1556 ret = htobe32(LTTNG_ERR_UNK);
1557 }
1558
1559 memset(&reply, 0, sizeof(reply));
1560 reply.ret_code = htobe32(LTTNG_OK);
1561 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1562 sizeof(reply), 0);
1563 if (send_ret < (ssize_t) sizeof(reply)) {
1564 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1565 send_ret);
1566 ret = -1;
1567 }
1568
1569 return ret;
1570 }
1571
1572 /*
1573 * relay_recv_metadata: receive the metadata for the session.
1574 */
1575 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1576 struct relay_connection *conn,
1577 const struct lttng_buffer_view *payload)
1578 {
1579 int ret = 0;
1580 struct relay_session *session = conn->session;
1581 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1582 struct relay_stream *metadata_stream;
1583 uint64_t metadata_payload_size;
1584 struct lttng_buffer_view packet_view;
1585
1586 if (!session) {
1587 ERR("Metadata sent before version check");
1588 ret = -1;
1589 goto end;
1590 }
1591
1592 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1593 ERR("Incorrect data size");
1594 ret = -1;
1595 goto end;
1596 }
1597 metadata_payload_size = recv_hdr->data_size -
1598 sizeof(struct lttcomm_relayd_metadata_payload);
1599
1600 memcpy(&metadata_payload_header, payload->data,
1601 sizeof(metadata_payload_header));
1602 metadata_payload_header.stream_id = be64toh(
1603 metadata_payload_header.stream_id);
1604 metadata_payload_header.padding_size = be32toh(
1605 metadata_payload_header.padding_size);
1606
1607 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1608 if (!metadata_stream) {
1609 ret = -1;
1610 goto end;
1611 }
1612
1613 packet_view = lttng_buffer_view_from_view(payload,
1614 sizeof(metadata_payload_header), metadata_payload_size);
1615 if (!packet_view.data) {
1616 ERR("Invalid metadata packet length announced by header");
1617 ret = -1;
1618 goto end_put;
1619 }
1620
1621 pthread_mutex_lock(&metadata_stream->lock);
1622 ret = stream_write(metadata_stream, &packet_view,
1623 metadata_payload_header.padding_size);
1624 pthread_mutex_unlock(&metadata_stream->lock);
1625 if (ret) {
1626 ret = -1;
1627 goto end_put;
1628 }
1629 end_put:
1630 stream_put(metadata_stream);
1631 end:
1632 return ret;
1633 }
1634
1635 /*
1636 * relay_send_version: send relayd version number
1637 */
1638 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1639 struct relay_connection *conn,
1640 const struct lttng_buffer_view *payload)
1641 {
1642 int ret;
1643 ssize_t send_ret;
1644 struct lttcomm_relayd_version reply, msg;
1645 bool compatible = true;
1646
1647 conn->version_check_done = true;
1648
1649 /* Get version from the other side. */
1650 if (payload->size < sizeof(msg)) {
1651 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1652 sizeof(msg), payload->size);
1653 ret = -1;
1654 goto end;
1655 }
1656
1657 memcpy(&msg, payload->data, sizeof(msg));
1658 msg.major = be32toh(msg.major);
1659 msg.minor = be32toh(msg.minor);
1660
1661 memset(&reply, 0, sizeof(reply));
1662 reply.major = RELAYD_VERSION_COMM_MAJOR;
1663 reply.minor = RELAYD_VERSION_COMM_MINOR;
1664
1665 /* Major versions must be the same */
1666 if (reply.major != msg.major) {
1667 DBG("Incompatible major versions (%u vs %u), deleting session",
1668 reply.major, msg.major);
1669 compatible = false;
1670 }
1671
1672 conn->major = reply.major;
1673 /* We adapt to the lowest compatible version */
1674 if (reply.minor <= msg.minor) {
1675 conn->minor = reply.minor;
1676 } else {
1677 conn->minor = msg.minor;
1678 }
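	/*
	 * Example: if this relay daemon implements protocol 2.11 and the peer
	 * announces 2.4, conn->minor becomes 4 and the connection proceeds
	 * using the 2.4 wire format; a differing major version is reported as
	 * incompatible after the reply is sent.
	 */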
1679
1680 reply.major = htobe32(reply.major);
1681 reply.minor = htobe32(reply.minor);
1682 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1683 sizeof(reply), 0);
1684 if (send_ret < (ssize_t) sizeof(reply)) {
1685 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1686 send_ret);
1687 ret = -1;
1688 goto end;
1689 } else {
1690 ret = 0;
1691 }
1692
1693 if (!compatible) {
1694 ret = -1;
1695 goto end;
1696 }
1697
1698 DBG("Version check done using protocol %u.%u", conn->major,
1699 conn->minor);
1700
1701 end:
1702 return ret;
1703 }
1704
1705 /*
1706 * Check for data pending for a given stream id from the session daemon.
1707 */
1708 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1709 struct relay_connection *conn,
1710 const struct lttng_buffer_view *payload)
1711 {
1712 struct relay_session *session = conn->session;
1713 struct lttcomm_relayd_data_pending msg;
1714 struct lttcomm_relayd_generic_reply reply;
1715 struct relay_stream *stream;
1716 ssize_t send_ret;
1717 int ret;
1718 uint64_t stream_seq;
1719
1720 DBG("Data pending command received");
1721
1722 if (!session || !conn->version_check_done) {
1723 ERR("Trying to check for data before version check");
1724 ret = -1;
1725 goto end_no_session;
1726 }
1727
1728 if (payload->size < sizeof(msg)) {
1729 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
1730 sizeof(msg), payload->size);
1731 ret = -1;
1732 goto end_no_session;
1733 }
1734 memcpy(&msg, payload->data, sizeof(msg));
1735 msg.stream_id = be64toh(msg.stream_id);
1736 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
1737
1738 stream = stream_get_by_id(msg.stream_id);
1739 if (stream == NULL) {
1740 ret = -1;
1741 goto end;
1742 }
1743
1744 pthread_mutex_lock(&stream->lock);
1745
1746 if (session_streams_have_index(session)) {
1747 /*
1748 * Ensure that both the index and stream data have been
1749 * flushed up to the requested point.
1750 */
1751 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
1752 } else {
1753 stream_seq = stream->prev_data_seq;
1754 }
1755 DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
1756 ", prev_index_seq %" PRIu64
1757 ", and last_seq %" PRIu64, msg.stream_id,
1758 stream->prev_data_seq, stream->prev_index_seq,
1759 msg.last_net_seq_num);
1760
1761 /* Avoid wrapping issue */
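	/*
	 * Worked example of the unsigned wrap-around handling: with
	 * stream_seq = 2 and last_net_seq_num = UINT64_MAX (the counter has
	 * wrapped), the unsigned difference is 3, which is >= 0 once cast to
	 * int64_t, so the data is correctly reported as written. With
	 * stream_seq = 5 and last_net_seq_num = 10, the difference is -5 and
	 * the data is reported as still pending.
	 */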
1762 if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
1763 /* Data has in fact been written and is NOT pending */
1764 ret = 0;
1765 } else {
1766 /* Data still being streamed thus pending */
1767 ret = 1;
1768 }
1769
1770 stream->data_pending_check_done = true;
1771 pthread_mutex_unlock(&stream->lock);
1772
1773 stream_put(stream);
1774 end:
1775
1776 memset(&reply, 0, sizeof(reply));
1777 reply.ret_code = htobe32(ret);
1778 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1779 if (send_ret < (ssize_t) sizeof(reply)) {
1780 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
1781 send_ret);
1782 ret = -1;
1783 }
1784
1785 end_no_session:
1786 return ret;
1787 }
1788
1789 /*
1790 * Wait for the control socket to reach a quiescent state.
1791 *
1792 * Note that for now, when receiving this command from the session
1793 * daemon, every command or data packet previously received on the
1794 * control socket has already been handled, which is why we simply
1795 * return OK here.
1796 */
1797 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
1798 struct relay_connection *conn,
1799 const struct lttng_buffer_view *payload)
1800 {
1801 int ret;
1802 ssize_t send_ret;
1803 struct relay_stream *stream;
1804 struct lttcomm_relayd_quiescent_control msg;
1805 struct lttcomm_relayd_generic_reply reply;
1806
1807 DBG("Checking quiescent state on control socket");
1808
1809 if (!conn->session || !conn->version_check_done) {
1810 ERR("Trying to check for data before version check");
1811 ret = -1;
1812 goto end_no_session;
1813 }
1814
1815 if (payload->size < sizeof(msg)) {
1816 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
1817 sizeof(msg), payload->size);
1818 ret = -1;
1819 goto end_no_session;
1820 }
1821 memcpy(&msg, payload->data, sizeof(msg));
1822 msg.stream_id = be64toh(msg.stream_id);
1823
1824 stream = stream_get_by_id(msg.stream_id);
1825 if (!stream) {
1826 goto reply;
1827 }
1828 pthread_mutex_lock(&stream->lock);
1829 stream->data_pending_check_done = true;
1830 pthread_mutex_unlock(&stream->lock);
1831
1832 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
1833 stream_put(stream);
1834 reply:
1835 memset(&reply, 0, sizeof(reply));
1836 reply.ret_code = htobe32(LTTNG_OK);
1837 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1838 if (send_ret < (ssize_t) sizeof(reply)) {
1839 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
1840 send_ret);
1841 ret = -1;
1842 } else {
1843 ret = 0;
1844 }
1845
1846 end_no_session:
1847 return ret;
1848 }
1849
1850 /*
1851 * Initialize a data pending command. This means that a consumer is about
1852 * to ask for data pending for each stream it holds. Simply iterate over
1853 * all streams of a session and clear the data_pending_check_done flag.
1854 *
1855 * This command returns to the client a LTTNG_OK code.
1856 */
1857 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1858 struct relay_connection *conn,
1859 const struct lttng_buffer_view *payload)
1860 {
1861 int ret;
1862 ssize_t send_ret;
1863 struct lttng_ht_iter iter;
1864 struct lttcomm_relayd_begin_data_pending msg;
1865 struct lttcomm_relayd_generic_reply reply;
1866 struct relay_stream *stream;
1867
1868 assert(recv_hdr);
1869 assert(conn);
1870
1871 DBG("Init streams for data pending");
1872
1873 if (!conn->session || !conn->version_check_done) {
1874 ERR("Trying to check for data before version check");
1875 ret = -1;
1876 goto end_no_session;
1877 }
1878
1879 if (payload->size < sizeof(msg)) {
1880 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
1881 sizeof(msg), payload->size);
1882 ret = -1;
1883 goto end_no_session;
1884 }
1885 memcpy(&msg, payload->data, sizeof(msg));
1886 msg.session_id = be64toh(msg.session_id);
1887
1888 /*
1889 * Iterate over all streams to set the begin data pending flag.
1890 * For now, the streams are indexed by stream handle so we have
1891 * to iterate over all streams to find the one associated with
1892 * the right session_id.
1893 */
1894 rcu_read_lock();
1895 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1896 node.node) {
1897 if (!stream_get(stream)) {
1898 continue;
1899 }
1900 if (stream->trace->session->id == msg.session_id) {
1901 pthread_mutex_lock(&stream->lock);
1902 stream->data_pending_check_done = false;
1903 pthread_mutex_unlock(&stream->lock);
1904 DBG("Set begin data pending flag to stream %" PRIu64,
1905 stream->stream_handle);
1906 }
1907 stream_put(stream);
1908 }
1909 rcu_read_unlock();
1910
1911 memset(&reply, 0, sizeof(reply));
1912 /* All good, send back reply. */
1913 reply.ret_code = htobe32(LTTNG_OK);
1914
1915 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1916 if (send_ret < (ssize_t) sizeof(reply)) {
1917 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
1918 send_ret);
1919 ret = -1;
1920 } else {
1921 ret = 0;
1922 }
1923
1924 end_no_session:
1925 return ret;
1926 }
1927
1928 /*
1929 * End data pending command. This will check, for a given session id, if
1930 * each stream associated with it has its data_pending_check_done flag
1931 * set. If not, this means that the client lost track of the stream but
1932 * the data is still being streamed on our side. In this case, we inform
1933 * the client that data is in flight.
1934 *
1935 * Return to the client if there is data in flight or not with a ret_code.
1936 */
1937 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1938 struct relay_connection *conn,
1939 const struct lttng_buffer_view *payload)
1940 {
1941 int ret;
1942 ssize_t send_ret;
1943 struct lttng_ht_iter iter;
1944 struct lttcomm_relayd_end_data_pending msg;
1945 struct lttcomm_relayd_generic_reply reply;
1946 struct relay_stream *stream;
1947 uint32_t is_data_inflight = 0;
1948
1949 DBG("End data pending command");
1950
1951 if (!conn->session || !conn->version_check_done) {
1952 ERR("Trying to check for data before version check");
1953 ret = -1;
1954 goto end_no_session;
1955 }
1956
1957 if (payload->size < sizeof(msg)) {
1958 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
1959 sizeof(msg), payload->size);
1960 ret = -1;
1961 goto end_no_session;
1962 }
1963 memcpy(&msg, payload->data, sizeof(msg));
1964 msg.session_id = be64toh(msg.session_id);
1965
1966 /*
1967 * Iterate over all streams to see if the begin data pending
1968 * flag is set.
1969 */
1970 rcu_read_lock();
1971 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1972 node.node) {
1973 if (!stream_get(stream)) {
1974 continue;
1975 }
1976 if (stream->trace->session->id != msg.session_id) {
1977 stream_put(stream);
1978 continue;
1979 }
1980 pthread_mutex_lock(&stream->lock);
1981 if (!stream->data_pending_check_done) {
1982 uint64_t stream_seq;
1983
1984 if (session_streams_have_index(conn->session)) {
1985 /*
1986 * Ensure that both the index and stream data have been
1987 * flushed up to the requested point.
1988 */
1989 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
1990 } else {
1991 stream_seq = stream->prev_data_seq;
1992 }
1993 if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
1994 is_data_inflight = 1;
1995 DBG("Data is still in flight for stream %" PRIu64,
1996 stream->stream_handle);
1997 pthread_mutex_unlock(&stream->lock);
1998 stream_put(stream);
1999 break;
2000 }
2001 }
2002 pthread_mutex_unlock(&stream->lock);
2003 stream_put(stream);
2004 }
2005 rcu_read_unlock();
2006
2007 memset(&reply, 0, sizeof(reply));
2008 /* All good, send back reply. */
2009 reply.ret_code = htobe32(is_data_inflight);
2010
2011 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2012 if (send_ret < (ssize_t) sizeof(reply)) {
2013 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
2014 send_ret);
2015 ret = -1;
2016 } else {
2017 ret = 0;
2018 }
2019
2020 end_no_session:
2021 return ret;
2022 }
2023
2024 /*
2025 * Receive an index for a specific stream.
2026 *
2027 * Return 0 on success else a negative value.
2028 */
2029 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2030 struct relay_connection *conn,
2031 const struct lttng_buffer_view *payload)
2032 {
2033 int ret;
2034 ssize_t send_ret;
2035 struct relay_session *session = conn->session;
2036 struct lttcomm_relayd_index index_info;
2037 struct lttcomm_relayd_generic_reply reply;
2038 struct relay_stream *stream;
2039 size_t msg_len;
2040
2041 assert(conn);
2042
2043 DBG("Relay receiving index");
2044
2045 if (!session || !conn->version_check_done) {
2046 ERR("Trying to close a stream before version check");
2047 ret = -1;
2048 goto end_no_session;
2049 }
2050
2051 msg_len = lttcomm_relayd_index_len(
2052 lttng_to_index_major(conn->major, conn->minor),
2053 lttng_to_index_minor(conn->major, conn->minor));
2054 if (payload->size < msg_len) {
2055 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2056 msg_len, payload->size);
2057 ret = -1;
2058 goto end_no_session;
2059 }
2060 memcpy(&index_info, payload->data, msg_len);
2061 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2062 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2063 index_info.packet_size = be64toh(index_info.packet_size);
2064 index_info.content_size = be64toh(index_info.content_size);
2065 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2066 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2067 index_info.events_discarded = be64toh(index_info.events_discarded);
2068 index_info.stream_id = be64toh(index_info.stream_id);
2069
2070 if (conn->minor >= 8) {
2071 index_info.stream_instance_id =
2072 be64toh(index_info.stream_instance_id);
2073 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2074 }
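/*
 * msg_len, computed above from the peer's protocol version, governs
 * how many bytes were copied into index_info; the stream_instance_id
 * and packet_seq_num fields are therefore only meaningful when the
 * peer speaks protocol 2.8 or later.
 */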
2075
2076 stream = stream_get_by_id(index_info.relay_stream_id);
2077 if (!stream) {
2078 ERR("relay_recv_index: cannot find stream %" PRIu64, index_info.relay_stream_id);
2079 ret = -1;
2080 goto end;
2081 }
2082
2083 pthread_mutex_lock(&stream->lock);
2084 ret = stream_add_index(stream, &index_info);
2085 pthread_mutex_unlock(&stream->lock);
2086 if (ret) {
2087 goto end_stream_put;
2088 }
2089
2090 end_stream_put:
2091 stream_put(stream);
2092 end:
2093 memset(&reply, 0, sizeof(reply));
2094 if (ret < 0) {
2095 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2096 } else {
2097 reply.ret_code = htobe32(LTTNG_OK);
2098 }
2099 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2100 if (send_ret < (ssize_t) sizeof(reply)) {
2101 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2102 ret = -1;
2103 }
2104
2105 end_no_session:
2106 return ret;
2107 }
2108
2109 /*
2110 * Receive the streams_sent message.
2111 *
2112 * Return 0 on success else a negative value.
2113 */
2114 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2115 struct relay_connection *conn,
2116 const struct lttng_buffer_view *payload)
2117 {
2118 int ret;
2119 ssize_t send_ret;
2120 struct lttcomm_relayd_generic_reply reply;
2121
2122 assert(conn);
2123
2124 DBG("Relay receiving streams_sent");
2125
2126 if (!conn->session || !conn->version_check_done) {
2127 ERR("Trying to send the streams_sent command before version check");
2128 ret = -1;
2129 goto end_no_session;
2130 }
2131
2132 /*
2133 * Publish every pending stream in the connection's recv list; those
2134 * streams are now ready to be used by the viewer.
2135 */
2136 publish_connection_local_streams(conn);
2137
2138 memset(&reply, 0, sizeof(reply));
2139 reply.ret_code = htobe32(LTTNG_OK);
2140 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2141 if (send_ret < (ssize_t) sizeof(reply)) {
2142 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2143 send_ret);
2144 ret = -1;
2145 } else {
2146 /* Success. */
2147 ret = 0;
2148 }
2149
2150 end_no_session:
2151 return ret;
2152 }
2153
2154 /*
2155 * relay_rotate_session_streams: rotate a set of streams to a new trace
2156 * chunk for the session rotation feature (not the tracefile rotation feature).
2157 */
2158 static int relay_rotate_session_streams(
2159 const struct lttcomm_relayd_hdr *recv_hdr,
2160 struct relay_connection *conn,
2161 const struct lttng_buffer_view *payload)
2162 {
2163 int ret = 0;
2164 uint32_t i;
2165 ssize_t send_ret;
2166 enum lttng_error_code reply_code = LTTNG_ERR_UNK;
2167 struct relay_session *session = conn->session;
2168 struct lttcomm_relayd_rotate_streams rotate_streams;
2169 struct lttcomm_relayd_generic_reply reply = {};
2170 struct relay_stream *stream = NULL;
2171 const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams);
2172 struct lttng_trace_chunk *next_trace_chunk = NULL;
2173 struct lttng_buffer_view stream_positions;
2174 char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
2175 const char *chunk_id_str = "none";
2176
2177 if (!session || !conn->version_check_done) {
2178 ERR("Trying to rotate a stream before version check");
2179 ret = -1;
2180 goto end_no_reply;
2181 }
2182
2183 if (session->major == 2 && session->minor < 11) {
2184 ERR("Stream rotation command is unsupported before 2.11");
2185 ret = -1;
2186 goto end_no_reply;
2187 }
2188
2189 if (payload->size < header_len) {
2190 ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
2191 header_len, payload->size);
2192 ret = -1;
2193 goto end_no_reply;
2194 }
2195
2196 memcpy(&rotate_streams, payload->data, header_len);
2197
2198 /* Convert header to host endianness. */
2199 rotate_streams = (typeof(rotate_streams)) {
2200 .stream_count = be32toh(rotate_streams.stream_count),
2201 .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) {
2202 .is_set = !!rotate_streams.new_chunk_id.is_set,
2203 .value = be64toh(rotate_streams.new_chunk_id.value),
2204 }
2205 };
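/*
 * Expected wire layout of the ROTATE_STREAMS payload (all fields in
 * big-endian byte order):
 *
 *   struct lttcomm_relayd_rotate_streams header;
 *   struct lttcomm_relayd_stream_rotation_position positions[stream_count];
 *
 * The per-stream positions are validated and converted below.
 */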
2206
2207 if (rotate_streams.new_chunk_id.is_set) {
2208 /*
2209 * Retrieve the trace chunk the stream must transition to. As
2210 * per the protocol, this chunk should have been created
2211 * before this command is received.
2212 */
2213 next_trace_chunk = sessiond_trace_chunk_registry_get_chunk(
2214 sessiond_trace_chunk_registry,
2215 session->sessiond_uuid, session->id,
2216 rotate_streams.new_chunk_id.value);
2217 if (!next_trace_chunk) {
2218 char uuid_str[UUID_STR_LEN];
2219
2220 lttng_uuid_to_str(session->sessiond_uuid, uuid_str);
2221 ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64
2222 ", trace_chunk_id = %" PRIu64,
2223 uuid_str, session->id,
2224 rotate_streams.new_chunk_id.value);
2225 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2226 ret = -1;
2227 goto end;
2228 }
2229
2230 ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64,
2231 rotate_streams.new_chunk_id.value);
2232 if (ret < 0 || ret >= sizeof(chunk_id_buf)) {
2233 chunk_id_str = "formatting error";
2234 } else {
2235 chunk_id_str = chunk_id_buf;
2236 }
2237 session->has_rotated = true;
2238 }
2239
2240 DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"",
2241 rotate_streams.stream_count, session->session_name,
2242 chunk_id_str);
2243
2244 stream_positions = lttng_buffer_view_from_view(payload,
2245 sizeof(rotate_streams), -1);
2246 if (!stream_positions.data ||
2247 stream_positions.size <
2248 (rotate_streams.stream_count *
2249 sizeof(struct lttcomm_relayd_stream_rotation_position))) {
2250 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2251 ret = -1;
2252 goto end;
2253 }
2254
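/*
 * Record a pending rotation on every stream targeted by the command;
 * each stream presumably transitions to the new trace chunk once it
 * reaches the provided sequence number.
 */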
2255 for (i = 0; i < rotate_streams.stream_count; i++) {
2256 struct lttcomm_relayd_stream_rotation_position *position_comm =
2257 &((typeof(position_comm)) stream_positions.data)[i];
2258 const struct lttcomm_relayd_stream_rotation_position pos = {
2259 .stream_id = be64toh(position_comm->stream_id),
2260 .rotate_at_seq_num = be64toh(
2261 position_comm->rotate_at_seq_num),
2262 };
2263
2264 stream = stream_get_by_id(pos.stream_id);
2265 if (!stream) {
2266 reply_code = LTTNG_ERR_INVALID;
2267 ret = -1;
2268 goto end;
2269 }
2270
2271 pthread_mutex_lock(&stream->lock);
2272 ret = stream_set_pending_rotation(stream, next_trace_chunk,
2273 pos.rotate_at_seq_num);
2274 pthread_mutex_unlock(&stream->lock);
2275 if (ret) {
2276 reply_code = LTTNG_ERR_FILE_CREATION_ERROR;
2277 goto end;
2278 }
2279
2280 stream_put(stream);
2281 stream = NULL;
2282 }
2283
2284 reply_code = LTTNG_OK;
2285 ret = 0;
2286 end:
2287 if (stream) {
2288 stream_put(stream);
2289 }
2290
2291 reply.ret_code = htobe32((uint32_t) reply_code);
2292 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2293 sizeof(struct lttcomm_relayd_generic_reply), 0);
2294 if (send_ret < (ssize_t) sizeof(reply)) {
2295 ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
2296 send_ret);
2297 ret = -1;
2298 }
2299 end_no_reply:
2300 lttng_trace_chunk_put(next_trace_chunk);
2301 return ret;
2302 }
2303
2304
2305
2306 /*
2307 * relay_create_trace_chunk: create a new trace chunk
2308 */
2309 static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2310 struct relay_connection *conn,
2311 const struct lttng_buffer_view *payload)
2312 {
2313 int ret = 0;
2314 ssize_t send_ret;
2315 struct relay_session *session = conn->session;
2316 struct lttcomm_relayd_create_trace_chunk *msg;
2317 struct lttcomm_relayd_generic_reply reply = {};
2318 struct lttng_buffer_view header_view;
2319 struct lttng_buffer_view chunk_name_view;
2320 struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL;
2321 enum lttng_error_code reply_code = LTTNG_OK;
2322 enum lttng_trace_chunk_status chunk_status;
2323 struct lttng_directory_handle session_output;
2324
2325 if (!session || !conn->version_check_done) {
2326 ERR("Trying to create a trace chunk before version check");
2327 ret = -1;
2328 goto end_no_reply;
2329 }
2330
2331 if (session->major == 2 && session->minor < 11) {
2332 ERR("Chunk creation command is unsupported before 2.11");
2333 ret = -1;
2334 goto end_no_reply;
2335 }
2336
2337 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2338 if (!header_view.data) {
2339 ERR("Failed to receive payload of chunk creation command");
2340 ret = -1;
2341 goto end_no_reply;
2342 }
2343
2344 /* Convert to host endianness. */
2345 msg = (typeof(msg)) header_view.data;
2346 msg->chunk_id = be64toh(msg->chunk_id);
2347 msg->creation_timestamp = be64toh(msg->creation_timestamp);
2348 msg->override_name_length = be32toh(msg->override_name_length);
2349
2350 chunk = lttng_trace_chunk_create(
2351 msg->chunk_id, msg->creation_timestamp);
2352 if (!chunk) {
2353 ERR("Failed to create trace chunk in trace chunk creation command");
2354 ret = -1;
2355 reply_code = LTTNG_ERR_NOMEM;
2356 goto end;
2357 }
2358
2359 if (msg->override_name_length) {
2360 const char *name;
2361
2362 chunk_name_view = lttng_buffer_view_from_view(payload,
2363 sizeof(*msg),
2364 msg->override_name_length);
2365 name = chunk_name_view.data;
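/* The override name must be NUL-terminated within the announced length. */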
2366 if (!name || name[msg->override_name_length - 1]) {
2367 ERR("Failed to receive payload of chunk creation command");
2368 ret = -1;
2369 reply_code = LTTNG_ERR_INVALID;
2370 goto end;
2371 }
2372
2373 chunk_status = lttng_trace_chunk_override_name(
2374 chunk, chunk_name_view.data);
2375 switch (chunk_status) {
2376 case LTTNG_TRACE_CHUNK_STATUS_OK:
2377 break;
2378 case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT:
2379 ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)");
2380 reply_code = LTTNG_ERR_INVALID;
2381 ret = -1;
2382 goto end;
2383 default:
2384 ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)");
2385 reply_code = LTTNG_ERR_UNK;
2386 ret = -1;
2387 goto end;
2388 }
2389 }
2390
2391 chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk);
2392 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2393 reply_code = LTTNG_ERR_UNK;
2394 ret = -1;
2395 goto end;
2396 }
2397
2398 ret = session_init_output_directory_handle(
2399 conn->session, &session_output);
2400 if (ret) {
2401 reply_code = LTTNG_ERR_CREATE_DIR_FAIL;
2402 goto end;
2403 }
2404 chunk_status = lttng_trace_chunk_set_as_owner(chunk, &session_output);
2405 lttng_directory_handle_fini(&session_output);
2406 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2407 reply_code = LTTNG_ERR_UNK;
2408 ret = -1;
2409 goto end;
2410 }
2411
2412 published_chunk = sessiond_trace_chunk_registry_publish_chunk(
2413 sessiond_trace_chunk_registry,
2414 conn->session->sessiond_uuid,
2415 conn->session->id,
2416 chunk);
2417 if (!published_chunk) {
2418 char uuid_str[UUID_STR_LEN];
2419
2420 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2421 ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2422 uuid_str,
2423 conn->session->id,
2424 msg->chunk_id);
2425 ret = -1;
2426 reply_code = LTTNG_ERR_NOMEM;
2427 goto end;
2428 }
2429
2430 pthread_mutex_lock(&conn->session->lock);
2431 if (conn->session->pending_closure_trace_chunk) {
2432 /*
2433 * Invalid; this means a second create_trace_chunk command was
2434 * received before a close_trace_chunk.
2435 */
2436 ERR("Invalid trace chunk create command received; a trace chunk is already waiting for a trace chunk close command");
2437 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2438 ret = -1;
2439 goto end_unlock_session;
2440 }
2441 conn->session->pending_closure_trace_chunk =
2442 conn->session->current_trace_chunk;
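/*
 * Ownership of the published chunk's reference is handed over to the
 * session; clearing the local pointer keeps the final
 * lttng_trace_chunk_put() below from dropping that reference.
 */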
2443 conn->session->current_trace_chunk = published_chunk;
2444 published_chunk = NULL;
2445 end_unlock_session:
2446 pthread_mutex_unlock(&conn->session->lock);
2447 end:
2448 reply.ret_code = htobe32((uint32_t) reply_code);
2449 send_ret = conn->sock->ops->sendmsg(conn->sock,
2450 &reply,
2451 sizeof(struct lttcomm_relayd_generic_reply),
2452 0);
2453 if (send_ret < (ssize_t) sizeof(reply)) {
2454 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2455 send_ret);
2456 ret = -1;
2457 }
2458 end_no_reply:
2459 lttng_trace_chunk_put(chunk);
2460 lttng_trace_chunk_put(published_chunk);
2461 return ret;
2462 }
2463
2464 /*
2465 * relay_close_trace_chunk: close a trace chunk
2466 */
2467 static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2468 struct relay_connection *conn,
2469 const struct lttng_buffer_view *payload)
2470 {
2471 int ret = 0, buf_ret;
2472 ssize_t send_ret;
2473 struct relay_session *session = conn->session;
2474 struct lttcomm_relayd_close_trace_chunk *msg;
2475 struct lttcomm_relayd_close_trace_chunk_reply reply = {};
2476 struct lttng_buffer_view header_view;
2477 struct lttng_trace_chunk *chunk = NULL;
2478 enum lttng_error_code reply_code = LTTNG_OK;
2479 enum lttng_trace_chunk_status chunk_status;
2480 uint64_t chunk_id;
2481 LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {};
2482 time_t close_timestamp;
2483 char closed_trace_chunk_path[LTTNG_PATH_MAX];
2484 size_t path_length = 0;
2485 const char *chunk_name = NULL;
2486 struct lttng_dynamic_buffer reply_payload;
2487
2488 lttng_dynamic_buffer_init(&reply_payload);
2489
2490 if (!session || !conn->version_check_done) {
2491 ERR("Trying to close a trace chunk before version check");
2492 ret = -1;
2493 goto end_no_reply;
2494 }
2495
2496 if (session->major == 2 && session->minor < 11) {
2497 ERR("Chunk close command is unsupported before 2.11");
2498 ret = -1;
2499 goto end_no_reply;
2500 }
2501
2502 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2503 if (!header_view.data) {
2504 ERR("Failed to receive payload of chunk close command");
2505 ret = -1;
2506 goto end_no_reply;
2507 }
2508
2509 /* Convert to host endianness. */
2510 msg = (typeof(msg)) header_view.data;
2511 chunk_id = be64toh(msg->chunk_id);
2512 close_timestamp = (time_t) be64toh(msg->close_timestamp);
2513 close_command = (typeof(close_command)){
2514 .value = be32toh(msg->close_command.value),
2515 .is_set = msg->close_command.is_set,
2516 };
2517
2518 chunk = sessiond_trace_chunk_registry_get_chunk(
2519 sessiond_trace_chunk_registry,
2520 conn->session->sessiond_uuid,
2521 conn->session->id,
2522 chunk_id);
2523 if (!chunk) {
2524 char uuid_str[UUID_STR_LEN];
2525
2526 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2527 ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2528 uuid_str,
2529 conn->session->id,
2530 msg->chunk_id);
2531 ret = -1;
2532 reply_code = LTTNG_ERR_NOMEM;
2533 goto end;
2534 }
2535
2536 pthread_mutex_lock(&session->lock);
2537 if (session->pending_closure_trace_chunk &&
2538 session->pending_closure_trace_chunk != chunk) {
2539 ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure",
2540 session->session_name);
2541 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2542 ret = -1;
2543 goto end_unlock_session;
2544 }
2545
2546 chunk_status = lttng_trace_chunk_set_close_timestamp(
2547 chunk, close_timestamp);
2548 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2549 ERR("Failed to set trace chunk close timestamp");
2550 ret = -1;
2551 reply_code = LTTNG_ERR_UNK;
2552 goto end_unlock_session;
2553 }
2554
2555 if (close_command.is_set) {
2556 chunk_status = lttng_trace_chunk_set_close_command(
2557 chunk, close_command.value);
2558 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2559 ret = -1;
2560 reply_code = LTTNG_ERR_INVALID;
2561 goto end_unlock_session;
2562 }
2563 }
2564 chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL);
2565 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2566 ERR("Failed to get chunk name");
2567 ret = -1;
2568 reply_code = LTTNG_ERR_UNK;
2569 goto end_unlock_session;
2570 }
2571 if (!session->has_rotated && !session->snapshot) {
2572 ret = lttng_strncpy(closed_trace_chunk_path,
2573 session->output_path,
2574 sizeof(closed_trace_chunk_path));
2575 if (ret) {
2576 ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes",
2577 strlen(session->output_path),
2578 sizeof(closed_trace_chunk_path));
2579 reply_code = LTTNG_ERR_NOMEM;
2580 ret = -1;
2581 goto end_unlock_session;
2582 }
2583 } else {
2584 if (session->snapshot) {
2585 ret = snprintf(closed_trace_chunk_path,
2586 sizeof(closed_trace_chunk_path),
2587 "%s/%s", session->output_path,
2588 chunk_name);
2589 } else {
2590 ret = snprintf(closed_trace_chunk_path,
2591 sizeof(closed_trace_chunk_path),
2592 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY
2593 "/%s",
2594 session->output_path, chunk_name);
2595 }
2596 if (ret < 0 || ret >= sizeof(closed_trace_chunk_path)) {
2597 ERR("Failed to format the path of the closed trace chunk");
2598 reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM;
2599 ret = -1;
2600 goto end_unlock_session;
2601 }
2602 }
2603 DBG("Reply chunk path on close: %s", closed_trace_chunk_path);
2604 path_length = strlen(closed_trace_chunk_path) + 1;
2605 if (path_length > UINT32_MAX) {
2606 ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol");
2607 ret = -1;
2608 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2609 goto end_unlock_session;
2610 }
2611
2612 if (session->current_trace_chunk == chunk) {
2613 /*
2614 * After a trace chunk close command, no new streams
2615 * referencing the chunk may be created. Hence, in the
2616 * event that no new trace chunk has been created for
2617 * the session, the reference to the current trace chunk
2618 * is released in order to allow it to be reclaimed when
2619 * the last stream releases its reference to it.
2620 */
2621 lttng_trace_chunk_put(session->current_trace_chunk);
2622 session->current_trace_chunk = NULL;
2623 }
2624 lttng_trace_chunk_put(session->pending_closure_trace_chunk);
2625 session->pending_closure_trace_chunk = NULL;
2626 end_unlock_session:
2627 pthread_mutex_unlock(&session->lock);
2628
2629 end:
2630 reply.generic.ret_code = htobe32((uint32_t) reply_code);
2631 reply.path_length = htobe32((uint32_t) path_length);
2632 buf_ret = lttng_dynamic_buffer_append(
2633 &reply_payload, &reply, sizeof(reply));
2634 if (buf_ret) {
2635 ERR("Failed to append \"close trace chunk\" command reply header to payload buffer");
2636 goto end_no_reply;
2637 }
2638
2639 if (reply_code == LTTNG_OK) {
2640 buf_ret = lttng_dynamic_buffer_append(&reply_payload,
2641 closed_trace_chunk_path, path_length);
2642 if (buf_ret) {
2643 ERR("Failed to append \"close trace chunk\" command reply path to payload buffer");
2644 goto end_no_reply;
2645 }
2646 }
2647
2648 send_ret = conn->sock->ops->sendmsg(conn->sock,
2649 reply_payload.data,
2650 reply_payload.size,
2651 0);
2652 if (send_ret < (ssize_t) reply_payload.size) {
2653 ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)",
2654 reply_payload.size, send_ret);
2655 ret = -1;
2656 goto end_no_reply;
2657 }
2658 end_no_reply:
2659 lttng_trace_chunk_put(chunk);
2660 lttng_dynamic_buffer_reset(&reply_payload);
2661 return ret;
2662 }
2663
2664 /*
2665 * relay_trace_chunk_exists: check if a trace chunk exists
2666 */
2667 static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr,
2668 struct relay_connection *conn,
2669 const struct lttng_buffer_view *payload)
2670 {
2671 int ret = 0;
2672 ssize_t send_ret;
2673 struct relay_session *session = conn->session;
2674 struct lttcomm_relayd_trace_chunk_exists *msg;
2675 struct lttcomm_relayd_trace_chunk_exists_reply reply = {};
2676 struct lttng_buffer_view header_view;
2677 uint64_t chunk_id;
2678 bool chunk_exists;
2679
2680 if (!session || !conn->version_check_done) {
2681 ERR("Trying to check for trace chunk existence before version check");
2682 ret = -1;
2683 goto end_no_reply;
2684 }
2685
2686 if (session->major == 2 && session->minor < 11) {
2687 ERR("Chunk exists command is unsupported before 2.11");
2688 ret = -1;
2689 goto end_no_reply;
2690 }
2691
2692 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2693 if (!header_view.data) {
2694 ERR("Failed to receive payload of chunk exists command");
2695 ret = -1;
2696 goto end_no_reply;
2697 }
2698
2699 /* Convert to host endianness. */
2700 msg = (typeof(msg)) header_view.data;
2701 chunk_id = be64toh(msg->chunk_id);
2702
2703 ret = sessiond_trace_chunk_registry_chunk_exists(
2704 sessiond_trace_chunk_registry,
2705 conn->session->sessiond_uuid,
2706 conn->session->id,
2707 chunk_id, &chunk_exists);
2708 /*
2709 * If ret is not 0, send the reply and report the error to the caller.
2710 * It is a protocol (or internal) error and the session/connection
2711 * should be torn down.
2712 */
2713 reply = (typeof(reply)){
2714 .generic.ret_code = htobe32((uint32_t)
2715 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
2716 .trace_chunk_exists = ret == 0 ? chunk_exists : 0,
2717 };
2718 send_ret = conn->sock->ops->sendmsg(
2719 conn->sock, &reply, sizeof(reply), 0);
2720 if (send_ret < (ssize_t) sizeof(reply)) {
2721 ERR("Failed to send \"trace chunk exists\" command reply (ret = %zd)",
2722 send_ret);
2723 ret = -1;
2724 }
2725 end_no_reply:
2726 return ret;
2727 }
2728
2729 #define DBG_CMD(cmd_name, conn) \
2730 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
2731
2732 static int relay_process_control_command(struct relay_connection *conn,
2733 const struct lttcomm_relayd_hdr *header,
2734 const struct lttng_buffer_view *payload)
2735 {
2736 int ret = 0;
2737
2738 switch (header->cmd) {
2739 case RELAYD_CREATE_SESSION:
2740 DBG_CMD("RELAYD_CREATE_SESSION", conn);
2741 ret = relay_create_session(header, conn, payload);
2742 break;
2743 case RELAYD_ADD_STREAM:
2744 DBG_CMD("RELAYD_ADD_STREAM", conn);
2745 ret = relay_add_stream(header, conn, payload);
2746 break;
2747 case RELAYD_START_DATA:
2748 DBG_CMD("RELAYD_START_DATA", conn);
2749 ret = relay_start(header, conn, payload);
2750 break;
2751 case RELAYD_SEND_METADATA:
2752 DBG_CMD("RELAYD_SEND_METADATA", conn);
2753 ret = relay_recv_metadata(header, conn, payload);
2754 break;
2755 case RELAYD_VERSION:
2756 DBG_CMD("RELAYD_VERSION", conn);
2757 ret = relay_send_version(header, conn, payload);
2758 break;
2759 case RELAYD_CLOSE_STREAM:
2760 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
2761 ret = relay_close_stream(header, conn, payload);
2762 break;
2763 case RELAYD_DATA_PENDING:
2764 DBG_CMD("RELAYD_DATA_PENDING", conn);
2765 ret = relay_data_pending(header, conn, payload);
2766 break;
2767 case RELAYD_QUIESCENT_CONTROL:
2768 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
2769 ret = relay_quiescent_control(header, conn, payload);
2770 break;
2771 case RELAYD_BEGIN_DATA_PENDING:
2772 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
2773 ret = relay_begin_data_pending(header, conn, payload);
2774 break;
2775 case RELAYD_END_DATA_PENDING:
2776 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
2777 ret = relay_end_data_pending(header, conn, payload);
2778 break;
2779 case RELAYD_SEND_INDEX:
2780 DBG_CMD("RELAYD_SEND_INDEX", conn);
2781 ret = relay_recv_index(header, conn, payload);
2782 break;
2783 case RELAYD_STREAMS_SENT:
2784 DBG_CMD("RELAYD_STREAMS_SENT", conn);
2785 ret = relay_streams_sent(header, conn, payload);
2786 break;
2787 case RELAYD_RESET_METADATA:
2788 DBG_CMD("RELAYD_RESET_METADATA", conn);
2789 ret = relay_reset_metadata(header, conn, payload);
2790 break;
2791 case RELAYD_ROTATE_STREAMS:
2792 DBG_CMD("RELAYD_ROTATE_STREAMS", conn);
2793 ret = relay_rotate_session_streams(header, conn, payload);
2794 break;
2795 case RELAYD_CREATE_TRACE_CHUNK:
2796 DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn);
2797 ret = relay_create_trace_chunk(header, conn, payload);
2798 break;
2799 case RELAYD_CLOSE_TRACE_CHUNK:
2800 DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn);
2801 ret = relay_close_trace_chunk(header, conn, payload);
2802 break;
2803 case RELAYD_TRACE_CHUNK_EXISTS:
2804 DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn);
2805 ret = relay_trace_chunk_exists(header, conn, payload);
2806 break;
2807 case RELAYD_UPDATE_SYNC_INFO:
2808 default:
2809 ERR("Received unknown command (%u)", header->cmd);
2810 relay_unknown_command(conn);
2811 ret = -1;
2812 goto end;
2813 }
2814
2815 end:
2816 return ret;
2817 }
2818
2819 static enum relay_connection_status relay_process_control_receive_payload(
2820 struct relay_connection *conn)
2821 {
2822 int ret = 0;
2823 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2824 struct lttng_dynamic_buffer *reception_buffer =
2825 &conn->protocol.ctrl.reception_buffer;
2826 struct ctrl_connection_state_receive_payload *state =
2827 &conn->protocol.ctrl.state.receive_payload;
2828 struct lttng_buffer_view payload_view;
2829
2830 if (state->left_to_receive == 0) {
2831 /* Short-circuit for payload-less commands. */
2832 goto reception_complete;
2833 }
2834
2835 ret = conn->sock->ops->recvmsg(conn->sock,
2836 reception_buffer->data + state->received,
2837 state->left_to_receive, MSG_DONTWAIT);
2838 if (ret < 0) {
2839 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2840 PERROR("Unable to receive command payload on sock %d",
2841 conn->sock->fd);
2842 status = RELAY_CONNECTION_STATUS_ERROR;
2843 }
2844 goto end;
2845 } else if (ret == 0) {
2846 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2847 status = RELAY_CONNECTION_STATUS_CLOSED;
2848 goto end;
2849 }
2850
2851 assert(ret > 0);
2852 assert(ret <= state->left_to_receive);
2853
2854 state->left_to_receive -= ret;
2855 state->received += ret;
2856
2857 if (state->left_to_receive > 0) {
2858 /*
2859 * Can't transition to the protocol's next state, wait to
2860 * receive the rest of the payload.
2861 */
2862 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2863 state->received, state->left_to_receive,
2864 conn->sock->fd);
2865 goto end;
2866 }
2867
2868 reception_complete:
2869 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
2870 conn->sock->fd, state->received);
2871 /*
2872 * The payload required to process the command has been received.
2873 * A view to the reception buffer is forwarded to the various
2874 * commands and the state of the control is reset on success.
2875 *
2876 * Commands are responsible for sending their reply to the peer.
2877 */
2878 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
2879 0, -1);
2880 ret = relay_process_control_command(conn,
2881 &state->header, &payload_view);
2882 if (ret < 0) {
2883 status = RELAY_CONNECTION_STATUS_ERROR;
2884 goto end;
2885 }
2886
2887 ret = connection_reset_protocol_state(conn);
2888 if (ret) {
2889 status = RELAY_CONNECTION_STATUS_ERROR;
2890 }
2891 end:
2892 return status;
2893 }
2894
2895 static enum relay_connection_status relay_process_control_receive_header(
2896 struct relay_connection *conn)
2897 {
2898 int ret = 0;
2899 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2900 struct lttcomm_relayd_hdr header;
2901 struct lttng_dynamic_buffer *reception_buffer =
2902 &conn->protocol.ctrl.reception_buffer;
2903 struct ctrl_connection_state_receive_header *state =
2904 &conn->protocol.ctrl.state.receive_header;
2905
2906 assert(state->left_to_receive != 0);
2907
2908 ret = conn->sock->ops->recvmsg(conn->sock,
2909 reception_buffer->data + state->received,
2910 state->left_to_receive, MSG_DONTWAIT);
2911 if (ret < 0) {
2912 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2913 PERROR("Unable to receive control command header on sock %d",
2914 conn->sock->fd);
2915 status = RELAY_CONNECTION_STATUS_ERROR;
2916 }
2917 goto end;
2918 } else if (ret == 0) {
2919 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2920 status = RELAY_CONNECTION_STATUS_CLOSED;
2921 goto end;
2922 }
2923
2924 assert(ret > 0);
2925 assert(ret <= state->left_to_receive);
2926
2927 state->left_to_receive -= ret;
2928 state->received += ret;
2929
2930 if (state->left_to_receive > 0) {
2931 /*
2932 * Can't transition to the protocol's next state, wait to
2933 * receive the rest of the header.
2934 */
2935 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2936 state->received, state->left_to_receive,
2937 conn->sock->fd);
2938 goto end;
2939 }
2940
2941 /* Transition to next state: receiving the command's payload. */
2942 conn->protocol.ctrl.state_id =
2943 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
2944 memcpy(&header, reception_buffer->data, sizeof(header));
2945 header.circuit_id = be64toh(header.circuit_id);
2946 header.data_size = be64toh(header.data_size);
2947 header.cmd = be32toh(header.cmd);
2948 header.cmd_version = be32toh(header.cmd_version);
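/*
 * Stash the converted header in the payload reception state; the
 * command dispatcher consults it once the payload is complete.
 */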
2949 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
2950 &header, sizeof(header));
2951
2952 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
2953 conn->sock->fd, header.cmd, header.cmd_version,
2954 header.data_size);
2955
2956 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
2957 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
2958 header.data_size);
2959 status = RELAY_CONNECTION_STATUS_ERROR;
2960 goto end;
2961 }
2962
2963 conn->protocol.ctrl.state.receive_payload.left_to_receive =
2964 header.data_size;
2965 conn->protocol.ctrl.state.receive_payload.received = 0;
2966 ret = lttng_dynamic_buffer_set_size(reception_buffer,
2967 header.data_size);
2968 if (ret) {
2969 status = RELAY_CONNECTION_STATUS_ERROR;
2970 goto end;
2971 }
2972
2973 if (header.data_size == 0) {
2974 /*
2975 * Manually invoke the next state as the poll loop
2976 * will not wake up to allow us to proceed further.
2977 */
2978 status = relay_process_control_receive_payload(conn);
2979 }
2980 end:
2981 return status;
2982 }
2983
2984 /*
2985 * Process the commands received on the control socket
2986 */
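/*
 * A control connection is a small two-state machine: the fixed-size
 * command header is received first, followed by the variable-size
 * payload it announces, after which the state is reset to receive
 * the next header.
 */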
2987 static enum relay_connection_status relay_process_control(
2988 struct relay_connection *conn)
2989 {
2990 enum relay_connection_status status;
2991
2992 switch (conn->protocol.ctrl.state_id) {
2993 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
2994 status = relay_process_control_receive_header(conn);
2995 break;
2996 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
2997 status = relay_process_control_receive_payload(conn);
2998 break;
2999 default:
3000 ERR("Unknown control connection protocol state encountered.");
3001 abort();
3002 }
3003
3004 return status;
3005 }
3006
3007 static enum relay_connection_status relay_process_data_receive_header(
3008 struct relay_connection *conn)
3009 {
3010 int ret;
3011 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3012 struct data_connection_state_receive_header *state =
3013 &conn->protocol.data.state.receive_header;
3014 struct lttcomm_relayd_data_hdr header;
3015 struct relay_stream *stream;
3016
3017 assert(state->left_to_receive != 0);
3018
3019 ret = conn->sock->ops->recvmsg(conn->sock,
3020 state->header_reception_buffer + state->received,
3021 state->left_to_receive, MSG_DONTWAIT);
3022 if (ret < 0) {
3023 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3024 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
3025 status = RELAY_CONNECTION_STATUS_ERROR;
3026 }
3027 goto end;
3028 } else if (ret == 0) {
3029 /* Orderly shutdown. Not necessary to print an error. */
3030 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3031 status = RELAY_CONNECTION_STATUS_CLOSED;
3032 goto end;
3033 }
3034
3035 assert(ret > 0);
3036 assert(ret <= state->left_to_receive);
3037
3038 state->left_to_receive -= ret;
3039 state->received += ret;
3040
3041 if (state->left_to_receive > 0) {
3042 /*
3043 * Can't transition to the protocol's next state, wait to
3044 * receive the rest of the header.
3045 */
3046 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3047 state->received, state->left_to_receive,
3048 conn->sock->fd);
3049 goto end;
3050 }
3051
3052 /* Transition to next state: receiving the payload. */
3053 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
3054
3055 memcpy(&header, state->header_reception_buffer, sizeof(header));
3056 header.circuit_id = be64toh(header.circuit_id);
3057 header.stream_id = be64toh(header.stream_id);
3058 header.data_size = be32toh(header.data_size);
3059 header.net_seq_num = be64toh(header.net_seq_num);
3060 header.padding_size = be32toh(header.padding_size);
3061 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
3062
3063 conn->protocol.data.state.receive_payload.left_to_receive =
3064 header.data_size;
3065 conn->protocol.data.state.receive_payload.received = 0;
3066 conn->protocol.data.state.receive_payload.rotate_index = false;
3067
3068 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
3069 conn->sock->fd, header.circuit_id,
3070 header.stream_id, header.data_size,
3071 header.net_seq_num, header.padding_size);
3072
3073 stream = stream_get_by_id(header.stream_id);
3074 if (!stream) {
3075 DBG("relay_process_data_receive_header: cannot find stream %" PRIu64,
3076 header.stream_id);
3077 /* Protocol error. */
3078 status = RELAY_CONNECTION_STATUS_ERROR;
3079 goto end;
3080 }
3081
3082 pthread_mutex_lock(&stream->lock);
3083 /* Prepare stream for the reception of a new packet. */
3084 ret = stream_init_packet(stream, header.data_size,
3085 &conn->protocol.data.state.receive_payload.rotate_index);
3086 pthread_mutex_unlock(&stream->lock);
3087 if (ret) {
3088 ERR("Failed to initialize packet reception for stream %" PRIu64, stream->stream_handle);
3089 status = RELAY_CONNECTION_STATUS_ERROR;
3090 goto end_stream_unlock;
3091 }
3092
3093 end_stream_unlock:
3094 stream_put(stream);
3095 end:
3096 return status;
3097 }
3098
3099 static enum relay_connection_status relay_process_data_receive_payload(
3100 struct relay_connection *conn)
3101 {
3102 int ret;
3103 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3104 struct relay_stream *stream;
3105 struct data_connection_state_receive_payload *state =
3106 &conn->protocol.data.state.receive_payload;
3107 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
3108 char data_buffer[chunk_size];
3109 bool partial_recv = false;
3110 bool new_stream = false, close_requested = false, index_flushed = false;
3111 uint64_t left_to_receive = state->left_to_receive;
3112 struct relay_session *session;
3113
3114 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
3115 state->header.stream_id, state->header.net_seq_num,
3116 state->received, left_to_receive);
3117
3118 stream = stream_get_by_id(state->header.stream_id);
3119 if (!stream) {
3120 /* Protocol error. */
3121 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
3122 state->header.stream_id);
3123 status = RELAY_CONNECTION_STATUS_ERROR;
3124 goto end;
3125 }
3126
3127 pthread_mutex_lock(&stream->lock);
3128 session = stream->trace->session;
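/*
 * Lazily bind the data connection to its session on reception of the
 * first packet referencing one of the session's streams.
 */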
3129 if (!conn->session) {
3130 ret = connection_set_session(conn, session);
3131 if (ret) {
3132 status = RELAY_CONNECTION_STATUS_ERROR;
3133 goto end_stream_unlock;
3134 }
3135 }
3136
3137 /*
3138 * The size of the "chunk" received on any iteration is bounded by:
3139 * - the data left to receive,
3140 * - the data immediately available on the socket,
3141 * - the on-stack data buffer
3142 */
3143 while (left_to_receive > 0 && !partial_recv) {
3144 size_t recv_size = min(left_to_receive, chunk_size);
3145 struct lttng_buffer_view packet_chunk;
3146
3147 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
3148 recv_size, MSG_DONTWAIT);
3149 if (ret < 0) {
3150 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3151 PERROR("Socket %d error", conn->sock->fd);
3152 status = RELAY_CONNECTION_STATUS_ERROR;
3153 }
3154 goto end_stream_unlock;
3155 } else if (ret == 0) {
3156 /* No more data ready to be consumed on socket. */
3157 DBG3("No more data ready for consumption on data socket of stream id %" PRIu64,
3158 state->header.stream_id);
3159 status = RELAY_CONNECTION_STATUS_CLOSED;
3160 break;
3161 } else if (ret < (int) recv_size) {
3162 /*
3163 * All the data available on the socket has been
3164 * consumed.
3165 */
3166 partial_recv = true;
3167 recv_size = ret;
3168 }
3169
3170 packet_chunk = lttng_buffer_view_init(data_buffer,
3171 0, recv_size);
3172 assert(packet_chunk.data);
3173
3174 ret = stream_write(stream, &packet_chunk, 0);
3175 if (ret) {
3176 ERR("Relay error writing data to file");
3177 status = RELAY_CONNECTION_STATUS_ERROR;
3178 goto end_stream_unlock;
3179 }
3180
3181 left_to_receive -= recv_size;
3182 state->received += recv_size;
3183 state->left_to_receive = left_to_receive;
3184 }
3185
3186 if (state->left_to_receive > 0) {
3187 /*
3188 * Did not receive all the data expected, wait for more data to
3189 * become available on the socket.
3190 */
3191 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
3192 state->header.stream_id, state->received,
3193 state->left_to_receive);
3194 goto end_stream_unlock;
3195 }
3196
3197 ret = stream_write(stream, NULL, state->header.padding_size);
3198 if (ret) {
3199 status = RELAY_CONNECTION_STATUS_ERROR;
3200 goto end_stream_unlock;
3201 }
3202
3203 if (session_streams_have_index(session)) {
3204 ret = stream_update_index(stream, state->header.net_seq_num,
3205 state->rotate_index, &index_flushed,
3206 state->header.data_size + state->header.padding_size);
3207 if (ret < 0) {
3208 ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3209 stream->stream_handle,
3210 state->header.net_seq_num, ret);
3211 status = RELAY_CONNECTION_STATUS_ERROR;
3212 goto end_stream_unlock;
3213 }
3214 }
3215
3216 if (stream->prev_data_seq == -1ULL) {
3217 new_stream = true;
3218 }
3219
3220 ret = stream_complete_packet(stream, state->header.data_size +
3221 state->header.padding_size, state->header.net_seq_num,
3222 index_flushed);
3223 if (ret) {
3224 status = RELAY_CONNECTION_STATUS_ERROR;
3225 goto end_stream_unlock;
3226 }
3227
3228 /*
3229 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
3230 * contents of *state which are aliased (union) to the same location as
3231 * the new state. Don't use it beyond this point.
3232 */
3233 connection_reset_protocol_state(conn);
3234 state = NULL;
3235
3236 end_stream_unlock:
3237 close_requested = stream->close_requested;
3238 pthread_mutex_unlock(&stream->lock);
3239 if (close_requested && left_to_receive == 0) {
3240 try_stream_close(stream);
3241 }
3242
3243 if (new_stream) {
3244 pthread_mutex_lock(&session->lock);
3245 uatomic_set(&session->new_streams, 1);
3246 pthread_mutex_unlock(&session->lock);
3247 }
3248
3249 stream_put(stream);
3250 end:
3251 return status;
3252 }
3253
3254 /*
3255 * relay_process_data: Process the data received on the data socket
3256 */
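/*
 * Like the control path, the data path is a two-state machine: the
 * fixed-size data header is received first, then the packet payload
 * is streamed to the stream's output in chunks of at most
 * RECV_DATA_BUFFER_SIZE bytes.
 */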
3257 static enum relay_connection_status relay_process_data(
3258 struct relay_connection *conn)
3259 {
3260 enum relay_connection_status status;
3261
3262 switch (conn->protocol.data.state_id) {
3263 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
3264 status = relay_process_data_receive_header(conn);
3265 break;
3266 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
3267 status = relay_process_data_receive_payload(conn);
3268 break;
3269 default:
3270 ERR("Unexpected data connection communication state.");
3271 abort();
3272 }
3273
3274 return status;
3275 }
3276
3277 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
3278 {
3279 int ret;
3280
3281 (void) lttng_poll_del(events, pollfd);
3282
3283 ret = close(pollfd);
3284 if (ret < 0) {
3285 PERROR("Failed to close pollfd %d", pollfd);
3286 }
3287 }
3288
3289 static void relay_thread_close_connection(struct lttng_poll_event *events,
3290 int pollfd, struct relay_connection *conn)
3291 {
3292 const char *type_str;
3293
3294 switch (conn->type) {
3295 case RELAY_DATA:
3296 type_str = "Data";
3297 break;
3298 case RELAY_CONTROL:
3299 type_str = "Control";
3300 break;
3301 case RELAY_VIEWER_COMMAND:
3302 type_str = "Viewer Command";
3303 break;
3304 case RELAY_VIEWER_NOTIFICATION:
3305 type_str = "Viewer Notification";
3306 break;
3307 default:
3308 type_str = "Unknown";
3309 }
3310 cleanup_connection_pollfd(events, pollfd);
3311 connection_put(conn);
3312 DBG("%s connection closed with fd %d", type_str, pollfd);
3313 }
3314
3315 /*
3316 * This thread does the actual work
3317 */
3318 static void *relay_thread_worker(void *data)
3319 {
3320 int ret, err = -1, last_seen_data_fd = -1;
3321 uint32_t nb_fd;
3322 struct lttng_poll_event events;
3323 struct lttng_ht *relay_connections_ht;
3324 struct lttng_ht_iter iter;
3325 struct relay_connection *destroy_conn = NULL;
3326
3327 DBG("[thread] Relay worker started");
3328
3329 rcu_register_thread();
3330
3331 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
3332
3333 if (testpoint(relayd_thread_worker)) {
3334 goto error_testpoint;
3335 }
3336
3337 health_code_update();
3338
3339 /* table of connections indexed on socket */
3340 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3341 if (!relay_connections_ht) {
3342 goto relay_connections_ht_error;
3343 }
3344
3345 ret = create_thread_poll_set(&events, 2);
3346 if (ret < 0) {
3347 goto error_poll_create;
3348 }
3349
3350 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
3351 if (ret < 0) {
3352 goto error;
3353 }
3354
3355 restart:
3356 while (1) {
3357 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
3358
3359 health_code_update();
3360
3361 /* Infinite blocking call, waiting for transmission */
3362 DBG3("Relayd worker thread polling...");
3363 health_poll_entry();
3364 ret = lttng_poll_wait(&events, -1);
3365 health_poll_exit();
3366 if (ret < 0) {
3367 /*
3368 * Restart interrupted system call.
3369 */
3370 if (errno == EINTR) {
3371 goto restart;
3372 }
3373 goto error;
3374 }
3375
3376 nb_fd = ret;
3377
3378 /*
3379 * Process control. The control connection is
3380 * prioritized so we don't starve it with high
3381 * throughput tracing data on the data connection.
3382 */
3383 for (i = 0; i < nb_fd; i++) {
3384 /* Fetch once the poll data */
3385 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3386 int pollfd = LTTNG_POLL_GETFD(&events, i);
3387
3388 health_code_update();
3389
3390 /* Thread quit pipe has been closed. Killing thread. */
3391 ret = check_thread_quit_pipe(pollfd, revents);
3392 if (ret) {
3393 err = 0;
3394 goto exit;
3395 }
3396
3397 /* Inspect the relay conn pipe for new connection */
3398 if (pollfd == relay_conn_pipe[0]) {
3399 if (revents & LPOLLIN) {
3400 struct relay_connection *conn;
3401
3402 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3403 if (ret < 0) {
3404 goto error;
3405 }
3406 lttng_poll_add(&events, conn->sock->fd,
3407 LPOLLIN | LPOLLRDHUP);
3408 connection_ht_add(relay_connections_ht, conn);
3409 DBG("Connection socket %d added", conn->sock->fd);
3410 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3411 ERR("Relay connection pipe error");
3412 goto error;
3413 } else {
3414 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3415 goto error;
3416 }
3417 } else {
3418 struct relay_connection *ctrl_conn;
3419
3420 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3421 /* If not found, there is a synchronization issue. */
3422 assert(ctrl_conn);
3423
3424 if (ctrl_conn->type == RELAY_DATA) {
3425 if (revents & LPOLLIN) {
3426 /*
3427 * Remember this data fd, which is known not to be deleted.
3428 * It will be used as the last seen data fd if the current
3429 * last seen fd gets deleted in this first loop.
3430 */
3431 last_notdel_data_fd = pollfd;
3432 }
3433 goto put_ctrl_connection;
3434 }
3435 assert(ctrl_conn->type == RELAY_CONTROL);
3436
3437 if (revents & LPOLLIN) {
3438 enum relay_connection_status status;
3439
3440 status = relay_process_control(ctrl_conn);
3441 if (status != RELAY_CONNECTION_STATUS_OK) {
3442 /*
3443 * On socket error, flag the session as aborted to force
3444 * the cleanup of its streams, which could otherwise leak
3445 * for the lifetime of the relayd.
3446 *
3447 * This prevents situations in which streams can be
3448 * left open because an index was received, the
3449 * control connection is closed, and the data
3450 * connection is closed (uncleanly) before the packet's
3451 * data was provided.
3452 *
3453 * Since the control connection encountered an error,
3454 * it is okay to be conservative and close the
3455 * session right now as we can't rely on the protocol
3456 * being respected anymore.
3457 */
3458 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3459 session_abort(ctrl_conn->session);
3460 }
3461
3462 /* Clear the connection on error or close. */
3463 relay_thread_close_connection(&events,
3464 pollfd,
3465 ctrl_conn);
3466 }
3467 seen_control = 1;
3468 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3469 relay_thread_close_connection(&events,
3470 pollfd, ctrl_conn);
3471 if (last_seen_data_fd == pollfd) {
3472 last_seen_data_fd = last_notdel_data_fd;
3473 }
3474 } else {
3475 ERR("Unexpected poll events %u for control sock %d",
3476 revents, pollfd);
3477 connection_put(ctrl_conn);
3478 goto error;
3479 }
3480 put_ctrl_connection:
3481 connection_put(ctrl_conn);
3482 }
3483 }
3484
3485 /*
3486 * The last loop handled a control request, go back to poll to make
3487 * sure we prioritise the control socket.
3488 */
3489 if (seen_control) {
3490 continue;
3491 }
3492
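/*
 * Resume data processing right after the last data fd handled on the
 * previous iteration so that a single busy data connection cannot
 * starve the others.
 */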
3493 if (last_seen_data_fd >= 0) {
3494 for (i = 0; i < nb_fd; i++) {
3495 int pollfd = LTTNG_POLL_GETFD(&events, i);
3496
3497 health_code_update();
3498
3499 if (last_seen_data_fd == pollfd) {
3500 idx = i;
3501 break;
3502 }
3503 }
3504 }
3505
3506 /* Process data connection. */
3507 for (i = idx + 1; i < nb_fd; i++) {
3508 /* Fetch the poll data. */
3509 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3510 int pollfd = LTTNG_POLL_GETFD(&events, i);
3511 struct relay_connection *data_conn;
3512
3513 health_code_update();
3514
3515 if (!revents) {
3516 /* No activity for this FD (poll implementation). */
3517 continue;
3518 }
3519
3520 /* Skip the command pipe. It's handled in the first loop. */
3521 if (pollfd == relay_conn_pipe[0]) {
3522 continue;
3523 }
3524
3525 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3526 if (!data_conn) {
3527 /* Skip it; the connection might already have been removed. */
3528 continue;
3529 }
3530 if (data_conn->type == RELAY_CONTROL) {
3531 goto put_data_connection;
3532 }
3533 assert(data_conn->type == RELAY_DATA);
3534
3535 if (revents & LPOLLIN) {
3536 enum relay_connection_status status;
3537
3538 status = relay_process_data(data_conn);
3539 /* Connection closed or error. */
3540 if (status != RELAY_CONNECTION_STATUS_OK) {
3541 /*
3542 * On socket error, flag the session as aborted to force
3543 * the cleanup of its streams, which could otherwise leak
3544 * for the lifetime of the relayd.
3545 *
3546 * This prevents situations in which streams can be
3547 * left open because an index was received, the
3548 * control connection is closed, and the data
3549 * connection is closed (uncleanly) before the packet's
3550 * data was provided.
3551 *
3552 * Since the data connection encountered an error,
3553 * it is okay to be conservative and close the
3554 * session right now as we can't rely on the protocol
3555 * being respected anymore.
3556 */
3557 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3558 session_abort(data_conn->session);
3559 }
3560 relay_thread_close_connection(&events, pollfd,
3561 data_conn);
3562 /*
3563 * Every "goto restart" normally updates the last seen fd;
3564 * here we do not really care since we gracefully
3565 * continue the loop after the connection is deleted.
3566 */
3567 } else {
3568 /* Keep track of the last seen data fd. */
3569 last_seen_data_fd = pollfd;
3570 connection_put(data_conn);
3571 goto restart;
3572 }
3573 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3574 relay_thread_close_connection(&events, pollfd,
3575 data_conn);
3576 } else {
3577 ERR("Unknown poll events %u for data sock %d",
3578 revents, pollfd);
3579 }
3580 put_data_connection:
3581 connection_put(data_conn);
3582 }
3583 last_seen_data_fd = -1;
3584 }
3585
3586 /* Normal exit, no error */
3587 ret = 0;
3588
3589 exit:
3590 error:
3591 /* Cleanup remaining connection object. */
3592 rcu_read_lock();
3593 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3594 destroy_conn,
3595 sock_n.node) {
3596 health_code_update();
3597
3598 session_abort(destroy_conn->session);
3599
3600 /*
3601 * No need to grab another ref, because we own
3602 * destroy_conn.
3603 */
3604 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3605 destroy_conn);
3606 }
3607 rcu_read_unlock();
3608
3609 lttng_poll_clean(&events);
3610 error_poll_create:
3611 lttng_ht_destroy(relay_connections_ht);
3612 relay_connections_ht_error:
3613 /* Close relay conn pipes */
3614 utils_close_pipe(relay_conn_pipe);
3615 if (err) {
3616 DBG("Thread exited with error");
3617 }
3618 DBG("Worker thread cleanup complete");
3619 error_testpoint:
3620 if (err) {
3621 health_error();
3622 ERR("Health error occurred in %s", __func__);
3623 }
3624 health_unregister(health_relayd);
3625 rcu_unregister_thread();
3626 lttng_relay_stop_threads();
3627 return NULL;
3628 }
3629
3630 /*
3631 * Create the relay connection pipe used to wake up the worker thread.
3632 * Closed in cleanup().
3633 */
3634 static int create_relay_conn_pipe(void)
3635 {
3636 int ret;
3637
3638 ret = utils_create_pipe_cloexec(relay_conn_pipe);
3639
3640 return ret;
3641 }
3642
3643 /*
3644 * main
3645 */
3646 int main(int argc, char **argv)
3647 {
3648 int ret = 0, retval = 0;
3649 void *status;
3650
3651 /* Parse arguments */
3652 progname = argv[0];
3653 if (set_options(argc, argv)) {
3654 retval = -1;
3655 goto exit_options;
3656 }
3657
3658 if (set_signal_handler()) {
3659 retval = -1;
3660 goto exit_options;
3661 }
3662
3663 /* Try to create directory if -o, --output is specified. */
3664 if (opt_output_path) {
3665 if (*opt_output_path != '/') {
3666 ERR("Please specify an absolute path for -o, --output PATH");
3667 retval = -1;
3668 goto exit_options;
3669 }
3670
3671 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
3672 -1, -1);
3673 if (ret < 0) {
3674 ERR("Unable to create %s", opt_output_path);
3675 retval = -1;
3676 goto exit_options;
3677 }
3678 }
3679
3680 /* Daemonize */
3681 if (opt_daemon || opt_background) {
3682 int i;
3683
3684 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
3685 !opt_background);
3686 if (ret < 0) {
3687 retval = -1;
3688 goto exit_options;
3689 }
3690
3691 /*
3692 * We are in the child. Make sure all other file
3693 * descriptors are closed, in case we are called with
3694 * more opened file descriptors than the standard ones.
3695 */
3696 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
3697 (void) close(i);
3698 }
3699 }
3700
3701 sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create();
3702 if (!sessiond_trace_chunk_registry) {
3703 ERR("Failed to initialize session daemon trace chunk registry");
3704 retval = -1;
3705 goto exit_sessiond_trace_chunk_registry;
3706 }
3707
3708 /* Initialize thread health monitoring */
3709 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
3710 if (!health_relayd) {
3711 PERROR("health_app_create error");
3712 retval = -1;
3713 goto exit_health_app_create;
3714 }
3715
3716 /* Create thread quit pipe */
3717 if (init_thread_quit_pipe()) {
3718 retval = -1;
3719 goto exit_init_data;
3720 }
3721
3722 /* Setup the thread apps communication pipe. */
3723 if (create_relay_conn_pipe()) {
3724 retval = -1;
3725 goto exit_init_data;
3726 }
3727
3728 /* Init relay command queue. */
3729 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
3730
3731 /* Initialize communication library */
3732 lttcomm_init();
3733 lttcomm_inet_init();
3734
3735 /* tables of sessions indexed by session ID */
3736 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3737 if (!sessions_ht) {
3738 retval = -1;
3739 goto exit_init_data;
3740 }
3741
3742 /* tables of streams indexed by stream ID */
3743 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3744 if (!relay_streams_ht) {
3745 retval = -1;
3746 goto exit_init_data;
3747 }
3748
3749 /* tables of streams indexed by stream ID */
3750 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3751 if (!viewer_streams_ht) {
3752 retval = -1;
3753 goto exit_init_data;
3754 }
3755
3756 ret = utils_create_pipe(health_quit_pipe);
3757 if (ret) {
3758 retval = -1;
3759 goto exit_health_quit_pipe;
3760 }
3761
3762 /* Create the thread managing the health check socket. */
3763 ret = pthread_create(&health_thread, default_pthread_attr(),
3764 thread_manage_health, (void *) NULL);
3765 if (ret) {
3766 errno = ret;
3767 PERROR("pthread_create health");
3768 retval = -1;
3769 goto exit_health_thread;
3770 }
3771
3772 /* Setup the dispatcher thread */
3773 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
3774 relay_thread_dispatcher, (void *) NULL);
3775 if (ret) {
3776 errno = ret;
3777 PERROR("pthread_create dispatcher");
3778 retval = -1;
3779 goto exit_dispatcher_thread;
3780 }
3781
3782 /* Setup the worker thread */
3783 ret = pthread_create(&worker_thread, default_pthread_attr(),
3784 relay_thread_worker, NULL);
3785 if (ret) {
3786 errno = ret;
3787 PERROR("pthread_create worker");
3788 retval = -1;
3789 goto exit_worker_thread;
3790 }
3791
3792 /* Setup the listener thread */
3793 ret = pthread_create(&listener_thread, default_pthread_attr(),
3794 relay_thread_listener, (void *) NULL);
3795 if (ret) {
3796 errno = ret;
3797 PERROR("pthread_create listener");
3798 retval = -1;
3799 goto exit_listener_thread;
3800 }
3801
3802 ret = relayd_live_create(live_uri);
3803 if (ret) {
3804 ERR("Failed to start the live viewer threads");
3805 retval = -1;
3806 goto exit_live;
3807 }
3808
3809 /*
3810 * This is where we start awaiting program completion (e.g. through
3811 * a signal that asks the threads to tear down).
3812 */
3813
3814 ret = relayd_live_join();
3815 if (ret) {
3816 retval = -1;
3817 }
3818 exit_live:
3819
3820 ret = pthread_join(listener_thread, &status);
3821 if (ret) {
3822 errno = ret;
3823 PERROR("pthread_join listener_thread");
3824 retval = -1;
3825 }
3826
3827 exit_listener_thread:
3828 ret = pthread_join(worker_thread, &status);
3829 if (ret) {
3830 errno = ret;
3831 PERROR("pthread_join worker_thread");
3832 retval = -1;
3833 }
3834
3835 exit_worker_thread:
3836 ret = pthread_join(dispatcher_thread, &status);
3837 if (ret) {
3838 errno = ret;
3839 PERROR("pthread_join dispatcher_thread");
3840 retval = -1;
3841 }
3842 exit_dispatcher_thread:
3843
3844 ret = pthread_join(health_thread, &status);
3845 if (ret) {
3846 errno = ret;
3847 PERROR("pthread_join health_thread");
3848 retval = -1;
3849 }
3850 exit_health_thread:
3851
3852 utils_close_pipe(health_quit_pipe);
3853 exit_health_quit_pipe:
3854
3855 exit_init_data:
3856 health_app_destroy(health_relayd);
3857 sessiond_trace_chunk_registry_destroy(sessiond_trace_chunk_registry);
3858 exit_health_app_create:
3859 exit_sessiond_trace_chunk_registry:
3860 exit_options:
3861 /*
3862 * Wait for all pending call_rcu work to complete before tearing
3863 * down data structures. call_rcu worker may be trying to
3864 * perform lookups in those structures.
3865 */
3866 rcu_barrier();
3867 relayd_cleanup();
3868
3869 /* Ensure all prior call_rcu are done. */
3870 rcu_barrier();
3871
3872 if (!retval) {
3873 exit(EXIT_SUCCESS);
3874 } else {
3875 exit(EXIT_FAILURE);
3876 }
3877 }