src/bin/lttng-relayd/main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <inttypes.h>
38 #include <urcu/futex.h>
39 #include <urcu/uatomic.h>
40 #include <unistd.h>
41 #include <fcntl.h>
42
43 #include <lttng/lttng.h>
44 #include <common/common.h>
45 #include <common/compat/poll.h>
46 #include <common/compat/socket.h>
47 #include <common/compat/endian.h>
48 #include <common/compat/getenv.h>
49 #include <common/defaults.h>
50 #include <common/daemonize.h>
51 #include <common/futex.h>
52 #include <common/sessiond-comm/sessiond-comm.h>
53 #include <common/sessiond-comm/inet.h>
54 #include <common/sessiond-comm/relayd.h>
55 #include <common/uri.h>
56 #include <common/utils.h>
57 #include <common/align.h>
58 #include <common/config/session-config.h>
59 #include <common/dynamic-buffer.h>
60 #include <common/buffer-view.h>
61 #include <urcu/rculist.h>
62
63 #include "cmd.h"
64 #include "ctf-trace.h"
65 #include "index.h"
66 #include "utils.h"
67 #include "lttng-relayd.h"
68 #include "live.h"
69 #include "health-relayd.h"
70 #include "testpoint.h"
71 #include "viewer-stream.h"
72 #include "session.h"
73 #include "stream.h"
74 #include "connection.h"
75 #include "tracefile-array.h"
76 #include "tcp_keep_alive.h"
77 #include "sessiond-trace-chunks.h"
78
79 static const char *help_msg =
80 #ifdef LTTNG_EMBED_HELP
81 #include <lttng-relayd.8.h>
82 #else
83 NULL
84 #endif
85 ;
86
87 enum relay_connection_status {
88 RELAY_CONNECTION_STATUS_OK,
89 /* An error occurred while processing an event on the connection. */
90 RELAY_CONNECTION_STATUS_ERROR,
91 /* Connection closed/shutdown cleanly. */
92 RELAY_CONNECTION_STATUS_CLOSED,
93 };
94
95 /* command line options */
96 char *opt_output_path;
97 static int opt_daemon, opt_background;
98
99 /*
100 * We need to wait for the listener, live listener and health check
101 * threads before signaling readiness to the parent process.
102 */
103 #define NR_LTTNG_RELAY_READY 3
104 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
105
106 /* Sizes of the receive and file-copy buffers. */
107 #define RECV_DATA_BUFFER_SIZE 65536
108 #define FILE_COPY_BUFFER_SIZE 65536
109
110 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
111 static pid_t child_ppid; /* Internal parent PID used when daemonizing. */
112
113 static struct lttng_uri *control_uri;
114 static struct lttng_uri *data_uri;
115 static struct lttng_uri *live_uri;
116
117 const char *progname;
118
119 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
120 static int tracing_group_name_override;
121
122 const char * const config_section_name = "relayd";
123
124 /*
125 * Quit pipe for all threads. This permits a single cancellation point
126 * for all threads when receiving an event on the pipe.
127 */
128 int thread_quit_pipe[2] = { -1, -1 };
129
130 /*
131 * This pipe is used by the dispatcher thread to pass newly accepted
132 * connections (by pointer) to the worker thread for processing.
133 */
134 static int relay_conn_pipe[2] = { -1, -1 };
135
136 /* Shared between threads */
137 static int dispatch_thread_exit;
138
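/*
 * Rough thread layout, for orientation only: the listener thread accepts
 * control/data connections and enqueues them, the dispatcher thread hands
 * them to the worker thread over relay_conn_pipe, and the health thread
 * answers health-check queries. The live viewer threads are managed
 * separately (see live.c).
 */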
139 static pthread_t listener_thread;
140 static pthread_t dispatcher_thread;
141 static pthread_t worker_thread;
142 static pthread_t health_thread;
143
144 /*
145 * last_relay_stream_id_lock protects last_relay_stream_id increment
146 * atomicity on 32-bit architectures.
147 */
148 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
149 static uint64_t last_relay_stream_id;
150
151 /*
152 * Relay command queue.
153 *
154 * The relay_thread_listener and relay_thread_dispatcher communicate with this
155 * queue.
156 */
157 static struct relay_conn_queue relay_conn_queue;
158
159 /* Global relay stream hash table. */
160 struct lttng_ht *relay_streams_ht;
161
162 /* Global relay viewer stream hash table. */
163 struct lttng_ht *viewer_streams_ht;
164
165 /* Global relay sessions hash table. */
166 struct lttng_ht *sessions_ht;
167
168 /* Relayd health monitoring */
169 struct health_app *health_relayd;
170
171 struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
172
173 static struct option long_options[] = {
174 { "control-port", 1, 0, 'C', },
175 { "data-port", 1, 0, 'D', },
176 { "live-port", 1, 0, 'L', },
177 { "daemonize", 0, 0, 'd', },
178 { "background", 0, 0, 'b', },
179 { "group", 1, 0, 'g', },
180 { "help", 0, 0, 'h', },
181 { "output", 1, 0, 'o', },
182 { "verbose", 0, 0, 'v', },
183 { "config", 1, 0, 'f' },
184 { "version", 0, 0, 'V' },
185 { NULL, 0, 0, 0, },
186 };
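/*
 * Illustrative invocation matching the options declared above (paths and
 * ports are hypothetical; when a port is omitted, the built-in default is
 * used):
 *
 *   lttng-relayd --daemonize --output=/tmp/lttng-traces \
 *           --control-port=tcp://0.0.0.0:5342 \
 *           --data-port=tcp://0.0.0.0:5343 \
 *           --live-port=tcp://0.0.0.0:5344
 */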
187
188 static const char *config_ignore_options[] = { "help", "config", "version" };
189
190 /*
191 * Take an option from the getopt output and set it in the right variable to be
192 * used later.
193 *
194 * Return 0 on success, else a negative value.
195 */
196 static int set_option(int opt, const char *arg, const char *optname)
197 {
198 int ret;
199
200 switch (opt) {
201 case 0:
202 fprintf(stderr, "option %s", optname);
203 if (arg) {
204 fprintf(stderr, " with arg %s\n", arg);
205 }
206 break;
207 case 'C':
208 if (lttng_is_setuid_setgid()) {
209 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
210 "-C, --control-port");
211 } else {
212 ret = uri_parse(arg, &control_uri);
213 if (ret < 0) {
214 ERR("Invalid control URI specified");
215 goto end;
216 }
217 if (control_uri->port == 0) {
218 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
219 }
220 }
221 break;
222 case 'D':
223 if (lttng_is_setuid_setgid()) {
224 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
225 "-D, -data-port");
226 } else {
227 ret = uri_parse(arg, &data_uri);
228 if (ret < 0) {
229 ERR("Invalid data URI specified");
230 goto end;
231 }
232 if (data_uri->port == 0) {
233 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
234 }
235 }
236 break;
237 case 'L':
238 if (lttng_is_setuid_setgid()) {
239 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
240 "-L, -live-port");
241 } else {
242 ret = uri_parse(arg, &live_uri);
243 if (ret < 0) {
244 ERR("Invalid live URI specified");
245 goto end;
246 }
247 if (live_uri->port == 0) {
248 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
249 }
250 }
251 break;
252 case 'd':
253 opt_daemon = 1;
254 break;
255 case 'b':
256 opt_background = 1;
257 break;
258 case 'g':
259 if (lttng_is_setuid_setgid()) {
260 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
261 "-g, --group");
262 } else {
263 tracing_group_name = strdup(arg);
264 if (tracing_group_name == NULL) {
265 ret = -errno;
266 PERROR("strdup");
267 goto end;
268 }
269 tracing_group_name_override = 1;
270 }
271 break;
272 case 'h':
273 ret = utils_show_help(8, "lttng-relayd", help_msg);
274 if (ret) {
275 ERR("Cannot show --help for `lttng-relayd`");
276 perror("exec");
277 }
278 exit(EXIT_FAILURE);
279 case 'V':
280 fprintf(stdout, "%s\n", VERSION);
281 exit(EXIT_SUCCESS);
282 case 'o':
283 if (lttng_is_setuid_setgid()) {
284 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
285 "-o, --output");
286 } else {
287 ret = asprintf(&opt_output_path, "%s", arg);
288 if (ret < 0) {
289 ret = -errno;
290 PERROR("asprintf opt_output_path");
291 goto end;
292 }
293 }
294 break;
295 case 'v':
296 /* The verbosity level can be increased by passing multiple -v. */
297 if (arg) {
298 lttng_opt_verbose = config_parse_value(arg);
299 } else {
300 /* Only 3 levels of verbosity (-vvv). */
301 if (lttng_opt_verbose < 3) {
302 lttng_opt_verbose += 1;
303 }
304 }
305 break;
306 default:
307 /* Unknown option or other error.
308 * Error is printed by getopt, just return */
309 ret = -1;
310 goto end;
311 }
312
313 /* All good. */
314 ret = 0;
315
316 end:
317 return ret;
318 }
319
320 /*
321 * config_entry_handler_cb used to handle options read from a config file.
322 * See config_entry_handler_cb comment in common/config/session-config.h for the
323 * return value conventions.
324 */
325 static int config_entry_handler(const struct config_entry *entry, void *unused)
326 {
327 int ret = 0, i;
328
329 if (!entry || !entry->name || !entry->value) {
330 ret = -EINVAL;
331 goto end;
332 }
333
334 /* Check if the option is to be ignored */
335 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
336 if (!strcmp(entry->name, config_ignore_options[i])) {
337 goto end;
338 }
339 }
340
341 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
342 /* Ignore if entry name is not fully matched. */
343 if (strcmp(entry->name, long_options[i].name)) {
344 continue;
345 }
346
347 /*
348 * If the option takes no argument on the command line,
349 * we have to check if the value is "true". We support
350 * non-zero numeric values, true, on and yes.
351 */
352 if (!long_options[i].has_arg) {
353 ret = config_parse_value(entry->value);
354 if (ret <= 0) {
355 if (ret) {
356 WARN("Invalid configuration value \"%s\" for option %s",
357 entry->value, entry->name);
358 }
359 /* False, skip boolean config option. */
360 goto end;
361 }
362 }
363
364 ret = set_option(long_options[i].val, entry->value, entry->name);
365 goto end;
366 }
367
368 WARN("Unrecognized option \"%s\" in daemon configuration file.",
369 entry->name);
370
371 end:
372 return ret;
373 }
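/*
 * For reference, a minimal daemon configuration file section handled by
 * config_entry_handler() could look like the following (values are
 * illustrative only; boolean options accept "true", "on", "yes" or a
 * non-zero number):
 *
 *   [relayd]
 *   daemonize=yes
 *   output=/var/lib/lttng/relayd-traces
 *   control-port=tcp://0.0.0.0:5342
 */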
374
375 static int set_options(int argc, char **argv)
376 {
377 int c, ret = 0, option_index = 0, retval = 0;
378 int orig_optopt = optopt, orig_optind = optind;
379 char *default_address, *optstring;
380 const char *config_path = NULL;
381
382 optstring = utils_generate_optstring(long_options,
383 sizeof(long_options) / sizeof(struct option));
384 if (!optstring) {
385 retval = -ENOMEM;
386 goto exit;
387 }
388
389 /* Check for the --config option */
390
391 while ((c = getopt_long(argc, argv, optstring, long_options,
392 &option_index)) != -1) {
393 if (c == '?') {
394 retval = -EINVAL;
395 goto exit;
396 } else if (c != 'f') {
397 continue;
398 }
399
400 if (lttng_is_setuid_setgid()) {
401 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
402 "-f, --config");
403 } else {
404 config_path = utils_expand_path(optarg);
405 if (!config_path) {
406 ERR("Failed to resolve path: %s", optarg);
407 }
408 }
409 }
410
411 ret = config_get_section_entries(config_path, config_section_name,
412 config_entry_handler, NULL);
413 if (ret) {
414 if (ret > 0) {
415 ERR("Invalid configuration option at line %i", ret);
416 }
417 retval = -1;
418 goto exit;
419 }
420
421 /* Reset getopt's global state */
422 optopt = orig_optopt;
423 optind = orig_optind;
424 while (1) {
425 c = getopt_long(argc, argv, optstring, long_options, &option_index);
426 if (c == -1) {
427 break;
428 }
429
430 ret = set_option(c, optarg, long_options[option_index].name);
431 if (ret < 0) {
432 retval = -1;
433 goto exit;
434 }
435 }
436
437 /* assign default values */
438 if (control_uri == NULL) {
439 ret = asprintf(&default_address,
440 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
441 DEFAULT_NETWORK_CONTROL_PORT);
442 if (ret < 0) {
443 PERROR("asprintf default data address");
444 retval = -1;
445 goto exit;
446 }
447
448 ret = uri_parse(default_address, &control_uri);
449 free(default_address);
450 if (ret < 0) {
451 ERR("Invalid control URI specified");
452 retval = -1;
453 goto exit;
454 }
455 }
456 if (data_uri == NULL) {
457 ret = asprintf(&default_address,
458 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
459 DEFAULT_NETWORK_DATA_PORT);
460 if (ret < 0) {
461 PERROR("asprintf default data address");
462 retval = -1;
463 goto exit;
464 }
465
466 ret = uri_parse(default_address, &data_uri);
467 free(default_address);
468 if (ret < 0) {
469 ERR("Invalid data URI specified");
470 retval = -1;
471 goto exit;
472 }
473 }
474 if (live_uri == NULL) {
475 ret = asprintf(&default_address,
476 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
477 DEFAULT_NETWORK_VIEWER_PORT);
478 if (ret < 0) {
479 PERROR("asprintf default viewer control address");
480 retval = -1;
481 goto exit;
482 }
483
484 ret = uri_parse(default_address, &live_uri);
485 free(default_address);
486 if (ret < 0) {
487 ERR("Invalid viewer control URI specified");
488 retval = -1;
489 goto exit;
490 }
491 }
492
493 exit:
494 free(optstring);
495 return retval;
496 }
497
498 static void print_global_objects(void)
499 {
500 rcu_register_thread();
501
502 print_viewer_streams();
503 print_relay_streams();
504 print_sessions();
505
506 rcu_unregister_thread();
507 }
508
509 /*
510 * Cleanup the daemon
511 */
512 static void relayd_cleanup(void)
513 {
514 print_global_objects();
515
516 DBG("Cleaning up");
517
518 if (viewer_streams_ht)
519 lttng_ht_destroy(viewer_streams_ht);
520 if (relay_streams_ht)
521 lttng_ht_destroy(relay_streams_ht);
522 if (sessions_ht)
523 lttng_ht_destroy(sessions_ht);
524
525 /* free the dynamically allocated opt_output_path */
526 free(opt_output_path);
527
528 /* Close thread quit pipes */
529 utils_close_pipe(thread_quit_pipe);
530
531 uri_free(control_uri);
532 uri_free(data_uri);
533 /* Live URI is freed in the live thread. */
534
535 if (tracing_group_name_override) {
536 free((void *) tracing_group_name);
537 }
538 }
539
540 /*
541 * Write to the write end of a pipe to notify a thread.
542 */
543 static int notify_thread_pipe(int wpipe)
544 {
545 ssize_t ret;
546
547 ret = lttng_write(wpipe, "!", 1);
548 if (ret < 1) {
549 PERROR("write poll pipe");
550 goto end;
551 }
552 ret = 0;
553 end:
554 return ret;
555 }
556
557 static int notify_health_quit_pipe(int *pipe)
558 {
559 ssize_t ret;
560
561 ret = lttng_write(pipe[1], "4", 1);
562 if (ret < 1) {
563 PERROR("write relay health quit");
564 goto end;
565 }
566 ret = 0;
567 end:
568 return ret;
569 }
570
571 /*
572 * Stop all relayd and relayd-live threads.
573 */
574 int lttng_relay_stop_threads(void)
575 {
576 int retval = 0;
577
578 /* Stopping all threads */
579 DBG("Terminating all threads");
580 if (notify_thread_pipe(thread_quit_pipe[1])) {
581 ERR("write error on thread quit pipe");
582 retval = -1;
583 }
584
585 if (notify_health_quit_pipe(health_quit_pipe)) {
586 ERR("write error on health quit pipe");
587 }
588
589 /* Dispatch thread */
590 CMM_STORE_SHARED(dispatch_thread_exit, 1);
591 futex_nto1_wake(&relay_conn_queue.futex);
592
593 if (relayd_live_stop()) {
594 ERR("Error stopping live threads");
595 retval = -1;
596 }
597 return retval;
598 }
599
600 /*
601 * Signal handler for the daemon
602 *
603 * Simply stop all worker threads, letting main() return gracefully after
604 * joining all threads and calling cleanup().
605 */
606 static void sighandler(int sig)
607 {
608 switch (sig) {
609 case SIGINT:
610 DBG("SIGINT caught");
611 if (lttng_relay_stop_threads()) {
612 ERR("Error stopping threads");
613 }
614 break;
615 case SIGTERM:
616 DBG("SIGTERM caught");
617 if (lttng_relay_stop_threads()) {
618 ERR("Error stopping threads");
619 }
620 break;
621 case SIGUSR1:
622 CMM_STORE_SHARED(recv_child_signal, 1);
623 break;
624 default:
625 break;
626 }
627 }
628
629 /*
630 * Set up signal handlers for:
631 * SIGINT, SIGTERM, SIGUSR1, SIGPIPE
632 */
633 static int set_signal_handler(void)
634 {
635 int ret = 0;
636 struct sigaction sa;
637 sigset_t sigset;
638
639 if ((ret = sigemptyset(&sigset)) < 0) {
640 PERROR("sigemptyset");
641 return ret;
642 }
643
644 sa.sa_mask = sigset;
645 sa.sa_flags = 0;
646
647 sa.sa_handler = sighandler;
648 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
649 PERROR("sigaction");
650 return ret;
651 }
652
653 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
654 PERROR("sigaction");
655 return ret;
656 }
657
658 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
659 PERROR("sigaction");
660 return ret;
661 }
662
663 sa.sa_handler = SIG_IGN;
664 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
665 PERROR("sigaction");
666 return ret;
667 }
668
669 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
670
671 return ret;
672 }
673
674 void lttng_relay_notify_ready(void)
675 {
676 /* Notify the parent of the fork() process that we are ready. */
677 if (opt_daemon || opt_background) {
678 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
679 kill(child_ppid, SIGUSR1);
680 }
681 }
682 }
683
684 /*
685 * Init thread quit pipe.
686 *
687 * Return -1 on error or 0 if all pipes are created.
688 */
689 static int init_thread_quit_pipe(void)
690 {
691 int ret;
692
693 ret = utils_create_pipe_cloexec(thread_quit_pipe);
694
695 return ret;
696 }
697
698 /*
699 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
700 */
701 static int create_thread_poll_set(struct lttng_poll_event *events, int size)
702 {
703 int ret;
704
705 if (events == NULL || size == 0) {
706 ret = -1;
707 goto error;
708 }
709
710 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
711 if (ret < 0) {
712 goto error;
713 }
714
715 /* Add quit pipe */
716 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
717 if (ret < 0) {
718 goto error;
719 }
720
721 return 0;
722
723 error:
724 return ret;
725 }
726
727 /*
728 * Check if the thread quit pipe was triggered.
729 *
730 * Return 1 if it was triggered, else 0.
731 */
732 static int check_thread_quit_pipe(int fd, uint32_t events)
733 {
734 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
735 return 1;
736 }
737
738 return 0;
739 }
740
741 /*
742 * Create and init socket from uri.
743 */
744 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
745 {
746 int ret;
747 struct lttcomm_sock *sock = NULL;
748
749 sock = lttcomm_alloc_sock_from_uri(uri);
750 if (sock == NULL) {
751 ERR("Allocating socket");
752 goto error;
753 }
754
755 ret = lttcomm_create_sock(sock);
756 if (ret < 0) {
757 goto error;
758 }
759 DBG("Listening on sock %d", sock->fd);
760
761 ret = sock->ops->bind(sock);
762 if (ret < 0) {
763 PERROR("Failed to bind socket");
764 goto error;
765 }
766
767 ret = sock->ops->listen(sock, -1);
768 if (ret < 0) {
769 goto error;
770
771 }
772
773 return sock;
774
775 error:
776 if (sock) {
777 lttcomm_destroy_sock(sock);
778 }
779 return NULL;
780 }
781
782 /*
783 * This thread listens for new connections on the network.
784 */
785 static void *relay_thread_listener(void *data)
786 {
787 int i, ret, pollfd, err = -1;
788 uint32_t revents, nb_fd;
789 struct lttng_poll_event events;
790 struct lttcomm_sock *control_sock, *data_sock;
791
792 DBG("[thread] Relay listener started");
793
794 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
795
796 health_code_update();
797
798 control_sock = relay_socket_create(control_uri);
799 if (!control_sock) {
800 goto error_sock_control;
801 }
802
803 data_sock = relay_socket_create(data_uri);
804 if (!data_sock) {
805 goto error_sock_relay;
806 }
807
808 /*
809 * Pass 3 as size here for the thread quit pipe, control and
810 * data socket.
811 */
812 ret = create_thread_poll_set(&events, 3);
813 if (ret < 0) {
814 goto error_create_poll;
815 }
816
817 /* Add the control socket */
818 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
819 if (ret < 0) {
820 goto error_poll_add;
821 }
822
823 /* Add the data socket */
824 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
825 if (ret < 0) {
826 goto error_poll_add;
827 }
828
829 lttng_relay_notify_ready();
830
831 if (testpoint(relayd_thread_listener)) {
832 goto error_testpoint;
833 }
834
835 while (1) {
836 health_code_update();
837
838 DBG("Listener accepting connections");
839
840 restart:
841 health_poll_entry();
842 ret = lttng_poll_wait(&events, -1);
843 health_poll_exit();
844 if (ret < 0) {
845 /*
846 * Restart interrupted system call.
847 */
848 if (errno == EINTR) {
849 goto restart;
850 }
851 goto error;
852 }
853
854 nb_fd = ret;
855
856 DBG("Relay new connection received");
857 for (i = 0; i < nb_fd; i++) {
858 health_code_update();
859
860 /* Fetch once the poll data */
861 revents = LTTNG_POLL_GETEV(&events, i);
862 pollfd = LTTNG_POLL_GETFD(&events, i);
863
864 /* The thread quit pipe has been triggered. Exit the thread. */
865 ret = check_thread_quit_pipe(pollfd, revents);
866 if (ret) {
867 err = 0;
868 goto exit;
869 }
870
871 if (revents & LPOLLIN) {
872 /*
873 * A new connection is requested, therefore a
874 * sessiond/consumerd connection is allocated in
875 * this thread, enqueued to a global queue and
876 * dequeued (and freed) in the worker thread.
877 */
878 int val = 1;
879 struct relay_connection *new_conn;
880 struct lttcomm_sock *newsock;
881 enum connection_type type;
882
883 if (pollfd == data_sock->fd) {
884 type = RELAY_DATA;
885 newsock = data_sock->ops->accept(data_sock);
886 DBG("Relay data connection accepted, socket %d",
887 newsock->fd);
888 } else {
889 assert(pollfd == control_sock->fd);
890 type = RELAY_CONTROL;
891 newsock = control_sock->ops->accept(control_sock);
892 DBG("Relay control connection accepted, socket %d",
893 newsock->fd);
894 }
895 if (!newsock) {
896 PERROR("accepting sock");
897 goto error;
898 }
899
900 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
901 sizeof(val));
902 if (ret < 0) {
903 PERROR("setsockopt inet");
904 lttcomm_destroy_sock(newsock);
905 goto error;
906 }
907
908 ret = socket_apply_keep_alive_config(newsock->fd);
909 if (ret < 0) {
910 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
911 newsock->fd);
912 lttcomm_destroy_sock(newsock);
913 goto error;
914 }
915
916 new_conn = connection_create(newsock, type);
917 if (!new_conn) {
918 lttcomm_destroy_sock(newsock);
919 goto error;
920 }
921
922 /* Enqueue request for the dispatcher thread. */
923 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
924 &new_conn->qnode);
925
926 /*
927 * Wake the dispatch queue futex.
928 * Implicit memory barrier with the
929 * exchange in cds_wfcq_enqueue.
930 */
931 futex_nto1_wake(&relay_conn_queue.futex);
932 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
933 ERR("socket poll error");
934 goto error;
935 } else {
936 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
937 goto error;
938 }
939 }
940 }
941
942 exit:
943 error:
944 error_poll_add:
945 error_testpoint:
946 lttng_poll_clean(&events);
947 error_create_poll:
948 if (data_sock->fd >= 0) {
949 ret = data_sock->ops->close(data_sock);
950 if (ret) {
951 PERROR("close");
952 }
953 }
954 lttcomm_destroy_sock(data_sock);
955 error_sock_relay:
956 if (control_sock->fd >= 0) {
957 ret = control_sock->ops->close(control_sock);
958 if (ret) {
959 PERROR("close");
960 }
961 }
962 lttcomm_destroy_sock(control_sock);
963 error_sock_control:
964 if (err) {
965 health_error();
966 ERR("Health error occurred in %s", __func__);
967 }
968 health_unregister(health_relayd);
969 DBG("Relay listener thread cleanup complete");
970 lttng_relay_stop_threads();
971 return NULL;
972 }
973
974 /*
975 * This thread dispatches incoming connection requests to the worker thread.
976 */
977 static void *relay_thread_dispatcher(void *data)
978 {
979 int err = -1;
980 ssize_t ret;
981 struct cds_wfcq_node *node;
982 struct relay_connection *new_conn = NULL;
983
984 DBG("[thread] Relay dispatcher started");
985
986 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
987
988 if (testpoint(relayd_thread_dispatcher)) {
989 goto error_testpoint;
990 }
991
992 health_code_update();
993
994 for (;;) {
995 health_code_update();
996
997 /* Atomically prepare the queue futex */
998 futex_nto1_prepare(&relay_conn_queue.futex);
999
1000 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1001 break;
1002 }
1003
1004 do {
1005 health_code_update();
1006
1007 /* Dequeue commands */
1008 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1009 &relay_conn_queue.tail);
1010 if (node == NULL) {
1011 DBG("Woken up but nothing in the relay command queue");
1012 /* Continue thread execution */
1013 break;
1014 }
1015 new_conn = caa_container_of(node, struct relay_connection, qnode);
1016
1017 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1018
1019 /*
1020 * Inform worker thread of the new request. This
1021 * call is blocking so we can be assured that
1022 * the data will be read at some point in time
1023 * or wait to the end of the world :)
1024 */
1025 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1026 if (ret < 0) {
1027 PERROR("write connection pipe");
1028 connection_put(new_conn);
1029 goto error;
1030 }
1031 } while (node != NULL);
1032
1033 /* Futex wait on queue. Blocking call on futex() */
1034 health_poll_entry();
1035 futex_nto1_wait(&relay_conn_queue.futex);
1036 health_poll_exit();
1037 }
1038
1039 /* Normal exit, no error */
1040 err = 0;
1041
1042 error:
1043 error_testpoint:
1044 if (err) {
1045 health_error();
1046 ERR("Health error occurred in %s", __func__);
1047 }
1048 health_unregister(health_relayd);
1049 DBG("Dispatch thread dying");
1050 lttng_relay_stop_threads();
1051 return NULL;
1052 }
1053
1054 /*
1055 * Set index data from the control port to a given index object.
1056 */
1057 static int set_index_control_data(struct relay_index *index,
1058 struct lttcomm_relayd_index *data,
1059 struct relay_connection *conn)
1060 {
1061 struct ctf_packet_index index_data;
1062
1063 /*
1064 * The index on disk is encoded in big endian.
1065 */
1066 index_data.packet_size = htobe64(data->packet_size);
1067 index_data.content_size = htobe64(data->content_size);
1068 index_data.timestamp_begin = htobe64(data->timestamp_begin);
1069 index_data.timestamp_end = htobe64(data->timestamp_end);
1070 index_data.events_discarded = htobe64(data->events_discarded);
1071 index_data.stream_id = htobe64(data->stream_id);
1072
1073 if (conn->minor >= 8) {
1074 index_data.stream_instance_id = htobe64(data->stream_instance_id);
1075 index_data.packet_seq_num = htobe64(data->packet_seq_num);
1076 }
1077
1078 return relay_index_set_data(index, &index_data);
1079 }
1080
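/*
 * Packet indexes only exist for peers speaking protocol 2.4 or later, and
 * are never produced for snapshot sessions; the check below reflects both
 * conditions.
 */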
1081 static bool session_streams_have_index(const struct relay_session *session)
1082 {
1083 return session->minor >= 4 && !session->snapshot;
1084 }
1085
1086 /*
1087 * Handle the RELAYD_CREATE_SESSION command.
1088 *
1089 * On success, send back the session id or else return a negative value.
1090 */
1091 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1092 struct relay_connection *conn,
1093 const struct lttng_buffer_view *payload)
1094 {
1095 int ret = 0;
1096 ssize_t send_ret;
1097 struct relay_session *session = NULL;
1098 struct lttcomm_relayd_status_session reply;
1099 char session_name[LTTNG_NAME_MAX];
1100 char hostname[LTTNG_HOST_NAME_MAX];
1101 uint32_t live_timer = 0;
1102 bool snapshot = false;
1103 /* Left nil for peers < 2.11. */
1104 lttng_uuid sessiond_uuid = {};
1105
1106 memset(session_name, 0, LTTNG_NAME_MAX);
1107 memset(hostname, 0, LTTNG_HOST_NAME_MAX);
1108
1109 memset(&reply, 0, sizeof(reply));
1110
1111 if (conn->minor < 4) {
1112 /* From 2.1 to 2.3 */
1113 ret = 0;
1114 } else if (conn->minor >= 4 && conn->minor < 11) {
1115 /* From 2.4 to 2.10 */
1116 ret = cmd_create_session_2_4(payload, session_name,
1117 hostname, &live_timer, &snapshot);
1118 } else {
1119 /* From 2.11 to ... */
1120 ret = cmd_create_session_2_11(payload, session_name,
1121 hostname, &live_timer, &snapshot,
1122 sessiond_uuid);
1123 if (lttng_uuid_is_nil(sessiond_uuid)) {
1124 /* The nil UUID is reserved for pre-2.11 clients. */
1125 ERR("Illegal nil UUID announced by peer in create session command");
1126 ret = -1;
1127 goto send_reply;
1128 }
1129 }
1130
1131 if (ret < 0) {
1132 goto send_reply;
1133 }
1134
1135 session = session_create(session_name, hostname, live_timer,
1136 snapshot, sessiond_uuid, conn->major, conn->minor);
1137 if (!session) {
1138 ret = -1;
1139 goto send_reply;
1140 }
1141 assert(!conn->session);
1142 conn->session = session;
1143 DBG("Created session %" PRIu64, session->id);
1144
1145 reply.session_id = htobe64(session->id);
1146
1147 session->current_trace_chunk =
1148 sessiond_trace_chunk_registry_get_anonymous_chunk(
1149 sessiond_trace_chunk_registry, sessiond_uuid,
1150 session->id,
1151 opt_output_path);
1152 if (!session->current_trace_chunk) {
1153 ret = -1;
1154 }
1155
1156 send_reply:
1157 if (ret < 0) {
1158 reply.ret_code = htobe32(LTTNG_ERR_FATAL);
1159 } else {
1160 reply.ret_code = htobe32(LTTNG_OK);
1161 }
1162
1163 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1164 if (send_ret < (ssize_t) sizeof(reply)) {
1165 ERR("Failed to send \"create session\" command reply (ret = %zd)",
1166 send_ret);
1167 ret = -1;
1168 }
1169 if (ret < 0 && session) {
1170 session_put(session);
1171 }
1172 return ret;
1173 }
1174
1175 /*
1176 * When we have received all the streams and the metadata for a channel,
1177 * we make them visible to the viewer threads.
1178 */
1179 static void publish_connection_local_streams(struct relay_connection *conn)
1180 {
1181 struct relay_stream *stream;
1182 struct relay_session *session = conn->session;
1183
1184 /*
1185 * We publish all streams belonging to a session atomically with
1186 * respect to the session lock.
1187 */
1188 pthread_mutex_lock(&session->lock);
1189 rcu_read_lock();
1190 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1191 recv_node) {
1192 stream_publish(stream);
1193 }
1194 rcu_read_unlock();
1195
1196 /*
1197 * Inform the viewer that there are new streams in the session.
1198 */
1199 if (session->viewer_attached) {
1200 uatomic_set(&session->new_streams, 1);
1201 }
1202 pthread_mutex_unlock(&session->lock);
1203 }
1204
1205 /*
1206 * relay_add_stream: allocate a new stream for a session
1207 */
1208 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1209 struct relay_connection *conn,
1210 const struct lttng_buffer_view *payload)
1211 {
1212 int ret;
1213 ssize_t send_ret;
1214 struct relay_session *session = conn->session;
1215 struct relay_stream *stream = NULL;
1216 struct lttcomm_relayd_status_stream reply;
1217 struct ctf_trace *trace = NULL;
1218 uint64_t stream_handle = -1ULL;
1219 char *path_name = NULL, *channel_name = NULL;
1220 uint64_t tracefile_size = 0, tracefile_count = 0;
1221 struct relay_stream_chunk_id stream_chunk_id = { 0 };
1222
1223 if (!session || !conn->version_check_done) {
1224 ERR("Trying to add a stream before version check");
1225 ret = -1;
1226 goto end_no_session;
1227 }
1228
1229 if (session->minor == 1) {
1230 /* For 2.1 */
1231 ret = cmd_recv_stream_2_1(payload, &path_name,
1232 &channel_name);
1233 } else if (session->minor > 1 && session->minor < 11) {
1234 /* From 2.2 to 2.10 */
1235 ret = cmd_recv_stream_2_2(payload, &path_name,
1236 &channel_name, &tracefile_size, &tracefile_count);
1237 } else {
1238 /* From 2.11 to ... */
1239 ret = cmd_recv_stream_2_11(payload, &path_name,
1240 &channel_name, &tracefile_size, &tracefile_count,
1241 &stream_chunk_id.value);
1242 stream_chunk_id.is_set = true;
1243 }
1244
1245 if (ret < 0) {
1246 goto send_reply;
1247 }
1248
1249 trace = ctf_trace_get_by_path_or_create(session, path_name);
1250 if (!trace) {
1251 goto send_reply;
1252 }
1253 /* This stream here has one reference on the trace. */
1254
1255 pthread_mutex_lock(&last_relay_stream_id_lock);
1256 stream_handle = ++last_relay_stream_id;
1257 pthread_mutex_unlock(&last_relay_stream_id_lock);
1258
1259 /* We pass ownership of path_name and channel_name. */
1260 stream = stream_create(trace, stream_handle, path_name,
1261 channel_name, tracefile_size, tracefile_count,
1262 &stream_chunk_id);
1263 path_name = NULL;
1264 channel_name = NULL;
1265
1266 /*
1267 * Streams are the owners of their trace. Reference to trace is
1268 * kept within stream_create().
1269 */
1270 ctf_trace_put(trace);
1271
1272 send_reply:
1273 memset(&reply, 0, sizeof(reply));
1274 reply.handle = htobe64(stream_handle);
1275 if (!stream) {
1276 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1277 } else {
1278 reply.ret_code = htobe32(LTTNG_OK);
1279 }
1280
1281 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1282 sizeof(struct lttcomm_relayd_status_stream), 0);
1283 if (send_ret < (ssize_t) sizeof(reply)) {
1284 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1285 send_ret);
1286 ret = -1;
1287 }
1288
1289 end_no_session:
1290 free(path_name);
1291 free(channel_name);
1292 return ret;
1293 }
1294
1295 /*
1296 * relay_close_stream: close a specific stream
1297 */
1298 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1299 struct relay_connection *conn,
1300 const struct lttng_buffer_view *payload)
1301 {
1302 int ret;
1303 ssize_t send_ret;
1304 struct relay_session *session = conn->session;
1305 struct lttcomm_relayd_close_stream stream_info;
1306 struct lttcomm_relayd_generic_reply reply;
1307 struct relay_stream *stream;
1308
1309 DBG("Close stream received");
1310
1311 if (!session || !conn->version_check_done) {
1312 ERR("Trying to close a stream before version check");
1313 ret = -1;
1314 goto end_no_session;
1315 }
1316
1317 if (payload->size < sizeof(stream_info)) {
1318 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1319 sizeof(stream_info), payload->size);
1320 ret = -1;
1321 goto end_no_session;
1322 }
1323 memcpy(&stream_info, payload->data, sizeof(stream_info));
1324 stream_info.stream_id = be64toh(stream_info.stream_id);
1325 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1326
1327 stream = stream_get_by_id(stream_info.stream_id);
1328 if (!stream) {
1329 ret = -1;
1330 goto end;
1331 }
1332
1333 /*
1334 * Set last_net_seq_num before the close flag. Required by data
1335 * pending check.
1336 */
1337 pthread_mutex_lock(&stream->lock);
1338 stream->last_net_seq_num = stream_info.last_net_seq_num;
1339 pthread_mutex_unlock(&stream->lock);
1340
1341 /*
1342 * This is one of the conditions which may trigger a stream close
1343 * with the others being:
1344 * 1) A close command is received for a stream
1345 * 2) The control connection owning the stream is closed
1346 * 3) We have received all of the stream's data _after_ a close
1347 * request.
1348 */
1349 try_stream_close(stream);
1350 if (stream->is_metadata) {
1351 struct relay_viewer_stream *vstream;
1352
1353 vstream = viewer_stream_get_by_id(stream->stream_handle);
1354 if (vstream) {
1355 if (vstream->metadata_sent == stream->metadata_received) {
1356 /*
1357 * Since all the metadata has been sent to the
1358 * viewer and that we have a request to close
1359 * its stream, we can safely teardown the
1360 * corresponding metadata viewer stream.
1361 */
1362 viewer_stream_put(vstream);
1363 }
1364 /* Put local reference. */
1365 viewer_stream_put(vstream);
1366 }
1367 }
1368 stream_put(stream);
1369 ret = 0;
1370
1371 end:
1372 memset(&reply, 0, sizeof(reply));
1373 if (ret < 0) {
1374 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1375 } else {
1376 reply.ret_code = htobe32(LTTNG_OK);
1377 }
1378 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1379 sizeof(struct lttcomm_relayd_generic_reply), 0);
1380 if (send_ret < (ssize_t) sizeof(reply)) {
1381 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1382 send_ret);
1383 ret = -1;
1384 }
1385
1386 end_no_session:
1387 return ret;
1388 }
1389
1390 /*
1391 * relay_reset_metadata: reset a metadata stream
1392 */
1393 static
1394 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1395 struct relay_connection *conn,
1396 const struct lttng_buffer_view *payload)
1397 {
1398 int ret;
1399 ssize_t send_ret;
1400 struct relay_session *session = conn->session;
1401 struct lttcomm_relayd_reset_metadata stream_info;
1402 struct lttcomm_relayd_generic_reply reply;
1403 struct relay_stream *stream;
1404
1405 DBG("Reset metadata received");
1406
1407 if (!session || !conn->version_check_done) {
1408 ERR("Trying to reset a metadata stream before version check");
1409 ret = -1;
1410 goto end_no_session;
1411 }
1412
1413 if (payload->size < sizeof(stream_info)) {
1414 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1415 sizeof(stream_info), payload->size);
1416 ret = -1;
1417 goto end_no_session;
1418 }
1419 memcpy(&stream_info, payload->data, sizeof(stream_info));
1420 stream_info.stream_id = be64toh(stream_info.stream_id);
1421 stream_info.version = be64toh(stream_info.version);
1422
1423 DBG("Update metadata to version %" PRIu64, stream_info.version);
1424
1425 /* Unsupported for live sessions for now. */
1426 if (session->live_timer != 0) {
1427 ret = -1;
1428 goto end;
1429 }
1430
1431 stream = stream_get_by_id(stream_info.stream_id);
1432 if (!stream) {
1433 ret = -1;
1434 goto end;
1435 }
1436 pthread_mutex_lock(&stream->lock);
1437 if (!stream->is_metadata) {
1438 ret = -1;
1439 goto end_unlock;
1440 }
1441
1442 ret = utils_rotate_stream_file(stream->path_name, stream->channel_name,
1443 0, 0, -1, -1, stream->stream_fd->fd, NULL,
1444 &stream->stream_fd->fd);
1445 if (ret < 0) {
1446 ERR("Failed to rotate metadata file %s of channel %s",
1447 stream->path_name, stream->channel_name);
1448 goto end_unlock;
1449 }
1450
1451 end_unlock:
1452 pthread_mutex_unlock(&stream->lock);
1453 stream_put(stream);
1454
1455 end:
1456 memset(&reply, 0, sizeof(reply));
1457 if (ret < 0) {
1458 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1459 } else {
1460 reply.ret_code = htobe32(LTTNG_OK);
1461 }
1462 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1463 sizeof(struct lttcomm_relayd_generic_reply), 0);
1464 if (send_ret < (ssize_t) sizeof(reply)) {
1465 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1466 send_ret);
1467 ret = -1;
1468 }
1469
1470 end_no_session:
1471 return ret;
1472 }
1473
1474 /*
1475 * relay_unknown_command: send -1 if received unknown command
1476 */
1477 static void relay_unknown_command(struct relay_connection *conn)
1478 {
1479 struct lttcomm_relayd_generic_reply reply;
1480 ssize_t send_ret;
1481
1482 memset(&reply, 0, sizeof(reply));
1483 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1484 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1485 if (send_ret < (ssize_t) sizeof(reply)) {
1486 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1487 }
1488 }
1489
1490 /*
1491 * relay_start: send an acknowledgment to the client to tell if we are
1492 * ready to receive data. We are ready if a session is established.
1493 */
1494 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1495 struct relay_connection *conn,
1496 const struct lttng_buffer_view *payload)
1497 {
1498 int ret = 0;
1499 ssize_t send_ret;
1500 struct lttcomm_relayd_generic_reply reply;
1501 struct relay_session *session = conn->session;
1502
1503 if (!session) {
1504 DBG("Trying to start the streaming without a session established");
1505 ret = htobe32(LTTNG_ERR_UNK);
1506 }
1507
1508 memset(&reply, 0, sizeof(reply));
1509 reply.ret_code = htobe32(LTTNG_OK);
1510 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1511 sizeof(reply), 0);
1512 if (send_ret < (ssize_t) sizeof(reply)) {
1513 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1514 send_ret);
1515 ret = -1;
1516 }
1517
1518 return ret;
1519 }
1520
1521 /*
1522 * Append padding to the file pointed to by the file descriptor fd.
1523 */
1524 static int write_padding_to_file(int fd, uint32_t size)
1525 {
1526 ssize_t ret = 0;
1527 char *zeros;
1528
1529 if (size == 0) {
1530 goto end;
1531 }
1532
1533 zeros = zmalloc(size);
1534 if (zeros == NULL) {
1535 PERROR("zmalloc zeros for padding");
1536 ret = -1;
1537 goto end;
1538 }
1539
1540 ret = lttng_write(fd, zeros, size);
1541 if (ret < size) {
1542 PERROR("write padding to file");
1543 }
1544
1545 free(zeros);
1546
1547 end:
1548 return ret;
1549 }
1550
1551 /*
1552 * Close the current index file if it is open, and create a new one.
1553 *
1554 * Return 0 on success, -1 on error.
1555 */
1556 static
1557 int create_rotate_index_file(struct relay_stream *stream,
1558 const char *stream_path)
1559 {
1560 int ret;
1561 uint32_t major, minor;
1562
1563 /* Put ref on previous index_file. */
1564 if (stream->index_file) {
1565 lttng_index_file_put(stream->index_file);
1566 stream->index_file = NULL;
1567 }
1568 major = stream->trace->session->major;
1569 minor = stream->trace->session->minor;
1570 stream->index_file = lttng_index_file_create(stream_path,
1571 stream->channel_name,
1572 -1, -1, stream->tracefile_size,
1573 tracefile_array_get_file_index_head(stream->tfa),
1574 lttng_to_index_major(major, minor),
1575 lttng_to_index_minor(major, minor));
1576 if (!stream->index_file) {
1577 ret = -1;
1578 goto end;
1579 }
1580
1581 ret = 0;
1582
1583 end:
1584 return ret;
1585 }
1586
1587 static
1588 int do_rotate_stream_data(struct relay_stream *stream)
1589 {
1590 int ret;
1591
1592 DBG("Rotating stream %" PRIu64 " data file",
1593 stream->stream_handle);
1594 /* Perform the stream rotation. */
1595 ret = utils_rotate_stream_file(stream->path_name,
1596 stream->channel_name, stream->tracefile_size,
1597 stream->tracefile_count, -1,
1598 -1, stream->stream_fd->fd,
1599 NULL, &stream->stream_fd->fd);
1600 if (ret < 0) {
1601 ERR("Rotating stream output file");
1602 goto end;
1603 }
1604 stream->tracefile_size_current = 0;
1605 stream->pos_after_last_complete_data_index = 0;
1606 stream->data_rotated = true;
1607
1608 if (stream->data_rotated && stream->index_rotated) {
1609 /* Rotation completed; reset its state. */
1610 DBG("Rotation completed for stream %" PRIu64,
1611 stream->stream_handle);
1612 stream->rotate_at_seq_num = -1ULL;
1613 stream->data_rotated = false;
1614 stream->index_rotated = false;
1615 }
1616 end:
1617 return ret;
1618 }
1619
1620 /*
1621 * If too much data has been written in a tracefile before we received the
1622 * rotation command, we have to move the excess data to the new tracefile and
1623 * perform the rotation. This can happen because the control and data
1624 * connections are separate: indexes and commands arrive on the control
1625 * connection while trace data arrives on the data connection, and we have
1626 * no control over their relative ordering, so more data than expected may
1627 * be received on the data connection before the rotation command arrives.
1628 */
1629 static
1630 int rotate_truncate_stream(struct relay_stream *stream)
1631 {
1632 int ret, new_fd;
1633 off_t lseek_ret;
1634 uint64_t diff, pos = 0;
1635 char buf[FILE_COPY_BUFFER_SIZE];
1636
1637 assert(!stream->is_metadata);
1638
1639 assert(stream->tracefile_size_current >
1640 stream->pos_after_last_complete_data_index);
1641 diff = stream->tracefile_size_current -
1642 stream->pos_after_last_complete_data_index;
1643
1644 /* Create the new tracefile. */
1645 new_fd = utils_create_stream_file(stream->path_name,
1646 stream->channel_name,
1647 stream->tracefile_size, stream->tracefile_count,
1648 /* uid */ -1, /* gid */ -1, /* suffix */ NULL);
1649 if (new_fd < 0) {
1650 ERR("Failed to create new stream file at path %s for channel %s",
1651 stream->path_name, stream->channel_name);
1652 ret = -1;
1653 goto end;
1654 }
1655
1656 /*
1657 * Rewind the current tracefile to the position at which the rotation
1658 * should have occurred.
1659 */
1660 lseek_ret = lseek(stream->stream_fd->fd,
1661 stream->pos_after_last_complete_data_index, SEEK_SET);
1662 if (lseek_ret < 0) {
1663 PERROR("seek truncate stream");
1664 ret = -1;
1665 goto end;
1666 }
1667
1668 /* Move data from the old file to the new file. */
1669 while (pos < diff) {
1670 uint64_t count, bytes_left;
1671 ssize_t io_ret;
1672
1673 bytes_left = diff - pos;
1674 count = bytes_left > sizeof(buf) ? sizeof(buf) : bytes_left;
1675 assert(count <= SIZE_MAX);
1676
1677 io_ret = lttng_read(stream->stream_fd->fd, buf, count);
1678 if (io_ret < (ssize_t) count) {
1679 char error_string[256];
1680
1681 snprintf(error_string, sizeof(error_string),
1682 "Failed to read %" PRIu64 " bytes from fd %i in rotate_truncate_stream(), returned %zi",
1683 count, stream->stream_fd->fd, io_ret);
1684 if (io_ret == -1) {
1685 PERROR("%s", error_string);
1686 } else {
1687 ERR("%s", error_string);
1688 }
1689 ret = -1;
1690 goto end;
1691 }
1692
1693 io_ret = lttng_write(new_fd, buf, count);
1694 if (io_ret < (ssize_t) count) {
1695 char error_string[256];
1696
1697 snprintf(error_string, sizeof(error_string),
1698 "Failed to write %" PRIu64 " bytes from fd %i in rotate_truncate_stream(), returned %zi",
1699 count, new_fd, io_ret);
1700 if (io_ret == -1) {
1701 PERROR("%s", error_string);
1702 } else {
1703 ERR("%s", error_string);
1704 }
1705 ret = -1;
1706 goto end;
1707 }
1708
1709 pos += count;
1710 }
1711
1712 /* Truncate the file to get rid of the excess data. */
1713 ret = ftruncate(stream->stream_fd->fd,
1714 stream->pos_after_last_complete_data_index);
1715 if (ret) {
1716 PERROR("ftruncate");
1717 goto end;
1718 }
1719
1720 ret = close(stream->stream_fd->fd);
1721 if (ret < 0) {
1722 PERROR("Closing tracefile");
1723 goto end;
1724 }
1725
1726 /*
1727 * Update the offset and FD of all the eventual indexes created by the
1728 * data connection before the rotation command arrived.
1729 */
1730 ret = relay_index_switch_all_files(stream);
1731 if (ret < 0) {
1732 ERR("Failed to rotate index file");
1733 goto end;
1734 }
1735
1736 stream->stream_fd->fd = new_fd;
1737 stream->tracefile_size_current = diff;
1738 stream->pos_after_last_complete_data_index = 0;
1739 stream->rotate_at_seq_num = -1ULL;
1740
1741 ret = 0;
1742
1743 end:
1744 return ret;
1745 }
1746
1747 /*
1748 * Check if a stream's index file should be rotated (for session rotation).
1749 * Must be called with the stream lock held.
1750 *
1751 * Return 0 on success, a negative value on error.
1752 */
1753 static
1754 int try_rotate_stream_index(struct relay_stream *stream)
1755 {
1756 int ret = 0;
1757
1758 if (stream->rotate_at_seq_num == -1ULL) {
1759 /* No rotation expected. */
1760 goto end;
1761 }
1762
1763 if (stream->index_rotated) {
1764 /* Rotation of the index has already occurred. */
1765 goto end;
1766 }
1767
1768 if (stream->prev_index_seq == -1ULL ||
1769 stream->prev_index_seq < stream->rotate_at_seq_num) {
1770 DBG("Stream %" PRIu64 " index not yet ready for rotation (rotate_at_seq_num = %" PRIu64 ", prev_index_seq = %" PRIu64 ")",
1771 stream->stream_handle,
1772 stream->rotate_at_seq_num,
1773 stream->prev_index_seq);
1774 goto end;
1775 } else if (stream->prev_index_seq != stream->rotate_at_seq_num) {
1776 /*
1777 * Unexpected, protocol error/bug.
1778 * It could mean that we received a rotation position
1779 * that is in the past.
1780 */
1781 ERR("Stream %" PRIu64 " index is in an inconsistent state (rotate_at_seq_num = %" PRIu64 ", prev_data_seq = %" PRIu64 ", prev_index_seq = %" PRIu64 ")",
1782 stream->stream_handle,
1783 stream->rotate_at_seq_num,
1784 stream->prev_data_seq,
1785 stream->prev_index_seq);
1786 ret = -1;
1787 goto end;
1788 } else {
1789 DBG("Rotating stream %" PRIu64 " index file",
1790 stream->stream_handle);
1791 ret = create_rotate_index_file(stream, stream->path_name);
1792 stream->index_rotated = true;
1793
1794 if (stream->data_rotated && stream->index_rotated) {
1795 /* Rotation completed; reset its state. */
1796 DBG("Rotation completed for stream %" PRIu64,
1797 stream->stream_handle);
1798 stream->rotate_at_seq_num = -1ULL;
1799 stream->data_rotated = false;
1800 stream->index_rotated = false;
1801 }
1802 }
1803
1804 end:
1805 return ret;
1806 }
1807
1808 /*
1809 * Check if a stream's data file (as opposed to index) should be rotated
1810 * (for session rotation).
1811 * Must be called with the stream lock held.
1812 *
1813 * Return 0 on success, a negative value on error.
1814 */
1815 static
1816 int try_rotate_stream_data(struct relay_stream *stream)
1817 {
1818 int ret = 0;
1819
1820 if (stream->rotate_at_seq_num == -1ULL) {
1821 /* No rotation expected. */
1822 goto end;
1823 }
1824
1825 if (stream->data_rotated) {
1826 /* Rotation of the data file has already occurred. */
1827 goto end;
1828 }
1829
1830 if (stream->prev_data_seq == -1ULL ||
1831 stream->prev_data_seq < stream->rotate_at_seq_num) {
1832 DBG("Stream %" PRIu64 " not yet ready for rotation (rotate_at_seq_num = %" PRIu64 ", prev_data_seq = %" PRIu64 ")",
1833 stream->stream_handle,
1834 stream->rotate_at_seq_num,
1835 stream->prev_data_seq);
1836 goto end;
1837 } else if (stream->prev_data_seq > stream->rotate_at_seq_num) {
1838 /*
1839 * prev_data_seq is checked here since indexes and rotation
1840 * commands are serialized with respect to each other.
1841 */
1842 DBG("Rotation after too much data has been written in tracefile "
1843 "for stream %" PRIu64 ", need to truncate before "
1844 "rotating", stream->stream_handle);
1845 ret = rotate_truncate_stream(stream);
1846 if (ret) {
1847 ERR("Failed to truncate stream");
1848 goto end;
1849 }
1850 } else if (stream->prev_data_seq != stream->rotate_at_seq_num) {
1851 /*
1852 * Unexpected, protocol error/bug.
1853 * It could mean that we received a rotation position
1854 * that is in the past.
1855 */
1856 ERR("Stream %" PRIu64 " data is in an inconsistent state (rotate_at_seq_num = %" PRIu64 ", prev_data_seq = %" PRIu64 ")",
1857 stream->stream_handle,
1858 stream->rotate_at_seq_num,
1859 stream->prev_data_seq);
1860 ret = -1;
1861 goto end;
1862 } else {
1863 ret = do_rotate_stream_data(stream);
1864 }
1865
1866 end:
1867 return ret;
1868 }
1869
1870 /*
1871 * relay_recv_metadata: receive the metadata for the session.
1872 */
1873 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1874 struct relay_connection *conn,
1875 const struct lttng_buffer_view *payload)
1876 {
1877 int ret = 0;
1878 ssize_t size_ret;
1879 struct relay_session *session = conn->session;
1880 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1881 struct relay_stream *metadata_stream;
1882 uint64_t metadata_payload_size;
1883
1884 if (!session) {
1885 ERR("Metadata sent before version check");
1886 ret = -1;
1887 goto end;
1888 }
1889
1890 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1891 ERR("Incorrect data size");
1892 ret = -1;
1893 goto end;
1894 }
1895 metadata_payload_size = recv_hdr->data_size -
1896 sizeof(struct lttcomm_relayd_metadata_payload);
1897
1898 memcpy(&metadata_payload_header, payload->data,
1899 sizeof(metadata_payload_header));
1900 metadata_payload_header.stream_id = be64toh(
1901 metadata_payload_header.stream_id);
1902 metadata_payload_header.padding_size = be32toh(
1903 metadata_payload_header.padding_size);
1904
1905 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1906 if (!metadata_stream) {
1907 ret = -1;
1908 goto end;
1909 }
1910
1911 pthread_mutex_lock(&metadata_stream->lock);
1912
1913 size_ret = lttng_write(metadata_stream->stream_fd->fd,
1914 payload->data + sizeof(metadata_payload_header),
1915 metadata_payload_size);
1916 if (size_ret < (int64_t) metadata_payload_size) {
1917 ERR("Relay error writing metadata on file");
1918 ret = -1;
1919 goto end_put;
1920 }
1921
1922 size_ret = write_padding_to_file(metadata_stream->stream_fd->fd,
1923 metadata_payload_header.padding_size);
1924 if (size_ret < (int64_t) metadata_payload_header.padding_size) {
1925 ret = -1;
1926 goto end_put;
1927 }
1928
1929 metadata_stream->metadata_received +=
1930 metadata_payload_size + metadata_payload_header.padding_size;
1931 DBG2("Relay metadata written. Updated metadata_received %" PRIu64,
1932 metadata_stream->metadata_received);
1933
1934 ret = try_rotate_stream_data(metadata_stream);
1935 if (ret < 0) {
1936 goto end_put;
1937 }
1938
1939 end_put:
1940 pthread_mutex_unlock(&metadata_stream->lock);
1941 stream_put(metadata_stream);
1942 end:
1943 return ret;
1944 }
1945
1946 /*
1947 * relay_send_version: send relayd version number
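 *
 * The reply always advertises our own protocol version. Major versions
 * must match for the connection to be usable; the effective minor version
 * is the lowest of the two peers (for example, a 2.11 relay daemon paired
 * with a 2.4 session daemon operates the connection at protocol 2.4).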
1948 */
1949 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1950 struct relay_connection *conn,
1951 const struct lttng_buffer_view *payload)
1952 {
1953 int ret;
1954 ssize_t send_ret;
1955 struct lttcomm_relayd_version reply, msg;
1956 bool compatible = true;
1957
1958 conn->version_check_done = true;
1959
1960 /* Get version from the other side. */
1961 if (payload->size < sizeof(msg)) {
1962 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1963 sizeof(msg), payload->size);
1964 ret = -1;
1965 goto end;
1966 }
1967
1968 memcpy(&msg, payload->data, sizeof(msg));
1969 msg.major = be32toh(msg.major);
1970 msg.minor = be32toh(msg.minor);
1971
1972 memset(&reply, 0, sizeof(reply));
1973 reply.major = RELAYD_VERSION_COMM_MAJOR;
1974 reply.minor = RELAYD_VERSION_COMM_MINOR;
1975
1976 /* Major versions must be the same */
1977 if (reply.major != msg.major) {
1978 DBG("Incompatible major versions (%u vs %u), deleting session",
1979 reply.major, msg.major);
1980 compatible = false;
1981 }
1982
1983 conn->major = reply.major;
1984 /* We adapt to the lowest compatible version */
1985 if (reply.minor <= msg.minor) {
1986 conn->minor = reply.minor;
1987 } else {
1988 conn->minor = msg.minor;
1989 }
1990
1991 reply.major = htobe32(reply.major);
1992 reply.minor = htobe32(reply.minor);
1993 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1994 sizeof(reply), 0);
1995 if (send_ret < (ssize_t) sizeof(reply)) {
1996 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1997 send_ret);
1998 ret = -1;
1999 goto end;
2000 } else {
2001 ret = 0;
2002 }
2003
2004 if (!compatible) {
2005 ret = -1;
2006 goto end;
2007 }
2008
2009 DBG("Version check done using protocol %u.%u", conn->major,
2010 conn->minor);
2011
2012 end:
2013 return ret;
2014 }
2015
2016 /*
2017 * Check for data pending for a given stream id from the session daemon.
2018 */
2019 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2020 struct relay_connection *conn,
2021 const struct lttng_buffer_view *payload)
2022 {
2023 struct relay_session *session = conn->session;
2024 struct lttcomm_relayd_data_pending msg;
2025 struct lttcomm_relayd_generic_reply reply;
2026 struct relay_stream *stream;
2027 ssize_t send_ret;
2028 int ret;
2029 uint64_t stream_seq;
2030
2031 DBG("Data pending command received");
2032
2033 if (!session || !conn->version_check_done) {
2034 ERR("Trying to check for data before version check");
2035 ret = -1;
2036 goto end_no_session;
2037 }
2038
2039 if (payload->size < sizeof(msg)) {
2040 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
2041 sizeof(msg), payload->size);
2042 ret = -1;
2043 goto end_no_session;
2044 }
2045 memcpy(&msg, payload->data, sizeof(msg));
2046 msg.stream_id = be64toh(msg.stream_id);
2047 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
2048
2049 stream = stream_get_by_id(msg.stream_id);
2050 if (stream == NULL) {
2051 ret = -1;
2052 goto end;
2053 }
2054
2055 pthread_mutex_lock(&stream->lock);
2056
2057 if (session_streams_have_index(session)) {
2058 /*
2059 * Ensure that both the index and stream data have been
2060 * flushed up to the requested point.
2061 */
2062 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
2063 } else {
2064 stream_seq = stream->prev_data_seq;
2065 }
2066 DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
2067 ", prev_index_seq %" PRIu64
2068 ", and last_seq %" PRIu64, msg.stream_id,
2069 stream->prev_data_seq, stream->prev_index_seq,
2070 msg.last_net_seq_num);
2071
2072 /* Compare via signed difference to avoid sequence number wrap-around issues. */
2073 if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
2074 /* Data has in fact been written and is NOT pending */
2075 ret = 0;
2076 } else {
2077 /* Data still being streamed thus pending */
2078 ret = 1;
2079 }
2080
2081 stream->data_pending_check_done = true;
2082 pthread_mutex_unlock(&stream->lock);
2083
2084 stream_put(stream);
2085 end:
2086
2087 memset(&reply, 0, sizeof(reply));
2088 reply.ret_code = htobe32(ret);
2089 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2090 if (send_ret < (ssize_t) sizeof(reply)) {
2091 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
2092 send_ret);
2093 ret = -1;
2094 }
2095
2096 end_no_session:
2097 return ret;
2098 }
2099
2100 /*
2101 * Wait for the control socket to reach a quiescent state.
2102 *
2103 * Note that for now, when receiving this command from the session
2104 * daemon, every command or data packet received earlier on the
2105 * control socket has already been handled, which is why we simply
2106 * return OK here.
2107 */
2108 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
2109 struct relay_connection *conn,
2110 const struct lttng_buffer_view *payload)
2111 {
2112 int ret;
2113 ssize_t send_ret;
2114 struct relay_stream *stream;
2115 struct lttcomm_relayd_quiescent_control msg;
2116 struct lttcomm_relayd_generic_reply reply;
2117
2118 DBG("Checking quiescent state on control socket");
2119
2120 if (!conn->session || !conn->version_check_done) {
2121 ERR("Trying to check quiescent state before version check");
2122 ret = -1;
2123 goto end_no_session;
2124 }
2125
2126 if (payload->size < sizeof(msg)) {
2127 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
2128 sizeof(msg), payload->size);
2129 ret = -1;
2130 goto end_no_session;
2131 }
2132 memcpy(&msg, payload->data, sizeof(msg));
2133 msg.stream_id = be64toh(msg.stream_id);
2134
2135 stream = stream_get_by_id(msg.stream_id);
2136 if (!stream) {
2137 goto reply;
2138 }
2139 pthread_mutex_lock(&stream->lock);
2140 stream->data_pending_check_done = true;
2141 pthread_mutex_unlock(&stream->lock);
2142
2143 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
2144 stream_put(stream);
2145 reply:
2146 memset(&reply, 0, sizeof(reply));
2147 reply.ret_code = htobe32(LTTNG_OK);
2148 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2149 if (send_ret < (ssize_t) sizeof(reply)) {
2150 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
2151 send_ret);
2152 ret = -1;
2153 } else {
2154 ret = 0;
2155 }
2156
2157 end_no_session:
2158 return ret;
2159 }
2160
2161 /*
2162 * Initialize a data pending command. This means that a consumer is about
2163 * to ask for data pending for each stream it holds. Simply iterate over
2164 * all streams of a session and clear the data_pending_check_done flag.
2165 *
2166 * This command returns LTTNG_OK to the client.
2167 */
2168 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2169 struct relay_connection *conn,
2170 const struct lttng_buffer_view *payload)
2171 {
2172 int ret;
2173 ssize_t send_ret;
2174 struct lttng_ht_iter iter;
2175 struct lttcomm_relayd_begin_data_pending msg;
2176 struct lttcomm_relayd_generic_reply reply;
2177 struct relay_stream *stream;
2178
2179 assert(recv_hdr);
2180 assert(conn);
2181
2182 DBG("Init streams for data pending");
2183
2184 if (!conn->session || !conn->version_check_done) {
2185 ERR("Trying to check for data before version check");
2186 ret = -1;
2187 goto end_no_session;
2188 }
2189
2190 if (payload->size < sizeof(msg)) {
2191 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
2192 sizeof(msg), payload->size);
2193 ret = -1;
2194 goto end_no_session;
2195 }
2196 memcpy(&msg, payload->data, sizeof(msg));
2197 msg.session_id = be64toh(msg.session_id);
2198
2199 /*
2200 * Iterate over all streams to set the begin data pending flag.
2201 * For now, the streams are indexed by stream handle so we have
2202 * to iterate over all streams to find the one associated with
2203 * the right session_id.
2204 */
2205 rcu_read_lock();
2206 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2207 node.node) {
2208 if (!stream_get(stream)) {
2209 continue;
2210 }
2211 if (stream->trace->session->id == msg.session_id) {
2212 pthread_mutex_lock(&stream->lock);
2213 stream->data_pending_check_done = false;
2214 pthread_mutex_unlock(&stream->lock);
2215 DBG("Set begin data pending flag to stream %" PRIu64,
2216 stream->stream_handle);
2217 }
2218 stream_put(stream);
2219 }
2220 rcu_read_unlock();
2221
2222 memset(&reply, 0, sizeof(reply));
2223 /* All good, send back reply. */
2224 reply.ret_code = htobe32(LTTNG_OK);
2225
2226 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2227 if (send_ret < (ssize_t) sizeof(reply)) {
2228 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
2229 send_ret);
2230 ret = -1;
2231 } else {
2232 ret = 0;
2233 }
2234
2235 end_no_session:
2236 return ret;
2237 }
2238
2239 /*
2240 * End data pending command. This will check, for a given session id, if
2241 * each stream associated with it has its data_pending_check_done flag
2242 * set. If not, this means that the client lost track of the stream but
2243 * the data is still being streamed on our side. In this case, we inform
2244 * the client that data is in flight.
2245 *
2246 * Return to the client if there is data in flight or not with a ret_code.
2247 */
2248 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2249 struct relay_connection *conn,
2250 const struct lttng_buffer_view *payload)
2251 {
2252 int ret;
2253 ssize_t send_ret;
2254 struct lttng_ht_iter iter;
2255 struct lttcomm_relayd_end_data_pending msg;
2256 struct lttcomm_relayd_generic_reply reply;
2257 struct relay_stream *stream;
2258 uint32_t is_data_inflight = 0;
2259
2260 DBG("End data pending command");
2261
2262 if (!conn->session || !conn->version_check_done) {
2263 ERR("Trying to check for data before version check");
2264 ret = -1;
2265 goto end_no_session;
2266 }
2267
2268 if (payload->size < sizeof(msg)) {
2269 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
2270 sizeof(msg), payload->size);
2271 ret = -1;
2272 goto end_no_session;
2273 }
2274 memcpy(&msg, payload->data, sizeof(msg));
2275 msg.session_id = be64toh(msg.session_id);
2276
2277 /*
2278 * Iterate over all streams to see if the begin data pending
2279 * flag is set.
2280 */
2281 rcu_read_lock();
2282 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2283 node.node) {
2284 if (!stream_get(stream)) {
2285 continue;
2286 }
2287 if (stream->trace->session->id != msg.session_id) {
2288 stream_put(stream);
2289 continue;
2290 }
2291 pthread_mutex_lock(&stream->lock);
2292 if (!stream->data_pending_check_done) {
2293 uint64_t stream_seq;
2294
2295 if (session_streams_have_index(conn->session)) {
2296 /*
2297 * Ensure that both the index and stream data have been
2298 * flushed up to the requested point.
2299 */
2300 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
2301 } else {
2302 stream_seq = stream->prev_data_seq;
2303 }
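/* Data is only considered "not in flight" once the stream is closed and flushed up to its last announced sequence number. */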
2304 if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
2305 is_data_inflight = 1;
2306 DBG("Data is still in flight for stream %" PRIu64,
2307 stream->stream_handle);
2308 pthread_mutex_unlock(&stream->lock);
2309 stream_put(stream);
2310 break;
2311 }
2312 }
2313 pthread_mutex_unlock(&stream->lock);
2314 stream_put(stream);
2315 }
2316 rcu_read_unlock();
2317
2318 memset(&reply, 0, sizeof(reply));
2319 /* All good, send back reply. */
2320 reply.ret_code = htobe32(is_data_inflight);
2321
2322 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2323 if (send_ret < (ssize_t) sizeof(reply)) {
2324 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
2325 send_ret);
2326 ret = -1;
2327 } else {
2328 ret = 0;
2329 }
2330
2331 end_no_session:
2332 return ret;
2333 }
2334
2335 /*
2336 * Receive an index for a specific stream.
2337 *
2338 * Return 0 on success else a negative value.
2339 */
2340 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2341 struct relay_connection *conn,
2342 const struct lttng_buffer_view *payload)
2343 {
2344 int ret;
2345 ssize_t send_ret;
2346 struct relay_session *session = conn->session;
2347 struct lttcomm_relayd_index index_info;
2348 struct relay_index *index;
2349 struct lttcomm_relayd_generic_reply reply;
2350 struct relay_stream *stream;
2351 size_t msg_len;
2352
2353 assert(conn);
2354
2355 DBG("Relay receiving index");
2356
2357 if (!session || !conn->version_check_done) {
2358 ERR("Trying to receive an index before version check");
2359 ret = -1;
2360 goto end_no_session;
2361 }
2362
2363 msg_len = lttcomm_relayd_index_len(
2364 lttng_to_index_major(conn->major, conn->minor),
2365 lttng_to_index_minor(conn->major, conn->minor));
2366 if (payload->size < msg_len) {
2367 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2368 msg_len, payload->size);
2369 ret = -1;
2370 goto end_no_session;
2371 }
2372 memcpy(&index_info, payload->data, msg_len);
2373 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2374 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2375 index_info.packet_size = be64toh(index_info.packet_size);
2376 index_info.content_size = be64toh(index_info.content_size);
2377 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2378 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2379 index_info.events_discarded = be64toh(index_info.events_discarded);
2380 index_info.stream_id = be64toh(index_info.stream_id);
2381
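/* The stream_instance_id and packet_seq_num fields only exist in the index message starting with protocol 2.8; older peers do not send them. */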
2382 if (conn->minor >= 8) {
2383 index_info.stream_instance_id =
2384 be64toh(index_info.stream_instance_id);
2385 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2386 }
2387
2388 stream = stream_get_by_id(index_info.relay_stream_id);
2389 if (!stream) {
2390 ERR("stream_get_by_id not found");
2391 ret = -1;
2392 goto end;
2393 }
2394 pthread_mutex_lock(&stream->lock);
2395
2396 /* Live beacon handling */
2397 if (index_info.packet_size == 0) {
2398 DBG("Received live beacon for stream %" PRIu64,
2399 stream->stream_handle);
2400
2401 /*
2402 * Only flag a stream inactive when it has already
2403 * received data and no indexes are in flight.
2404 */
2405 if (stream->index_received_seqcount > 0
2406 && stream->indexes_in_flight == 0) {
2407 stream->beacon_ts_end = index_info.timestamp_end;
2408 }
2409 ret = 0;
2410 goto end_stream_put;
2411 } else {
2412 stream->beacon_ts_end = -1ULL;
2413 }
2414
2415 if (stream->ctf_stream_id == -1ULL) {
2416 stream->ctf_stream_id = index_info.stream_id;
2417 }
2418 index = relay_index_get_by_id_or_create(stream, index_info.net_seq_num);
2419 if (!index) {
2420 ret = -1;
2421 ERR("relay_index_get_by_id_or_create index NULL");
2422 goto end_stream_put;
2423 }
2424 if (set_index_control_data(index, &index_info, conn)) {
2425 ERR("set_index_control_data error");
2426 relay_index_put(index);
2427 ret = -1;
2428 goto end_stream_put;
2429 }
2430 ret = relay_index_try_flush(index);
2431 if (ret == 0) {
2432 tracefile_array_commit_seq(stream->tfa);
2433 stream->index_received_seqcount++;
2434 stream->pos_after_last_complete_data_index += index->total_size;
2435 stream->prev_index_seq = index_info.net_seq_num;
2436
2437 ret = try_rotate_stream_index(stream);
2438 if (ret < 0) {
2439 goto end_stream_put;
2440 }
2441 } else if (ret > 0) {
2442 /* no flush. */
2443 ret = 0;
2444 } else {
2445 /*
2446 * ret < 0
2447 *
2448 * relay_index_try_flush is responsible for the self-reference
2449 * put of the index object on error.
2450 */
2451 ERR("relay_index_try_flush error %d", ret);
2452 ret = -1;
2453 }
2454
2455 end_stream_put:
2456 pthread_mutex_unlock(&stream->lock);
2457 stream_put(stream);
2458
2459 end:
2460
2461 memset(&reply, 0, sizeof(reply));
2462 if (ret < 0) {
2463 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2464 } else {
2465 reply.ret_code = htobe32(LTTNG_OK);
2466 }
2467 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2468 if (send_ret < (ssize_t) sizeof(reply)) {
2469 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2470 ret = -1;
2471 }
2472
2473 end_no_session:
2474 return ret;
2475 }
2476
2477 /*
2478 * Receive the streams_sent message.
2479 *
2480 * Return 0 on success else a negative value.
2481 */
2482 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2483 struct relay_connection *conn,
2484 const struct lttng_buffer_view *payload)
2485 {
2486 int ret;
2487 ssize_t send_ret;
2488 struct lttcomm_relayd_generic_reply reply;
2489
2490 assert(conn);
2491
2492 DBG("Relay receiving streams_sent");
2493
2494 if (!conn->session || !conn->version_check_done) {
2495 ERR("Trying to mark streams as sent before version check");
2496 ret = -1;
2497 goto end_no_session;
2498 }
2499
2500 /*
2501 * Publish every pending stream in the connection's recv list; they are
2502 * now ready to be used by the viewer.
2503 */
2504 publish_connection_local_streams(conn);
2505
2506 memset(&reply, 0, sizeof(reply));
2507 reply.ret_code = htobe32(LTTNG_OK);
2508 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2509 if (send_ret < (ssize_t) sizeof(reply)) {
2510 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2511 send_ret);
2512 ret = -1;
2513 } else {
2514 /* Success. */
2515 ret = 0;
2516 }
2517
2518 end_no_session:
2519 return ret;
2520 }
2521
2522 /*
2523 * relay_rotate_session_stream: rotate a stream to a new tracefile for the session
2524 * rotation feature (not the tracefile rotation feature).
2525 */
2526 static int relay_rotate_session_stream(const struct lttcomm_relayd_hdr *recv_hdr,
2527 struct relay_connection *conn,
2528 const struct lttng_buffer_view *payload)
2529 {
2530 int ret;
2531 ssize_t send_ret;
2532 struct relay_session *session = conn->session;
2533 struct lttcomm_relayd_rotate_stream stream_info;
2534 struct lttcomm_relayd_generic_reply reply;
2535 struct relay_stream *stream;
2536 size_t header_len;
2537 size_t path_len;
2538 struct lttng_buffer_view new_path_view;
2539
2540 DBG("Rotate stream received");
2541
2542 if (!session || !conn->version_check_done) {
2543 ERR("Trying to rotate a stream before version check");
2544 ret = -1;
2545 goto end_no_reply;
2546 }
2547
2548 if (session->major == 2 && session->minor < 11) {
2549 ERR("Unsupported feature before 2.11");
2550 ret = -1;
2551 goto end_no_reply;
2552 }
2553
2554 header_len = sizeof(struct lttcomm_relayd_rotate_stream);
2555
2556 if (payload->size < header_len) {
2557 ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
2558 header_len, payload->size);
2559 ret = -1;
2560 goto end_no_reply;
2561 }
2562
2563 memcpy(&stream_info, payload->data, header_len);
2564
2565 /* Convert to host */
2566 stream_info.pathname_length = be32toh(stream_info.pathname_length);
2567 stream_info.stream_id = be64toh(stream_info.stream_id);
2568 stream_info.new_chunk_id = be64toh(stream_info.new_chunk_id);
2569 stream_info.rotate_at_seq_num = be64toh(stream_info.rotate_at_seq_num);
2570
2571 path_len = stream_info.pathname_length;
2572 if (payload->size < header_len + path_len) {
2573 ERR("Unexpected payload size in \"relay_rotate_session_stream\" including path: expected >= %zu bytes, got %zu bytes",
2574 header_len + path_len, payload->size);
2575 ret = -1;
2576 goto end_no_reply;
2577 }
2578
2579 /* Ensure it fits in local filename length. */
2580 if (path_len >= LTTNG_PATH_MAX) {
2581 ret = -ENAMETOOLONG;
2582 ERR("Length of relay_rotate_session_stream command's path name (%zu bytes) exceeds the maximal allowed length of %i bytes",
2583 path_len, LTTNG_PATH_MAX);
2584 goto end;
2585 }
2586
2587 new_path_view = lttng_buffer_view_from_view(payload, header_len,
2588 stream_info.pathname_length);
2589
2590 stream = stream_get_by_id(stream_info.stream_id);
2591 if (!stream) {
2592 ret = -1;
2593 goto end;
2594 }
2595
2596 pthread_mutex_lock(&stream->lock);
2597
2598 /*
2599 * Update the trace path (just the folder, the stream name does not
2600 * change).
2601 */
2602 free(stream->prev_path_name);
2603 stream->prev_path_name = stream->path_name;
2604 stream->path_name = create_output_path(new_path_view.data);
2605 if (!stream->path_name) {
2606 ERR("Failed to create a new output path");
2607 ret = -1;
2608 goto end_stream_unlock;
2609 }
2610 ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
2611 -1, -1);
2612 if (ret < 0) {
2613 ERR("relay creating output directory");
2614 ret = -1;
2615 goto end_stream_unlock;
2616 }
2617
2618 assert(stream->current_chunk_id.is_set);
2619 stream->current_chunk_id.value = stream_info.new_chunk_id;
2620
2621 if (stream->is_metadata) {
2622 /*
2623 * A metadata stream has no index; consider its rotation
2624 * complete.
2625 */
2626 stream->index_rotated = true;
2627 /*
2628 * The metadata stream is sent only over the control connection
2629 * so we know we have all the data to perform the stream
2630 * rotation.
2631 */
2632 ret = do_rotate_stream_data(stream);
2633 } else {
2634 stream->rotate_at_seq_num = stream_info.rotate_at_seq_num;
2635 ret = try_rotate_stream_data(stream);
2636 if (ret < 0) {
2637 goto end_stream_unlock;
2638 }
2639
2640 ret = try_rotate_stream_index(stream);
2641 if (ret < 0) {
2642 goto end_stream_unlock;
2643 }
2644 }
2645
2646 end_stream_unlock:
2647 pthread_mutex_unlock(&stream->lock);
2648 stream_put(stream);
2649 end:
2650 memset(&reply, 0, sizeof(reply));
2651 if (ret < 0) {
2652 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2653 } else {
2654 reply.ret_code = htobe32(LTTNG_OK);
2655 }
2656 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2657 sizeof(struct lttcomm_relayd_generic_reply), 0);
2658 if (send_ret < (ssize_t) sizeof(reply)) {
2659 ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
2660 send_ret);
2661 ret = -1;
2662 }
2663
2664 end_no_reply:
2665 return ret;
2666 }
2667
2668 #define DBG_CMD(cmd_name, conn) \
2669 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
2670
2671 static int relay_process_control_command(struct relay_connection *conn,
2672 const struct lttcomm_relayd_hdr *header,
2673 const struct lttng_buffer_view *payload)
2674 {
2675 int ret = 0;
2676
2677 switch (header->cmd) {
2678 case RELAYD_CREATE_SESSION:
2679 DBG_CMD("RELAYD_CREATE_SESSION", conn);
2680 ret = relay_create_session(header, conn, payload);
2681 break;
2682 case RELAYD_ADD_STREAM:
2683 DBG_CMD("RELAYD_ADD_STREAM", conn);
2684 ret = relay_add_stream(header, conn, payload);
2685 break;
2686 case RELAYD_START_DATA:
2687 DBG_CMD("RELAYD_START_DATA", conn);
2688 ret = relay_start(header, conn, payload);
2689 break;
2690 case RELAYD_SEND_METADATA:
2691 DBG_CMD("RELAYD_SEND_METADATA", conn);
2692 ret = relay_recv_metadata(header, conn, payload);
2693 break;
2694 case RELAYD_VERSION:
2695 DBG_CMD("RELAYD_VERSION", conn);
2696 ret = relay_send_version(header, conn, payload);
2697 break;
2698 case RELAYD_CLOSE_STREAM:
2699 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
2700 ret = relay_close_stream(header, conn, payload);
2701 break;
2702 case RELAYD_DATA_PENDING:
2703 DBG_CMD("RELAYD_DATA_PENDING", conn);
2704 ret = relay_data_pending(header, conn, payload);
2705 break;
2706 case RELAYD_QUIESCENT_CONTROL:
2707 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
2708 ret = relay_quiescent_control(header, conn, payload);
2709 break;
2710 case RELAYD_BEGIN_DATA_PENDING:
2711 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
2712 ret = relay_begin_data_pending(header, conn, payload);
2713 break;
2714 case RELAYD_END_DATA_PENDING:
2715 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
2716 ret = relay_end_data_pending(header, conn, payload);
2717 break;
2718 case RELAYD_SEND_INDEX:
2719 DBG_CMD("RELAYD_SEND_INDEX", conn);
2720 ret = relay_recv_index(header, conn, payload);
2721 break;
2722 case RELAYD_STREAMS_SENT:
2723 DBG_CMD("RELAYD_STREAMS_SENT", conn);
2724 ret = relay_streams_sent(header, conn, payload);
2725 break;
2726 case RELAYD_RESET_METADATA:
2727 DBG_CMD("RELAYD_RESET_METADATA", conn);
2728 ret = relay_reset_metadata(header, conn, payload);
2729 break;
2730 case RELAYD_ROTATE_STREAM:
2731 DBG_CMD("RELAYD_ROTATE_STREAM", conn);
2732 ret = relay_rotate_session_stream(header, conn, payload);
2733 break;
2734 case RELAYD_UPDATE_SYNC_INFO:
2735 default:
2736 ERR("Received unknown command (%u)", header->cmd);
2737 relay_unknown_command(conn);
2738 ret = -1;
2739 goto end;
2740 }
2741
2742 end:
2743 return ret;
2744 }
2745
2746 static enum relay_connection_status relay_process_control_receive_payload(
2747 struct relay_connection *conn)
2748 {
2749 int ret = 0;
2750 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2751 struct lttng_dynamic_buffer *reception_buffer =
2752 &conn->protocol.ctrl.reception_buffer;
2753 struct ctrl_connection_state_receive_payload *state =
2754 &conn->protocol.ctrl.state.receive_payload;
2755 struct lttng_buffer_view payload_view;
2756
2757 if (state->left_to_receive == 0) {
2758 /* Short-circuit for payload-less commands. */
2759 goto reception_complete;
2760 }
2761
2762 ret = conn->sock->ops->recvmsg(conn->sock,
2763 reception_buffer->data + state->received,
2764 state->left_to_receive, MSG_DONTWAIT);
2765 if (ret < 0) {
2766 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2767 PERROR("Unable to receive command payload on sock %d",
2768 conn->sock->fd);
2769 status = RELAY_CONNECTION_STATUS_ERROR;
2770 }
2771 goto end;
2772 } else if (ret == 0) {
2773 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2774 status = RELAY_CONNECTION_STATUS_CLOSED;
2775 goto end;
2776 }
2777
2778 assert(ret > 0);
2779 assert(ret <= state->left_to_receive);
2780
2781 state->left_to_receive -= ret;
2782 state->received += ret;
2783
2784 if (state->left_to_receive > 0) {
2785 /*
2786 * Can't transition to the protocol's next state, wait to
2787 * receive the rest of the payload.
2788 */
2789 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2790 state->received, state->left_to_receive,
2791 conn->sock->fd);
2792 goto end;
2793 }
2794
2795 reception_complete:
2796 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
2797 conn->sock->fd, state->received);
2798 /*
2799 * The payload required to process the command has been received.
2800 * A view to the reception buffer is forwarded to the various
2801 * commands and the state of the control is reset on success.
2802 *
2803 * Commands are responsible for sending their reply to the peer.
2804 */
2805 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
2806 0, -1);
2807 ret = relay_process_control_command(conn,
2808 &state->header, &payload_view);
2809 if (ret < 0) {
2810 status = RELAY_CONNECTION_STATUS_ERROR;
2811 goto end;
2812 }
2813
2814 ret = connection_reset_protocol_state(conn);
2815 if (ret) {
2816 status = RELAY_CONNECTION_STATUS_ERROR;
2817 }
2818 end:
2819 return status;
2820 }
2821
2822 static enum relay_connection_status relay_process_control_receive_header(
2823 struct relay_connection *conn)
2824 {
2825 int ret = 0;
2826 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
2827 struct lttcomm_relayd_hdr header;
2828 struct lttng_dynamic_buffer *reception_buffer =
2829 &conn->protocol.ctrl.reception_buffer;
2830 struct ctrl_connection_state_receive_header *state =
2831 &conn->protocol.ctrl.state.receive_header;
2832
2833 assert(state->left_to_receive != 0);
2834
2835 ret = conn->sock->ops->recvmsg(conn->sock,
2836 reception_buffer->data + state->received,
2837 state->left_to_receive, MSG_DONTWAIT);
2838 if (ret < 0) {
2839 if (errno != EAGAIN && errno != EWOULDBLOCK) {
2840 PERROR("Unable to receive control command header on sock %d",
2841 conn->sock->fd);
2842 status = RELAY_CONNECTION_STATUS_ERROR;
2843 }
2844 goto end;
2845 } else if (ret == 0) {
2846 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
2847 status = RELAY_CONNECTION_STATUS_CLOSED;
2848 goto end;
2849 }
2850
2851 assert(ret > 0);
2852 assert(ret <= state->left_to_receive);
2853
2854 state->left_to_receive -= ret;
2855 state->received += ret;
2856
2857 if (state->left_to_receive > 0) {
2858 /*
2859 * Can't transition to the protocol's next state, wait to
2860 * receive the rest of the header.
2861 */
2862 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
2863 state->received, state->left_to_receive,
2864 conn->sock->fd);
2865 goto end;
2866 }
2867
2868 /* Transition to next state: receiving the command's payload. */
2869 conn->protocol.ctrl.state_id =
2870 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
2871 memcpy(&header, reception_buffer->data, sizeof(header));
2872 header.circuit_id = be64toh(header.circuit_id);
2873 header.data_size = be64toh(header.data_size);
2874 header.cmd = be32toh(header.cmd);
2875 header.cmd_version = be32toh(header.cmd_version);
2876 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
2877 &header, sizeof(header));
2878
2879 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
2880 conn->sock->fd, header.cmd, header.cmd_version,
2881 header.data_size);
2882
2883 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
2884 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
2885 header.data_size);
2886 status = RELAY_CONNECTION_STATUS_ERROR;
2887 goto end;
2888 }
2889
2890 conn->protocol.ctrl.state.receive_payload.left_to_receive =
2891 header.data_size;
2892 conn->protocol.ctrl.state.receive_payload.received = 0;
2893 ret = lttng_dynamic_buffer_set_size(reception_buffer,
2894 header.data_size);
2895 if (ret) {
2896 status = RELAY_CONNECTION_STATUS_ERROR;
2897 goto end;
2898 }
2899
2900 if (header.data_size == 0) {
2901 /*
2902 * Manually invoke the next state as the poll loop
2903 * will not wake up to allow us to proceed further.
2904 */
2905 status = relay_process_control_receive_payload(conn);
2906 }
2907 end:
2908 return status;
2909 }
2910
2911 /*
2912 * Process the commands received on the control socket
2913 */
2914 static enum relay_connection_status relay_process_control(
2915 struct relay_connection *conn)
2916 {
2917 enum relay_connection_status status;
2918
2919 switch (conn->protocol.ctrl.state_id) {
2920 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
2921 status = relay_process_control_receive_header(conn);
2922 break;
2923 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
2924 status = relay_process_control_receive_payload(conn);
2925 break;
2926 default:
2927 ERR("Unknown control connection protocol state encountered.");
2928 abort();
2929 }
2930
2931 return status;
2932 }
2933
2934 /*
2935 * Handle index for a data stream.
2936 *
2937 * Called with the stream lock held.
2938 *
2939 * Return 0 on success else a negative value.
2940 */
2941 static int handle_index_data(struct relay_stream *stream, uint64_t net_seq_num,
2942 bool rotate_index, bool *flushed, uint64_t total_size)
2943 {
2944 int ret = 0;
2945 uint64_t data_offset;
2946 struct relay_index *index;
2947
2948 /* Get data offset because we are about to update the index. */
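/* Kept in big endian right away, matching the byte order used by the on-disk index format. */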
2949 data_offset = htobe64(stream->tracefile_size_current);
2950
2951 DBG("handle_index_data: stream %" PRIu64 " net_seq_num %" PRIu64 " data offset %" PRIu64,
2952 stream->stream_handle, net_seq_num, stream->tracefile_size_current);
2953
2954 /*
2955 * Look up an existing index for that stream id/sequence
2956 * number. If it exists, the control thread has already received the
2957 * index for it, thus we need to write it to disk.
2958 */
2959 index = relay_index_get_by_id_or_create(stream, net_seq_num);
2960 if (!index) {
2961 ret = -1;
2962 goto end;
2963 }
2964
2965 if (rotate_index || !stream->index_file) {
2966 const char *stream_path;
2967
2968 /*
2969 * The data connection creates the stream's first index file.
2970 *
2971 * This can happen _after_ a ROTATE_STREAM command. In
2972 * other words, the data of the first packet of this stream
2973 * can be received after a ROTATE_STREAM command.
2974 *
2975 * The ROTATE_STREAM command changes the stream's path_name
2976 * to point to the "next" chunk. If a rotation is pending for
2977 * this stream, as indicated by "rotate_at_seq_num != -1ULL",
2978 * it means that we are still receiving data that belongs in the
2979 * stream's former path.
2980 *
2981 * In this very specific case, we must ensure that the index
2982 * file is created in the stream's former path,
2983 * "prev_path_name".
2984 *
2985 * All other rotations beyond the first one are not affected
2986 * by this problem since the actual rotation operation creates
2987 * the new chunk's index file.
2988 */
2989 stream_path = stream->rotate_at_seq_num == -1ULL ?
2990 stream->path_name:
2991 stream->prev_path_name;
2992
2993 ret = create_rotate_index_file(stream, stream_path);
2994 if (ret < 0) {
2995 ERR("Failed to rotate index");
2996 /* Put self-ref for this index due to error. */
2997 relay_index_put(index);
2998 index = NULL;
2999 goto end;
3000 }
3001 }
3002
3003 if (relay_index_set_file(index, stream->index_file, data_offset)) {
3004 ret = -1;
3005 /* Put self-ref for this index due to error. */
3006 relay_index_put(index);
3007 index = NULL;
3008 goto end;
3009 }
3010
3011 ret = relay_index_try_flush(index);
3012 if (ret == 0) {
3013 tracefile_array_commit_seq(stream->tfa);
3014 stream->index_received_seqcount++;
3015 *flushed = true;
3016 } else if (ret > 0) {
3017 index->total_size = total_size;
3018 /* No flush. */
3019 ret = 0;
3020 } else {
3021 /*
3022 * ret < 0
3023 *
3024 * relay_index_try_flush is responsible for the self-reference
3025 * put of the index object on error.
3026 */
3027 ERR("relay_index_try_flush error %d", ret);
3028 ret = -1;
3029 }
3030 end:
3031 return ret;
3032 }
3033
3034 static enum relay_connection_status relay_process_data_receive_header(
3035 struct relay_connection *conn)
3036 {
3037 int ret;
3038 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3039 struct data_connection_state_receive_header *state =
3040 &conn->protocol.data.state.receive_header;
3041 struct lttcomm_relayd_data_hdr header;
3042 struct relay_stream *stream;
3043
3044 assert(state->left_to_receive != 0);
3045
3046 ret = conn->sock->ops->recvmsg(conn->sock,
3047 state->header_reception_buffer + state->received,
3048 state->left_to_receive, MSG_DONTWAIT);
3049 if (ret < 0) {
3050 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3051 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
3052 status = RELAY_CONNECTION_STATUS_ERROR;
3053 }
3054 goto end;
3055 } else if (ret == 0) {
3056 /* Orderly shutdown. Not necessary to print an error. */
3057 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3058 status = RELAY_CONNECTION_STATUS_CLOSED;
3059 goto end;
3060 }
3061
3062 assert(ret > 0);
3063 assert(ret <= state->left_to_receive);
3064
3065 state->left_to_receive -= ret;
3066 state->received += ret;
3067
3068 if (state->left_to_receive > 0) {
3069 /*
3070 * Can't transition to the protocol's next state, wait to
3071 * receive the rest of the header.
3072 */
3073 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3074 state->received, state->left_to_receive,
3075 conn->sock->fd);
3076 goto end;
3077 }
3078
3079 /* Transition to next state: receiving the payload. */
3080 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
3081
3082 memcpy(&header, state->header_reception_buffer, sizeof(header));
3083 header.circuit_id = be64toh(header.circuit_id);
3084 header.stream_id = be64toh(header.stream_id);
3085 header.data_size = be32toh(header.data_size);
3086 header.net_seq_num = be64toh(header.net_seq_num);
3087 header.padding_size = be32toh(header.padding_size);
3088 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
3089
3090 conn->protocol.data.state.receive_payload.left_to_receive =
3091 header.data_size;
3092 conn->protocol.data.state.receive_payload.received = 0;
3093 conn->protocol.data.state.receive_payload.rotate_index = false;
3094
3095 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
3096 conn->sock->fd, header.circuit_id,
3097 header.stream_id, header.data_size,
3098 header.net_seq_num, header.padding_size);
3099
3100 stream = stream_get_by_id(header.stream_id);
3101 if (!stream) {
3102 DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64,
3103 header.stream_id);
3104 /* Protocol error. */
3105 status = RELAY_CONNECTION_STATUS_ERROR;
3106 goto end;
3107 }
3108
3109 pthread_mutex_lock(&stream->lock);
3110
3111 /* Check if a rotation is needed. */
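/* This is the per-stream tracefile rotation (bounded tracefile size), not the session rotation handled by RELAYD_ROTATE_STREAM. */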
3112 if (stream->tracefile_size > 0 &&
3113 (stream->tracefile_size_current + header.data_size) >
3114 stream->tracefile_size) {
3115 uint64_t old_id, new_id;
3116
3117 old_id = tracefile_array_get_file_index_head(stream->tfa);
3118 tracefile_array_file_rotate(stream->tfa);
3119
3120 /* new_id is updated by utils_rotate_stream_file. */
3121 new_id = old_id;
3122
3123 ret = utils_rotate_stream_file(stream->path_name,
3124 stream->channel_name, stream->tracefile_size,
3125 stream->tracefile_count, -1,
3126 -1, stream->stream_fd->fd,
3127 &new_id, &stream->stream_fd->fd);
3128 if (ret < 0) {
3129 ERR("Failed to rotate stream output file");
3130 status = RELAY_CONNECTION_STATUS_ERROR;
3131 goto end_stream_unlock;
3132 }
3133
3134 /*
3135 * Reset current size because we just performed a stream
3136 * rotation.
3137 */
3138 stream->tracefile_size_current = 0;
3139 conn->protocol.data.state.receive_payload.rotate_index = true;
3140 }
3141
3142 end_stream_unlock:
3143 pthread_mutex_unlock(&stream->lock);
3144 stream_put(stream);
3145 end:
3146 return status;
3147 }
3148
3149 static enum relay_connection_status relay_process_data_receive_payload(
3150 struct relay_connection *conn)
3151 {
3152 int ret;
3153 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3154 struct relay_stream *stream;
3155 struct data_connection_state_receive_payload *state =
3156 &conn->protocol.data.state.receive_payload;
3157 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
3158 char data_buffer[chunk_size];
3159 bool partial_recv = false;
3160 bool new_stream = false, close_requested = false, index_flushed = false;
3161 uint64_t left_to_receive = state->left_to_receive;
3162 struct relay_session *session;
3163
3164 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
3165 state->header.stream_id, state->header.net_seq_num,
3166 state->received, left_to_receive);
3167
3168 stream = stream_get_by_id(state->header.stream_id);
3169 if (!stream) {
3170 /* Protocol error. */
3171 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
3172 state->header.stream_id);
3173 status = RELAY_CONNECTION_STATUS_ERROR;
3174 goto end;
3175 }
3176
3177 pthread_mutex_lock(&stream->lock);
3178 session = stream->trace->session;
3179 if (!conn->session) {
3180 ret = connection_set_session(conn, session);
3181 if (ret) {
3182 status = RELAY_CONNECTION_STATUS_ERROR;
3183 goto end_stream_unlock;
3184 }
3185 }
3186
3187 /*
3188 * The size of the "chunk" received on any iteration is bounded by:
3189 * - the data left to receive,
3190 * - the data immediately available on the socket,
3191 * - the on-stack data buffer
3192 */
3193 while (left_to_receive > 0 && !partial_recv) {
3194 ssize_t write_ret;
3195 size_t recv_size = min(left_to_receive, chunk_size);
3196
3197 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
3198 recv_size, MSG_DONTWAIT);
3199 if (ret < 0) {
3200 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3201 PERROR("Socket %d error", conn->sock->fd);
3202 status = RELAY_CONNECTION_STATUS_ERROR;
3203 }
3204 goto end_stream_unlock;
3205 } else if (ret == 0) {
3206 /* No more data ready to be consumed on socket. */
3207 DBG3("No more data ready for consumption on data socket of stream id %" PRIu64,
3208 state->header.stream_id);
3209 status = RELAY_CONNECTION_STATUS_CLOSED;
3210 break;
3211 } else if (ret < (int) recv_size) {
3212 /*
3213 * All the data available on the socket has been
3214 * consumed.
3215 */
3216 partial_recv = true;
3217 }
3218
3219 recv_size = ret;
3220
3221 /* Write data to stream output fd. */
3222 write_ret = lttng_write(stream->stream_fd->fd, data_buffer,
3223 recv_size);
3224 if (write_ret < (ssize_t) recv_size) {
3225 ERR("Relay error writing data to file");
3226 status = RELAY_CONNECTION_STATUS_ERROR;
3227 goto end_stream_unlock;
3228 }
3229
3230 left_to_receive -= recv_size;
3231 state->received += recv_size;
3232 state->left_to_receive = left_to_receive;
3233
3234 DBG2("Relay wrote %zd bytes to tracefile for stream id %" PRIu64,
3235 write_ret, stream->stream_handle);
3236 }
3237
3238 if (state->left_to_receive > 0) {
3239 /*
3240 * Did not receive all the data expected, wait for more data to
3241 * become available on the socket.
3242 */
3243 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
3244 state->header.stream_id, state->received,
3245 state->left_to_receive);
3246 goto end_stream_unlock;
3247 }
3248
3249 ret = write_padding_to_file(stream->stream_fd->fd,
3250 state->header.padding_size);
3251 if ((int64_t) ret < (int64_t) state->header.padding_size) {
3252 ERR("write_padding_to_file: fail stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3253 stream->stream_handle,
3254 state->header.net_seq_num, ret);
3255 status = RELAY_CONNECTION_STATUS_ERROR;
3256 goto end_stream_unlock;
3257 }
3258
3259
3260 if (session_streams_have_index(session)) {
3261 ret = handle_index_data(stream, state->header.net_seq_num,
3262 state->rotate_index, &index_flushed, state->header.data_size + state->header.padding_size);
3263 if (ret < 0) {
3264 ERR("handle_index_data: fail stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3265 stream->stream_handle,
3266 state->header.net_seq_num, ret);
3267 status = RELAY_CONNECTION_STATUS_ERROR;
3268 goto end_stream_unlock;
3269 }
3270 }
3271
3272 stream->tracefile_size_current += state->header.data_size +
3273 state->header.padding_size;
3274
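/* prev_data_seq is still at its initial -1ULL value: this is the first data packet for this stream, so the live side is notified below through session->new_streams. */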
3275 if (stream->prev_data_seq == -1ULL) {
3276 new_stream = true;
3277 }
3278 if (index_flushed) {
3279 stream->pos_after_last_complete_data_index =
3280 stream->tracefile_size_current;
3281 stream->prev_index_seq = state->header.net_seq_num;
3282 ret = try_rotate_stream_index(stream);
3283 if (ret < 0) {
3284 goto end_stream_unlock;
3285 }
3286 }
3287
3288 stream->prev_data_seq = state->header.net_seq_num;
3289
3290 /*
3291 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
3292 * contents of *state which are aliased (union) to the same location as
3293 * the new state. Don't use it beyond this point.
3294 */
3295 connection_reset_protocol_state(conn);
3296 state = NULL;
3297
3298 ret = try_rotate_stream_data(stream);
3299 if (ret < 0) {
3300 status = RELAY_CONNECTION_STATUS_ERROR;
3301 goto end_stream_unlock;
3302 }
3303
3304 end_stream_unlock:
3305 close_requested = stream->close_requested;
3306 pthread_mutex_unlock(&stream->lock);
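/* Only honour a deferred close request once the full payload announced in the data header has been received. */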
3307 if (close_requested && left_to_receive == 0) {
3308 try_stream_close(stream);
3309 }
3310
3311 if (new_stream) {
3312 pthread_mutex_lock(&session->lock);
3313 uatomic_set(&session->new_streams, 1);
3314 pthread_mutex_unlock(&session->lock);
3315 }
3316
3317 stream_put(stream);
3318 end:
3319 return status;
3320 }
3321
3322 /*
3323 * relay_process_data: Process the data received on the data socket
3324 */
3325 static enum relay_connection_status relay_process_data(
3326 struct relay_connection *conn)
3327 {
3328 enum relay_connection_status status;
3329
3330 switch (conn->protocol.data.state_id) {
3331 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
3332 status = relay_process_data_receive_header(conn);
3333 break;
3334 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
3335 status = relay_process_data_receive_payload(conn);
3336 break;
3337 default:
3338 ERR("Unexpected data connection communication state.");
3339 abort();
3340 }
3341
3342 return status;
3343 }
3344
3345 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
3346 {
3347 int ret;
3348
3349 (void) lttng_poll_del(events, pollfd);
3350
3351 ret = close(pollfd);
3352 if (ret < 0) {
3353 ERR("Closing pollfd %d", pollfd);
3354 }
3355 }
3356
3357 static void relay_thread_close_connection(struct lttng_poll_event *events,
3358 int pollfd, struct relay_connection *conn)
3359 {
3360 const char *type_str;
3361
3362 switch (conn->type) {
3363 case RELAY_DATA:
3364 type_str = "Data";
3365 break;
3366 case RELAY_CONTROL:
3367 type_str = "Control";
3368 break;
3369 case RELAY_VIEWER_COMMAND:
3370 type_str = "Viewer Command";
3371 break;
3372 case RELAY_VIEWER_NOTIFICATION:
3373 type_str = "Viewer Notification";
3374 break;
3375 default:
3376 type_str = "Unknown";
3377 }
3378 cleanup_connection_pollfd(events, pollfd);
3379 connection_put(conn);
3380 DBG("%s connection closed with %d", type_str, pollfd);
3381 }
3382
3383 /*
3384 * This thread does the actual work
3385 */
3386 static void *relay_thread_worker(void *data)
3387 {
3388 int ret, err = -1, last_seen_data_fd = -1;
3389 uint32_t nb_fd;
3390 struct lttng_poll_event events;
3391 struct lttng_ht *relay_connections_ht;
3392 struct lttng_ht_iter iter;
3393 struct relay_connection *destroy_conn = NULL;
3394
3395 DBG("[thread] Relay worker started");
3396
3397 rcu_register_thread();
3398
3399 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
3400
3401 if (testpoint(relayd_thread_worker)) {
3402 goto error_testpoint;
3403 }
3404
3405 health_code_update();
3406
3407 /* table of connections indexed on socket */
3408 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3409 if (!relay_connections_ht) {
3410 goto relay_connections_ht_error;
3411 }
3412
3413 ret = create_thread_poll_set(&events, 2);
3414 if (ret < 0) {
3415 goto error_poll_create;
3416 }
3417
3418 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
3419 if (ret < 0) {
3420 goto error;
3421 }
3422
3423 restart:
3424 while (1) {
3425 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
3426
3427 health_code_update();
3428
3429 /* Infinite blocking call, waiting for transmission */
3430 DBG3("Relayd worker thread polling...");
3431 health_poll_entry();
3432 ret = lttng_poll_wait(&events, -1);
3433 health_poll_exit();
3434 if (ret < 0) {
3435 /*
3436 * Restart interrupted system call.
3437 */
3438 if (errno == EINTR) {
3439 goto restart;
3440 }
3441 goto error;
3442 }
3443
3444 nb_fd = ret;
3445
3446 /*
3447 * Process control. The control connection is
3448 * prioritized so we don't starve it with high
3449 * throughput tracing data on the data connection.
3450 */
3451 for (i = 0; i < nb_fd; i++) {
3452 /* Fetch once the poll data */
3453 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3454 int pollfd = LTTNG_POLL_GETFD(&events, i);
3455
3456 health_code_update();
3457
3458 /* Thread quit pipe has been closed. Killing thread. */
3459 ret = check_thread_quit_pipe(pollfd, revents);
3460 if (ret) {
3461 err = 0;
3462 goto exit;
3463 }
3464
3465 /* Inspect the relay conn pipe for new connection */
3466 if (pollfd == relay_conn_pipe[0]) {
3467 if (revents & LPOLLIN) {
3468 struct relay_connection *conn;
3469
3470 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3471 if (ret < 0) {
3472 goto error;
3473 }
3474 lttng_poll_add(&events, conn->sock->fd,
3475 LPOLLIN | LPOLLRDHUP);
3476 connection_ht_add(relay_connections_ht, conn);
3477 DBG("Connection socket %d added", conn->sock->fd);
3478 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3479 ERR("Relay connection pipe error");
3480 goto error;
3481 } else {
3482 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3483 goto error;
3484 }
3485 } else {
3486 struct relay_connection *ctrl_conn;
3487
3488 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3489 /* If not found, there is a synchronization issue. */
3490 assert(ctrl_conn);
3491
3492 if (ctrl_conn->type == RELAY_DATA) {
3493 if (revents & LPOLLIN) {
3494 /*
3495 * Flag the last seen data fd not deleted. It will be
3496 * used as the last seen fd if any fd gets deleted in
3497 * this first loop.
3498 */
3499 last_notdel_data_fd = pollfd;
3500 }
3501 goto put_ctrl_connection;
3502 }
3503 assert(ctrl_conn->type == RELAY_CONTROL);
3504
3505 if (revents & LPOLLIN) {
3506 enum relay_connection_status status;
3507
3508 status = relay_process_control(ctrl_conn);
3509 if (status != RELAY_CONNECTION_STATUS_OK) {
3510 /*
3511 * On socket error flag the session as aborted to force
3512 * the cleanup of its stream otherwise it can leak
3513 * during the lifetime of the relayd.
3514 *
3515 * This prevents situations in which streams can be
3516 * left open because an index was received, the
3517 * control connection is closed, and the data
3518 * connection is closed (uncleanly) before the packet's
3519 * data is provided.
3520 *
3521 * Since the control connection encountered an error,
3522 * it is okay to be conservative and close the
3523 * session right now as we can't rely on the protocol
3524 * being respected anymore.
3525 */
3526 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3527 session_abort(ctrl_conn->session);
3528 }
3529
3530 /* Clear the connection on error or close. */
3531 relay_thread_close_connection(&events,
3532 pollfd,
3533 ctrl_conn);
3534 }
3535 seen_control = 1;
3536 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3537 relay_thread_close_connection(&events,
3538 pollfd, ctrl_conn);
3539 if (last_seen_data_fd == pollfd) {
3540 last_seen_data_fd = last_notdel_data_fd;
3541 }
3542 } else {
3543 ERR("Unexpected poll events %u for control sock %d",
3544 revents, pollfd);
3545 connection_put(ctrl_conn);
3546 goto error;
3547 }
3548 put_ctrl_connection:
3549 connection_put(ctrl_conn);
3550 }
3551 }
3552
3553 /*
3554 * The last loop handled a control request, go back to poll to make
3555 * sure we prioritise the control socket.
3556 */
3557 if (seen_control) {
3558 continue;
3559 }
3560
3561 if (last_seen_data_fd >= 0) {
3562 for (i = 0; i < nb_fd; i++) {
3563 int pollfd = LTTNG_POLL_GETFD(&events, i);
3564
3565 health_code_update();
3566
3567 if (last_seen_data_fd == pollfd) {
3568 idx = i;
3569 break;
3570 }
3571 }
3572 }
3573
3574 /* Process data connection. */
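/* Start right after the last data fd that was serviced so a single busy data connection cannot starve the others. */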
3575 for (i = idx + 1; i < nb_fd; i++) {
3576 /* Fetch the poll data. */
3577 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3578 int pollfd = LTTNG_POLL_GETFD(&events, i);
3579 struct relay_connection *data_conn;
3580
3581 health_code_update();
3582
3583 if (!revents) {
3584 /* No activity for this FD (poll implementation). */
3585 continue;
3586 }
3587
3588 /* Skip the command pipe. It's handled in the first loop. */
3589 if (pollfd == relay_conn_pipe[0]) {
3590 continue;
3591 }
3592
3593 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3594 if (!data_conn) {
3595 /* Skip it. It might have been removed already. */
3596 continue;
3597 }
3598 if (data_conn->type == RELAY_CONTROL) {
3599 goto put_data_connection;
3600 }
3601 assert(data_conn->type == RELAY_DATA);
3602
3603 if (revents & LPOLLIN) {
3604 enum relay_connection_status status;
3605
3606 status = relay_process_data(data_conn);
3607 /* Connection closed or error. */
3608 if (status != RELAY_CONNECTION_STATUS_OK) {
3609 /*
3610 * On socket error flag the session as aborted to force
3611 * the cleanup of its stream otherwise it can leak
3612 * during the lifetime of the relayd.
3613 *
3614 * This prevents situations in which streams can be
3615 * left open because an index was received, the
3616 * control connection is closed, and the data
3617 * connection is closed (uncleanly) before the packet's
3618 * data is provided.
3619 *
3620 * Since the data connection encountered an error,
3621 * it is okay to be conservative and close the
3622 * session right now as we can't rely on the protocol
3623 * being respected anymore.
3624 */
3625 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3626 session_abort(data_conn->session);
3627 }
3628 relay_thread_close_connection(&events, pollfd,
3629 data_conn);
3630 /*
3631 * Every 'goto restart' sets the last seen fd; here we don't
3632 * really care since we gracefully continue the loop after
3633 * the connection is deleted.
3634 */
3635 } else {
3636 /* Keep last seen fd. */
3637 last_seen_data_fd = pollfd;
3638 connection_put(data_conn);
3639 goto restart;
3640 }
3641 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3642 relay_thread_close_connection(&events, pollfd,
3643 data_conn);
3644 } else {
3645 ERR("Unknown poll events %u for data sock %d",
3646 revents, pollfd);
3647 }
3648 put_data_connection:
3649 connection_put(data_conn);
3650 }
3651 last_seen_data_fd = -1;
3652 }
3653
3654 /* Normal exit, no error */
3655 ret = 0;
3656
3657 exit:
3658 error:
3659 /* Cleanup remaining connection object. */
3660 rcu_read_lock();
3661 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3662 destroy_conn,
3663 sock_n.node) {
3664 health_code_update();
3665
3666 session_abort(destroy_conn->session);
3667
3668 /*
3669 * No need to grab another ref, because we own
3670 * destroy_conn.
3671 */
3672 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3673 destroy_conn);
3674 }
3675 rcu_read_unlock();
3676
3677 lttng_poll_clean(&events);
3678 error_poll_create:
3679 lttng_ht_destroy(relay_connections_ht);
3680 relay_connections_ht_error:
3681 /* Close relay conn pipes */
3682 utils_close_pipe(relay_conn_pipe);
3683 if (err) {
3684 DBG("Thread exited with error");
3685 }
3686 DBG("Worker thread cleanup complete");
3687 error_testpoint:
3688 if (err) {
3689 health_error();
3690 ERR("Health error occurred in %s", __func__);
3691 }
3692 health_unregister(health_relayd);
3693 rcu_unregister_thread();
3694 lttng_relay_stop_threads();
3695 return NULL;
3696 }
3697
3698 /*
3699 * Create the relay connection pipe used to wake up the worker thread.
3700 * Closed in cleanup().
3701 */
3702 static int create_relay_conn_pipe(void)
3703 {
3704 int ret;
3705
3706 ret = utils_create_pipe_cloexec(relay_conn_pipe);
3707
3708 return ret;
3709 }
3710
3711 /*
3712 * main
3713 */
3714 int main(int argc, char **argv)
3715 {
3716 int ret = 0, retval = 0;
3717 void *status;
3718
3719 /* Parse arguments */
3720 progname = argv[0];
3721 if (set_options(argc, argv)) {
3722 retval = -1;
3723 goto exit_options;
3724 }
3725
3726 if (set_signal_handler()) {
3727 retval = -1;
3728 goto exit_options;
3729 }
3730
3731 /* Try to create directory if -o, --output is specified. */
3732 if (opt_output_path) {
3733 if (*opt_output_path != '/') {
3734 ERR("Please specify an absolute path for -o, --output PATH");
3735 retval = -1;
3736 goto exit_options;
3737 }
3738
3739 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
3740 -1, -1);
3741 if (ret < 0) {
3742 ERR("Unable to create %s", opt_output_path);
3743 retval = -1;
3744 goto exit_options;
3745 }
3746 }
3747
3748 /* Daemonize */
3749 if (opt_daemon || opt_background) {
3750 int i;
3751
3752 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
3753 !opt_background);
3754 if (ret < 0) {
3755 retval = -1;
3756 goto exit_options;
3757 }
3758
3759 /*
3760 * We are in the child. Make sure all other file
3761 * descriptors are closed, in case we were started with
3762 * more open file descriptors than the standard ones.
3763 */
3764 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
3765 (void) close(i);
3766 }
3767 }
3768
3769 sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create();
3770 if (!sessiond_trace_chunk_registry) {
3771 ERR("Failed to initialize session daemon trace chunk registry");
3772 retval = -1;
3773 goto exit_sessiond_trace_chunk_registry;
3774 }
3775
3776 /* Initialize thread health monitoring */
3777 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
3778 if (!health_relayd) {
3779 PERROR("health_app_create error");
3780 retval = -1;
3781 goto exit_health_app_create;
3782 }
3783
3784 /* Create thread quit pipe */
3785 if (init_thread_quit_pipe()) {
3786 retval = -1;
3787 goto exit_init_data;
3788 }
3789
3790 /* Setup the thread apps communication pipe. */
3791 if (create_relay_conn_pipe()) {
3792 retval = -1;
3793 goto exit_init_data;
3794 }
3795
3796 /* Init relay command queue. */
3797 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
3798
3799 /* Initialize communication library */
3800 lttcomm_init();
3801 lttcomm_inet_init();
3802
3803 /* tables of sessions indexed by session ID */
3804 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3805 if (!sessions_ht) {
3806 retval = -1;
3807 goto exit_init_data;
3808 }
3809
3810 /* tables of streams indexed by stream ID */
3811 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3812 if (!relay_streams_ht) {
3813 retval = -1;
3814 goto exit_init_data;
3815 }
3816
3817 /* tables of streams indexed by stream ID */
3818 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3819 if (!viewer_streams_ht) {
3820 retval = -1;
3821 goto exit_init_data;
3822 }
3823
3824 ret = utils_create_pipe(health_quit_pipe);
3825 if (ret) {
3826 retval = -1;
3827 goto exit_health_quit_pipe;
3828 }
3829
3830 /* Create thread to manage the health check socket */
3831 ret = pthread_create(&health_thread, default_pthread_attr(),
3832 thread_manage_health, (void *) NULL);
3833 if (ret) {
3834 errno = ret;
3835 PERROR("pthread_create health");
3836 retval = -1;
3837 goto exit_health_thread;
3838 }
3839
3840 /* Setup the dispatcher thread */
3841 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
3842 relay_thread_dispatcher, (void *) NULL);
3843 if (ret) {
3844 errno = ret;
3845 PERROR("pthread_create dispatcher");
3846 retval = -1;
3847 goto exit_dispatcher_thread;
3848 }
3849
3850 /* Setup the worker thread */
3851 ret = pthread_create(&worker_thread, default_pthread_attr(),
3852 relay_thread_worker, NULL);
3853 if (ret) {
3854 errno = ret;
3855 PERROR("pthread_create worker");
3856 retval = -1;
3857 goto exit_worker_thread;
3858 }
3859
3860 /* Setup the listener thread */
3861 ret = pthread_create(&listener_thread, default_pthread_attr(),
3862 relay_thread_listener, (void *) NULL);
3863 if (ret) {
3864 errno = ret;
3865 PERROR("pthread_create listener");
3866 retval = -1;
3867 goto exit_listener_thread;
3868 }
3869
3870 ret = relayd_live_create(live_uri);
3871 if (ret) {
3872 ERR("Starting live viewer threads");
3873 retval = -1;
3874 goto exit_live;
3875 }
3876
3877 /*
3878 * This is where we start awaiting program completion (e.g. through
3879 * a signal that asks the threads to tear down).
3880 */
3881
3882 ret = relayd_live_join();
3883 if (ret) {
3884 retval = -1;
3885 }
3886 exit_live:
3887
3888 ret = pthread_join(listener_thread, &status);
3889 if (ret) {
3890 errno = ret;
3891 PERROR("pthread_join listener_thread");
3892 retval = -1;
3893 }
3894
3895 exit_listener_thread:
3896 ret = pthread_join(worker_thread, &status);
3897 if (ret) {
3898 errno = ret;
3899 PERROR("pthread_join worker_thread");
3900 retval = -1;
3901 }
3902
3903 exit_worker_thread:
3904 ret = pthread_join(dispatcher_thread, &status);
3905 if (ret) {
3906 errno = ret;
3907 PERROR("pthread_join dispatcher_thread");
3908 retval = -1;
3909 }
3910 exit_dispatcher_thread:
3911
3912 ret = pthread_join(health_thread, &status);
3913 if (ret) {
3914 errno = ret;
3915 PERROR("pthread_join health_thread");
3916 retval = -1;
3917 }
3918 exit_health_thread:
3919
3920 utils_close_pipe(health_quit_pipe);
3921 exit_health_quit_pipe:
3922
3923 exit_init_data:
3924 health_app_destroy(health_relayd);
3925 sessiond_trace_chunk_registry_destroy(sessiond_trace_chunk_registry);
3926 exit_health_app_create:
3927 exit_sessiond_trace_chunk_registry:
3928 exit_options:
3929 /*
3930 * Wait for all pending call_rcu work to complete before tearing
3931 * down data structures. call_rcu worker may be trying to
3932 * perform lookups in those structures.
3933 */
3934 rcu_barrier();
3935 relayd_cleanup();
3936
3937 /* Ensure all prior call_rcu are done. */
3938 rcu_barrier();
3939
3940 if (!retval) {
3941 exit(EXIT_SUCCESS);
3942 } else {
3943 exit(EXIT_FAILURE);
3944 }
3945 }