relayd: add fd-cap option to limit the number of opened FDs
[lttng-tools.git] / src / bin / lttng-relayd / main.c
1/*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#define _LGPL_SOURCE
22#include <getopt.h>
23#include <grp.h>
24#include <limits.h>
25#include <pthread.h>
26#include <signal.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
30#include <sys/mman.h>
31#include <sys/mount.h>
32#include <sys/resource.h>
33#include <sys/socket.h>
34#include <sys/stat.h>
35#include <sys/types.h>
36#include <sys/wait.h>
38#include <inttypes.h>
39#include <urcu/futex.h>
40#include <urcu/uatomic.h>
41#include <urcu/rculist.h>
42#include <unistd.h>
43#include <fcntl.h>
44#include <strings.h>
45#include <ctype.h>
46
47#include <lttng/lttng.h>
48#include <common/common.h>
49#include <common/compat/poll.h>
50#include <common/compat/socket.h>
51#include <common/compat/endian.h>
52#include <common/compat/getenv.h>
53#include <common/defaults.h>
54#include <common/daemonize.h>
55#include <common/futex.h>
56#include <common/sessiond-comm/sessiond-comm.h>
57#include <common/sessiond-comm/inet.h>
58#include <common/sessiond-comm/relayd.h>
59#include <common/uri.h>
60#include <common/utils.h>
61#include <common/align.h>
62#include <common/config/session-config.h>
63#include <common/dynamic-buffer.h>
64#include <common/buffer-view.h>
65#include <common/string-utils/format.h>
66
67#include "backward-compatibility-group-by.h"
68#include "cmd.h"
69#include "connection.h"
70#include "ctf-trace.h"
71#include "health-relayd.h"
72#include "index.h"
73#include "live.h"
74#include "lttng-relayd.h"
75#include "session.h"
76#include "sessiond-trace-chunks.h"
77#include "stream.h"
78#include "tcp_keep_alive.h"
79#include "testpoint.h"
80#include "tracefile-array.h"
81#include "utils.h"
82#include "version.h"
83#include "viewer-stream.h"
84
85static const char *help_msg =
86#ifdef LTTNG_EMBED_HELP
87#include <lttng-relayd.8.h>
88#else
89NULL
90#endif
91;
92
93enum relay_connection_status {
94 RELAY_CONNECTION_STATUS_OK,
95 /* An error occurred while processing an event on the connection. */
96 RELAY_CONNECTION_STATUS_ERROR,
97 /* Connection closed/shutdown cleanly. */
98 RELAY_CONNECTION_STATUS_CLOSED,
99};
100
101/* command line options */
102char *opt_output_path, *opt_working_directory;
103static int opt_daemon, opt_background, opt_print_version, opt_allow_clear = 1;
104enum relay_group_output_by opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_UNKNOWN;
105
106/*
107 * We need to wait for listener and live listener threads, as well as
108 * health check thread, before being ready to signal readiness.
109 */
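/*
 * The countdown is performed by lttng_relay_notify_ready(); once it reaches
 * zero, the parent of the daemonized process is notified with SIGUSR1.
 */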
110#define NR_LTTNG_RELAY_READY 3
111static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
112
113/* Size of receive buffer. */
114#define RECV_DATA_BUFFER_SIZE 65536
115
116static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
117static pid_t child_ppid; /* Internal parent PID used with daemonize. */
118
119static struct lttng_uri *control_uri;
120static struct lttng_uri *data_uri;
121static struct lttng_uri *live_uri;
122
123const char *progname;
124
125const char *tracing_group_name = DEFAULT_TRACING_GROUP;
126static int tracing_group_name_override;
127
128const char * const config_section_name = "relayd";
129
130/*
131 * Quit pipe for all threads. This permits a single cancellation point
132 * for all threads when receiving an event on the pipe.
133 */
134int thread_quit_pipe[2] = { -1, -1 };
135
136/*
137 * This pipe is used to inform the worker thread that a command is queued and
138 * ready to be processed.
139 */
140static int relay_conn_pipe[2] = { -1, -1 };
141
142/* Shared between threads */
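/*
 * Written with CMM_STORE_SHARED() by lttng_relay_stop_threads() and read with
 * CMM_LOAD_SHARED() by the dispatcher thread.
 */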
143static int dispatch_thread_exit;
144
145static pthread_t listener_thread;
146static pthread_t dispatcher_thread;
147static pthread_t worker_thread;
148static pthread_t health_thread;
149
150/*
151 * last_relay_stream_id_lock protects last_relay_stream_id increment
152 * atomicity on 32-bit architectures.
153 */
154static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
155static uint64_t last_relay_stream_id;
156
157/*
158 * Relay command queue.
159 *
160 * The relay_thread_listener and relay_thread_dispatcher communicate with this
161 * queue.
162 */
163static struct relay_conn_queue relay_conn_queue;
164
165/* Cap on the number of file descriptors in simultaneous use by the relay daemon. */
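/*
 * A value of 0 means the option was not set; set_options() then defaults the
 * cap to the process's soft RLIMIT_NOFILE limit.
 */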
166static unsigned int lttng_opt_fd_cap;
167
168/* Global relay stream hash table. */
169struct lttng_ht *relay_streams_ht;
170
171/* Global relay viewer stream hash table. */
172struct lttng_ht *viewer_streams_ht;
173
174/* Global relay sessions hash table. */
175struct lttng_ht *sessions_ht;
176
177/* Relayd health monitoring */
178struct health_app *health_relayd;
179
180struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
181
182static struct option long_options[] = {
183 { "control-port", 1, 0, 'C', },
184 { "data-port", 1, 0, 'D', },
185 { "live-port", 1, 0, 'L', },
186 { "daemonize", 0, 0, 'd', },
187 { "background", 0, 0, 'b', },
188 { "group", 1, 0, 'g', },
189 { "fd-cap", 1, 0, '\0', },
190 { "help", 0, 0, 'h', },
191 { "output", 1, 0, 'o', },
192 { "verbose", 0, 0, 'v', },
193 { "config", 1, 0, 'f' },
194 { "version", 0, 0, 'V' },
195 { "working-directory", 1, 0, 'w', },
196 { "group-output-by-session", 0, 0, 's', },
197 { "group-output-by-host", 0, 0, 'p', },
198 { "disallow-clear", 0, 0, 'x' },
199 { NULL, 0, 0, 0, },
200};
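/*
 * Note: "fd-cap" has no short option (its val is '\0'), so getopt returns 0
 * for it and it is handled by the "case 0" branch of set_option() below.
 * Example invocation (hypothetical value): `lttng-relayd --fd-cap 2048`.
 */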
201
202static const char *config_ignore_options[] = { "help", "config", "version" };
203
204static void print_version(void)
{
205 fprintf(stdout, "%s\n", VERSION);
206}
207
208static void relayd_config_log(void)
209{
210 DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s",
211 GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION,
212 EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME);
213 if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
214 DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n");
215 }
216 if (EXTRA_VERSION_PATCHES[0] != '\0') {
217 DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n");
218 }
219}
220
221/*
222 * Take an option from the getopt output and set it in the right variable to be
223 * used later.
224 *
225 * Return 0 on success else a negative value.
226 */
227static int set_option(int opt, const char *arg, const char *optname)
228{
229 int ret;
230
231 switch (opt) {
232 case 0:
233 if (!strcmp(optname, "fd-cap")) {
234 unsigned long v;
235
236 errno = 0;
237 v = strtoul(arg, NULL, 0);
238 if (errno != 0 || !isdigit(arg[0])) {
239 ERR("Wrong value in --fd-cap parameter: %s",
240 arg);
241 ret = -1;
242 goto end;
243 }
244 if (v < DEFAULT_RELAYD_MINIMAL_FD_CAP) {
245 ERR("File descriptor cap must be set to at least %d",
246 DEFAULT_RELAYD_MINIMAL_FD_CAP);
 ret = -1;
 goto end;
247 }
248 if (v >= UINT_MAX) {
249 ERR("File descriptor cap overflow in --fd-cap parameter: %s",
250 arg);
251 ret = -1;
252 goto end;
253 }
254 lttng_opt_fd_cap = (unsigned int) v;
255 DBG3("File descriptor cap set to %u", lttng_opt_fd_cap);
256 } else {
257 fprintf(stderr, "unknown option %s", optname);
258 if (arg) {
259 fprintf(stderr, " with arg %s\n", arg);
260 }
261 }
262 break;
263 case 'C':
264 if (lttng_is_setuid_setgid()) {
265 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
266 "-C, --control-port");
267 } else {
268 ret = uri_parse(arg, &control_uri);
269 if (ret < 0) {
270 ERR("Invalid control URI specified");
271 goto end;
272 }
273 if (control_uri->port == 0) {
274 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
275 }
276 }
277 break;
278 case 'D':
279 if (lttng_is_setuid_setgid()) {
280 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
281 "-D, -data-port");
282 } else {
283 ret = uri_parse(arg, &data_uri);
284 if (ret < 0) {
285 ERR("Invalid data URI specified");
286 goto end;
287 }
288 if (data_uri->port == 0) {
289 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
290 }
291 }
292 break;
293 case 'L':
294 if (lttng_is_setuid_setgid()) {
295 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
296 "-L, -live-port");
297 } else {
298 ret = uri_parse(arg, &live_uri);
299 if (ret < 0) {
300 ERR("Invalid live URI specified");
301 goto end;
302 }
303 if (live_uri->port == 0) {
304 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
305 }
306 }
307 break;
308 case 'd':
309 opt_daemon = 1;
310 break;
311 case 'b':
312 opt_background = 1;
313 break;
314 case 'g':
315 if (lttng_is_setuid_setgid()) {
316 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
317 "-g, --group");
318 } else {
319 tracing_group_name = strdup(arg);
320 if (tracing_group_name == NULL) {
321 ret = -errno;
322 PERROR("strdup");
323 goto end;
324 }
325 tracing_group_name_override = 1;
326 }
327 break;
328 case 'h':
329 ret = utils_show_help(8, "lttng-relayd", help_msg);
330 if (ret) {
331 ERR("Cannot show --help for `lttng-relayd`");
332 perror("exec");
333 }
334 exit(EXIT_FAILURE);
335 case 'V':
336 opt_print_version = 1;
337 break;
338 case 'o':
339 if (lttng_is_setuid_setgid()) {
340 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
341 "-o, --output");
342 } else {
343 ret = asprintf(&opt_output_path, "%s", arg);
344 if (ret < 0) {
345 ret = -errno;
346 PERROR("asprintf opt_output_path");
347 goto end;
348 }
349 }
350 break;
351 case 'w':
352 if (lttng_is_setuid_setgid()) {
353 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
354 "-w, --working-directory");
355 } else {
356 ret = asprintf(&opt_working_directory, "%s", arg);
357 if (ret < 0) {
358 ret = -errno;
359 PERROR("asprintf opt_working_directory");
360 goto end;
361 }
362 }
363 break;
364
365 case 'v':
366 /* Verbose level can increase using multiple -v */
367 if (arg) {
368 lttng_opt_verbose = config_parse_value(arg);
369 } else {
370 /* Only 3 levels of verbosity (-vvv). */
371 if (lttng_opt_verbose < 3) {
372 lttng_opt_verbose += 1;
373 }
374 }
375 break;
376 case 's':
377 if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
378 ERR("Cannot set --group-output-by-session, another --group-output-by argument is present");
379 exit(EXIT_FAILURE);
380 }
381 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_SESSION;
382 break;
383 case 'p':
384 if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
385 ERR("Cannot set --group-output-by-host, another --group-output-by argument is present");
386 exit(EXIT_FAILURE);
387 }
388 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
389 break;
390 case 'x':
391 /* Disallow clear */
392 opt_allow_clear = 0;
393 break;
394 default:
395 /* Unknown option or other error.
396 * Error is printed by getopt, just return */
397 ret = -1;
398 goto end;
399 }
400
401 /* All good. */
402 ret = 0;
403
404end:
405 return ret;
406}
407
408/*
409 * config_entry_handler_cb used to handle options read from a config file.
410 * See config_entry_handler_cb comment in common/config/session-config.h for the
411 * return value conventions.
412 */
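/*
 * A minimal sketch of what such a configuration file could contain (option
 * names and the [relayd] section match the long options above; the values
 * shown are hypothetical):
 *
 *   [relayd]
 *   daemonize=yes
 *   fd-cap=2048
 */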
413static int config_entry_handler(const struct config_entry *entry, void *unused)
414{
415 int ret = 0, i;
416
417 if (!entry || !entry->name || !entry->value) {
418 ret = -EINVAL;
419 goto end;
420 }
421
422 /* Check if the option is to be ignored */
423 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
424 if (!strcmp(entry->name, config_ignore_options[i])) {
425 goto end;
426 }
427 }
428
429 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
430 /* Ignore if entry name is not fully matched. */
431 if (strcmp(entry->name, long_options[i].name)) {
432 continue;
433 }
434
435 /*
436 * If the option takes no argument on the command line,
437 * we have to check if the value is "true". We support
438 * non-zero numeric values, true, on and yes.
439 */
440 if (!long_options[i].has_arg) {
441 ret = config_parse_value(entry->value);
442 if (ret <= 0) {
443 if (ret) {
444 WARN("Invalid configuration value \"%s\" for option %s",
445 entry->value, entry->name);
446 }
447 /* False, skip boolean config option. */
448 goto end;
449 }
450 }
451
452 ret = set_option(long_options[i].val, entry->value, entry->name);
453 goto end;
454 }
455
456 WARN("Unrecognized option \"%s\" in daemon configuration file.",
457 entry->name);
458
459end:
460 return ret;
461}
462
463static int parse_env_options(void)
464{
465 int ret = 0;
466 char *value = NULL;
467
468 value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV);
469 if (value) {
470 opt_working_directory = strdup(value);
471 if (!opt_working_directory) {
472 ERR("Failed to allocate working directory string (\"%s\")",
473 value);
474 ret = -1;
475 }
476 }
477 return ret;
478}
479
480static int set_options(int argc, char **argv)
481{
482 int c, ret = 0, option_index = 0, retval = 0;
483 int orig_optopt = optopt, orig_optind = optind;
484 char *default_address, *optstring;
485 const char *config_path = NULL;
486
487 optstring = utils_generate_optstring(long_options,
488 sizeof(long_options) / sizeof(struct option));
489 if (!optstring) {
490 retval = -ENOMEM;
491 goto exit;
492 }
493
494 /* Check for the --config option */
495
496 while ((c = getopt_long(argc, argv, optstring, long_options,
497 &option_index)) != -1) {
498 if (c == '?') {
499 retval = -EINVAL;
500 goto exit;
501 } else if (c != 'f') {
502 continue;
503 }
504
505 if (lttng_is_setuid_setgid()) {
506 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
507 "-f, --config");
508 } else {
509 config_path = utils_expand_path(optarg);
510 if (!config_path) {
511 ERR("Failed to resolve path: %s", optarg);
512 }
513 }
514 }
515
516 ret = config_get_section_entries(config_path, config_section_name,
517 config_entry_handler, NULL);
518 if (ret) {
519 if (ret > 0) {
520 ERR("Invalid configuration option at line %i", ret);
521 }
522 retval = -1;
523 goto exit;
524 }
525
526 /* Reset getopt's global state */
527 optopt = orig_optopt;
528 optind = orig_optind;
529 while (1) {
530 c = getopt_long(argc, argv, optstring, long_options, &option_index);
531 if (c == -1) {
532 break;
533 }
534
535 ret = set_option(c, optarg, long_options[option_index].name);
536 if (ret < 0) {
537 retval = -1;
538 goto exit;
539 }
540 }
541
542 /* assign default values */
543 if (control_uri == NULL) {
544 ret = asprintf(&default_address,
545 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
546 DEFAULT_NETWORK_CONTROL_PORT);
547 if (ret < 0) {
548 PERROR("asprintf default data address");
549 retval = -1;
550 goto exit;
551 }
552
553 ret = uri_parse(default_address, &control_uri);
554 free(default_address);
555 if (ret < 0) {
556 ERR("Invalid control URI specified");
557 retval = -1;
558 goto exit;
559 }
560 }
561 if (data_uri == NULL) {
562 ret = asprintf(&default_address,
563 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
564 DEFAULT_NETWORK_DATA_PORT);
565 if (ret < 0) {
566 PERROR("asprintf default data address");
567 retval = -1;
568 goto exit;
569 }
570
571 ret = uri_parse(default_address, &data_uri);
572 free(default_address);
573 if (ret < 0) {
574 ERR("Invalid data URI specified");
575 retval = -1;
576 goto exit;
577 }
578 }
579 if (live_uri == NULL) {
580 ret = asprintf(&default_address,
581 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
582 DEFAULT_NETWORK_VIEWER_PORT);
583 if (ret < 0) {
584 PERROR("asprintf default viewer control address");
585 retval = -1;
586 goto exit;
587 }
588
589 ret = uri_parse(default_address, &live_uri);
590 free(default_address);
591 if (ret < 0) {
592 ERR("Invalid viewer control URI specified");
593 retval = -1;
594 goto exit;
595 }
596 }
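/*
 * When --fd-cap is not provided, default the cap to the soft limit on the
 * number of open file descriptors (RLIMIT_NOFILE) of the current process.
 */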
597 if (lttng_opt_fd_cap == 0) {
598 int ret;
599 struct rlimit rlimit;
600
601 ret = getrlimit(RLIMIT_NOFILE, &rlimit);
602 if (ret) {
603 PERROR("Failed to get file descriptor limit");
604 retval = -1;
 goto exit;
605 }
606
607 lttng_opt_fd_cap = rlimit.rlim_cur;
608 }
609
610 if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
611 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
612 }
613 if (opt_allow_clear) {
614 /* Check if env variable exists. */
615 const char *value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV);
616 if (value) {
617 ret = config_parse_value(value);
618 if (ret < 0) {
619 ERR("Invalid value for %s specified", DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV);
620 retval = -1;
621 goto exit;
622 }
623 opt_allow_clear = !ret;
624 }
625 }
626
627exit:
628 free(optstring);
629 return retval;
630}
631
632static void print_global_objects(void)
633{
634 rcu_register_thread();
635
636 print_viewer_streams();
637 print_relay_streams();
638 print_sessions();
639
640 rcu_unregister_thread();
641}
642
643/*
644 * Cleanup the daemon
645 */
646static void relayd_cleanup(void)
647{
648 print_global_objects();
649
650 DBG("Cleaning up");
651
652 if (viewer_streams_ht)
653 lttng_ht_destroy(viewer_streams_ht);
654 if (relay_streams_ht)
655 lttng_ht_destroy(relay_streams_ht);
656 if (sessions_ht)
657 lttng_ht_destroy(sessions_ht);
658
659 free(opt_output_path);
660 free(opt_working_directory);
661
662 /* Close thread quit pipes */
663 utils_close_pipe(thread_quit_pipe);
664
665 uri_free(control_uri);
666 uri_free(data_uri);
667 /* Live URI is freed in the live thread. */
668
669 if (tracing_group_name_override) {
670 free((void *) tracing_group_name);
671 }
672}
673
674/*
675 * Write to writable pipe used to notify a thread.
676 */
677static int notify_thread_pipe(int wpipe)
678{
679 ssize_t ret;
680
681 ret = lttng_write(wpipe, "!", 1);
682 if (ret < 1) {
683 PERROR("write poll pipe");
684 goto end;
685 }
686 ret = 0;
687end:
688 return ret;
689}
690
691static int notify_health_quit_pipe(int *pipe)
692{
693 ssize_t ret;
694
695 ret = lttng_write(pipe[1], "4", 1);
696 if (ret < 1) {
697 PERROR("write relay health quit");
698 goto end;
699 }
700 ret = 0;
701end:
702 return ret;
703}
704
705/*
706 * Stop all relayd and relayd-live threads.
707 */
708int lttng_relay_stop_threads(void)
709{
710 int retval = 0;
711
712 /* Stopping all threads */
713 DBG("Terminating all threads");
714 if (notify_thread_pipe(thread_quit_pipe[1])) {
715 ERR("write error on thread quit pipe");
716 retval = -1;
717 }
718
719 if (notify_health_quit_pipe(health_quit_pipe)) {
720 ERR("write error on health quit pipe");
721 }
722
723 /* Dispatch thread */
724 CMM_STORE_SHARED(dispatch_thread_exit, 1);
725 futex_nto1_wake(&relay_conn_queue.futex);
726
727 if (relayd_live_stop()) {
728 ERR("Error stopping live threads");
729 retval = -1;
730 }
731 return retval;
732}
733
734/*
735 * Signal handler for the daemon
736 *
737 * Simply stop all worker threads, letting main() return gracefully after
738 * joining all threads and calling cleanup().
739 */
740static void sighandler(int sig)
741{
742 switch (sig) {
743 case SIGINT:
744 DBG("SIGINT caught");
745 if (lttng_relay_stop_threads()) {
746 ERR("Error stopping threads");
747 }
748 break;
749 case SIGTERM:
750 DBG("SIGTERM caught");
751 if (lttng_relay_stop_threads()) {
752 ERR("Error stopping threads");
753 }
754 break;
755 case SIGUSR1:
756 CMM_STORE_SHARED(recv_child_signal, 1);
757 break;
758 default:
759 break;
760 }
761}
762
763/*
764 * Set up signal handlers for:
765 * SIGINT, SIGTERM, SIGUSR1, SIGPIPE
766 */
767static int set_signal_handler(void)
768{
769 int ret = 0;
770 struct sigaction sa;
771 sigset_t sigset;
772
773 if ((ret = sigemptyset(&sigset)) < 0) {
774 PERROR("sigemptyset");
775 return ret;
776 }
777
778 sa.sa_mask = sigset;
779 sa.sa_flags = 0;
780
781 sa.sa_handler = sighandler;
782 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
783 PERROR("sigaction");
784 return ret;
785 }
786
787 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
788 PERROR("sigaction");
789 return ret;
790 }
791
792 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
793 PERROR("sigaction");
794 return ret;
795 }
796
797 sa.sa_handler = SIG_IGN;
798 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
799 PERROR("sigaction");
800 return ret;
801 }
802
803 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
804
805 return ret;
806}
807
808void lttng_relay_notify_ready(void)
809{
810 /* Notify the parent of the fork() process that we are ready. */
811 if (opt_daemon || opt_background) {
812 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
813 kill(child_ppid, SIGUSR1);
814 }
815 }
816}
817
818/*
819 * Init thread quit pipe.
820 *
821 * Return -1 on error or 0 if all pipes are created.
822 */
823static int init_thread_quit_pipe(void)
824{
825 int ret;
826
827 ret = utils_create_pipe_cloexec(thread_quit_pipe);
828
829 return ret;
830}
831
832/*
833 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
834 */
835static int create_thread_poll_set(struct lttng_poll_event *events, int size)
836{
837 int ret;
838
839 if (events == NULL || size == 0) {
840 ret = -1;
841 goto error;
842 }
843
844 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
845 if (ret < 0) {
846 goto error;
847 }
848
849 /* Add quit pipe */
850 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
851 if (ret < 0) {
852 goto error;
853 }
854
855 return 0;
856
857error:
858 return ret;
859}
860
861/*
862 * Check if the thread quit pipe was triggered.
863 *
864 * Return 1 if it was triggered, else 0.
865 */
866static int check_thread_quit_pipe(int fd, uint32_t events)
867{
868 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
869 return 1;
870 }
871
872 return 0;
873}
874
875/*
876 * Create and init socket from uri.
877 */
878static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
879{
880 int ret;
881 struct lttcomm_sock *sock = NULL;
882
883 sock = lttcomm_alloc_sock_from_uri(uri);
884 if (sock == NULL) {
885 ERR("Allocating socket");
886 goto error;
887 }
888
889 ret = lttcomm_create_sock(sock);
890 if (ret < 0) {
891 goto error;
892 }
893 DBG("Listening on sock %d", sock->fd);
894
895 ret = sock->ops->bind(sock);
896 if (ret < 0) {
897 PERROR("Failed to bind socket");
898 goto error;
899 }
900
901 ret = sock->ops->listen(sock, -1);
902 if (ret < 0) {
903 goto error;
905 }
906
907 return sock;
908
909error:
910 if (sock) {
911 lttcomm_destroy_sock(sock);
912 }
913 return NULL;
914}
915
916/*
917 * This thread manages the listening for new connections on the network
918 */
919static void *relay_thread_listener(void *data)
920{
921 int i, ret, pollfd, err = -1;
922 uint32_t revents, nb_fd;
923 struct lttng_poll_event events;
924 struct lttcomm_sock *control_sock, *data_sock;
925
926 DBG("[thread] Relay listener started");
927
928 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
929
930 health_code_update();
931
932 control_sock = relay_socket_create(control_uri);
933 if (!control_sock) {
934 goto error_sock_control;
935 }
936
937 data_sock = relay_socket_create(data_uri);
938 if (!data_sock) {
939 goto error_sock_relay;
940 }
941
942 /*
943 * Pass 3 as size here for the thread quit pipe, control and
944 * data socket.
945 */
946 ret = create_thread_poll_set(&events, 3);
947 if (ret < 0) {
948 goto error_create_poll;
949 }
950
951 /* Add the control socket */
952 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
953 if (ret < 0) {
954 goto error_poll_add;
955 }
956
957 /* Add the data socket */
958 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
959 if (ret < 0) {
960 goto error_poll_add;
961 }
962
963 lttng_relay_notify_ready();
964
965 if (testpoint(relayd_thread_listener)) {
966 goto error_testpoint;
967 }
968
969 while (1) {
970 health_code_update();
971
972 DBG("Listener accepting connections");
973
974restart:
975 health_poll_entry();
976 ret = lttng_poll_wait(&events, -1);
977 health_poll_exit();
978 if (ret < 0) {
979 /*
980 * Restart interrupted system call.
981 */
982 if (errno == EINTR) {
983 goto restart;
984 }
985 goto error;
986 }
987
988 nb_fd = ret;
989
990 DBG("Relay new connection received");
991 for (i = 0; i < nb_fd; i++) {
992 health_code_update();
993
994 /* Fetch once the poll data */
995 revents = LTTNG_POLL_GETEV(&events, i);
996 pollfd = LTTNG_POLL_GETFD(&events, i);
997
998 /* Thread quit pipe has been closed. Killing thread. */
999 ret = check_thread_quit_pipe(pollfd, revents);
1000 if (ret) {
1001 err = 0;
1002 goto exit;
1003 }
1004
1005 if (revents & LPOLLIN) {
1006 /*
1007 * A new connection is requested, therefore a
1008 * sessiond/consumerd connection is allocated in
1009 * this thread, enqueued to a global queue and
1010 * dequeued (and freed) in the worker thread.
1011 */
1012 int val = 1;
1013 struct relay_connection *new_conn;
1014 struct lttcomm_sock *newsock;
1015 enum connection_type type;
1016
1017 if (pollfd == data_sock->fd) {
1018 type = RELAY_DATA;
1019 newsock = data_sock->ops->accept(data_sock);
1022 } else {
1023 assert(pollfd == control_sock->fd);
1024 type = RELAY_CONTROL;
1025 newsock = control_sock->ops->accept(control_sock);
1028 }
1029 if (!newsock) {
1030 PERROR("accepting sock");
1031 goto error;
1032 }
 /* Only dereference newsock once it is known to be non-NULL. */
 DBG("Relay %s connection accepted, socket %d",
 type == RELAY_DATA ? "data" : "control", newsock->fd);
1033
1034 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
1035 sizeof(val));
1036 if (ret < 0) {
1037 PERROR("setsockopt inet");
1038 lttcomm_destroy_sock(newsock);
1039 goto error;
1040 }
1041
1042 ret = socket_apply_keep_alive_config(newsock->fd);
1043 if (ret < 0) {
1044 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
1045 newsock->fd);
1046 lttcomm_destroy_sock(newsock);
1047 goto error;
1048 }
1049
1050 new_conn = connection_create(newsock, type);
1051 if (!new_conn) {
1052 lttcomm_destroy_sock(newsock);
1053 goto error;
1054 }
1055
1056 /* Enqueue request for the dispatcher thread. */
1057 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
1058 &new_conn->qnode);
1059
1060 /*
1061 * Wake the dispatch queue futex.
1062 * Implicit memory barrier with the
1063 * exchange in cds_wfcq_enqueue.
1064 */
1065 futex_nto1_wake(&relay_conn_queue.futex);
1066 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1067 ERR("socket poll error");
1068 goto error;
1069 } else {
1070 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
1071 goto error;
1072 }
1073 }
1074 }
1075
1076exit:
1077error:
1078error_poll_add:
1079error_testpoint:
1080 lttng_poll_clean(&events);
1081error_create_poll:
1082 if (data_sock->fd >= 0) {
1083 ret = data_sock->ops->close(data_sock);
1084 if (ret) {
1085 PERROR("close");
1086 }
1087 }
1088 lttcomm_destroy_sock(data_sock);
1089error_sock_relay:
1090 if (control_sock->fd >= 0) {
1091 ret = control_sock->ops->close(control_sock);
1092 if (ret) {
1093 PERROR("close");
1094 }
1095 }
1096 lttcomm_destroy_sock(control_sock);
1097error_sock_control:
1098 if (err) {
1099 health_error();
1100 ERR("Health error occurred in %s", __func__);
1101 }
1102 health_unregister(health_relayd);
1103 DBG("Relay listener thread cleanup complete");
1104 lttng_relay_stop_threads();
1105 return NULL;
1106}
1107
1108/*
1109 * This thread manages the dispatching of the requests to worker threads
1110 */
1111static void *relay_thread_dispatcher(void *data)
1112{
1113 int err = -1;
1114 ssize_t ret;
1115 struct cds_wfcq_node *node;
1116 struct relay_connection *new_conn = NULL;
1117
1118 DBG("[thread] Relay dispatcher started");
1119
1120 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
1121
1122 if (testpoint(relayd_thread_dispatcher)) {
1123 goto error_testpoint;
1124 }
1125
1126 health_code_update();
1127
1128 for (;;) {
1129 health_code_update();
1130
1131 /* Atomically prepare the queue futex */
1132 futex_nto1_prepare(&relay_conn_queue.futex);
1133
1134 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1135 break;
1136 }
1137
1138 do {
1139 health_code_update();
1140
1141 /* Dequeue commands */
1142 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1143 &relay_conn_queue.tail);
1144 if (node == NULL) {
1145 DBG("Woken up but nothing in the relay command queue");
1146 /* Continue thread execution */
1147 break;
1148 }
1149 new_conn = caa_container_of(node, struct relay_connection, qnode);
1150
1151 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1152
1153 /*
1154 * Inform worker thread of the new request. This
1155 * call is blocking so we can be assured that
1156 * the data will be read at some point in time
1157 * or wait to the end of the world :)
1158 */
1159 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1160 if (ret < 0) {
1161 PERROR("write connection pipe");
1162 connection_put(new_conn);
1163 goto error;
1164 }
1165 } while (node != NULL);
1166
1167 /* Futex wait on queue. Blocking call on futex() */
1168 health_poll_entry();
1169 futex_nto1_wait(&relay_conn_queue.futex);
1170 health_poll_exit();
1171 }
1172
1173 /* Normal exit, no error */
1174 err = 0;
1175
1176error:
1177error_testpoint:
1178 if (err) {
1179 health_error();
1180 ERR("Health error occurred in %s", __func__);
1181 }
1182 health_unregister(health_relayd);
1183 DBG("Dispatch thread dying");
1184 lttng_relay_stop_threads();
1185 return NULL;
1186}
1187
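/*
 * Per-packet indexes are only produced by peers speaking protocol 2.4 or
 * later, and snapshot sessions do not carry indexes; hence the check below.
 */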
1188static bool session_streams_have_index(const struct relay_session *session)
1189{
1190 return session->minor >= 4 && !session->snapshot;
1191}
1192
1193/*
1194 * Handle the RELAYD_CREATE_SESSION command.
1195 *
1196 * On success, send back the session id or else return a negative value.
1197 */
1198static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1199 struct relay_connection *conn,
1200 const struct lttng_buffer_view *payload)
1201{
1202 int ret = 0;
1203 ssize_t send_ret;
1204 struct relay_session *session = NULL;
1205 struct lttcomm_relayd_create_session_reply_2_11 reply = {};
1206 char session_name[LTTNG_NAME_MAX] = {};
1207 char hostname[LTTNG_HOST_NAME_MAX] = {};
1208 uint32_t live_timer = 0;
1209 bool snapshot = false;
1210 bool session_name_contains_creation_timestamp = false;
1211 /* Left nil for peers < 2.11. */
1212 char base_path[LTTNG_PATH_MAX] = {};
1213 lttng_uuid sessiond_uuid = {};
1214 LTTNG_OPTIONAL(uint64_t) id_sessiond = {};
1215 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1216 LTTNG_OPTIONAL(time_t) creation_time = {};
1217 struct lttng_dynamic_buffer reply_payload;
1218
1219 lttng_dynamic_buffer_init(&reply_payload);
1220
1221 if (conn->minor < 4) {
1222 /* From 2.1 to 2.3 */
1223 ret = 0;
1224 } else if (conn->minor >= 4 && conn->minor < 11) {
1225 /* From 2.4 to 2.10 */
1226 ret = cmd_create_session_2_4(payload, session_name,
1227 hostname, &live_timer, &snapshot);
1228 } else {
1229 bool has_current_chunk;
1230 uint64_t current_chunk_id_value;
1231 time_t creation_time_value;
1232 uint64_t id_sessiond_value;
1233
1234 /* From 2.11 to ... */
1235 ret = cmd_create_session_2_11(payload, session_name, hostname,
1236 base_path, &live_timer, &snapshot, &id_sessiond_value,
1237 sessiond_uuid, &has_current_chunk,
1238 &current_chunk_id_value, &creation_time_value,
1239 &session_name_contains_creation_timestamp);
1240 if (lttng_uuid_is_nil(sessiond_uuid)) {
1241 /* The nil UUID is reserved for pre-2.11 clients. */
1242 ERR("Illegal nil UUID announced by peer in create session command");
1243 ret = -1;
1244 goto send_reply;
1245 }
1246 LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value);
1247 LTTNG_OPTIONAL_SET(&creation_time, creation_time_value);
1248 if (has_current_chunk) {
1249 LTTNG_OPTIONAL_SET(&current_chunk_id,
1250 current_chunk_id_value);
1251 }
1252 }
1253
1254 if (ret < 0) {
1255 goto send_reply;
1256 }
1257
1258 session = session_create(session_name, hostname, base_path, live_timer,
1259 snapshot, sessiond_uuid,
1260 id_sessiond.is_set ? &id_sessiond.value : NULL,
1261 current_chunk_id.is_set ? &current_chunk_id.value : NULL,
1262 creation_time.is_set ? &creation_time.value : NULL,
1263 conn->major, conn->minor,
1264 session_name_contains_creation_timestamp);
1265 if (!session) {
1266 ret = -1;
1267 goto send_reply;
1268 }
1269 assert(!conn->session);
1270 conn->session = session;
1271 DBG("Created session %" PRIu64, session->id);
1272
1273 reply.generic.session_id = htobe64(session->id);
1274
1275send_reply:
1276 if (ret < 0) {
1277 reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL);
1278 } else {
1279 reply.generic.ret_code = htobe32(LTTNG_OK);
1280 }
1281
1282 if (conn->minor < 11) {
1283 /* From 2.1 to 2.10 */
1284 ret = lttng_dynamic_buffer_append(&reply_payload,
1285 &reply.generic, sizeof(reply.generic));
1286 if (ret) {
1287 ERR("Failed to append \"create session\" command reply header to payload buffer");
1288 ret = -1;
1289 goto end;
1290 }
1291 } else {
1292 const uint32_t output_path_length =
1293 session ? strlen(session->output_path) + 1 : 0;
1294
1295 reply.output_path_length = htobe32(output_path_length);
1296 ret = lttng_dynamic_buffer_append(
1297 &reply_payload, &reply, sizeof(reply));
1298 if (ret) {
1299 ERR("Failed to append \"create session\" command reply header to payload buffer");
1300 goto end;
1301 }
1302
1303 if (output_path_length) {
1304 ret = lttng_dynamic_buffer_append(&reply_payload,
1305 session->output_path,
1306 output_path_length);
1307 if (ret) {
1308 ERR("Failed to append \"create session\" command reply path to payload buffer");
1309 goto end;
1310 }
1311 }
1312 }
1313
1314 send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data,
1315 reply_payload.size, 0);
1316 if (send_ret < (ssize_t) reply_payload.size) {
1317 ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)",
1318 reply_payload.size, send_ret);
1319 ret = -1;
1320 }
1321end:
1322 if (ret < 0 && session) {
1323 session_put(session);
1324 }
1325 lttng_dynamic_buffer_reset(&reply_payload);
1326 return ret;
1327}
1328
1329/*
1330 * When we have received all the streams and the metadata for a channel,
1331 * we make them visible to the viewer threads.
1332 */
1333static void publish_connection_local_streams(struct relay_connection *conn)
1334{
1335 struct relay_stream *stream;
1336 struct relay_session *session = conn->session;
1337
1338 /*
1339 * We publish all streams belonging to a session atomically wrt
1340 * session lock.
1341 */
1342 pthread_mutex_lock(&session->lock);
1343 rcu_read_lock();
1344 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1345 recv_node) {
1346 stream_publish(stream);
1347 }
1348 rcu_read_unlock();
1349
1350 /*
1351 * Inform the viewer that there are new streams in the session.
1352 */
1353 if (session->viewer_attached) {
1354 uatomic_set(&session->new_streams, 1);
1355 }
1356 pthread_mutex_unlock(&session->lock);
1357}
1358
1359static int conform_channel_path(char *channel_path)
1360{
1361 int ret = 0;
1362
1363 if (strstr("../", channel_path)) {
1364 ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"",
1365 channel_path);
1366 ret = -1;
1367 goto end;
1368 }
1369
1370 if (*channel_path == '/') {
1371 const size_t len = strlen(channel_path);
1372
1373 /*
1374 * Channel paths from peers prior to 2.11 are expressed as an
1375 * absolute path that is, in reality, relative to the relay
1376 * daemon's output directory. Remove the leading slash so it
1377 * is correctly interpreted as a relative path later on.
1378 *
1379 * len (and not len - 1) is used to copy the trailing NUL terminator.
1380 */
1381 memmove(channel_path, channel_path + 1, len);
1382 }
1383end:
1384 return ret;
1385}
1386
1387/*
1388 * relay_add_stream: allocate a new stream for a session
1389 */
1390static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1391 struct relay_connection *conn,
1392 const struct lttng_buffer_view *payload)
1393{
1394 int ret;
1395 ssize_t send_ret;
1396 struct relay_session *session = conn->session;
1397 struct relay_stream *stream = NULL;
1398 struct lttcomm_relayd_status_stream reply;
1399 struct ctf_trace *trace = NULL;
1400 uint64_t stream_handle = -1ULL;
1401 char *path_name = NULL, *channel_name = NULL;
1402 uint64_t tracefile_size = 0, tracefile_count = 0;
1403 LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {};
1404
1405 if (!session || !conn->version_check_done) {
1406 ERR("Trying to add a stream before version check");
1407 ret = -1;
1408 goto end_no_session;
1409 }
1410
1411 if (session->minor == 1) {
1412 /* For 2.1 */
1413 ret = cmd_recv_stream_2_1(payload, &path_name,
1414 &channel_name);
1415 } else if (session->minor > 1 && session->minor < 11) {
1416 /* From 2.2 to 2.10 */
1417 ret = cmd_recv_stream_2_2(payload, &path_name,
1418 &channel_name, &tracefile_size, &tracefile_count);
1419 } else {
1420 /* From 2.11 to ... */
1421 ret = cmd_recv_stream_2_11(payload, &path_name,
1422 &channel_name, &tracefile_size, &tracefile_count,
1423 &stream_chunk_id.value);
1424 stream_chunk_id.is_set = true;
1425 }
1426
1427 if (ret < 0) {
1428 goto send_reply;
1429 }
1430
1431 if (conform_channel_path(path_name)) {
1432 goto send_reply;
1433 }
1434
1435 /*
1436 * Backward compatibility for --group-output-by-session.
1437 * Prior to lttng 2.11, the complete path is passed by the stream.
1438 * Starting at 2.11, lttng-relayd uses chunk. When dealing with producer
1439 * >=2.11 the chunk is responsible for the output path. When dealing
1440 * with producer < 2.11 the chunk output_path is the root output path
1441 * and the stream carries the complete path (path_name).
1442 * To support --group-output-by-session with older producer (<2.11), we
1443 * need to craft the path based on the stream path.
1444 */
1445 if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_SESSION) {
1446 if (conn->minor < 4) {
1447 /*
1448 * From 2.1 to 2.3, the session_name is not passed on
1449 * the RELAYD_CREATE_SESSION command. The session name
1450 * is necessary to detect the presence of a base_path
1451 * inside the stream path. Without it we cannot perform
1452 * a valid group-output-by-session transformation.
1453 */
1454 WARN("Unable to perform a --group-by-session transformation for session %" PRIu64
1455 " for stream with path \"%s\" as it is produced by a peer using a protocol older than v2.4",
1456 session->id, path_name);
1457 } else if (conn->minor >= 4 && conn->minor < 11) {
1458 char *group_by_session_path_name;
1459
1460 assert(session->session_name[0] != '\0');
1461
1462 group_by_session_path_name =
1463 backward_compat_group_by_session(
1464 path_name,
1465 session->session_name);
1466 if (!group_by_session_path_name) {
1467 ERR("Failed to apply group by session to stream of session %" PRIu64,
1468 session->id);
1469 goto send_reply;
1470 }
1471
1472 DBG("Transformed session path from \"%s\" to \"%s\" to honor per-session name grouping",
1473 path_name, group_by_session_path_name);
1474
1475 free(path_name);
1476 path_name = group_by_session_path_name;
1477 }
1478 }
1479
1480 trace = ctf_trace_get_by_path_or_create(session, path_name);
1481 if (!trace) {
1482 goto send_reply;
1483 }
1484
1485 /* This stream here has one reference on the trace. */
1486 pthread_mutex_lock(&last_relay_stream_id_lock);
1487 stream_handle = ++last_relay_stream_id;
1488 pthread_mutex_unlock(&last_relay_stream_id_lock);
1489
1490 /* We pass ownership of path_name and channel_name. */
1491 stream = stream_create(trace, stream_handle, path_name,
1492 channel_name, tracefile_size, tracefile_count);
1493 path_name = NULL;
1494 channel_name = NULL;
1495
1496 /*
1497 * Streams are the owners of their trace. Reference to trace is
1498 * kept within stream_create().
1499 */
1500 ctf_trace_put(trace);
1501
1502send_reply:
1503 memset(&reply, 0, sizeof(reply));
1504 reply.handle = htobe64(stream_handle);
1505 if (!stream) {
1506 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1507 } else {
1508 reply.ret_code = htobe32(LTTNG_OK);
1509 }
1510
1511 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1512 sizeof(struct lttcomm_relayd_status_stream), 0);
1513 if (send_ret < (ssize_t) sizeof(reply)) {
1514 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1515 send_ret);
1516 ret = -1;
1517 }
1518
1519end_no_session:
1520 free(path_name);
1521 free(channel_name);
1522 return ret;
1523}
1524
1525/*
1526 * relay_close_stream: close a specific stream
1527 */
1528static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1529 struct relay_connection *conn,
1530 const struct lttng_buffer_view *payload)
1531{
1532 int ret;
1533 ssize_t send_ret;
1534 struct relay_session *session = conn->session;
1535 struct lttcomm_relayd_close_stream stream_info;
1536 struct lttcomm_relayd_generic_reply reply;
1537 struct relay_stream *stream;
1538
1539 DBG("Close stream received");
1540
1541 if (!session || !conn->version_check_done) {
1542 ERR("Trying to close a stream before version check");
1543 ret = -1;
1544 goto end_no_session;
1545 }
1546
1547 if (payload->size < sizeof(stream_info)) {
1548 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1549 sizeof(stream_info), payload->size);
1550 ret = -1;
1551 goto end_no_session;
1552 }
1553 memcpy(&stream_info, payload->data, sizeof(stream_info));
1554 stream_info.stream_id = be64toh(stream_info.stream_id);
1555 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1556
1557 stream = stream_get_by_id(stream_info.stream_id);
1558 if (!stream) {
1559 ret = -1;
1560 goto end;
1561 }
1562
1563 /*
1564 * Set last_net_seq_num before the close flag. Required by data
1565 * pending check.
1566 */
1567 pthread_mutex_lock(&stream->lock);
1568 stream->last_net_seq_num = stream_info.last_net_seq_num;
1569 pthread_mutex_unlock(&stream->lock);
1570
1571 /*
1572 * This is one of the conditions which may trigger a stream close
1573 * with the others being:
1574 * 1) A close command is received for a stream
1575 * 2) The control connection owning the stream is closed
1576 * 3) We have received all of the stream's data _after_ a close
1577 * request.
1578 */
1579 try_stream_close(stream);
1580 if (stream->is_metadata) {
1581 struct relay_viewer_stream *vstream;
1582
1583 vstream = viewer_stream_get_by_id(stream->stream_handle);
1584 if (vstream) {
1585 if (stream->no_new_metadata_notified) {
1586 /*
1587 * Since all the metadata has been sent to the
1588 * viewer and that we have a request to close
1589 * its stream, we can safely teardown the
1590 * corresponding metadata viewer stream.
1591 */
1592 viewer_stream_put(vstream);
1593 }
1594 /* Put local reference. */
1595 viewer_stream_put(vstream);
1596 }
1597 }
1598 stream_put(stream);
1599 ret = 0;
1600
1601end:
1602 memset(&reply, 0, sizeof(reply));
1603 if (ret < 0) {
1604 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1605 } else {
1606 reply.ret_code = htobe32(LTTNG_OK);
1607 }
1608 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1609 sizeof(struct lttcomm_relayd_generic_reply), 0);
1610 if (send_ret < (ssize_t) sizeof(reply)) {
1611 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1612 send_ret);
1613 ret = -1;
1614 }
1615
1616end_no_session:
1617 return ret;
1618}
1619
1620/*
1621 * relay_reset_metadata: reset a metadata stream
1622 */
1623static
1624int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1625 struct relay_connection *conn,
1626 const struct lttng_buffer_view *payload)
1627{
1628 int ret;
1629 ssize_t send_ret;
1630 struct relay_session *session = conn->session;
1631 struct lttcomm_relayd_reset_metadata stream_info;
1632 struct lttcomm_relayd_generic_reply reply;
1633 struct relay_stream *stream;
1634
1635 DBG("Reset metadata received");
1636
1637 if (!session || !conn->version_check_done) {
1638 ERR("Trying to reset a metadata stream before version check");
1639 ret = -1;
1640 goto end_no_session;
1641 }
1642
1643 if (payload->size < sizeof(stream_info)) {
1644 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1645 sizeof(stream_info), payload->size);
1646 ret = -1;
1647 goto end_no_session;
1648 }
1649 memcpy(&stream_info, payload->data, sizeof(stream_info));
1650 stream_info.stream_id = be64toh(stream_info.stream_id);
1651 stream_info.version = be64toh(stream_info.version);
1652
1653 DBG("Update metadata to version %" PRIu64, stream_info.version);
1654
1655 /* Unsupported for live sessions for now. */
1656 if (session->live_timer != 0) {
1657 ret = -1;
1658 goto end;
1659 }
1660
1661 stream = stream_get_by_id(stream_info.stream_id);
1662 if (!stream) {
1663 ret = -1;
1664 goto end;
1665 }
1666 pthread_mutex_lock(&stream->lock);
1667 if (!stream->is_metadata) {
1668 ret = -1;
1669 goto end_unlock;
1670 }
1671
1672 ret = stream_reset_file(stream);
1673 if (ret < 0) {
1674 ERR("Failed to reset metadata stream %" PRIu64
1675 ": stream_path = %s, channel = %s",
1676 stream->stream_handle, stream->path_name,
1677 stream->channel_name);
1678 goto end_unlock;
1679 }
1680end_unlock:
1681 pthread_mutex_unlock(&stream->lock);
1682 stream_put(stream);
1683
1684end:
1685 memset(&reply, 0, sizeof(reply));
1686 if (ret < 0) {
1687 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1688 } else {
1689 reply.ret_code = htobe32(LTTNG_OK);
1690 }
1691 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1692 sizeof(struct lttcomm_relayd_generic_reply), 0);
1693 if (send_ret < (ssize_t) sizeof(reply)) {
1694 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1695 send_ret);
1696 ret = -1;
1697 }
1698
1699end_no_session:
1700 return ret;
1701}
1702
1703/*
1704 * relay_unknown_command: send -1 if received unknown command
1705 */
1706static void relay_unknown_command(struct relay_connection *conn)
1707{
1708 struct lttcomm_relayd_generic_reply reply;
1709 ssize_t send_ret;
1710
1711 memset(&reply, 0, sizeof(reply));
1712 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1713 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1714 if (send_ret < (ssize_t) sizeof(reply)) {
1715 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1716 }
1717}
1718
1719/*
1720 * relay_start: send an acknowledgment to the client to tell if we are
1721 * ready to receive data. We are ready if a session is established.
1722 */
1723static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1724 struct relay_connection *conn,
1725 const struct lttng_buffer_view *payload)
1726{
1727 int ret = 0;
1728 ssize_t send_ret;
1729 struct lttcomm_relayd_generic_reply reply;
1730 struct relay_session *session = conn->session;
1731
1732 if (!session) {
1733 DBG("Trying to start the streaming without a session established");
1734 ret = htobe32(LTTNG_ERR_UNK);
1735 }
1736
1737 memset(&reply, 0, sizeof(reply));
1738 reply.ret_code = htobe32(LTTNG_OK);
1739 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1740 sizeof(reply), 0);
1741 if (send_ret < (ssize_t) sizeof(reply)) {
1742 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1743 send_ret);
1744 ret = -1;
1745 }
1746
1747 return ret;
1748}
1749
1750/*
1751 * relay_recv_metadata: receive the metadata for the session.
1752 */
1753static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1754 struct relay_connection *conn,
1755 const struct lttng_buffer_view *payload)
1756{
1757 int ret = 0;
1758 struct relay_session *session = conn->session;
1759 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1760 struct relay_stream *metadata_stream;
1761 uint64_t metadata_payload_size;
1762 struct lttng_buffer_view packet_view;
1763
1764 if (!session) {
1765 ERR("Metadata sent before version check");
1766 ret = -1;
1767 goto end;
1768 }
1769
1770 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1771 ERR("Incorrect data size");
1772 ret = -1;
1773 goto end;
1774 }
1775 metadata_payload_size = recv_hdr->data_size -
1776 sizeof(struct lttcomm_relayd_metadata_payload);
1777
1778 memcpy(&metadata_payload_header, payload->data,
1779 sizeof(metadata_payload_header));
1780 metadata_payload_header.stream_id = be64toh(
1781 metadata_payload_header.stream_id);
1782 metadata_payload_header.padding_size = be32toh(
1783 metadata_payload_header.padding_size);
1784
1785 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1786 if (!metadata_stream) {
1787 ret = -1;
1788 goto end;
1789 }
1790
1791 packet_view = lttng_buffer_view_from_view(payload,
1792 sizeof(metadata_payload_header), metadata_payload_size);
1793 if (!packet_view.data) {
1794 ERR("Invalid metadata packet length announced by header");
1795 ret = -1;
1796 goto end_put;
1797 }
1798
1799 pthread_mutex_lock(&metadata_stream->lock);
1800 ret = stream_write(metadata_stream, &packet_view,
1801 metadata_payload_header.padding_size);
1802 pthread_mutex_unlock(&metadata_stream->lock);
1803 if (ret) {
1804 ret = -1;
1805 goto end_put;
1806 }
1807end_put:
1808 stream_put(metadata_stream);
1809end:
1810 return ret;
1811}
1812
1813/*
1814 * relay_send_version: send relayd version number
1815 */
1816static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1817 struct relay_connection *conn,
1818 const struct lttng_buffer_view *payload)
1819{
1820 int ret;
1821 ssize_t send_ret;
1822 struct lttcomm_relayd_version reply, msg;
1823 bool compatible = true;
1824
1825 conn->version_check_done = true;
1826
1827 /* Get version from the other side. */
1828 if (payload->size < sizeof(msg)) {
1829 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1830 sizeof(msg), payload->size);
1831 ret = -1;
1832 goto end;
1833 }
1834
1835 memcpy(&msg, payload->data, sizeof(msg));
1836 msg.major = be32toh(msg.major);
1837 msg.minor = be32toh(msg.minor);
1838
1839 memset(&reply, 0, sizeof(reply));
1840 reply.major = RELAYD_VERSION_COMM_MAJOR;
1841 reply.minor = RELAYD_VERSION_COMM_MINOR;
1842
1843 /* Major versions must be the same */
1844 if (reply.major != msg.major) {
1845 DBG("Incompatible major versions (%u vs %u), deleting session",
1846 reply.major, msg.major);
1847 compatible = false;
1848 }
1849
1850 conn->major = reply.major;
1851 /* We adapt to the lowest compatible version */
1852 if (reply.minor <= msg.minor) {
1853 conn->minor = reply.minor;
1854 } else {
1855 conn->minor = msg.minor;
1856 }
1857
1858 reply.major = htobe32(reply.major);
1859 reply.minor = htobe32(reply.minor);
1860 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1861 sizeof(reply), 0);
1862 if (send_ret < (ssize_t) sizeof(reply)) {
1863 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1864 send_ret);
1865 ret = -1;
1866 goto end;
1867 } else {
1868 ret = 0;
1869 }
1870
1871 if (!compatible) {
1872 ret = -1;
1873 goto end;
1874 }
1875
1876 DBG("Version check done using protocol %u.%u", conn->major,
1877 conn->minor);
1878
1879end:
1880 return ret;
1881}
1882
1883/*
1884 * Check for data pending for a given stream id from the session daemon.
1885 */
1886static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1887 struct relay_connection *conn,
1888 const struct lttng_buffer_view *payload)
1889{
1890 struct relay_session *session = conn->session;
1891 struct lttcomm_relayd_data_pending msg;
1892 struct lttcomm_relayd_generic_reply reply;
1893 struct relay_stream *stream;
1894 ssize_t send_ret;
1895 int ret;
1896 uint64_t stream_seq;
1897
1898 DBG("Data pending command received");
1899
1900 if (!session || !conn->version_check_done) {
1901 ERR("Trying to check for data before version check");
1902 ret = -1;
1903 goto end_no_session;
1904 }
1905
1906 if (payload->size < sizeof(msg)) {
1907 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
1908 sizeof(msg), payload->size);
1909 ret = -1;
1910 goto end_no_session;
1911 }
1912 memcpy(&msg, payload->data, sizeof(msg));
1913 msg.stream_id = be64toh(msg.stream_id);
1914 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
1915
1916 stream = stream_get_by_id(msg.stream_id);
1917 if (stream == NULL) {
1918 ret = -1;
1919 goto end;
1920 }
1921
1922 pthread_mutex_lock(&stream->lock);
1923
1924 if (session_streams_have_index(session)) {
1925 /*
1926 * Ensure that both the index and stream data have been
1927 * flushed up to the requested point.
1928 */
1929 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
1930 } else {
1931 stream_seq = stream->prev_data_seq;
1932 }
1933 DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
1934 ", prev_index_seq %" PRIu64
1935 ", and last_seq %" PRIu64, msg.stream_id,
1936 stream->prev_data_seq, stream->prev_index_seq,
1937 msg.last_net_seq_num);
1938
1939 /* Compute the difference in signed 64-bit space to stay correct across sequence number wrap-around. */
1940 if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
1941 /* Data has in fact been written and is NOT pending */
1942 ret = 0;
1943 } else {
1944 /* Data still being streamed thus pending */
1945 ret = 1;
1946 }
1947
1948 stream->data_pending_check_done = true;
1949 pthread_mutex_unlock(&stream->lock);
1950
1951 stream_put(stream);
1952end:
1953
1954 memset(&reply, 0, sizeof(reply));
1955 reply.ret_code = htobe32(ret);
1956 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1957 if (send_ret < (ssize_t) sizeof(reply)) {
1958 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
1959 send_ret);
1960 ret = -1;
1961 }
1962
1963end_no_session:
1964 return ret;
1965}
1966
1967/*
1968 * Wait for the control socket to reach a quiescent state.
1969 *
1970 * Note that for now, when receiving this command from the session
1971 * daemon, this means that every subsequent commands or data received on
1972 * the control socket has been handled. So, this is why we simply return
1973 * OK here.
1974 */
1975static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
1976 struct relay_connection *conn,
1977 const struct lttng_buffer_view *payload)
1978{
1979 int ret;
1980 ssize_t send_ret;
1981 struct relay_stream *stream;
1982 struct lttcomm_relayd_quiescent_control msg;
1983 struct lttcomm_relayd_generic_reply reply;
1984
1985 DBG("Checking quiescent state on control socket");
1986
1987 if (!conn->session || !conn->version_check_done) {
1988 ERR("Trying to check for data before version check");
1989 ret = -1;
1990 goto end_no_session;
1991 }
1992
1993 if (payload->size < sizeof(msg)) {
1994 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
1995 sizeof(msg), payload->size);
1996 ret = -1;
1997 goto end_no_session;
1998 }
1999 memcpy(&msg, payload->data, sizeof(msg));
2000 msg.stream_id = be64toh(msg.stream_id);
2001
2002 stream = stream_get_by_id(msg.stream_id);
2003 if (!stream) {
2004 goto reply;
2005 }
2006 pthread_mutex_lock(&stream->lock);
2007 stream->data_pending_check_done = true;
2008 pthread_mutex_unlock(&stream->lock);
2009
2010 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
2011 stream_put(stream);
2012reply:
2013 memset(&reply, 0, sizeof(reply));
2014 reply.ret_code = htobe32(LTTNG_OK);
2015 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2016 if (send_ret < (ssize_t) sizeof(reply)) {
2017 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
2018 send_ret);
2019 ret = -1;
2020 } else {
2021 ret = 0;
2022 }
2023
2024end_no_session:
2025 return ret;
2026}
2027
2028/*
2029 * Initialize a data pending command. This means that a consumer is about
2030 * to ask for data pending for each stream it holds. Simply iterate over
2031 * all streams of a session and set the data_pending_check_done flag.
2032 *
2033 * This command returns to the client a LTTNG_OK code.
2034 */
2035static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2036 struct relay_connection *conn,
2037 const struct lttng_buffer_view *payload)
2038{
2039 int ret;
2040 ssize_t send_ret;
2041 struct lttng_ht_iter iter;
2042 struct lttcomm_relayd_begin_data_pending msg;
2043 struct lttcomm_relayd_generic_reply reply;
2044 struct relay_stream *stream;
2045
2046 assert(recv_hdr);
2047 assert(conn);
2048
2049 DBG("Init streams for data pending");
2050
2051 if (!conn->session || !conn->version_check_done) {
2052 ERR("Trying to check for data before version check");
2053 ret = -1;
2054 goto end_no_session;
2055 }
2056
2057 if (payload->size < sizeof(msg)) {
2058 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
2059 sizeof(msg), payload->size);
2060 ret = -1;
2061 goto end_no_session;
2062 }
2063 memcpy(&msg, payload->data, sizeof(msg));
2064 msg.session_id = be64toh(msg.session_id);
2065
2066 /*
2067 * Iterate over all streams to set the begin data pending flag.
2068 * For now, the streams are indexed by stream handle so we have
2069 * to iterate over all streams to find the ones associated with
2070 * the right session_id.
2071 */
2072 rcu_read_lock();
2073 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2074 node.node) {
2075 if (!stream_get(stream)) {
2076 continue;
2077 }
2078 if (stream->trace->session->id == msg.session_id) {
2079 pthread_mutex_lock(&stream->lock);
2080 stream->data_pending_check_done = false;
2081 pthread_mutex_unlock(&stream->lock);
2082 DBG("Set begin data pending flag to stream %" PRIu64,
2083 stream->stream_handle);
2084 }
2085 stream_put(stream);
2086 }
2087 rcu_read_unlock();
2088
2089 memset(&reply, 0, sizeof(reply));
2090 /* All good, send back reply. */
2091 reply.ret_code = htobe32(LTTNG_OK);
2092
2093 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2094 if (send_ret < (ssize_t) sizeof(reply)) {
2095 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
2096 send_ret);
2097 ret = -1;
2098 } else {
2099 ret = 0;
2100 }
2101
2102end_no_session:
2103 return ret;
2104}
2105
2106/*
2107 * End data pending command. This will check, for a given session id, if
2108 * each stream associated with it has its data_pending_check_done flag
2109 * set. If not, this means that the client lost track of the stream but
2110 * the data is still being streamed on our side. In this case, we inform
2111 * the client that data is in flight.
2112 *
2113 * The reply's ret_code tells the client whether data is still in flight.
2114 */
2115static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2116 struct relay_connection *conn,
2117 const struct lttng_buffer_view *payload)
2118{
2119 int ret;
2120 ssize_t send_ret;
2121 struct lttng_ht_iter iter;
2122 struct lttcomm_relayd_end_data_pending msg;
2123 struct lttcomm_relayd_generic_reply reply;
2124 struct relay_stream *stream;
2125 uint32_t is_data_inflight = 0;
2126
2127 DBG("End data pending command");
2128
2129 if (!conn->session || !conn->version_check_done) {
2130 ERR("Trying to check for data before version check");
2131 ret = -1;
2132 goto end_no_session;
2133 }
2134
2135 if (payload->size < sizeof(msg)) {
2136 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
2137 sizeof(msg), payload->size);
2138 ret = -1;
2139 goto end_no_session;
2140 }
2141 memcpy(&msg, payload->data, sizeof(msg));
2142 msg.session_id = be64toh(msg.session_id);
2143
2144 /*
2145 * Iterate over all streams to see if the begin data pending
2146 * flag is set.
2147 */
2148 rcu_read_lock();
2149 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2150 node.node) {
2151 if (!stream_get(stream)) {
2152 continue;
2153 }
2154 if (stream->trace->session->id != msg.session_id) {
2155 stream_put(stream);
2156 continue;
2157 }
2158 pthread_mutex_lock(&stream->lock);
2159 if (!stream->data_pending_check_done) {
2160 uint64_t stream_seq;
2161
2162 if (session_streams_have_index(conn->session)) {
2163 /*
2164 * Ensure that both the index and stream data have been
2165 * flushed up to the requested point.
2166 */
2167 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
2168 } else {
2169 stream_seq = stream->prev_data_seq;
2170 }
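/*
 * Worked example (illustrative numbers only): if prev_data_seq is 10
 * but prev_index_seq is only 8, min() yields 8, so sequence numbers 9
 * and 10 are still considered in flight even though their data has
 * been written; the stream is only "caught up" once both the data and
 * its index have reached the requested point.
 */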
2171 if (!stream->closed || ((int64_t) (stream_seq - stream->last_net_seq_num)) < 0) {
2172 is_data_inflight = 1;
2173 DBG("Data is still in flight for stream %" PRIu64,
2174 stream->stream_handle);
2175 pthread_mutex_unlock(&stream->lock);
2176 stream_put(stream);
2177 break;
2178 }
2179 }
2180 pthread_mutex_unlock(&stream->lock);
2181 stream_put(stream);
2182 }
2183 rcu_read_unlock();
2184
2185 memset(&reply, 0, sizeof(reply));
2186 /* All good, send back reply. */
2187 reply.ret_code = htobe32(is_data_inflight);
2188
2189 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2190 if (send_ret < (ssize_t) sizeof(reply)) {
2191 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
2192 send_ret);
2193 ret = -1;
2194 } else {
2195 ret = 0;
2196 }
2197
2198end_no_session:
2199 return ret;
2200}
2201
2202/*
2203 * Receive an index for a specific stream.
2204 *
2205 * Return 0 on success else a negative value.
2206 */
2207static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2208 struct relay_connection *conn,
2209 const struct lttng_buffer_view *payload)
2210{
2211 int ret;
2212 ssize_t send_ret;
2213 struct relay_session *session = conn->session;
2214 struct lttcomm_relayd_index index_info;
2215 struct lttcomm_relayd_generic_reply reply;
2216 struct relay_stream *stream;
2217 size_t msg_len;
2218
2219 assert(conn);
2220
2221 DBG("Relay receiving index");
2222
2223 if (!session || !conn->version_check_done) {
2224 ERR("Trying to close a stream before version check");
2225 ret = -1;
2226 goto end_no_session;
2227 }
2228
2229 msg_len = lttcomm_relayd_index_len(
2230 lttng_to_index_major(conn->major, conn->minor),
2231 lttng_to_index_minor(conn->major, conn->minor));
2232 if (payload->size < msg_len) {
2233 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2234 msg_len, payload->size);
2235 ret = -1;
2236 goto end_no_session;
2237 }
2238 memcpy(&index_info, payload->data, msg_len);
2239 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2240 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2241 index_info.packet_size = be64toh(index_info.packet_size);
2242 index_info.content_size = be64toh(index_info.content_size);
2243 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2244 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2245 index_info.events_discarded = be64toh(index_info.events_discarded);
2246 index_info.stream_id = be64toh(index_info.stream_id);
2247
2248 if (conn->minor >= 8) {
2249 index_info.stream_instance_id =
2250 be64toh(index_info.stream_instance_id);
2251 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2252 } else {
2253 index_info.stream_instance_id = -1ULL;
2254 index_info.packet_seq_num = -1ULL;
2255 }
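/*
 * Descriptive note: the stream_instance_id and packet_seq_num fields
 * only exist in the index message starting with the 2.8 protocol, so
 * for older peers they are flagged as unset (-1ULL) instead of being
 * read past the end of the smaller message (msg_len above already
 * reflects the peer's version).
 */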
2256
2257 stream = stream_get_by_id(index_info.relay_stream_id);
2258 if (!stream) {
2259 ERR("stream_get_by_id not found");
2260 ret = -1;
2261 goto end;
2262 }
2263
2264 pthread_mutex_lock(&stream->lock);
2265 ret = stream_add_index(stream, &index_info);
2266 pthread_mutex_unlock(&stream->lock);
2267 if (ret) {
2268 goto end_stream_put;
2269 }
2270
2271end_stream_put:
2272 stream_put(stream);
2273end:
2274 memset(&reply, 0, sizeof(reply));
2275 if (ret < 0) {
2276 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2277 } else {
2278 reply.ret_code = htobe32(LTTNG_OK);
2279 }
2280 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2281 if (send_ret < (ssize_t) sizeof(reply)) {
2282 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2283 ret = -1;
2284 }
2285
2286end_no_session:
2287 return ret;
2288}
2289
2290/*
2291 * Receive the streams_sent message.
2292 *
2293 * Return 0 on success else a negative value.
2294 */
2295static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2296 struct relay_connection *conn,
2297 const struct lttng_buffer_view *payload)
2298{
2299 int ret;
2300 ssize_t send_ret;
2301 struct lttcomm_relayd_generic_reply reply;
2302
2303 assert(conn);
2304
2305 DBG("Relay receiving streams_sent");
2306
2307 if (!conn->session || !conn->version_check_done) {
2308 ERR("Trying to close a stream before version check");
2309 ret = -1;
2310 goto end_no_session;
2311 }
2312
2313 /*
2314 * Publish every pending stream in the connection's recv list; they
2315 * are now ready to be used by the viewer.
2316 */
2317 publish_connection_local_streams(conn);
2318
2319 memset(&reply, 0, sizeof(reply));
2320 reply.ret_code = htobe32(LTTNG_OK);
2321 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2322 if (send_ret < (ssize_t) sizeof(reply)) {
2323 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2324 send_ret);
2325 ret = -1;
2326 } else {
2327 /* Success. */
2328 ret = 0;
2329 }
2330
2331end_no_session:
2332 return ret;
2333}
2334
2335/*
2336 * relay_rotate_session_streams: rotate a session's streams to a new trace
2337 * chunk for the session rotation feature (not the tracefile rotation feature).
2338 */
2339static int relay_rotate_session_streams(
2340 const struct lttcomm_relayd_hdr *recv_hdr,
2341 struct relay_connection *conn,
2342 const struct lttng_buffer_view *payload)
2343{
2344 int ret = 0;
2345 uint32_t i;
2346 ssize_t send_ret;
2347 enum lttng_error_code reply_code = LTTNG_ERR_UNK;
2348 struct relay_session *session = conn->session;
2349 struct lttcomm_relayd_rotate_streams rotate_streams;
2350 struct lttcomm_relayd_generic_reply reply = {};
2351 struct relay_stream *stream = NULL;
2352 const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams);
2353 struct lttng_trace_chunk *next_trace_chunk = NULL;
2354 struct lttng_buffer_view stream_positions;
2355 char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
2356 const char *chunk_id_str = "none";
2357
2358 if (!session || !conn->version_check_done) {
2359 ERR("Trying to rotate a stream before version check");
2360 ret = -1;
2361 goto end_no_reply;
2362 }
2363
2364 if (session->major == 2 && session->minor < 11) {
2365 ERR("Unsupported feature before 2.11");
2366 ret = -1;
2367 goto end_no_reply;
2368 }
2369
2370 if (payload->size < header_len) {
2371 ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
2372 header_len, payload->size);
2373 ret = -1;
2374 goto end_no_reply;
2375 }
2376
2377 memcpy(&rotate_streams, payload->data, header_len);
2378
2379 /* Convert header to host endianness. */
2380 rotate_streams = (typeof(rotate_streams)) {
2381 .stream_count = be32toh(rotate_streams.stream_count),
2382 .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) {
2383 .is_set = !!rotate_streams.new_chunk_id.is_set,
2384 .value = be64toh(rotate_streams.new_chunk_id.value),
2385 }
2386 };
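/*
 * Descriptive note: the compound literal on the right-hand side is
 * fully built from the big-endian values before the assignment stores
 * it, so reading rotate_streams' own fields while re-initializing it
 * is well defined.
 */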
2387
2388 if (rotate_streams.new_chunk_id.is_set) {
2389 /*
2390 * Retrieve the trace chunk the stream must transition to. As
2391 * per the protocol, this chunk should have been created
2392 * before this command is received.
2393 */
2394 next_trace_chunk = sessiond_trace_chunk_registry_get_chunk(
2395 sessiond_trace_chunk_registry,
2396 session->sessiond_uuid, session->id,
2397 rotate_streams.new_chunk_id.value);
2398 if (!next_trace_chunk) {
2399 char uuid_str[LTTNG_UUID_STR_LEN];
2400
2401 lttng_uuid_to_str(session->sessiond_uuid, uuid_str);
2402 ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64
2403 ", trace_chunk_id = %" PRIu64,
2404 uuid_str, session->id,
2405 rotate_streams.new_chunk_id.value);
2406 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2407 ret = -1;
2408 goto end;
2409 }
2410
2411 ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64,
2412 rotate_streams.new_chunk_id.value);
2413 if (ret < 0 || ret >= sizeof(chunk_id_buf)) {
2414 chunk_id_str = "formatting error";
2415 } else {
2416 chunk_id_str = chunk_id_buf;
2417 }
2418 }
2419
2420 DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"",
2421 rotate_streams.stream_count, session->session_name,
2422 chunk_id_str);
2423
2424 stream_positions = lttng_buffer_view_from_view(payload,
2425 sizeof(rotate_streams), -1);
2426 if (!stream_positions.data ||
2427 stream_positions.size <
2428 (rotate_streams.stream_count *
2429 sizeof(struct lttcomm_relayd_stream_rotation_position))) {
2430 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2431 ret = -1;
2432 goto end;
2433 }
2434
2435 for (i = 0; i < rotate_streams.stream_count; i++) {
2436 struct lttcomm_relayd_stream_rotation_position *position_comm =
2437 &((typeof(position_comm)) stream_positions.data)[i];
2438 const struct lttcomm_relayd_stream_rotation_position pos = {
2439 .stream_id = be64toh(position_comm->stream_id),
2440 .rotate_at_seq_num = be64toh(
2441 position_comm->rotate_at_seq_num),
2442 };
2443
2444 stream = stream_get_by_id(pos.stream_id);
2445 if (!stream) {
2446 reply_code = LTTNG_ERR_INVALID;
2447 ret = -1;
2448 goto end;
2449 }
2450
2451 pthread_mutex_lock(&stream->lock);
2452 ret = stream_set_pending_rotation(stream, next_trace_chunk,
2453 pos.rotate_at_seq_num);
2454 pthread_mutex_unlock(&stream->lock);
2455 if (ret) {
2456 reply_code = LTTNG_ERR_FILE_CREATION_ERROR;
2457 goto end;
2458 }
2459
2460 stream_put(stream);
2461 stream = NULL;
2462 }
2463
2464 reply_code = LTTNG_OK;
2465 ret = 0;
2466end:
2467 if (stream) {
2468 stream_put(stream);
2469 }
2470
2471 reply.ret_code = htobe32((uint32_t) reply_code);
2472 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2473 sizeof(struct lttcomm_relayd_generic_reply), 0);
2474 if (send_ret < (ssize_t) sizeof(reply)) {
2475 ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
2476 send_ret);
2477 ret = -1;
2478 }
2479end_no_reply:
2480 lttng_trace_chunk_put(next_trace_chunk);
2481 return ret;
2482}
2483
2486/*
2487 * relay_create_trace_chunk: create a new trace chunk
2488 */
2489static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2490 struct relay_connection *conn,
2491 const struct lttng_buffer_view *payload)
2492{
2493 int ret = 0;
2494 ssize_t send_ret;
2495 struct relay_session *session = conn->session;
2496 struct lttcomm_relayd_create_trace_chunk *msg;
2497 struct lttcomm_relayd_generic_reply reply = {};
2498 struct lttng_buffer_view header_view;
2499 struct lttng_buffer_view chunk_name_view;
2500 struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL;
2501 enum lttng_error_code reply_code = LTTNG_OK;
2502 enum lttng_trace_chunk_status chunk_status;
2503 struct lttng_directory_handle *session_output = NULL;
2504 const char *new_path;
2505
2506 if (!session || !conn->version_check_done) {
2507 ERR("Trying to create a trace chunk before version check");
2508 ret = -1;
2509 goto end_no_reply;
2510 }
2511
2512 if (session->major == 2 && session->minor < 11) {
2513 ERR("Chunk creation command is unsupported before 2.11");
2514 ret = -1;
2515 goto end_no_reply;
2516 }
2517
2518 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2519 if (!header_view.data) {
2520 ERR("Failed to receive payload of chunk creation command");
2521 ret = -1;
2522 goto end_no_reply;
2523 }
2524
2525 /* Convert to host endianness. */
2526 msg = (typeof(msg)) header_view.data;
2527 msg->chunk_id = be64toh(msg->chunk_id);
2528 msg->creation_timestamp = be64toh(msg->creation_timestamp);
2529 msg->override_name_length = be32toh(msg->override_name_length);
2530
2531 if (session->current_trace_chunk &&
2532 !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
2533 chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
2534 DEFAULT_CHUNK_TMP_OLD_DIRECTORY);
2535 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2536 ERR("Failed to rename old chunk");
2537 ret = -1;
2538 reply_code = LTTNG_ERR_UNK;
2539 goto end;
2540 }
2541 }
2542 session->ongoing_rotation = true;
2543 if (!session->current_trace_chunk) {
2544 if (!session->has_rotated) {
2545 new_path = "";
2546 } else {
2547 new_path = NULL;
2548 }
2549 } else {
2550 new_path = DEFAULT_CHUNK_TMP_NEW_DIRECTORY;
2551 }
2552 chunk = lttng_trace_chunk_create(
2553 msg->chunk_id, msg->creation_timestamp, new_path);
2554 if (!chunk) {
2555 ERR("Failed to create trace chunk in trace chunk creation command");
2556 ret = -1;
2557 reply_code = LTTNG_ERR_NOMEM;
2558 goto end;
2559 }
2560
2561 if (msg->override_name_length) {
2562 const char *name;
2563
2564 chunk_name_view = lttng_buffer_view_from_view(payload,
2565 sizeof(*msg),
2566 msg->override_name_length);
2567 name = chunk_name_view.data;
2568 if (!name || name[msg->override_name_length - 1]) {
2569 ERR("Failed to receive payload of chunk creation command");
2570 ret = -1;
2571 reply_code = LTTNG_ERR_INVALID;
2572 goto end;
2573 }
2574
2575 chunk_status = lttng_trace_chunk_override_name(
2576 chunk, chunk_name_view.data);
2577 switch (chunk_status) {
2578 case LTTNG_TRACE_CHUNK_STATUS_OK:
2579 break;
2580 case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT:
2581 ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)");
2582 reply_code = LTTNG_ERR_INVALID;
2583 ret = -1;
2584 goto end;
2585 default:
2586 ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)");
2587 reply_code = LTTNG_ERR_UNK;
2588 ret = -1;
2589 goto end;
2590 }
2591 }
2592
2593 chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk);
2594 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2595 reply_code = LTTNG_ERR_UNK;
2596 ret = -1;
2597 goto end;
2598 }
2599
2600 session_output = session_create_output_directory_handle(
2601 conn->session);
2602 if (!session_output) {
2603 reply_code = LTTNG_ERR_CREATE_DIR_FAIL;
2604 goto end;
2605 }
2606 chunk_status = lttng_trace_chunk_set_as_owner(chunk, session_output);
2607 lttng_directory_handle_put(session_output);
2608 session_output = NULL;
2609 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2610 reply_code = LTTNG_ERR_UNK;
2611 ret = -1;
2612 goto end;
2613 }
2614
2615 published_chunk = sessiond_trace_chunk_registry_publish_chunk(
2616 sessiond_trace_chunk_registry,
2617 conn->session->sessiond_uuid,
2618 conn->session->id,
2619 chunk);
2620 if (!published_chunk) {
2621 char uuid_str[LTTNG_UUID_STR_LEN];
2622
2623 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2624 ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2625 uuid_str,
2626 conn->session->id,
2627 msg->chunk_id);
2628 ret = -1;
2629 reply_code = LTTNG_ERR_NOMEM;
2630 goto end;
2631 }
2632
2633 pthread_mutex_lock(&conn->session->lock);
2634 if (conn->session->pending_closure_trace_chunk) {
2635 /*
2636 * Invalid; this means a second create_trace_chunk command was
2637 * received before a close_trace_chunk.
2638 */
2639 ERR("Invalid trace chunk close command received; a trace chunk is already waiting for a trace chunk close command");
2640 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2641 ret = -1;
2642 goto end_unlock_session;
2643 }
2644 conn->session->pending_closure_trace_chunk =
2645 conn->session->current_trace_chunk;
2646 conn->session->current_trace_chunk = published_chunk;
2647 published_chunk = NULL;
2648 if (!conn->session->pending_closure_trace_chunk) {
2649 session->ongoing_rotation = false;
2650 }
2651end_unlock_session:
2652 pthread_mutex_unlock(&conn->session->lock);
2653end:
2654 reply.ret_code = htobe32((uint32_t) reply_code);
2655 send_ret = conn->sock->ops->sendmsg(conn->sock,
2656 &reply,
2657 sizeof(struct lttcomm_relayd_generic_reply),
2658 0);
2659 if (send_ret < (ssize_t) sizeof(reply)) {
2660 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2661 send_ret);
2662 ret = -1;
2663 }
2664end_no_reply:
2665 lttng_trace_chunk_put(chunk);
2666 lttng_trace_chunk_put(published_chunk);
2667 lttng_directory_handle_put(session_output);
2668 return ret;
2669}
2670
2671/*
2672 * relay_close_trace_chunk: close a trace chunk
2673 */
2674static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2675 struct relay_connection *conn,
2676 const struct lttng_buffer_view *payload)
2677{
2678 int ret = 0, buf_ret;
2679 ssize_t send_ret;
2680 struct relay_session *session = conn->session;
2681 struct lttcomm_relayd_close_trace_chunk *msg;
2682 struct lttcomm_relayd_close_trace_chunk_reply reply = {};
2683 struct lttng_buffer_view header_view;
2684 struct lttng_trace_chunk *chunk = NULL;
2685 enum lttng_error_code reply_code = LTTNG_OK;
2686 enum lttng_trace_chunk_status chunk_status;
2687 uint64_t chunk_id;
2688 LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {};
2689 time_t close_timestamp;
2690 char closed_trace_chunk_path[LTTNG_PATH_MAX];
2691 size_t path_length = 0;
2692 const char *chunk_name = NULL;
2693 struct lttng_dynamic_buffer reply_payload;
2694 const char *new_path;
2695
2696 lttng_dynamic_buffer_init(&reply_payload);
2697
2698 if (!session || !conn->version_check_done) {
2699 ERR("Trying to close a trace chunk before version check");
2700 ret = -1;
2701 goto end_no_reply;
2702 }
2703
2704 if (session->major == 2 && session->minor < 11) {
2705 ERR("Chunk close command is unsupported before 2.11");
2706 ret = -1;
2707 goto end_no_reply;
2708 }
2709
2710 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2711 if (!header_view.data) {
2712 ERR("Failed to receive payload of chunk close command");
2713 ret = -1;
2714 goto end_no_reply;
2715 }
2716
2717 /* Convert to host endianness. */
2718 msg = (typeof(msg)) header_view.data;
2719 chunk_id = be64toh(msg->chunk_id);
2720 close_timestamp = (time_t) be64toh(msg->close_timestamp);
2721 close_command = (typeof(close_command)){
2722 .value = be32toh(msg->close_command.value),
2723 .is_set = msg->close_command.is_set,
2724 };
2725
2726 chunk = sessiond_trace_chunk_registry_get_chunk(
2727 sessiond_trace_chunk_registry,
2728 conn->session->sessiond_uuid,
2729 conn->session->id,
2730 chunk_id);
2731 if (!chunk) {
2732 char uuid_str[LTTNG_UUID_STR_LEN];
2733
2734 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2735 ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2736 uuid_str,
2737 conn->session->id,
2738 msg->chunk_id);
2739 ret = -1;
2740 reply_code = LTTNG_ERR_NOMEM;
2741 goto end;
2742 }
2743
2744 pthread_mutex_lock(&session->lock);
2745 if (close_command.is_set &&
2746 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE) {
2747 /*
2748 * Clear command. It is a protocol error to ask for a
2749 * clear on a relay which does not allow it. Querying
2750 * the configuration allows figuring out whether
2751 * clearing is allowed before doing the clear.
2752 */
2753 if (!opt_allow_clear) {
2754 ret = -1;
2755 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2756 goto end_unlock_session;
2757 }
2758 }
2759 if (session->pending_closure_trace_chunk &&
2760 session->pending_closure_trace_chunk != chunk) {
2761 ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure",
2762 session->session_name);
2763 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2764 ret = -1;
2765 goto end_unlock_session;
2766 }
2767
2768 if (session->current_trace_chunk && session->current_trace_chunk != chunk &&
2769 !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
2770 if (close_command.is_set &&
2771 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE &&
2772 !session->has_rotated) {
2773 /* New chunk stays in session output directory. */
2774 new_path = "";
2775 } else {
2776 /* Use chunk name for new chunk. */
2777 new_path = NULL;
2778 }
2779 /* Rename new chunk path. */
2780 chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
2781 new_path);
2782 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2783 ret = -1;
2784 goto end_unlock_session;
2785 }
2786 session->ongoing_rotation = false;
2787 }
2788 if ((!close_command.is_set ||
2789 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION) &&
2790 !lttng_trace_chunk_get_name_overridden(chunk)) {
2791 const char *old_path;
2792
2793 if (!session->has_rotated) {
2794 old_path = "";
2795 } else {
2796 old_path = NULL;
2797 }
2798 /* We need to move back the .tmp_old_chunk to its rightful place. */
2799 chunk_status = lttng_trace_chunk_rename_path(chunk, old_path);
2800 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2801 ret = -1;
2802 goto end_unlock_session;
2803 }
2804 }
2805 chunk_status = lttng_trace_chunk_set_close_timestamp(
2806 chunk, close_timestamp);
2807 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2808 ERR("Failed to set trace chunk close timestamp");
2809 ret = -1;
2810 reply_code = LTTNG_ERR_UNK;
2811 goto end_unlock_session;
2812 }
2813
2814 if (close_command.is_set) {
2815 chunk_status = lttng_trace_chunk_set_close_command(
2816 chunk, close_command.value);
2817 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2818 ret = -1;
2819 reply_code = LTTNG_ERR_INVALID;
2820 goto end_unlock_session;
2821 }
2822 }
2823 chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL);
2824 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2825 ERR("Failed to get chunk name");
2826 ret = -1;
2827 reply_code = LTTNG_ERR_UNK;
2828 goto end_unlock_session;
2829 }
2830 if (!session->has_rotated && !session->snapshot) {
2831 ret = lttng_strncpy(closed_trace_chunk_path,
2832 session->output_path,
2833 sizeof(closed_trace_chunk_path));
2834 if (ret) {
2835 ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes",
2836 strlen(session->output_path),
2837 sizeof(closed_trace_chunk_path));
2838 reply_code = LTTNG_ERR_NOMEM;
2839 ret = -1;
2840 goto end_unlock_session;
2841 }
2842 } else {
2843 if (session->snapshot) {
2844 ret = snprintf(closed_trace_chunk_path,
2845 sizeof(closed_trace_chunk_path),
2846 "%s/%s", session->output_path,
2847 chunk_name);
2848 } else {
2849 ret = snprintf(closed_trace_chunk_path,
2850 sizeof(closed_trace_chunk_path),
2851 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY
2852 "/%s",
2853 session->output_path, chunk_name);
2854 }
2855 if (ret < 0 || ret >= sizeof(closed_trace_chunk_path)) {
2856 ERR("Failed to format closed trace chunk resulting path");
2857 reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM;
2858 ret = -1;
2859 goto end_unlock_session;
2860 }
2861 }
2862 if (close_command.is_set &&
2863 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED) {
2864 session->has_rotated = true;
2865 }
2866 DBG("Reply chunk path on close: %s", closed_trace_chunk_path);
2867 path_length = strlen(closed_trace_chunk_path) + 1;
2868 if (path_length > UINT32_MAX) {
2869 ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol");
2870 ret = -1;
2871 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2872 goto end_unlock_session;
2873 }
2874
2875 if (session->current_trace_chunk == chunk) {
2876 /*
2877 * After a trace chunk close command, no new streams
2878 * referencing the chunk may be created. Hence, in the
2879 * event that no new trace chunk has been created for
2880 * the session, the reference to the current trace chunk
2881 * is released in order to allow it to be reclaimed when
2882 * the last stream releases its reference to it.
2883 */
2884 lttng_trace_chunk_put(session->current_trace_chunk);
2885 session->current_trace_chunk = NULL;
2886 }
2887 lttng_trace_chunk_put(session->pending_closure_trace_chunk);
2888 session->pending_closure_trace_chunk = NULL;
2889end_unlock_session:
2890 pthread_mutex_unlock(&session->lock);
2891
2892end:
2893 reply.generic.ret_code = htobe32((uint32_t) reply_code);
2894 reply.path_length = htobe32((uint32_t) path_length);
2895 buf_ret = lttng_dynamic_buffer_append(
2896 &reply_payload, &reply, sizeof(reply));
2897 if (buf_ret) {
2898 ERR("Failed to append \"close trace chunk\" command reply header to payload buffer");
2899 goto end_no_reply;
2900 }
2901
2902 if (reply_code == LTTNG_OK) {
2903 buf_ret = lttng_dynamic_buffer_append(&reply_payload,
2904 closed_trace_chunk_path, path_length);
2905 if (buf_ret) {
2906 ERR("Failed to append \"close trace chunk\" command reply path to payload buffer");
2907 goto end_no_reply;
2908 }
2909 }
2910
2911 send_ret = conn->sock->ops->sendmsg(conn->sock,
2912 reply_payload.data,
2913 reply_payload.size,
2914 0);
2915 if (send_ret < (ssize_t) reply_payload.size) {
2916 ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)",
2917 reply_payload.size, send_ret);
2918 ret = -1;
2919 goto end_no_reply;
2920 }
2921end_no_reply:
2922 lttng_trace_chunk_put(chunk);
2923 lttng_dynamic_buffer_reset(&reply_payload);
2924 return ret;
2925}
2926
2927/*
2928 * relay_trace_chunk_exists: check if a trace chunk exists
2929 */
2930static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr,
2931 struct relay_connection *conn,
2932 const struct lttng_buffer_view *payload)
2933{
2934 int ret = 0;
2935 ssize_t send_ret;
2936 struct relay_session *session = conn->session;
2937 struct lttcomm_relayd_trace_chunk_exists *msg;
2938 struct lttcomm_relayd_trace_chunk_exists_reply reply = {};
2939 struct lttng_buffer_view header_view;
2940 uint64_t chunk_id;
2941 bool chunk_exists;
2942
2943 if (!session || !conn->version_check_done) {
2944 ERR("Trying to close a trace chunk before version check");
2945 ret = -1;
2946 goto end_no_reply;
2947 }
2948
2949 if (session->major == 2 && session->minor < 11) {
2950 ERR("Chunk close command is unsupported before 2.11");
2951 ret = -1;
2952 goto end_no_reply;
2953 }
2954
2955 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2956 if (!header_view.data) {
2957 ERR("Failed to receive payload of chunk close command");
2958 ret = -1;
2959 goto end_no_reply;
2960 }
2961
2962 /* Convert to host endianness. */
2963 msg = (typeof(msg)) header_view.data;
2964 chunk_id = be64toh(msg->chunk_id);
2965
2966 ret = sessiond_trace_chunk_registry_chunk_exists(
2967 sessiond_trace_chunk_registry,
2968 conn->session->sessiond_uuid,
2969 conn->session->id,
2970 chunk_id, &chunk_exists);
2971 /*
2972 * If ret is not 0, send the reply and report the error to the caller.
2973 * It is a protocol (or internal) error and the session/connection
2974 * should be torn down.
2975 */
2976 reply = (typeof(reply)){
2977 .generic.ret_code = htobe32((uint32_t)
2978 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
2979 .trace_chunk_exists = ret == 0 ? chunk_exists : 0,
2980 };
2981 send_ret = conn->sock->ops->sendmsg(
2982 conn->sock, &reply, sizeof(reply), 0);
2983 if (send_ret < (ssize_t) sizeof(reply)) {
2984 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2985 send_ret);
2986 ret = -1;
2987 }
2988end_no_reply:
2989 return ret;
2990}
2991
2992/*
2993 * relay_get_configuration: query whether feature is available
2994 */
2995static int relay_get_configuration(const struct lttcomm_relayd_hdr *recv_hdr,
2996 struct relay_connection *conn,
2997 const struct lttng_buffer_view *payload)
2998{
2999 int ret = 0;
3000 ssize_t send_ret;
3001 struct lttcomm_relayd_get_configuration *msg;
3002 struct lttcomm_relayd_get_configuration_reply reply = {};
3003 struct lttng_buffer_view header_view;
3004 uint64_t query_flags = 0;
3005 uint64_t result_flags = 0;
3006
3007 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
3008 if (!header_view.data) {
3009 ERR("Failed to receive payload of chunk close command");
3010 ret = -1;
3011 goto end_no_reply;
3012 }
3013
3014 /* Convert to host endianness. */
3015 msg = (typeof(msg)) header_view.data;
3016 query_flags = be64toh(msg->query_flags);
3017
3018 if (query_flags) {
3019 ret = LTTNG_ERR_INVALID_PROTOCOL;
3020 goto reply;
3021 }
3022 if (opt_allow_clear) {
3023 result_flags |= LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED;
3024 }
3025 ret = 0;
3026reply:
3027 reply = (typeof(reply)){
3028 .generic.ret_code = htobe32((uint32_t)
3029 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
3030 .relayd_configuration_flags = htobe64(result_flags),
3031 };
3032 send_ret = conn->sock->ops->sendmsg(
3033 conn->sock, &reply, sizeof(reply), 0);
3034 if (send_ret < (ssize_t) sizeof(reply)) {
3035 ERR("Failed to send \"get configuration\" command reply (ret = %zd)",
3036 send_ret);
3037 ret = -1;
3038 }
3039end_no_reply:
3040 return ret;
3041}
3042
3043#define DBG_CMD(cmd_name, conn) \
3044 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
3045
3046static int relay_process_control_command(struct relay_connection *conn,
3047 const struct lttcomm_relayd_hdr *header,
3048 const struct lttng_buffer_view *payload)
3049{
3050 int ret = 0;
3051
3052 switch (header->cmd) {
3053 case RELAYD_CREATE_SESSION:
3054 DBG_CMD("RELAYD_CREATE_SESSION", conn);
3055 ret = relay_create_session(header, conn, payload);
3056 break;
3057 case RELAYD_ADD_STREAM:
3058 DBG_CMD("RELAYD_ADD_STREAM", conn);
3059 ret = relay_add_stream(header, conn, payload);
3060 break;
3061 case RELAYD_START_DATA:
3062 DBG_CMD("RELAYD_START_DATA", conn);
3063 ret = relay_start(header, conn, payload);
3064 break;
3065 case RELAYD_SEND_METADATA:
3066 DBG_CMD("RELAYD_SEND_METADATA", conn);
3067 ret = relay_recv_metadata(header, conn, payload);
3068 break;
3069 case RELAYD_VERSION:
3070 DBG_CMD("RELAYD_VERSION", conn);
3071 ret = relay_send_version(header, conn, payload);
3072 break;
3073 case RELAYD_CLOSE_STREAM:
3074 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
3075 ret = relay_close_stream(header, conn, payload);
3076 break;
3077 case RELAYD_DATA_PENDING:
3078 DBG_CMD("RELAYD_DATA_PENDING", conn);
3079 ret = relay_data_pending(header, conn, payload);
3080 break;
3081 case RELAYD_QUIESCENT_CONTROL:
3082 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
3083 ret = relay_quiescent_control(header, conn, payload);
3084 break;
3085 case RELAYD_BEGIN_DATA_PENDING:
3086 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
3087 ret = relay_begin_data_pending(header, conn, payload);
3088 break;
3089 case RELAYD_END_DATA_PENDING:
3090 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
3091 ret = relay_end_data_pending(header, conn, payload);
3092 break;
3093 case RELAYD_SEND_INDEX:
3094 DBG_CMD("RELAYD_SEND_INDEX", conn);
3095 ret = relay_recv_index(header, conn, payload);
3096 break;
3097 case RELAYD_STREAMS_SENT:
3098 DBG_CMD("RELAYD_STREAMS_SENT", conn);
3099 ret = relay_streams_sent(header, conn, payload);
3100 break;
3101 case RELAYD_RESET_METADATA:
3102 DBG_CMD("RELAYD_RESET_METADATA", conn);
3103 ret = relay_reset_metadata(header, conn, payload);
3104 break;
3105 case RELAYD_ROTATE_STREAMS:
3106 DBG_CMD("RELAYD_ROTATE_STREAMS", conn);
3107 ret = relay_rotate_session_streams(header, conn, payload);
3108 break;
3109 case RELAYD_CREATE_TRACE_CHUNK:
3110 DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn);
3111 ret = relay_create_trace_chunk(header, conn, payload);
3112 break;
3113 case RELAYD_CLOSE_TRACE_CHUNK:
3114 DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn);
3115 ret = relay_close_trace_chunk(header, conn, payload);
3116 break;
3117 case RELAYD_TRACE_CHUNK_EXISTS:
3118 DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn);
3119 ret = relay_trace_chunk_exists(header, conn, payload);
3120 break;
3121 case RELAYD_GET_CONFIGURATION:
3122 DBG_CMD("RELAYD_GET_CONFIGURATION", conn);
3123 ret = relay_get_configuration(header, conn, payload);
3124 break;
3125 case RELAYD_UPDATE_SYNC_INFO:
3126 default:
3127 ERR("Received unknown command (%u)", header->cmd);
3128 relay_unknown_command(conn);
3129 ret = -1;
3130 goto end;
3131 }
3132
3133end:
3134 return ret;
3135}
3136
3137static enum relay_connection_status relay_process_control_receive_payload(
3138 struct relay_connection *conn)
3139{
3140 int ret = 0;
3141 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3142 struct lttng_dynamic_buffer *reception_buffer =
3143 &conn->protocol.ctrl.reception_buffer;
3144 struct ctrl_connection_state_receive_payload *state =
3145 &conn->protocol.ctrl.state.receive_payload;
3146 struct lttng_buffer_view payload_view;
3147
3148 if (state->left_to_receive == 0) {
3149 /* Short-circuit for payload-less commands. */
3150 goto reception_complete;
3151 }
3152
3153 ret = conn->sock->ops->recvmsg(conn->sock,
3154 reception_buffer->data + state->received,
3155 state->left_to_receive, MSG_DONTWAIT);
3156 if (ret < 0) {
3157 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3158 PERROR("Unable to receive command payload on sock %d",
3159 conn->sock->fd);
3160 status = RELAY_CONNECTION_STATUS_ERROR;
3161 }
3162 goto end;
3163 } else if (ret == 0) {
3164 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3165 status = RELAY_CONNECTION_STATUS_CLOSED;
3166 goto end;
3167 }
3168
3169 assert(ret > 0);
3170 assert(ret <= state->left_to_receive);
3171
3172 state->left_to_receive -= ret;
3173 state->received += ret;
3174
3175 if (state->left_to_receive > 0) {
3176 /*
3177 * Can't transition to the protocol's next state, wait to
3178 * receive the rest of the header.
3179 */
3180 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3181 state->received, state->left_to_receive,
3182 conn->sock->fd);
3183 goto end;
3184 }
3185
3186reception_complete:
3187 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
3188 conn->sock->fd, state->received);
3189 /*
3190 * The payload required to process the command has been received.
3191 * A view to the reception buffer is forwarded to the various
3192 * commands and the state of the control is reset on success.
3193 *
3194 * Commands are responsible for sending their reply to the peer.
3195 */
3196 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
3197 0, -1);
3198 ret = relay_process_control_command(conn,
3199 &state->header, &payload_view);
3200 if (ret < 0) {
3201 status = RELAY_CONNECTION_STATUS_ERROR;
3202 goto end;
3203 }
3204
3205 ret = connection_reset_protocol_state(conn);
3206 if (ret) {
3207 status = RELAY_CONNECTION_STATUS_ERROR;
3208 }
3209end:
3210 return status;
3211}
3212
3213static enum relay_connection_status relay_process_control_receive_header(
3214 struct relay_connection *conn)
3215{
3216 int ret = 0;
3217 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3218 struct lttcomm_relayd_hdr header;
3219 struct lttng_dynamic_buffer *reception_buffer =
3220 &conn->protocol.ctrl.reception_buffer;
3221 struct ctrl_connection_state_receive_header *state =
3222 &conn->protocol.ctrl.state.receive_header;
3223
3224 assert(state->left_to_receive != 0);
3225
3226 ret = conn->sock->ops->recvmsg(conn->sock,
3227 reception_buffer->data + state->received,
3228 state->left_to_receive, MSG_DONTWAIT);
3229 if (ret < 0) {
3230 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3231 PERROR("Unable to receive control command header on sock %d",
3232 conn->sock->fd);
3233 status = RELAY_CONNECTION_STATUS_ERROR;
3234 }
3235 goto end;
3236 } else if (ret == 0) {
3237 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3238 status = RELAY_CONNECTION_STATUS_CLOSED;
3239 goto end;
3240 }
3241
3242 assert(ret > 0);
3243 assert(ret <= state->left_to_receive);
3244
3245 state->left_to_receive -= ret;
3246 state->received += ret;
3247
3248 if (state->left_to_receive > 0) {
3249 /*
3250 * Can't transition to the protocol's next state, wait to
3251 * receive the rest of the header.
3252 */
3253 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3254 state->received, state->left_to_receive,
3255 conn->sock->fd);
3256 goto end;
3257 }
3258
3259 /* Transition to next state: receiving the command's payload. */
3260 conn->protocol.ctrl.state_id =
3261 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
3262 memcpy(&header, reception_buffer->data, sizeof(header));
3263 header.circuit_id = be64toh(header.circuit_id);
3264 header.data_size = be64toh(header.data_size);
3265 header.cmd = be32toh(header.cmd);
3266 header.cmd_version = be32toh(header.cmd_version);
3267 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
3268 &header, sizeof(header));
3269
3270 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
3271 conn->sock->fd, header.cmd, header.cmd_version,
3272 header.data_size);
3273
3274 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
3275 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
3276 header.data_size);
3277 status = RELAY_CONNECTION_STATUS_ERROR;
3278 goto end;
3279 }
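/*
 * Descriptive note: rejecting over-sized payloads here keeps a corrupt
 * or malicious header from driving the lttng_dynamic_buffer_set_size()
 * call below into an arbitrarily large allocation; the connection is
 * simply flagged as being in error instead.
 */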
3280
3281 conn->protocol.ctrl.state.receive_payload.left_to_receive =
3282 header.data_size;
3283 conn->protocol.ctrl.state.receive_payload.received = 0;
3284 ret = lttng_dynamic_buffer_set_size(reception_buffer,
3285 header.data_size);
3286 if (ret) {
3287 status = RELAY_CONNECTION_STATUS_ERROR;
3288 goto end;
3289 }
3290
3291 if (header.data_size == 0) {
3292 /*
3293 * Manually invoke the next state as the poll loop
3294 * will not wake-up to allow us to proceed further.
3295 */
3296 status = relay_process_control_receive_payload(conn);
3297 }
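/*
 * Illustrative note: a command whose header advertises data_size == 0
 * will never produce another POLLIN event for its (non-existent)
 * payload, so the payload handler is invoked by hand here; it
 * short-circuits straight to "reception_complete" and dispatches the
 * command immediately.
 */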
3298end:
3299 return status;
3300}
3301
3302/*
3303 * Process the commands received on the control socket
3304 */
3305static enum relay_connection_status relay_process_control(
3306 struct relay_connection *conn)
3307{
3308 enum relay_connection_status status;
3309
3310 switch (conn->protocol.ctrl.state_id) {
3311 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
3312 status = relay_process_control_receive_header(conn);
3313 break;
3314 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
3315 status = relay_process_control_receive_payload(conn);
3316 break;
3317 default:
3318 ERR("Unknown control connection protocol state encountered.");
3319 abort();
3320 }
3321
3322 return status;
3323}
3324
3325static enum relay_connection_status relay_process_data_receive_header(
3326 struct relay_connection *conn)
3327{
3328 int ret;
3329 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3330 struct data_connection_state_receive_header *state =
3331 &conn->protocol.data.state.receive_header;
3332 struct lttcomm_relayd_data_hdr header;
3333 struct relay_stream *stream;
3334
3335 assert(state->left_to_receive != 0);
3336
3337 ret = conn->sock->ops->recvmsg(conn->sock,
3338 state->header_reception_buffer + state->received,
3339 state->left_to_receive, MSG_DONTWAIT);
3340 if (ret < 0) {
3341 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3342 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
3343 status = RELAY_CONNECTION_STATUS_ERROR;
3344 }
3345 goto end;
3346 } else if (ret == 0) {
3347 /* Orderly shutdown. Not necessary to print an error. */
3348 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3349 status = RELAY_CONNECTION_STATUS_CLOSED;
3350 goto end;
3351 }
3352
3353 assert(ret > 0);
3354 assert(ret <= state->left_to_receive);
3355
3356 state->left_to_receive -= ret;
3357 state->received += ret;
3358
3359 if (state->left_to_receive > 0) {
3360 /*
3361 * Can't transition to the protocol's next state, wait to
3362 * receive the rest of the header.
3363 */
3364 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3365 state->received, state->left_to_receive,
3366 conn->sock->fd);
3367 goto end;
3368 }
3369
3370 /* Transition to next state: receiving the payload. */
3371 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
3372
3373 memcpy(&header, state->header_reception_buffer, sizeof(header));
3374 header.circuit_id = be64toh(header.circuit_id);
3375 header.stream_id = be64toh(header.stream_id);
3376 header.data_size = be32toh(header.data_size);
3377 header.net_seq_num = be64toh(header.net_seq_num);
3378 header.padding_size = be32toh(header.padding_size);
3379 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
3380
3381 conn->protocol.data.state.receive_payload.left_to_receive =
3382 header.data_size;
3383 conn->protocol.data.state.receive_payload.received = 0;
3384 conn->protocol.data.state.receive_payload.rotate_index = false;
3385
3386 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
3387 conn->sock->fd, header.circuit_id,
3388 header.stream_id, header.data_size,
3389 header.net_seq_num, header.padding_size);
3390
3391 stream = stream_get_by_id(header.stream_id);
3392 if (!stream) {
3393 DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64,
3394 header.stream_id);
3395 /* Protocol error. */
3396 status = RELAY_CONNECTION_STATUS_ERROR;
3397 goto end;
3398 }
3399
3400 pthread_mutex_lock(&stream->lock);
3401 /* Prepare stream for the reception of a new packet. */
3402 ret = stream_init_packet(stream, header.data_size,
3403 &conn->protocol.data.state.receive_payload.rotate_index);
3404 pthread_mutex_unlock(&stream->lock);
3405 if (ret) {
3406 ERR("Failed to rotate stream output file");
3407 status = RELAY_CONNECTION_STATUS_ERROR;
3408 goto end_stream_unlock;
3409 }
3410
3411end_stream_unlock:
3412 stream_put(stream);
3413end:
3414 return status;
3415}
3416
3417static enum relay_connection_status relay_process_data_receive_payload(
3418 struct relay_connection *conn)
3419{
3420 int ret;
3421 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3422 struct relay_stream *stream;
3423 struct data_connection_state_receive_payload *state =
3424 &conn->protocol.data.state.receive_payload;
3425 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
3426 char data_buffer[chunk_size];
3427 bool partial_recv = false;
3428 bool new_stream = false, close_requested = false, index_flushed = false;
3429 uint64_t left_to_receive = state->left_to_receive;
3430 struct relay_session *session;
3431
3432 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
3433 state->header.stream_id, state->header.net_seq_num,
3434 state->received, left_to_receive);
3435
3436 stream = stream_get_by_id(state->header.stream_id);
3437 if (!stream) {
3438 /* Protocol error. */
3439 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
3440 state->header.stream_id);
3441 status = RELAY_CONNECTION_STATUS_ERROR;
3442 goto end;
3443 }
3444
3445 pthread_mutex_lock(&stream->lock);
3446 session = stream->trace->session;
3447 if (!conn->session) {
3448 ret = connection_set_session(conn, session);
3449 if (ret) {
3450 status = RELAY_CONNECTION_STATUS_ERROR;
3451 goto end_stream_unlock;
3452 }
3453 }
3454
3455 /*
3456 * The size of the "chunk" received on any iteration is bounded by:
3457 * - the data left to receive,
3458 * - the data immediately available on the socket,
3459 * - the on-stack data buffer
3460 */
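/*
 * For instance (illustrative numbers only): with a 1 MiB packet and an
 * on-stack buffer of RECV_DATA_BUFFER_SIZE bytes, the loop below copies
 * the packet to the stream file in successive min(left_to_receive,
 * chunk_size) slices, and stops early (partial_recv) as soon as a
 * non-blocking recvmsg() returns less than what was asked for.
 */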
3461 while (left_to_receive > 0 && !partial_recv) {
3462 size_t recv_size = min(left_to_receive, chunk_size);
3463 struct lttng_buffer_view packet_chunk;
3464
3465 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
3466 recv_size, MSG_DONTWAIT);
3467 if (ret < 0) {
3468 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3469 PERROR("Socket %d error", conn->sock->fd);
3470 status = RELAY_CONNECTION_STATUS_ERROR;
3471 }
3472 goto end_stream_unlock;
3473 } else if (ret == 0) {
3474 /* The peer performed an orderly shutdown of the data socket. */
3475 DBG3("Data socket of stream id %" PRIu64 " performed an orderly shutdown (received EOF)",
3476 state->header.stream_id);
3477 status = RELAY_CONNECTION_STATUS_CLOSED;
3478 break;
3479 } else if (ret < (int) recv_size) {
3480 /*
3481 * All the data available on the socket has been
3482 * consumed.
3483 */
3484 partial_recv = true;
3485 recv_size = ret;
3486 }
3487
3488 packet_chunk = lttng_buffer_view_init(data_buffer,
3489 0, recv_size);
3490 assert(packet_chunk.data);
3491
3492 ret = stream_write(stream, &packet_chunk, 0);
3493 if (ret) {
3494 ERR("Relay error writing data to file");
3495 status = RELAY_CONNECTION_STATUS_ERROR;
3496 goto end_stream_unlock;
3497 }
3498
3499 left_to_receive -= recv_size;
3500 state->received += recv_size;
3501 state->left_to_receive = left_to_receive;
3502 }
3503
3504 if (state->left_to_receive > 0) {
3505 /*
3506 * Did not receive all the data expected, wait for more data to
3507 * become available on the socket.
3508 */
3509 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
3510 state->header.stream_id, state->received,
3511 state->left_to_receive);
3512 goto end_stream_unlock;
3513 }
3514
3515 ret = stream_write(stream, NULL, state->header.padding_size);
3516 if (ret) {
3517 status = RELAY_CONNECTION_STATUS_ERROR;
3518 goto end_stream_unlock;
3519 }
3520
3521 if (session_streams_have_index(session)) {
3522 ret = stream_update_index(stream, state->header.net_seq_num,
3523 state->rotate_index, &index_flushed,
3524 state->header.data_size + state->header.padding_size);
3525 if (ret < 0) {
3526 ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3527 stream->stream_handle,
3528 state->header.net_seq_num, ret);
3529 status = RELAY_CONNECTION_STATUS_ERROR;
3530 goto end_stream_unlock;
3531 }
3532 }
3533
3534 if (stream->prev_data_seq == -1ULL) {
3535 new_stream = true;
3536 }
3537
3538 ret = stream_complete_packet(stream, state->header.data_size +
3539 state->header.padding_size, state->header.net_seq_num,
3540 index_flushed);
3541 if (ret) {
3542 status = RELAY_CONNECTION_STATUS_ERROR;
3543 goto end_stream_unlock;
3544 }
3545
3546 /*
3547 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
3548 * contents of *state which are aliased (union) to the same location as
3549 * the new state. Don't use it beyond this point.
3550 */
3551 connection_reset_protocol_state(conn);
3552 state = NULL;
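/*
 * Descriptive note (assumption about connection.h, based on the comment
 * above): the data protocol state is a union along the lines of
 *
 *   union {
 *           struct data_connection_state_receive_header receive_header;
 *           struct data_connection_state_receive_payload receive_payload;
 *   } state;
 *
 * so resetting the protocol state re-initializes receive_header over
 * the same storage, which is why the local state pointer is nulled.
 */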
3553
3554end_stream_unlock:
3555 close_requested = stream->close_requested;
3556 pthread_mutex_unlock(&stream->lock);
3557 if (close_requested && left_to_receive == 0) {
3558 try_stream_close(stream);
3559 }
3560
3561 if (new_stream) {
3562 pthread_mutex_lock(&session->lock);
3563 uatomic_set(&session->new_streams, 1);
3564 pthread_mutex_unlock(&session->lock);
3565 }
3566
3567 stream_put(stream);
3568end:
3569 return status;
3570}
3571
3572/*
3573 * relay_process_data: Process the data received on the data socket
3574 */
3575static enum relay_connection_status relay_process_data(
3576 struct relay_connection *conn)
3577{
3578 enum relay_connection_status status;
3579
3580 switch (conn->protocol.data.state_id) {
3581 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
3582 status = relay_process_data_receive_header(conn);
3583 break;
3584 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
3585 status = relay_process_data_receive_payload(conn);
3586 break;
3587 default:
3588 ERR("Unexpected data connection communication state.");
3589 abort();
3590 }
3591
3592 return status;
3593}
3594
3595static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
3596{
3597 int ret;
3598
3599 (void) lttng_poll_del(events, pollfd);
3600
3601 ret = close(pollfd);
3602 if (ret < 0) {
3603 ERR("Closing pollfd %d", pollfd);
3604 }
3605}
3606
3607static void relay_thread_close_connection(struct lttng_poll_event *events,
3608 int pollfd, struct relay_connection *conn)
3609{
3610 const char *type_str;
3611
3612 switch (conn->type) {
3613 case RELAY_DATA:
3614 type_str = "Data";
3615 break;
3616 case RELAY_CONTROL:
3617 type_str = "Control";
3618 break;
3619 case RELAY_VIEWER_COMMAND:
3620 type_str = "Viewer Command";
3621 break;
3622 case RELAY_VIEWER_NOTIFICATION:
3623 type_str = "Viewer Notification";
3624 break;
3625 default:
3626 type_str = "Unknown";
3627 }
3628 cleanup_connection_pollfd(events, pollfd);
3629 connection_put(conn);
3630 DBG("%s connection closed with %d", type_str, pollfd);
3631}
3632
3633/*
3634 * This thread does the actual work
3635 */
3636static void *relay_thread_worker(void *data)
3637{
3638 int ret, err = -1, last_seen_data_fd = -1;
3639 uint32_t nb_fd;
3640 struct lttng_poll_event events;
3641 struct lttng_ht *relay_connections_ht;
3642 struct lttng_ht_iter iter;
3643 struct relay_connection *destroy_conn = NULL;
3644
3645 DBG("[thread] Relay worker started");
3646
3647 rcu_register_thread();
3648
3649 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
3650
3651 if (testpoint(relayd_thread_worker)) {
3652 goto error_testpoint;
3653 }
3654
3655 health_code_update();
3656
3657 /* table of connections indexed on socket */
3658 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3659 if (!relay_connections_ht) {
3660 goto relay_connections_ht_error;
3661 }
3662
3663 ret = create_thread_poll_set(&events, 2);
3664 if (ret < 0) {
3665 goto error_poll_create;
3666 }
3667
3668 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
3669 if (ret < 0) {
3670 goto error;
3671 }
3672
3673restart:
3674 while (1) {
3675 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
3676
3677 health_code_update();
3678
3679 /* Infinite blocking call, waiting for transmission */
3680 DBG3("Relayd worker thread polling...");
3681 health_poll_entry();
3682 ret = lttng_poll_wait(&events, -1);
3683 health_poll_exit();
3684 if (ret < 0) {
3685 /*
3686 * Restart interrupted system call.
3687 */
3688 if (errno == EINTR) {
3689 goto restart;
3690 }
3691 goto error;
3692 }
3693
3694 nb_fd = ret;
3695
3696 /*
3697 * Process control. The control connection is
3698 * prioritized so we don't starve it with high
3699 * throughput tracing data on the data connection.
3700 */
3701 for (i = 0; i < nb_fd; i++) {
3702 /* Fetch once the poll data */
3703 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3704 int pollfd = LTTNG_POLL_GETFD(&events, i);
3705
3706 health_code_update();
3707
3708 /* Thread quit pipe has been closed. Killing thread. */
3709 ret = check_thread_quit_pipe(pollfd, revents);
3710 if (ret) {
3711 err = 0;
3712 goto exit;
3713 }
3714
3715 /* Inspect the relay conn pipe for new connection */
3716 if (pollfd == relay_conn_pipe[0]) {
3717 if (revents & LPOLLIN) {
3718 struct relay_connection *conn;
3719
3720 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3721 if (ret < 0) {
3722 goto error;
3723 }
3724 ret = lttng_poll_add(&events,
3725 conn->sock->fd,
3726 LPOLLIN | LPOLLRDHUP);
3727 if (ret) {
3728 ERR("Failed to add new connection file descriptor to poll set");
3729 goto error;
3730 }
3731 connection_ht_add(relay_connections_ht, conn);
3732 DBG("Connection socket %d added", conn->sock->fd);
3733 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3734 ERR("Relay connection pipe error");
3735 goto error;
3736 } else {
3737 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3738 goto error;
3739 }
3740 } else {
3741 struct relay_connection *ctrl_conn;
3742
3743 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3744 /* If not found, there is a synchronization issue. */
3745 assert(ctrl_conn);
3746
3747 if (ctrl_conn->type == RELAY_DATA) {
3748 if (revents & LPOLLIN) {
3749 /*
3750 * Flag the last seen data fd not deleted. It will be
3751 * used as the last seen fd if any fd gets deleted in
3752 * this first loop.
3753 */
3754 last_notdel_data_fd = pollfd;
3755 }
3756 goto put_ctrl_connection;
3757 }
3758 assert(ctrl_conn->type == RELAY_CONTROL);
3759
3760 if (revents & LPOLLIN) {
3761 enum relay_connection_status status;
3762
3763 status = relay_process_control(ctrl_conn);
3764 if (status != RELAY_CONNECTION_STATUS_OK) {
3765 /*
3766 * On socket error flag the session as aborted to force
3767 * the cleanup of its stream otherwise it can leak
3768 * during the lifetime of the relayd.
3769 *
3770 * This prevents situations in which streams can be
3771 * left open because an index was received, the
3772 * control connection is closed, and the data
3773 * connection is closed (uncleanly) before the packet's
3774 * data is provided.
3775 *
3776 * Since the control connection encountered an error,
3777 * it is okay to be conservative and close the
3778 * session right now as we can't rely on the protocol
3779 * being respected anymore.
3780 */
3781 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3782 session_abort(ctrl_conn->session);
3783 }
3784
3785 /* Clear the connection on error or close. */
3786 relay_thread_close_connection(&events,
3787 pollfd,
3788 ctrl_conn);
3789 }
3790 seen_control = 1;
3791 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3792 relay_thread_close_connection(&events,
3793 pollfd, ctrl_conn);
3794 if (last_seen_data_fd == pollfd) {
3795 last_seen_data_fd = last_notdel_data_fd;
3796 }
3797 } else {
3798 ERR("Unexpected poll events %u for control sock %d",
3799 revents, pollfd);
3800 connection_put(ctrl_conn);
3801 goto error;
3802 }
3803 put_ctrl_connection:
3804 connection_put(ctrl_conn);
3805 }
3806 }
3807
3808 /*
3809		 * The last loop handled a control request; go back to poll to make
3810		 * sure we prioritize the control socket.
3811 */
3812 if (seen_control) {
3813 continue;
3814 }
3815
3816 if (last_seen_data_fd >= 0) {
3817 for (i = 0; i < nb_fd; i++) {
3818 int pollfd = LTTNG_POLL_GETFD(&events, i);
3819
3820 health_code_update();
3821
3822 if (last_seen_data_fd == pollfd) {
3823 idx = i;
3824 break;
3825 }
3826 }
3827 }
3828
3829 /* Process data connection. */
3830 for (i = idx + 1; i < nb_fd; i++) {
3831 /* Fetch the poll data. */
3832 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3833 int pollfd = LTTNG_POLL_GETFD(&events, i);
3834 struct relay_connection *data_conn;
3835
3836 health_code_update();
3837
3838 if (!revents) {
3839 /* No activity for this FD (poll implementation). */
3840 continue;
3841 }
3842
3843 /* Skip the command pipe. It's handled in the first loop. */
3844 if (pollfd == relay_conn_pipe[0]) {
3845 continue;
3846 }
3847
3848 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3849 if (!data_conn) {
3850				/* Skip it. It might have been removed already. */
3851 continue;
3852 }
3853 if (data_conn->type == RELAY_CONTROL) {
3854 goto put_data_connection;
3855 }
3856 assert(data_conn->type == RELAY_DATA);
3857
3858 if (revents & LPOLLIN) {
3859 enum relay_connection_status status;
3860
3861 status = relay_process_data(data_conn);
3862 /* Connection closed or error. */
3863 if (status != RELAY_CONNECTION_STATUS_OK) {
3864 /*
3865 * On socket error flag the session as aborted to force
3866 * the cleanup of its stream otherwise it can leak
3867 * during the lifetime of the relayd.
3868 *
3869 * This prevents situations in which streams can be
3870				 * left open because an index was received, the
3871				 * control connection is closed, and the data
3872				 * connection is closed (uncleanly) before the packet's
3873				 * data was provided.
3874 *
3875 * Since the data connection encountered an error,
3876 * it is okay to be conservative and close the
3877 * session right now as we can't rely on the protocol
3878 * being respected anymore.
3879 */
3880 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3881 session_abort(data_conn->session);
3882 }
3883 relay_thread_close_connection(&events, pollfd,
3884 data_conn);
3885 /*
3886					 * Every 'goto restart' path sets the last seen fd;
3887					 * here we don't really care since we gracefully
3888					 * continue the loop after the connection is deleted.
3889 */
3890 } else {
3891					/* Keep last seen fd. */
3892 last_seen_data_fd = pollfd;
3893 connection_put(data_conn);
3894 goto restart;
3895 }
3896 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3897 relay_thread_close_connection(&events, pollfd,
3898 data_conn);
3899 } else {
3900				ERR("Unexpected poll events %u for data sock %d",
3901 revents, pollfd);
3902 }
3903 put_data_connection:
3904 connection_put(data_conn);
3905 }
3906 last_seen_data_fd = -1;
3907 }
3908
3909 /* Normal exit, no error */
3910 ret = 0;
3911
3912exit:
3913error:
3914	/* Cleanup remaining connection objects. */
3915 rcu_read_lock();
3916 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3917 destroy_conn,
3918 sock_n.node) {
3919 health_code_update();
3920
3921 session_abort(destroy_conn->session);
3922
3923 /*
3924 * No need to grab another ref, because we own
3925 * destroy_conn.
3926 */
3927 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3928 destroy_conn);
3929 }
3930 rcu_read_unlock();
3931
3932 lttng_poll_clean(&events);
3933error_poll_create:
3934 lttng_ht_destroy(relay_connections_ht);
3935relay_connections_ht_error:
3936 /* Close relay conn pipes */
3937 utils_close_pipe(relay_conn_pipe);
3938 if (err) {
3939 DBG("Thread exited with error");
3940 }
3941 DBG("Worker thread cleanup complete");
3942error_testpoint:
3943 if (err) {
3944 health_error();
3945 ERR("Health error occurred in %s", __func__);
3946 }
3947 health_unregister(health_relayd);
3948 rcu_unregister_thread();
3949 lttng_relay_stop_threads();
3950 return NULL;
3951}
3952
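/*
 * Editorial sketch (not part of the original source): the worker loop
 * above services control sockets first and only drains data sockets on
 * a pass where no control request was seen, so that high-throughput
 * trace data cannot starve the control protocol. The simplified model
 * below captures that two-pass idea with plain poll(2) events; the
 * is_control_fd(), handle_control() and handle_data() callbacks are
 * hypothetical stand-ins for the connection hash table lookups and
 * relay_process_control()/relay_process_data() used above.
 */
#if 0	/* illustration only */
#include <poll.h>

static void prioritized_poll_pass(struct pollfd *fds, int nb_fd,
		int (*is_control_fd)(int fd),
		void (*handle_control)(int fd),
		void (*handle_data)(int fd))
{
	int i, seen_control = 0;

	/* First pass: service every readable control socket. */
	for (i = 0; i < nb_fd; i++) {
		if ((fds[i].revents & POLLIN) && is_control_fd(fds[i].fd)) {
			handle_control(fds[i].fd);
			seen_control = 1;
		}
	}

	/*
	 * A control request was handled: return so the caller polls again
	 * immediately; data sockets wait for a "quiet" control pass.
	 */
	if (seen_control) {
		return;
	}

	/* Second pass: no control activity, consume pending data sockets. */
	for (i = 0; i < nb_fd; i++) {
		if ((fds[i].revents & POLLIN) && !is_control_fd(fds[i].fd)) {
			handle_data(fds[i].fd);
		}
	}
}
#endif
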
3953/*
3954 * Create the relay connection pipe used to wake the worker thread.
3955 * Closed in cleanup().
3956 */
3957static int create_relay_conn_pipe(void)
3958{
3959 int ret;
3960
3961 ret = utils_create_pipe_cloexec(relay_conn_pipe);
3962
3963 return ret;
3964}
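
/*
 * Editorial sketch (not part of the original source): the worker thread
 * reads a 'struct relay_connection *' verbatim from relay_conn_pipe[0]
 * in its poll loop above. The counterpart shown here is how a producer
 * thread can wake the worker by writing the raw pointer bytes into the
 * write end of such a pipe. It assumes a plain POSIX write() and that
 * sizeof(conn), being well under PIPE_BUF, makes the write atomic; the
 * actual dispatcher code lives elsewhere in this file.
 */
#if 0	/* illustration only */
static int sketch_wakeup_worker(int wakeup_fd, struct relay_connection *conn)
{
	ssize_t ret;

	/* Hand the pointer (not the object) over to the worker thread. */
	ret = write(wakeup_fd, &conn, sizeof(conn));
	if (ret != (ssize_t) sizeof(conn)) {
		/* Short write or error: the worker was not woken up. */
		return -1;
	}
	return 0;
}
#endif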
3965
3966/*
3967 * main
3968 */
3969int main(int argc, char **argv)
3970{
3971 int ret = 0, retval = 0;
3972 void *status;
3973
3974 /* Parse environment variables */
3975 ret = parse_env_options();
3976 if (ret) {
3977 retval = -1;
3978 goto exit_options;
3979 }
3980
3981 /*
3982 * Parse arguments.
3983 * Command line arguments overwrite environment.
3984 */
3985 progname = argv[0];
3986 if (set_options(argc, argv)) {
3987 retval = -1;
3988 goto exit_options;
3989 }
3990
3991 if (set_signal_handler()) {
3992 retval = -1;
3993 goto exit_options;
3994 }
3995
3996 relayd_config_log();
3997
3998 if (opt_print_version) {
3999 print_version();
4000 retval = 0;
4001 goto exit_options;
4002 }
4003
4004 ret = fclose(stdin);
4005 if (ret) {
4006		PERROR("Failed to close stdin");
		retval = -1;
4007		goto exit_options;
4008 }
4009
4010 DBG("Clear command %s", opt_allow_clear ? "allowed" : "disallowed");
4011
4012 /* Try to create directory if -o, --output is specified. */
4013 if (opt_output_path) {
4014 if (*opt_output_path != '/') {
4015 ERR("Please specify an absolute path for -o, --output PATH");
4016 retval = -1;
4017 goto exit_options;
4018 }
4019
4020 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
4021 -1, -1);
4022 if (ret < 0) {
4023 ERR("Unable to create %s", opt_output_path);
4024 retval = -1;
4025 goto exit_options;
4026 }
4027 }
4028
4029 /* Daemonize */
4030 if (opt_daemon || opt_background) {
4031 int i;
4032
4033 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
4034 !opt_background);
4035 if (ret < 0) {
4036 retval = -1;
4037 goto exit_options;
4038 }
4039
4040 /*
4041 * We are in the child. Make sure all other file
4042 * descriptors are closed, in case we are called with
4043		 * more open file descriptors than the standard ones.
4044 */
4045 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4046 (void) close(i);
4047 }
4048 }
4049
4050 if (opt_working_directory) {
4051 ret = utils_change_working_directory(opt_working_directory);
4052 if (ret) {
4053 /* All errors are already logged. */
4054 goto exit_options;
4055 }
4056 }
4057
4058 sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create();
4059 if (!sessiond_trace_chunk_registry) {
4060 ERR("Failed to initialize session daemon trace chunk registry");
4061 retval = -1;
4062 goto exit_sessiond_trace_chunk_registry;
4063 }
4064
4065 /* Initialize thread health monitoring */
4066 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
4067 if (!health_relayd) {
4068 PERROR("health_app_create error");
4069 retval = -1;
4070 goto exit_health_app_create;
4071 }
4072
4073 /* Create thread quit pipe */
4074 if (init_thread_quit_pipe()) {
4075 retval = -1;
4076 goto exit_init_data;
4077 }
4078
4079	/* Setup the relay connection pipe used to wake the worker thread. */
4080 if (create_relay_conn_pipe()) {
4081 retval = -1;
4082 goto exit_init_data;
4083 }
4084
4085 /* Init relay command queue. */
4086 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
4087
4088 /* Initialize communication library */
4089 lttcomm_init();
4090 lttcomm_inet_init();
4091
4092 /* tables of sessions indexed by session ID */
4093 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4094 if (!sessions_ht) {
4095 retval = -1;
4096 goto exit_init_data;
4097 }
4098
4099 /* tables of streams indexed by stream ID */
4100 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4101 if (!relay_streams_ht) {
4102 retval = -1;
4103 goto exit_init_data;
4104 }
4105
4106	/* table of viewer streams indexed by stream ID */
4107 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4108 if (!viewer_streams_ht) {
4109 retval = -1;
4110 goto exit_init_data;
4111 }
4112
4113 ret = utils_create_pipe(health_quit_pipe);
4114 if (ret) {
4115 retval = -1;
4116 goto exit_health_quit_pipe;
4117 }
4118
4119	/* Create thread to manage the health check socket */
4120 ret = pthread_create(&health_thread, default_pthread_attr(),
4121 thread_manage_health, (void *) NULL);
4122 if (ret) {
4123 errno = ret;
4124 PERROR("pthread_create health");
4125 retval = -1;
4126 goto exit_health_thread;
4127 }
4128
4129 /* Setup the dispatcher thread */
4130 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
4131 relay_thread_dispatcher, (void *) NULL);
4132 if (ret) {
4133 errno = ret;
4134 PERROR("pthread_create dispatcher");
4135 retval = -1;
4136 goto exit_dispatcher_thread;
4137 }
4138
4139 /* Setup the worker thread */
4140 ret = pthread_create(&worker_thread, default_pthread_attr(),
4141 relay_thread_worker, NULL);
4142 if (ret) {
4143 errno = ret;
4144 PERROR("pthread_create worker");
4145 retval = -1;
4146 goto exit_worker_thread;
4147 }
4148
4149 /* Setup the listener thread */
4150 ret = pthread_create(&listener_thread, default_pthread_attr(),
4151 relay_thread_listener, (void *) NULL);
4152 if (ret) {
4153 errno = ret;
4154 PERROR("pthread_create listener");
4155 retval = -1;
4156 goto exit_listener_thread;
4157 }
4158
4159 ret = relayd_live_create(live_uri);
4160 if (ret) {
4161		ERR("Failed to start live viewer threads");
4162 retval = -1;
4163 goto exit_live;
4164 }
4165
4166 /*
4167	 * This is where we start awaiting program completion (e.g. through
4168	 * a signal that asks the threads to tear down).
4169 */
4170
4171 ret = relayd_live_join();
4172 if (ret) {
4173 retval = -1;
4174 }
4175exit_live:
4176
4177 ret = pthread_join(listener_thread, &status);
4178 if (ret) {
4179 errno = ret;
4180 PERROR("pthread_join listener_thread");
4181 retval = -1;
4182 }
4183
4184exit_listener_thread:
4185 ret = pthread_join(worker_thread, &status);
4186 if (ret) {
4187 errno = ret;
4188 PERROR("pthread_join worker_thread");
4189 retval = -1;
4190 }
4191
4192exit_worker_thread:
4193 ret = pthread_join(dispatcher_thread, &status);
4194 if (ret) {
4195 errno = ret;
4196 PERROR("pthread_join dispatcher_thread");
4197 retval = -1;
4198 }
4199exit_dispatcher_thread:
4200
4201 ret = pthread_join(health_thread, &status);
4202 if (ret) {
4203 errno = ret;
4204 PERROR("pthread_join health_thread");
4205 retval = -1;
4206 }
4207exit_health_thread:
4208
4209 utils_close_pipe(health_quit_pipe);
4210exit_health_quit_pipe:
4211
4212exit_init_data:
4213 health_app_destroy(health_relayd);
4214 sessiond_trace_chunk_registry_destroy(sessiond_trace_chunk_registry);
4215exit_health_app_create:
4216exit_sessiond_trace_chunk_registry:
4217exit_options:
4218 /*
4219 * Wait for all pending call_rcu work to complete before tearing
4220 * down data structures. call_rcu worker may be trying to
4221 * perform lookups in those structures.
4222 */
4223 rcu_barrier();
4224 relayd_cleanup();
4225
4226 /* Ensure all prior call_rcu are done. */
4227 rcu_barrier();
4228
4229 if (!retval) {
4230 exit(EXIT_SUCCESS);
4231 } else {
4232 exit(EXIT_FAILURE);
4233 }
4234}
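
/*
 * Editorial sketch (not part of the original source): main() above calls
 * rcu_barrier() before relayd_cleanup() so that every callback queued
 * with call_rcu() has run before the structures the call_rcu worker may
 * still look up are torn down. The hypothetical 'struct example_node'
 * below shows that ordering with liburcu's call_rcu()/rcu_barrier() and
 * caa_container_of().
 */
#if 0	/* illustration only */
struct example_node {
	int value;
	struct rcu_head rcu_head;
};

static void example_free_cb(struct rcu_head *head)
{
	struct example_node *node =
			caa_container_of(head, struct example_node, rcu_head);

	free(node);
}

static void example_reclaim_then_teardown(struct example_node *node)
{
	/* Defer the free until pre-existing RCU readers have finished. */
	call_rcu(&node->rcu_head, example_free_cb);

	/*
	 * Wait for all queued call_rcu callbacks to complete before
	 * destroying any container they might still reference.
	 */
	rcu_barrier();
}
#endif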