src/bin/lttng-relayd/main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2 only,
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21 #define _LGPL_SOURCE
22 #include <getopt.h>
23 #include <grp.h>
24 #include <limits.h>
25 #include <pthread.h>
26 #include <signal.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
38 #include <inttypes.h>
39 #include <urcu/futex.h>
40 #include <urcu/uatomic.h>
41 #include <urcu/rculist.h>
42 #include <unistd.h>
43 #include <fcntl.h>
44 #include <strings.h>
45 #include <ctype.h>
46
47 #include <lttng/lttng.h>
48 #include <common/common.h>
49 #include <common/compat/poll.h>
50 #include <common/compat/socket.h>
51 #include <common/compat/endian.h>
52 #include <common/compat/getenv.h>
53 #include <common/defaults.h>
54 #include <common/daemonize.h>
55 #include <common/futex.h>
56 #include <common/sessiond-comm/sessiond-comm.h>
57 #include <common/sessiond-comm/inet.h>
58 #include <common/sessiond-comm/relayd.h>
59 #include <common/uri.h>
60 #include <common/utils.h>
61 #include <common/align.h>
62 #include <common/config/session-config.h>
63 #include <common/dynamic-buffer.h>
64 #include <common/buffer-view.h>
65 #include <common/string-utils/format.h>
66 #include <common/fd-tracker/fd-tracker.h>
67 #include <common/fd-tracker/utils.h>
68
69 #include "backward-compatibility-group-by.h"
70 #include "cmd.h"
71 #include "connection.h"
72 #include "ctf-trace.h"
73 #include "health-relayd.h"
74 #include "index.h"
75 #include "live.h"
76 #include "lttng-relayd.h"
77 #include "session.h"
78 #include "sessiond-trace-chunks.h"
79 #include "stream.h"
80 #include "tcp_keep_alive.h"
81 #include "testpoint.h"
82 #include "tracefile-array.h"
83 #include "utils.h"
84 #include "version.h"
85 #include "viewer-stream.h"
86
87 static const char *help_msg =
88 #ifdef LTTNG_EMBED_HELP
89 #include <lttng-relayd.8.h>
90 #else
91 NULL
92 #endif
93 ;
94
95 enum relay_connection_status {
96 RELAY_CONNECTION_STATUS_OK,
97 /* An error occurred while processing an event on the connection. */
98 RELAY_CONNECTION_STATUS_ERROR,
99 /* Connection closed/shutdown cleanly. */
100 RELAY_CONNECTION_STATUS_CLOSED,
101 };
102
103 /* command line options */
104 char *opt_output_path, *opt_working_directory;
105 static int opt_daemon, opt_background, opt_print_version, opt_allow_clear = 1;
106 enum relay_group_output_by opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_UNKNOWN;
107
108 /*
109 * We need to wait for listener and live listener threads, as well as
110 * health check thread, before being ready to signal readiness.
111 */
112 #define NR_LTTNG_RELAY_READY 3
113 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
114
115 /* Size of receive buffer. */
116 #define RECV_DATA_BUFFER_SIZE 65536
117
118 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
static pid_t child_ppid;	/* Internal parent PID used with daemonize. */
120
121 static struct lttng_uri *control_uri;
122 static struct lttng_uri *data_uri;
123 static struct lttng_uri *live_uri;
124
125 const char *progname;
126
127 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
128 static int tracing_group_name_override;
129
130 const char * const config_section_name = "relayd";
131
132 /*
133 * Quit pipe for all threads. This permits a single cancellation point
134 * for all threads when receiving an event on the pipe.
135 */
136 int thread_quit_pipe[2] = { -1, -1 };
137
138 /*
139 * This pipe is used to inform the worker thread that a command is queued and
140 * ready to be processed.
141 */
142 static int relay_conn_pipe[2] = { -1, -1 };
143
144 /* Shared between threads */
145 static int dispatch_thread_exit;
146
147 static pthread_t listener_thread;
148 static pthread_t dispatcher_thread;
149 static pthread_t worker_thread;
150 static pthread_t health_thread;
151
152 /*
153 * last_relay_stream_id_lock protects last_relay_stream_id increment
154 * atomicity on 32-bit architectures.
155 */
156 static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER;
157 static uint64_t last_relay_stream_id;
158
159 /*
160 * Relay command queue.
161 *
162 * The relay_thread_listener and relay_thread_dispatcher communicate with this
163 * queue.
164 */
165 static struct relay_conn_queue relay_conn_queue;
166
/* Cap of file descriptors to be in simultaneous use by the relay daemon. */
168 static unsigned int lttng_opt_fd_cap;
169
170 /* Global relay stream hash table. */
171 struct lttng_ht *relay_streams_ht;
172
173 /* Global relay viewer stream hash table. */
174 struct lttng_ht *viewer_streams_ht;
175
176 /* Global relay sessions hash table. */
177 struct lttng_ht *sessions_ht;
178
179 /* Relayd health monitoring */
180 struct health_app *health_relayd;
181
182 struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry;
183
184 /* Global fd tracker. */
185 struct fd_tracker *the_fd_tracker;
186
187 static struct option long_options[] = {
188 { "control-port", 1, 0, 'C', },
189 { "data-port", 1, 0, 'D', },
190 { "live-port", 1, 0, 'L', },
191 { "daemonize", 0, 0, 'd', },
192 { "background", 0, 0, 'b', },
193 { "group", 1, 0, 'g', },
194 { "fd-cap", 1, 0, '\0', },
195 { "help", 0, 0, 'h', },
196 { "output", 1, 0, 'o', },
197 { "verbose", 0, 0, 'v', },
198 { "config", 1, 0, 'f' },
199 { "version", 0, 0, 'V' },
200 { "working-directory", 1, 0, 'w', },
201 { "group-output-by-session", 0, 0, 's', },
202 { "group-output-by-host", 0, 0, 'p', },
203 { "disallow-clear", 0, 0, 'x' },
204 { NULL, 0, 0, 0, },
205 };
206
207 static const char *config_ignore_options[] = { "help", "config", "version" };
208
static void print_version(void)
{
	fprintf(stdout, "%s\n", VERSION);
}
212
213 static void relayd_config_log(void)
214 {
215 DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s",
216 GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION,
217 EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME);
218 if (EXTRA_VERSION_DESCRIPTION[0] != '\0') {
219 DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n");
220 }
221 if (EXTRA_VERSION_PATCHES[0] != '\0') {
222 DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n");
223 }
224 }
225
226 /*
227 * Take an option from the getopt output and set it in the right variable to be
228 * used later.
229 *
230 * Return 0 on success else a negative value.
231 */
232 static int set_option(int opt, const char *arg, const char *optname)
233 {
234 int ret;
235
236 switch (opt) {
237 case 0:
238 if (!strcmp(optname, "fd-cap")) {
239 unsigned long v;
240
241 errno = 0;
242 v = strtoul(arg, NULL, 0);
243 if (errno != 0 || !isdigit(arg[0])) {
244 ERR("Wrong value in --fd-cap parameter: %s",
245 arg);
246 ret = -1;
247 goto end;
248 }
			if (v < DEFAULT_RELAYD_MINIMAL_FD_CAP) {
				ERR("File descriptor cap must be set to at least %d",
						DEFAULT_RELAYD_MINIMAL_FD_CAP);
				ret = -1;
				goto end;
			}
253 if (v >= UINT_MAX) {
254 ERR("File descriptor cap overflow in --fd-cap parameter: %s",
255 arg);
256 ret = -1;
257 goto end;
258 }
259 lttng_opt_fd_cap = (unsigned int) v;
260 DBG3("File descriptor cap set to %u", lttng_opt_fd_cap);
261 } else {
262 fprintf(stderr, "unknown option %s", optname);
263 if (arg) {
264 fprintf(stderr, " with arg %s\n", arg);
265 }
266 }
267 break;
268 case 'C':
269 if (lttng_is_setuid_setgid()) {
270 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
271 "-C, --control-port");
272 } else {
273 ret = uri_parse(arg, &control_uri);
274 if (ret < 0) {
275 ERR("Invalid control URI specified");
276 goto end;
277 }
278 if (control_uri->port == 0) {
279 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
280 }
281 }
282 break;
283 case 'D':
284 if (lttng_is_setuid_setgid()) {
285 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-D, --data-port");
287 } else {
288 ret = uri_parse(arg, &data_uri);
289 if (ret < 0) {
290 ERR("Invalid data URI specified");
291 goto end;
292 }
293 if (data_uri->port == 0) {
294 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
295 }
296 }
297 break;
298 case 'L':
299 if (lttng_is_setuid_setgid()) {
300 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-L, --live-port");
302 } else {
303 ret = uri_parse(arg, &live_uri);
304 if (ret < 0) {
305 ERR("Invalid live URI specified");
306 goto end;
307 }
308 if (live_uri->port == 0) {
309 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
310 }
311 }
312 break;
313 case 'd':
314 opt_daemon = 1;
315 break;
316 case 'b':
317 opt_background = 1;
318 break;
319 case 'g':
320 if (lttng_is_setuid_setgid()) {
321 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
322 "-g, --group");
323 } else {
324 tracing_group_name = strdup(arg);
325 if (tracing_group_name == NULL) {
326 ret = -errno;
327 PERROR("strdup");
328 goto end;
329 }
330 tracing_group_name_override = 1;
331 }
332 break;
333 case 'h':
334 ret = utils_show_help(8, "lttng-relayd", help_msg);
335 if (ret) {
336 ERR("Cannot show --help for `lttng-relayd`");
337 perror("exec");
338 }
339 exit(EXIT_FAILURE);
340 case 'V':
341 opt_print_version = 1;
342 break;
343 case 'o':
344 if (lttng_is_setuid_setgid()) {
345 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
346 "-o, --output");
347 } else {
348 ret = asprintf(&opt_output_path, "%s", arg);
349 if (ret < 0) {
350 ret = -errno;
351 PERROR("asprintf opt_output_path");
352 goto end;
353 }
354 }
355 break;
356 case 'w':
357 if (lttng_is_setuid_setgid()) {
358 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
359 "-w, --working-directory");
360 } else {
361 ret = asprintf(&opt_working_directory, "%s", arg);
362 if (ret < 0) {
363 ret = -errno;
364 PERROR("asprintf opt_working_directory");
365 goto end;
366 }
367 }
368 break;
369
370 case 'v':
371 /* Verbose level can increase using multiple -v */
372 if (arg) {
373 lttng_opt_verbose = config_parse_value(arg);
374 } else {
			/* Only 3 levels of verbosity (-vvv). */
376 if (lttng_opt_verbose < 3) {
377 lttng_opt_verbose += 1;
378 }
379 }
380 break;
381 case 's':
382 if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
383 ERR("Cannot set --group-output-by-session, another --group-output-by argument is present");
384 exit(EXIT_FAILURE);
385 }
386 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_SESSION;
387 break;
388 case 'p':
389 if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
390 ERR("Cannot set --group-output-by-host, another --group-output-by argument is present");
391 exit(EXIT_FAILURE);
392 }
393 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
394 break;
395 case 'x':
396 /* Disallow clear */
397 opt_allow_clear = 0;
398 break;
399 default:
400 /* Unknown option or other error.
401 * Error is printed by getopt, just return */
402 ret = -1;
403 goto end;
404 }
405
406 /* All good. */
407 ret = 0;
408
409 end:
410 return ret;
411 }
412
413 /*
414 * config_entry_handler_cb used to handle options read from a config file.
415 * See config_entry_handler_cb comment in common/config/session-config.h for the
416 * return value conventions.
417 */
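/*
 * Example configuration file snippet handled by this function. The
 * option names mirror the long options declared above; the values
 * shown are purely illustrative (the actual defaults are defined in
 * common/defaults.h):
 *
 *   [relayd]
 *   daemonize=yes
 *   control-port=tcp://0.0.0.0:5342
 *   data-port=tcp://0.0.0.0:5343
 *   output=/var/lib/lttng-relayd
 */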
418 static int config_entry_handler(const struct config_entry *entry, void *unused)
419 {
420 int ret = 0, i;
421
422 if (!entry || !entry->name || !entry->value) {
423 ret = -EINVAL;
424 goto end;
425 }
426
427 /* Check if the option is to be ignored */
428 for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
429 if (!strcmp(entry->name, config_ignore_options[i])) {
430 goto end;
431 }
432 }
433
434 for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) {
435 /* Ignore if entry name is not fully matched. */
436 if (strcmp(entry->name, long_options[i].name)) {
437 continue;
438 }
439
440 /*
441 * If the option takes no argument on the command line,
442 * we have to check if the value is "true". We support
443 * non-zero numeric values, true, on and yes.
444 */
445 if (!long_options[i].has_arg) {
446 ret = config_parse_value(entry->value);
447 if (ret <= 0) {
448 if (ret) {
449 WARN("Invalid configuration value \"%s\" for option %s",
450 entry->value, entry->name);
451 }
452 /* False, skip boolean config option. */
453 goto end;
454 }
455 }
456
457 ret = set_option(long_options[i].val, entry->value, entry->name);
458 goto end;
459 }
460
461 WARN("Unrecognized option \"%s\" in daemon configuration file.",
462 entry->name);
463
464 end:
465 return ret;
466 }
467
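/*
 * Read options from the environment, currently only the working
 * directory override (DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV).
 *
 * Return 0 on success, -1 on allocation failure.
 */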
468 static int parse_env_options(void)
469 {
470 int ret = 0;
471 char *value = NULL;
472
473 value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV);
474 if (value) {
475 opt_working_directory = strdup(value);
476 if (!opt_working_directory) {
477 ERR("Failed to allocate working directory string (\"%s\")",
478 value);
479 ret = -1;
480 }
481 }
482 return ret;
483 }
484
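/*
 * Parse the command line and, if provided, the configuration file,
 * then fall back to built-in defaults for any URI, file descriptor
 * cap or grouping option left unset.
 *
 * Return 0 on success, a negative value on error.
 */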
485 static int set_options(int argc, char **argv)
486 {
487 int c, ret = 0, option_index = 0, retval = 0;
488 int orig_optopt = optopt, orig_optind = optind;
489 char *default_address, *optstring;
490 const char *config_path = NULL;
491
492 optstring = utils_generate_optstring(long_options,
493 sizeof(long_options) / sizeof(struct option));
494 if (!optstring) {
495 retval = -ENOMEM;
496 goto exit;
497 }
498
499 /* Check for the --config option */
500
501 while ((c = getopt_long(argc, argv, optstring, long_options,
502 &option_index)) != -1) {
503 if (c == '?') {
504 retval = -EINVAL;
505 goto exit;
506 } else if (c != 'f') {
507 continue;
508 }
509
510 if (lttng_is_setuid_setgid()) {
511 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
512 "-f, --config");
513 } else {
514 config_path = utils_expand_path(optarg);
515 if (!config_path) {
516 ERR("Failed to resolve path: %s", optarg);
517 }
518 }
519 }
520
521 ret = config_get_section_entries(config_path, config_section_name,
522 config_entry_handler, NULL);
523 if (ret) {
524 if (ret > 0) {
525 ERR("Invalid configuration option at line %i", ret);
526 }
527 retval = -1;
528 goto exit;
529 }
530
531 /* Reset getopt's global state */
532 optopt = orig_optopt;
533 optind = orig_optind;
534 while (1) {
535 c = getopt_long(argc, argv, optstring, long_options, &option_index);
536 if (c == -1) {
537 break;
538 }
539
540 ret = set_option(c, optarg, long_options[option_index].name);
541 if (ret < 0) {
542 retval = -1;
543 goto exit;
544 }
545 }
546
547 /* assign default values */
548 if (control_uri == NULL) {
549 ret = asprintf(&default_address,
550 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
551 DEFAULT_NETWORK_CONTROL_PORT);
552 if (ret < 0) {
			PERROR("asprintf default control address");
554 retval = -1;
555 goto exit;
556 }
557
558 ret = uri_parse(default_address, &control_uri);
559 free(default_address);
560 if (ret < 0) {
561 ERR("Invalid control URI specified");
562 retval = -1;
563 goto exit;
564 }
565 }
566 if (data_uri == NULL) {
567 ret = asprintf(&default_address,
568 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
569 DEFAULT_NETWORK_DATA_PORT);
570 if (ret < 0) {
571 PERROR("asprintf default data address");
572 retval = -1;
573 goto exit;
574 }
575
576 ret = uri_parse(default_address, &data_uri);
577 free(default_address);
578 if (ret < 0) {
579 ERR("Invalid data URI specified");
580 retval = -1;
581 goto exit;
582 }
583 }
584 if (live_uri == NULL) {
585 ret = asprintf(&default_address,
586 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
587 DEFAULT_NETWORK_VIEWER_PORT);
588 if (ret < 0) {
589 PERROR("asprintf default viewer control address");
590 retval = -1;
591 goto exit;
592 }
593
594 ret = uri_parse(default_address, &live_uri);
595 free(default_address);
596 if (ret < 0) {
597 ERR("Invalid viewer control URI specified");
598 retval = -1;
599 goto exit;
600 }
601 }
	if (lttng_opt_fd_cap == 0) {
		int ret;
		struct rlimit rlimit;

		ret = getrlimit(RLIMIT_NOFILE, &rlimit);
		if (ret) {
			PERROR("Failed to get file descriptor limit");
			retval = -1;
			goto exit;
		}

		lttng_opt_fd_cap = rlimit.rlim_cur;
	}
614
615 if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_UNKNOWN) {
616 opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST;
617 }
618 if (opt_allow_clear) {
619 /* Check if env variable exists. */
620 const char *value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV);
621 if (value) {
622 ret = config_parse_value(value);
623 if (ret < 0) {
624 ERR("Invalid value for %s specified", DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV);
625 retval = -1;
626 goto exit;
627 }
628 opt_allow_clear = !ret;
629 }
630 }
631
632 exit:
633 free(optstring);
634 return retval;
635 }
636
637 static void print_global_objects(void)
638 {
639 print_viewer_streams();
640 print_relay_streams();
641 print_sessions();
642 }
643
644 /*
645 * Cleanup the daemon
646 */
647 static void relayd_cleanup(void)
648 {
649 print_global_objects();
650
651 DBG("Cleaning up");
652
653 if (viewer_streams_ht)
654 lttng_ht_destroy(viewer_streams_ht);
655 if (relay_streams_ht)
656 lttng_ht_destroy(relay_streams_ht);
657 if (sessions_ht)
658 lttng_ht_destroy(sessions_ht);
659
660 free(opt_output_path);
661 free(opt_working_directory);
662
663 if (health_relayd) {
664 health_app_destroy(health_relayd);
665 }
666 /* Close thread quit pipes */
667 if (health_quit_pipe[0] != -1) {
668 (void) fd_tracker_util_pipe_close(
669 the_fd_tracker, health_quit_pipe);
670 }
671 if (thread_quit_pipe[0] != -1) {
672 (void) fd_tracker_util_pipe_close(
673 the_fd_tracker, thread_quit_pipe);
674 }
675 if (sessiond_trace_chunk_registry) {
676 sessiond_trace_chunk_registry_destroy(
677 sessiond_trace_chunk_registry);
678 }
679 if (the_fd_tracker) {
680 fd_tracker_destroy(the_fd_tracker);
681 }
682
683 uri_free(control_uri);
684 uri_free(data_uri);
685 /* Live URI is freed in the live thread. */
686
687 if (tracing_group_name_override) {
688 free((void *) tracing_group_name);
689 }
690 fd_tracker_log(the_fd_tracker);
691 }
692
693 /*
694 * Write to writable pipe used to notify a thread.
695 */
696 static int notify_thread_pipe(int wpipe)
697 {
698 ssize_t ret;
699
700 ret = lttng_write(wpipe, "!", 1);
701 if (ret < 1) {
702 PERROR("write poll pipe");
703 goto end;
704 }
705 ret = 0;
706 end:
707 return ret;
708 }
709
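/*
 * Write to the health quit pipe to notify the health-check thread.
 */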
710 static int notify_health_quit_pipe(int *pipe)
711 {
712 ssize_t ret;
713
714 ret = lttng_write(pipe[1], "4", 1);
715 if (ret < 1) {
716 PERROR("write relay health quit");
717 goto end;
718 }
719 ret = 0;
720 end:
721 return ret;
722 }
723
724 /*
725 * Stop all relayd and relayd-live threads.
726 */
727 int lttng_relay_stop_threads(void)
728 {
729 int retval = 0;
730
731 /* Stopping all threads */
732 DBG("Terminating all threads");
733 if (notify_thread_pipe(thread_quit_pipe[1])) {
734 ERR("write error on thread quit pipe");
735 retval = -1;
736 }
737
738 if (notify_health_quit_pipe(health_quit_pipe)) {
739 ERR("write error on health quit pipe");
740 }
741
742 /* Dispatch thread */
743 CMM_STORE_SHARED(dispatch_thread_exit, 1);
744 futex_nto1_wake(&relay_conn_queue.futex);
745
746 if (relayd_live_stop()) {
747 ERR("Error stopping live threads");
748 retval = -1;
749 }
750 return retval;
751 }
752
753 /*
754 * Signal handler for the daemon
755 *
756 * Simply stop all worker threads, leaving main() return gracefully after
757 * joining all threads and calling cleanup().
758 */
759 static void sighandler(int sig)
760 {
761 switch (sig) {
762 case SIGINT:
763 DBG("SIGINT caught");
764 if (lttng_relay_stop_threads()) {
765 ERR("Error stopping threads");
766 }
767 break;
768 case SIGTERM:
769 DBG("SIGTERM caught");
770 if (lttng_relay_stop_threads()) {
771 ERR("Error stopping threads");
772 }
773 break;
774 case SIGUSR1:
775 CMM_STORE_SHARED(recv_child_signal, 1);
776 break;
777 default:
778 break;
779 }
780 }
781
782 /*
783 * Setup signal handler for :
784 * SIGINT, SIGTERM, SIGPIPE
785 */
786 static int set_signal_handler(void)
787 {
788 int ret = 0;
789 struct sigaction sa;
790 sigset_t sigset;
791
792 if ((ret = sigemptyset(&sigset)) < 0) {
793 PERROR("sigemptyset");
794 return ret;
795 }
796
797 sa.sa_mask = sigset;
798 sa.sa_flags = 0;
799
800 sa.sa_handler = sighandler;
801 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
802 PERROR("sigaction");
803 return ret;
804 }
805
806 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
807 PERROR("sigaction");
808 return ret;
809 }
810
811 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
812 PERROR("sigaction");
813 return ret;
814 }
815
816 sa.sa_handler = SIG_IGN;
817 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
818 PERROR("sigaction");
819 return ret;
820 }
821
822 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
823
824 return ret;
825 }
826
827 void lttng_relay_notify_ready(void)
828 {
829 /* Notify the parent of the fork() process that we are ready. */
830 if (opt_daemon || opt_background) {
831 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
832 kill(child_ppid, SIGUSR1);
833 }
834 }
835 }
836
837 /*
838 * Init thread quit pipe.
839 *
840 * Return -1 on error or 0 if all pipes are created.
841 */
842 static int init_thread_quit_pipe(void)
843 {
844 return fd_tracker_util_pipe_open_cloexec(
845 the_fd_tracker, "Quit pipe", thread_quit_pipe);
846 }
847
848 /*
849 * Init health quit pipe.
850 *
851 * Return -1 on error or 0 if all pipes are created.
852 */
853 static int init_health_quit_pipe(void)
854 {
855 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
856 "Health quit pipe", health_quit_pipe);
857 }
858
859 /*
860 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
861 */
862 static int create_named_thread_poll_set(struct lttng_poll_event *events,
863 int size, const char *name)
864 {
865 int ret;
866
867 if (events == NULL || size == 0) {
868 ret = -1;
869 goto error;
870 }
871
	ret = fd_tracker_util_poll_create(the_fd_tracker,
			name, events, 1, LTTNG_CLOEXEC);
	if (ret) {
		goto error;
	}

875 /* Add quit pipe */
876 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
877 if (ret < 0) {
878 goto error;
879 }
880
881 return 0;
882
883 error:
884 return ret;
885 }
886
887 /*
888 * Check if the thread quit pipe was triggered.
889 *
890 * Return 1 if it was triggered else 0;
891 */
892 static int check_thread_quit_pipe(int fd, uint32_t events)
893 {
894 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
895 return 1;
896 }
897
898 return 0;
899 }
900
901 /*
902 * Create and init socket from uri.
903 */
904 static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri)
905 {
906 int ret;
907 struct lttcomm_sock *sock = NULL;
908
909 sock = lttcomm_alloc_sock_from_uri(uri);
910 if (sock == NULL) {
911 ERR("Allocating socket");
912 goto error;
913 }
914
915 ret = lttcomm_create_sock(sock);
916 if (ret < 0) {
917 goto error;
918 }
919 DBG("Listening on sock %d", sock->fd);
920
921 ret = sock->ops->bind(sock);
922 if (ret < 0) {
923 PERROR("Failed to bind socket");
924 goto error;
925 }
926
	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}
932
933 return sock;
934
935 error:
936 if (sock) {
937 lttcomm_destroy_sock(sock);
938 }
939 return NULL;
940 }
941
942 /*
943 * This thread manages the listening for new connections on the network
944 */
945 static void *relay_thread_listener(void *data)
946 {
947 int i, ret, pollfd, err = -1;
948 uint32_t revents, nb_fd;
949 struct lttng_poll_event events;
950 struct lttcomm_sock *control_sock, *data_sock;
951
952 DBG("[thread] Relay listener started");
953
954 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
955
956 health_code_update();
957
958 control_sock = relay_socket_create(control_uri);
959 if (!control_sock) {
960 goto error_sock_control;
961 }
962
963 data_sock = relay_socket_create(data_uri);
964 if (!data_sock) {
965 goto error_sock_relay;
966 }
967
968 /*
969 * Pass 3 as size here for the thread quit pipe, control and
970 * data socket.
971 */
972 ret = create_named_thread_poll_set(&events, 3, "Listener thread epoll");
973 if (ret < 0) {
974 goto error_create_poll;
975 }
976
977 /* Add the control socket */
978 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
979 if (ret < 0) {
980 goto error_poll_add;
981 }
982
983 /* Add the data socket */
984 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
985 if (ret < 0) {
986 goto error_poll_add;
987 }
988
989 lttng_relay_notify_ready();
990
991 if (testpoint(relayd_thread_listener)) {
992 goto error_testpoint;
993 }
994
995 while (1) {
996 health_code_update();
997
998 DBG("Listener accepting connections");
999
1000 restart:
1001 health_poll_entry();
1002 ret = lttng_poll_wait(&events, -1);
1003 health_poll_exit();
1004 if (ret < 0) {
1005 /*
1006 * Restart interrupted system call.
1007 */
1008 if (errno == EINTR) {
1009 goto restart;
1010 }
1011 goto error;
1012 }
1013
1014 nb_fd = ret;
1015
1016 DBG("Relay new connection received");
1017 for (i = 0; i < nb_fd; i++) {
1018 health_code_update();
1019
1020 /* Fetch once the poll data */
1021 revents = LTTNG_POLL_GETEV(&events, i);
1022 pollfd = LTTNG_POLL_GETFD(&events, i);
1023
1024 /* Thread quit pipe has been closed. Killing thread. */
1025 ret = check_thread_quit_pipe(pollfd, revents);
1026 if (ret) {
1027 err = 0;
1028 goto exit;
1029 }
1030
1031 if (revents & LPOLLIN) {
1032 /*
1033 * A new connection is requested, therefore a
1034 * sessiond/consumerd connection is allocated in
1035 * this thread, enqueued to a global queue and
1036 * dequeued (and freed) in the worker thread.
1037 */
1038 int val = 1;
1039 struct relay_connection *new_conn;
1040 struct lttcomm_sock *newsock;
1041 enum connection_type type;
1042
				if (pollfd == data_sock->fd) {
					type = RELAY_DATA;
					newsock = data_sock->ops->accept(data_sock);
				} else {
					assert(pollfd == control_sock->fd);
					type = RELAY_CONTROL;
					newsock = control_sock->ops->accept(control_sock);
				}
				if (!newsock) {
					PERROR("accepting sock");
					goto error;
				}
				DBG("Relay %s connection accepted, socket %d",
						type == RELAY_DATA ? "data" : "control",
						newsock->fd);
1059
1060 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
1061 sizeof(val));
1062 if (ret < 0) {
1063 PERROR("setsockopt inet");
1064 lttcomm_destroy_sock(newsock);
1065 goto error;
1066 }
1067
1068 ret = socket_apply_keep_alive_config(newsock->fd);
1069 if (ret < 0) {
1070 ERR("Failed to apply TCP keep-alive configuration on socket (%i)",
1071 newsock->fd);
1072 lttcomm_destroy_sock(newsock);
1073 goto error;
1074 }
1075
1076 new_conn = connection_create(newsock, type);
1077 if (!new_conn) {
1078 lttcomm_destroy_sock(newsock);
1079 goto error;
1080 }
1081
1082 /* Enqueue request for the dispatcher thread. */
1083 cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail,
1084 &new_conn->qnode);
1085
1086 /*
1087 * Wake the dispatch queue futex.
1088 * Implicit memory barrier with the
1089 * exchange in cds_wfcq_enqueue.
1090 */
1091 futex_nto1_wake(&relay_conn_queue.futex);
1092 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1093 ERR("socket poll error");
1094 goto error;
1095 } else {
1096 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
1097 goto error;
1098 }
1099 }
1100 }
1101
1102 exit:
1103 error:
1104 error_poll_add:
1105 error_testpoint:
1106 (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
1107 error_create_poll:
1108 if (data_sock->fd >= 0) {
1109 ret = data_sock->ops->close(data_sock);
1110 if (ret) {
1111 PERROR("close");
1112 }
1113 }
1114 lttcomm_destroy_sock(data_sock);
1115 error_sock_relay:
1116 if (control_sock->fd >= 0) {
1117 ret = control_sock->ops->close(control_sock);
1118 if (ret) {
1119 PERROR("close");
1120 }
1121 }
1122 lttcomm_destroy_sock(control_sock);
1123 error_sock_control:
1124 if (err) {
1125 health_error();
1126 ERR("Health error occurred in %s", __func__);
1127 }
1128 health_unregister(health_relayd);
1129 DBG("Relay listener thread cleanup complete");
1130 lttng_relay_stop_threads();
1131 return NULL;
1132 }
1133
1134 /*
1135 * This thread manages the dispatching of the requests to worker threads
1136 */
1137 static void *relay_thread_dispatcher(void *data)
1138 {
1139 int err = -1;
1140 ssize_t ret;
1141 struct cds_wfcq_node *node;
1142 struct relay_connection *new_conn = NULL;
1143
1144 DBG("[thread] Relay dispatcher started");
1145
1146 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
1147
1148 if (testpoint(relayd_thread_dispatcher)) {
1149 goto error_testpoint;
1150 }
1151
1152 health_code_update();
1153
1154 for (;;) {
1155 health_code_update();
1156
1157 /* Atomically prepare the queue futex */
1158 futex_nto1_prepare(&relay_conn_queue.futex);
1159
1160 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1161 break;
1162 }
1163
1164 do {
1165 health_code_update();
1166
1167 /* Dequeue commands */
1168 node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head,
1169 &relay_conn_queue.tail);
1170 if (node == NULL) {
1171 DBG("Woken up but nothing in the relay command queue");
1172 /* Continue thread execution */
1173 break;
1174 }
1175 new_conn = caa_container_of(node, struct relay_connection, qnode);
1176
1177 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
1178
1179 /*
1180 * Inform worker thread of the new request. This
1181 * call is blocking so we can be assured that
1182 * the data will be read at some point in time
1183 * or wait to the end of the world :)
1184 */
1185 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
1186 if (ret < 0) {
1187 PERROR("write connection pipe");
1188 connection_put(new_conn);
1189 goto error;
1190 }
1191 } while (node != NULL);
1192
1193 /* Futex wait on queue. Blocking call on futex() */
1194 health_poll_entry();
1195 futex_nto1_wait(&relay_conn_queue.futex);
1196 health_poll_exit();
1197 }
1198
1199 /* Normal exit, no error */
1200 err = 0;
1201
1202 error:
1203 error_testpoint:
1204 if (err) {
1205 health_error();
1206 ERR("Health error occurred in %s", __func__);
1207 }
1208 health_unregister(health_relayd);
1209 DBG("Dispatch thread dying");
1210 lttng_relay_stop_threads();
1211 return NULL;
1212 }
1213
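/*
 * Indexes are only produced by peers using protocol 2.4 or newer and
 * are never produced for snapshot sessions.
 */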
1214 static bool session_streams_have_index(const struct relay_session *session)
1215 {
1216 return session->minor >= 4 && !session->snapshot;
1217 }
1218
1219 /*
1220 * Handle the RELAYD_CREATE_SESSION command.
1221 *
1222 * On success, send back the session id or else return a negative value.
1223 */
1224 static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr,
1225 struct relay_connection *conn,
1226 const struct lttng_buffer_view *payload)
1227 {
1228 int ret = 0;
1229 ssize_t send_ret;
1230 struct relay_session *session = NULL;
1231 struct lttcomm_relayd_create_session_reply_2_11 reply = {};
1232 char session_name[LTTNG_NAME_MAX] = {};
1233 char hostname[LTTNG_HOST_NAME_MAX] = {};
1234 uint32_t live_timer = 0;
1235 bool snapshot = false;
1236 bool session_name_contains_creation_timestamp = false;
1237 /* Left nil for peers < 2.11. */
1238 char base_path[LTTNG_PATH_MAX] = {};
1239 lttng_uuid sessiond_uuid = {};
1240 LTTNG_OPTIONAL(uint64_t) id_sessiond = {};
1241 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1242 LTTNG_OPTIONAL(time_t) creation_time = {};
1243 struct lttng_dynamic_buffer reply_payload;
1244
1245 lttng_dynamic_buffer_init(&reply_payload);
1246
1247 if (conn->minor < 4) {
1248 /* From 2.1 to 2.3 */
1249 ret = 0;
1250 } else if (conn->minor >= 4 && conn->minor < 11) {
1251 /* From 2.4 to 2.10 */
1252 ret = cmd_create_session_2_4(payload, session_name,
1253 hostname, &live_timer, &snapshot);
1254 } else {
1255 bool has_current_chunk;
1256 uint64_t current_chunk_id_value;
1257 time_t creation_time_value;
1258 uint64_t id_sessiond_value;
1259
1260 /* From 2.11 to ... */
1261 ret = cmd_create_session_2_11(payload, session_name, hostname,
1262 base_path, &live_timer, &snapshot, &id_sessiond_value,
1263 sessiond_uuid, &has_current_chunk,
1264 &current_chunk_id_value, &creation_time_value,
1265 &session_name_contains_creation_timestamp);
1266 if (lttng_uuid_is_nil(sessiond_uuid)) {
1267 /* The nil UUID is reserved for pre-2.11 clients. */
1268 ERR("Illegal nil UUID announced by peer in create session command");
1269 ret = -1;
1270 goto send_reply;
1271 }
1272 LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value);
1273 LTTNG_OPTIONAL_SET(&creation_time, creation_time_value);
1274 if (has_current_chunk) {
1275 LTTNG_OPTIONAL_SET(&current_chunk_id,
1276 current_chunk_id_value);
1277 }
1278 }
1279
1280 if (ret < 0) {
1281 goto send_reply;
1282 }
1283
1284 session = session_create(session_name, hostname, base_path, live_timer,
1285 snapshot, sessiond_uuid,
1286 id_sessiond.is_set ? &id_sessiond.value : NULL,
1287 current_chunk_id.is_set ? &current_chunk_id.value : NULL,
1288 creation_time.is_set ? &creation_time.value : NULL,
1289 conn->major, conn->minor,
1290 session_name_contains_creation_timestamp);
1291 if (!session) {
1292 ret = -1;
1293 goto send_reply;
1294 }
1295 assert(!conn->session);
1296 conn->session = session;
1297 DBG("Created session %" PRIu64, session->id);
1298
1299 reply.generic.session_id = htobe64(session->id);
1300
1301 send_reply:
1302 if (ret < 0) {
1303 reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL);
1304 } else {
1305 reply.generic.ret_code = htobe32(LTTNG_OK);
1306 }
1307
1308 if (conn->minor < 11) {
1309 /* From 2.1 to 2.10 */
1310 ret = lttng_dynamic_buffer_append(&reply_payload,
1311 &reply.generic, sizeof(reply.generic));
1312 if (ret) {
1313 ERR("Failed to append \"create session\" command reply header to payload buffer");
1314 ret = -1;
1315 goto end;
1316 }
1317 } else {
1318 const uint32_t output_path_length =
1319 session ? strlen(session->output_path) + 1 : 0;
1320
1321 reply.output_path_length = htobe32(output_path_length);
1322 ret = lttng_dynamic_buffer_append(
1323 &reply_payload, &reply, sizeof(reply));
1324 if (ret) {
1325 ERR("Failed to append \"create session\" command reply header to payload buffer");
1326 goto end;
1327 }
1328
1329 if (output_path_length) {
1330 ret = lttng_dynamic_buffer_append(&reply_payload,
1331 session->output_path,
1332 output_path_length);
1333 if (ret) {
1334 ERR("Failed to append \"create session\" command reply path to payload buffer");
1335 goto end;
1336 }
1337 }
1338 }
1339
1340 send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data,
1341 reply_payload.size, 0);
1342 if (send_ret < (ssize_t) reply_payload.size) {
1343 ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)",
1344 reply_payload.size, send_ret);
1345 ret = -1;
1346 }
1347 end:
1348 if (ret < 0 && session) {
1349 session_put(session);
1350 }
1351 lttng_dynamic_buffer_reset(&reply_payload);
1352 return ret;
1353 }
1354
1355 /*
1356 * When we have received all the streams and the metadata for a channel,
1357 * we make them visible to the viewer threads.
1358 */
1359 static void publish_connection_local_streams(struct relay_connection *conn)
1360 {
1361 struct relay_stream *stream;
1362 struct relay_session *session = conn->session;
1363
1364 /*
1365 * We publish all streams belonging to a session atomically wrt
1366 * session lock.
1367 */
1368 pthread_mutex_lock(&session->lock);
1369 rcu_read_lock();
1370 cds_list_for_each_entry_rcu(stream, &session->recv_list,
1371 recv_node) {
1372 stream_publish(stream);
1373 }
1374 rcu_read_unlock();
1375
1376 /*
1377 * Inform the viewer that there are new streams in the session.
1378 */
1379 if (session->viewer_attached) {
1380 uatomic_set(&session->new_streams, 1);
1381 }
1382 pthread_mutex_unlock(&session->lock);
1383 }
1384
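/*
 * Sanitize a channel path received from a peer: refuse paths that walk
 * up the directory hierarchy and strip a leading '/' so that pre-2.11
 * absolute paths are interpreted relative to the relay daemon's output
 * directory.
 */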
1385 static int conform_channel_path(char *channel_path)
1386 {
1387 int ret = 0;
1388
	if (strstr(channel_path, "../")) {
1390 ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"",
1391 channel_path);
1392 ret = -1;
1393 goto end;
1394 }
1395
1396 if (*channel_path == '/') {
1397 const size_t len = strlen(channel_path);
1398
1399 /*
1400 * Channel paths from peers prior to 2.11 are expressed as an
1401 * absolute path that is, in reality, relative to the relay
1402 * daemon's output directory. Remove the leading slash so it
1403 * is correctly interpreted as a relative path later on.
1404 *
1405 * len (and not len - 1) is used to copy the trailing NULL.
1406 */
1407 bcopy(channel_path + 1, channel_path, len);
1408 }
1409 end:
1410 return ret;
1411 }
1412
1413 /*
1414 * relay_add_stream: allocate a new stream for a session
1415 */
1416 static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1417 struct relay_connection *conn,
1418 const struct lttng_buffer_view *payload)
1419 {
1420 int ret;
1421 ssize_t send_ret;
1422 struct relay_session *session = conn->session;
1423 struct relay_stream *stream = NULL;
1424 struct lttcomm_relayd_status_stream reply;
1425 struct ctf_trace *trace = NULL;
1426 uint64_t stream_handle = -1ULL;
1427 char *path_name = NULL, *channel_name = NULL;
1428 uint64_t tracefile_size = 0, tracefile_count = 0;
1429 LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {};
1430
1431 if (!session || !conn->version_check_done) {
1432 ERR("Trying to add a stream before version check");
1433 ret = -1;
1434 goto end_no_session;
1435 }
1436
1437 if (session->minor == 1) {
1438 /* For 2.1 */
1439 ret = cmd_recv_stream_2_1(payload, &path_name,
1440 &channel_name);
1441 } else if (session->minor > 1 && session->minor < 11) {
1442 /* From 2.2 to 2.10 */
1443 ret = cmd_recv_stream_2_2(payload, &path_name,
1444 &channel_name, &tracefile_size, &tracefile_count);
1445 } else {
1446 /* From 2.11 to ... */
1447 ret = cmd_recv_stream_2_11(payload, &path_name,
1448 &channel_name, &tracefile_size, &tracefile_count,
1449 &stream_chunk_id.value);
1450 stream_chunk_id.is_set = true;
1451 }
1452
1453 if (ret < 0) {
1454 goto send_reply;
1455 }
1456
1457 if (conform_channel_path(path_name)) {
1458 goto send_reply;
1459 }
1460
1461 /*
1462 * Backward compatibility for --group-output-by-session.
1463 * Prior to lttng 2.11, the complete path is passed by the stream.
1464 * Starting at 2.11, lttng-relayd uses chunk. When dealing with producer
1465 * >=2.11 the chunk is responsible for the output path. When dealing
1466 * with producer < 2.11 the chunk output_path is the root output path
1467 * and the stream carries the complete path (path_name).
1468 * To support --group-output-by-session with older producer (<2.11), we
1469 * need to craft the path based on the stream path.
1470 */
1471 if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_SESSION) {
1472 if (conn->minor < 4) {
1473 /*
1474 * From 2.1 to 2.3, the session_name is not passed on
1475 * the RELAYD_CREATE_SESSION command. The session name
1476 * is necessary to detect the presence of a base_path
1477 * inside the stream path. Without it we cannot perform
1478 * a valid group-output-by-session transformation.
1479 */
1480 WARN("Unable to perform a --group-by-session transformation for session %" PRIu64
1481 " for stream with path \"%s\" as it is produced by a peer using a protocol older than v2.4",
1482 session->id, path_name);
1483 } else if (conn->minor >= 4 && conn->minor < 11) {
1484 char *group_by_session_path_name;
1485
1486 assert(session->session_name[0] != '\0');
1487
1488 group_by_session_path_name =
1489 backward_compat_group_by_session(
1490 path_name,
1491 session->session_name);
1492 if (!group_by_session_path_name) {
1493 ERR("Failed to apply group by session to stream of session %" PRIu64,
1494 session->id);
1495 goto send_reply;
1496 }
1497
1498 DBG("Transformed session path from \"%s\" to \"%s\" to honor per-session name grouping",
1499 path_name, group_by_session_path_name);
1500
1501 free(path_name);
1502 path_name = group_by_session_path_name;
1503 }
1504 }
1505
1506 trace = ctf_trace_get_by_path_or_create(session, path_name);
1507 if (!trace) {
1508 goto send_reply;
1509 }
1510
1511 /* This stream here has one reference on the trace. */
1512 pthread_mutex_lock(&last_relay_stream_id_lock);
1513 stream_handle = ++last_relay_stream_id;
1514 pthread_mutex_unlock(&last_relay_stream_id_lock);
1515
1516 /* We pass ownership of path_name and channel_name. */
1517 stream = stream_create(trace, stream_handle, path_name,
1518 channel_name, tracefile_size, tracefile_count);
1519 path_name = NULL;
1520 channel_name = NULL;
1521
1522 /*
1523 * Streams are the owners of their trace. Reference to trace is
1524 * kept within stream_create().
1525 */
1526 ctf_trace_put(trace);
1527
1528 send_reply:
1529 memset(&reply, 0, sizeof(reply));
1530 reply.handle = htobe64(stream_handle);
1531 if (!stream) {
1532 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1533 } else {
1534 reply.ret_code = htobe32(LTTNG_OK);
1535 }
1536
1537 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1538 sizeof(struct lttcomm_relayd_status_stream), 0);
1539 if (send_ret < (ssize_t) sizeof(reply)) {
1540 ERR("Failed to send \"add stream\" command reply (ret = %zd)",
1541 send_ret);
1542 ret = -1;
1543 }
1544
1545 end_no_session:
1546 free(path_name);
1547 free(channel_name);
1548 return ret;
1549 }
1550
1551 /*
1552 * relay_close_stream: close a specific stream
1553 */
1554 static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr,
1555 struct relay_connection *conn,
1556 const struct lttng_buffer_view *payload)
1557 {
1558 int ret;
1559 ssize_t send_ret;
1560 struct relay_session *session = conn->session;
1561 struct lttcomm_relayd_close_stream stream_info;
1562 struct lttcomm_relayd_generic_reply reply;
1563 struct relay_stream *stream;
1564
1565 DBG("Close stream received");
1566
1567 if (!session || !conn->version_check_done) {
1568 ERR("Trying to close a stream before version check");
1569 ret = -1;
1570 goto end_no_session;
1571 }
1572
1573 if (payload->size < sizeof(stream_info)) {
1574 ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes",
1575 sizeof(stream_info), payload->size);
1576 ret = -1;
1577 goto end_no_session;
1578 }
1579 memcpy(&stream_info, payload->data, sizeof(stream_info));
1580 stream_info.stream_id = be64toh(stream_info.stream_id);
1581 stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1582
1583 stream = stream_get_by_id(stream_info.stream_id);
1584 if (!stream) {
1585 ret = -1;
1586 goto end;
1587 }
1588
1589 /*
1590 * Set last_net_seq_num before the close flag. Required by data
1591 * pending check.
1592 */
1593 pthread_mutex_lock(&stream->lock);
1594 stream->last_net_seq_num = stream_info.last_net_seq_num;
1595 pthread_mutex_unlock(&stream->lock);
1596
1597 /*
1598 * This is one of the conditions which may trigger a stream close
1599 * with the others being:
1600 * 1) A close command is received for a stream
1601 * 2) The control connection owning the stream is closed
1602 * 3) We have received all of the stream's data _after_ a close
1603 * request.
1604 */
1605 try_stream_close(stream);
1606 if (stream->is_metadata) {
1607 struct relay_viewer_stream *vstream;
1608
1609 vstream = viewer_stream_get_by_id(stream->stream_handle);
1610 if (vstream) {
1611 if (stream->no_new_metadata_notified) {
1612 /*
1613 * Since all the metadata has been sent to the
1614 * viewer and that we have a request to close
1615 * its stream, we can safely teardown the
1616 * corresponding metadata viewer stream.
1617 */
1618 viewer_stream_put(vstream);
1619 }
1620 /* Put local reference. */
1621 viewer_stream_put(vstream);
1622 }
1623 }
1624 stream_put(stream);
1625 ret = 0;
1626
1627 end:
1628 memset(&reply, 0, sizeof(reply));
1629 if (ret < 0) {
1630 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1631 } else {
1632 reply.ret_code = htobe32(LTTNG_OK);
1633 }
1634 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1635 sizeof(struct lttcomm_relayd_generic_reply), 0);
1636 if (send_ret < (ssize_t) sizeof(reply)) {
1637 ERR("Failed to send \"close stream\" command reply (ret = %zd)",
1638 send_ret);
1639 ret = -1;
1640 }
1641
1642 end_no_session:
1643 return ret;
1644 }
1645
1646 /*
1647 * relay_reset_metadata: reset a metadata stream
1648 */
1649 static
1650 int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1651 struct relay_connection *conn,
1652 const struct lttng_buffer_view *payload)
1653 {
1654 int ret;
1655 ssize_t send_ret;
1656 struct relay_session *session = conn->session;
1657 struct lttcomm_relayd_reset_metadata stream_info;
1658 struct lttcomm_relayd_generic_reply reply;
1659 struct relay_stream *stream;
1660
1661 DBG("Reset metadata received");
1662
1663 if (!session || !conn->version_check_done) {
1664 ERR("Trying to reset a metadata stream before version check");
1665 ret = -1;
1666 goto end_no_session;
1667 }
1668
1669 if (payload->size < sizeof(stream_info)) {
1670 ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes",
1671 sizeof(stream_info), payload->size);
1672 ret = -1;
1673 goto end_no_session;
1674 }
1675 memcpy(&stream_info, payload->data, sizeof(stream_info));
1676 stream_info.stream_id = be64toh(stream_info.stream_id);
1677 stream_info.version = be64toh(stream_info.version);
1678
1679 DBG("Update metadata to version %" PRIu64, stream_info.version);
1680
1681 /* Unsupported for live sessions for now. */
1682 if (session->live_timer != 0) {
1683 ret = -1;
1684 goto end;
1685 }
1686
1687 stream = stream_get_by_id(stream_info.stream_id);
1688 if (!stream) {
1689 ret = -1;
1690 goto end;
1691 }
1692 pthread_mutex_lock(&stream->lock);
1693 if (!stream->is_metadata) {
1694 ret = -1;
1695 goto end_unlock;
1696 }
1697
1698 ret = stream_reset_file(stream);
1699 if (ret < 0) {
1700 ERR("Failed to reset metadata stream %" PRIu64
1701 ": stream_path = %s, channel = %s",
1702 stream->stream_handle, stream->path_name,
1703 stream->channel_name);
1704 goto end_unlock;
1705 }
1706 end_unlock:
1707 pthread_mutex_unlock(&stream->lock);
1708 stream_put(stream);
1709
1710 end:
1711 memset(&reply, 0, sizeof(reply));
1712 if (ret < 0) {
1713 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1714 } else {
1715 reply.ret_code = htobe32(LTTNG_OK);
1716 }
1717 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1718 sizeof(struct lttcomm_relayd_generic_reply), 0);
1719 if (send_ret < (ssize_t) sizeof(reply)) {
1720 ERR("Failed to send \"reset metadata\" command reply (ret = %zd)",
1721 send_ret);
1722 ret = -1;
1723 }
1724
1725 end_no_session:
1726 return ret;
1727 }
1728
1729 /*
1730 * relay_unknown_command: send -1 if received unknown command
1731 */
1732 static void relay_unknown_command(struct relay_connection *conn)
1733 {
1734 struct lttcomm_relayd_generic_reply reply;
1735 ssize_t send_ret;
1736
1737 memset(&reply, 0, sizeof(reply));
1738 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1739 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
	if (send_ret < (ssize_t) sizeof(reply)) {
1741 ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret);
1742 }
1743 }
1744
1745 /*
1746 * relay_start: send an acknowledgment to the client to tell if we are
1747 * ready to receive data. We are ready if a session is established.
1748 */
1749 static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr,
1750 struct relay_connection *conn,
1751 const struct lttng_buffer_view *payload)
1752 {
1753 int ret = 0;
1754 ssize_t send_ret;
1755 struct lttcomm_relayd_generic_reply reply;
1756 struct relay_session *session = conn->session;
1757
1758 if (!session) {
1759 DBG("Trying to start the streaming without a session established");
1760 ret = htobe32(LTTNG_ERR_UNK);
1761 }
1762
1763 memset(&reply, 0, sizeof(reply));
1764 reply.ret_code = htobe32(LTTNG_OK);
1765 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1766 sizeof(reply), 0);
1767 if (send_ret < (ssize_t) sizeof(reply)) {
1768 ERR("Failed to send \"relay_start\" command reply (ret = %zd)",
1769 send_ret);
1770 ret = -1;
1771 }
1772
1773 return ret;
1774 }
1775
1776 /*
1777 * relay_recv_metadata: receive the metadata for the session.
1778 */
1779 static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr,
1780 struct relay_connection *conn,
1781 const struct lttng_buffer_view *payload)
1782 {
1783 int ret = 0;
1784 struct relay_session *session = conn->session;
1785 struct lttcomm_relayd_metadata_payload metadata_payload_header;
1786 struct relay_stream *metadata_stream;
1787 uint64_t metadata_payload_size;
1788 struct lttng_buffer_view packet_view;
1789
1790 if (!session) {
1791 ERR("Metadata sent before version check");
1792 ret = -1;
1793 goto end;
1794 }
1795
1796 if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1797 ERR("Incorrect data size");
1798 ret = -1;
1799 goto end;
1800 }
1801 metadata_payload_size = recv_hdr->data_size -
1802 sizeof(struct lttcomm_relayd_metadata_payload);
1803
1804 memcpy(&metadata_payload_header, payload->data,
1805 sizeof(metadata_payload_header));
1806 metadata_payload_header.stream_id = be64toh(
1807 metadata_payload_header.stream_id);
1808 metadata_payload_header.padding_size = be32toh(
1809 metadata_payload_header.padding_size);
1810
1811 metadata_stream = stream_get_by_id(metadata_payload_header.stream_id);
1812 if (!metadata_stream) {
1813 ret = -1;
1814 goto end;
1815 }
1816
1817 packet_view = lttng_buffer_view_from_view(payload,
1818 sizeof(metadata_payload_header), metadata_payload_size);
1819 if (!packet_view.data) {
1820 ERR("Invalid metadata packet length announced by header");
1821 ret = -1;
1822 goto end_put;
1823 }
1824
1825 pthread_mutex_lock(&metadata_stream->lock);
1826 ret = stream_write(metadata_stream, &packet_view,
1827 metadata_payload_header.padding_size);
1828 pthread_mutex_unlock(&metadata_stream->lock);
	if (ret) {
1830 ret = -1;
1831 goto end_put;
1832 }
1833 end_put:
1834 stream_put(metadata_stream);
1835 end:
1836 return ret;
1837 }
1838
1839 /*
1840 * relay_send_version: send relayd version number
1841 */
1842 static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr,
1843 struct relay_connection *conn,
1844 const struct lttng_buffer_view *payload)
1845 {
1846 int ret;
1847 ssize_t send_ret;
1848 struct lttcomm_relayd_version reply, msg;
1849 bool compatible = true;
1850
1851 conn->version_check_done = true;
1852
1853 /* Get version from the other side. */
1854 if (payload->size < sizeof(msg)) {
1855 ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes",
1856 sizeof(msg), payload->size);
1857 ret = -1;
1858 goto end;
1859 }
1860
1861 memcpy(&msg, payload->data, sizeof(msg));
1862 msg.major = be32toh(msg.major);
1863 msg.minor = be32toh(msg.minor);
1864
1865 memset(&reply, 0, sizeof(reply));
1866 reply.major = RELAYD_VERSION_COMM_MAJOR;
1867 reply.minor = RELAYD_VERSION_COMM_MINOR;
1868
1869 /* Major versions must be the same */
1870 if (reply.major != msg.major) {
1871 DBG("Incompatible major versions (%u vs %u), deleting session",
1872 reply.major, msg.major);
1873 compatible = false;
1874 }
1875
1876 conn->major = reply.major;
1877 /* We adapt to the lowest compatible version */
1878 if (reply.minor <= msg.minor) {
1879 conn->minor = reply.minor;
1880 } else {
1881 conn->minor = msg.minor;
1882 }
1883
1884 reply.major = htobe32(reply.major);
1885 reply.minor = htobe32(reply.minor);
1886 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1887 sizeof(reply), 0);
1888 if (send_ret < (ssize_t) sizeof(reply)) {
1889 ERR("Failed to send \"send version\" command reply (ret = %zd)",
1890 send_ret);
1891 ret = -1;
1892 goto end;
1893 } else {
1894 ret = 0;
1895 }
1896
1897 if (!compatible) {
1898 ret = -1;
1899 goto end;
1900 }
1901
1902 DBG("Version check done using protocol %u.%u", conn->major,
1903 conn->minor);
1904
1905 end:
1906 return ret;
1907 }
1908
1909 /*
1910 * Check for data pending for a given stream id from the session daemon.
1911 */
1912 static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
1913 struct relay_connection *conn,
1914 const struct lttng_buffer_view *payload)
1915 {
1916 struct relay_session *session = conn->session;
1917 struct lttcomm_relayd_data_pending msg;
1918 struct lttcomm_relayd_generic_reply reply;
1919 struct relay_stream *stream;
1920 ssize_t send_ret;
1921 int ret;
1922 uint64_t stream_seq;
1923
1924 DBG("Data pending command received");
1925
1926 if (!session || !conn->version_check_done) {
1927 ERR("Trying to check for data before version check");
1928 ret = -1;
1929 goto end_no_session;
1930 }
1931
1932 if (payload->size < sizeof(msg)) {
1933 ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes",
1934 sizeof(msg), payload->size);
1935 ret = -1;
1936 goto end_no_session;
1937 }
1938 memcpy(&msg, payload->data, sizeof(msg));
1939 msg.stream_id = be64toh(msg.stream_id);
1940 msg.last_net_seq_num = be64toh(msg.last_net_seq_num);
1941
1942 stream = stream_get_by_id(msg.stream_id);
1943 if (stream == NULL) {
1944 ret = -1;
1945 goto end;
1946 }
1947
1948 pthread_mutex_lock(&stream->lock);
1949
1950 if (session_streams_have_index(session)) {
1951 /*
1952 * Ensure that both the index and stream data have been
1953 * flushed up to the requested point.
1954 */
1955 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
1956 } else {
1957 stream_seq = stream->prev_data_seq;
1958 }
1959 DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64
1960 ", prev_index_seq %" PRIu64
1961 ", and last_seq %" PRIu64, msg.stream_id,
1962 stream->prev_data_seq, stream->prev_index_seq,
1963 msg.last_net_seq_num);
1964
1965 /* Avoid wrapping issue */
1966 if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) {
1967 /* Data has in fact been written and is NOT pending */
1968 ret = 0;
1969 } else {
1970 /* Data still being streamed thus pending */
1971 ret = 1;
1972 }
1973
1974 stream->data_pending_check_done = true;
1975 pthread_mutex_unlock(&stream->lock);
1976
1977 stream_put(stream);
1978 end:
1979
1980 memset(&reply, 0, sizeof(reply));
1981 reply.ret_code = htobe32(ret);
1982 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1983 if (send_ret < (ssize_t) sizeof(reply)) {
1984 ERR("Failed to send \"data pending\" command reply (ret = %zd)",
1985 send_ret);
1986 ret = -1;
1987 }
1988
1989 end_no_session:
1990 return ret;
1991 }
1992
1993 /*
1994 * Wait for the control socket to reach a quiescent state.
1995 *
1996 * Note that for now, when receiving this command from the session
1997 * daemon, this means that every subsequent commands or data received on
1998 * the control socket has been handled. So, this is why we simply return
1999 * OK here.
2000 */
2001 static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr,
2002 struct relay_connection *conn,
2003 const struct lttng_buffer_view *payload)
2004 {
2005 int ret;
2006 ssize_t send_ret;
2007 struct relay_stream *stream;
2008 struct lttcomm_relayd_quiescent_control msg;
2009 struct lttcomm_relayd_generic_reply reply;
2010
2011 DBG("Checking quiescent state on control socket");
2012
2013 if (!conn->session || !conn->version_check_done) {
2014 ERR("Trying to check for data before version check");
2015 ret = -1;
2016 goto end_no_session;
2017 }
2018
2019 if (payload->size < sizeof(msg)) {
2020 ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes",
2021 sizeof(msg), payload->size);
2022 ret = -1;
2023 goto end_no_session;
2024 }
2025 memcpy(&msg, payload->data, sizeof(msg));
2026 msg.stream_id = be64toh(msg.stream_id);
2027
2028 stream = stream_get_by_id(msg.stream_id);
2029 if (!stream) {
2030 goto reply;
2031 }
2032 pthread_mutex_lock(&stream->lock);
2033 stream->data_pending_check_done = true;
2034 pthread_mutex_unlock(&stream->lock);
2035
2036 DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id);
2037 stream_put(stream);
2038 reply:
2039 memset(&reply, 0, sizeof(reply));
2040 reply.ret_code = htobe32(LTTNG_OK);
2041 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2042 if (send_ret < (ssize_t) sizeof(reply)) {
2043 ERR("Failed to send \"quiescent control\" command reply (ret = %zd)",
2044 send_ret);
2045 ret = -1;
2046 } else {
2047 ret = 0;
2048 }
2049
2050 end_no_session:
2051 return ret;
2052 }
2053
2054 /*
2055 * Initialize a data pending command. This means that a consumer is about
2056 * to ask for data pending for each stream it holds. Simply iterate over
2057 * all streams of a session and clear their data_pending_check_done flag.
2058 *
2059 * This command returns to the client a LTTNG_OK code.
2060 */
2061 static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2062 struct relay_connection *conn,
2063 const struct lttng_buffer_view *payload)
2064 {
2065 int ret;
2066 ssize_t send_ret;
2067 struct lttng_ht_iter iter;
2068 struct lttcomm_relayd_begin_data_pending msg;
2069 struct lttcomm_relayd_generic_reply reply;
2070 struct relay_stream *stream;
2071
2072 assert(recv_hdr);
2073 assert(conn);
2074
2075 DBG("Init streams for data pending");
2076
2077 if (!conn->session || !conn->version_check_done) {
2078 ERR("Trying to check for data before version check");
2079 ret = -1;
2080 goto end_no_session;
2081 }
2082
2083 if (payload->size < sizeof(msg)) {
2084 ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes",
2085 sizeof(msg), payload->size);
2086 ret = -1;
2087 goto end_no_session;
2088 }
2089 memcpy(&msg, payload->data, sizeof(msg));
2090 msg.session_id = be64toh(msg.session_id);
2091
2092 /*
2093 * Iterate over all streams to set the begin data pending flag.
2094 * For now, the streams are indexed by stream handle so we have
2095 * to iterate over all streams to find the ones associated with
2096 * the right session_id.
2097 */
2098 rcu_read_lock();
2099 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2100 node.node) {
2101 if (!stream_get(stream)) {
2102 continue;
2103 }
2104 if (stream->trace->session->id == msg.session_id) {
2105 pthread_mutex_lock(&stream->lock);
2106 stream->data_pending_check_done = false;
2107 pthread_mutex_unlock(&stream->lock);
2108 DBG("Set begin data pending flag to stream %" PRIu64,
2109 stream->stream_handle);
2110 }
2111 stream_put(stream);
2112 }
2113 rcu_read_unlock();
2114
2115 memset(&reply, 0, sizeof(reply));
2116 /* All good, send back reply. */
2117 reply.ret_code = htobe32(LTTNG_OK);
2118
2119 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2120 if (send_ret < (ssize_t) sizeof(reply)) {
2121 ERR("Failed to send \"begin data pending\" command reply (ret = %zd)",
2122 send_ret);
2123 ret = -1;
2124 } else {
2125 ret = 0;
2126 }
2127
2128 end_no_session:
2129 return ret;
2130 }
2131
2132 /*
2133 * End data pending command. This will check, for a given session id, if
2134 * each stream associated with it has its data_pending_check_done flag
2135 * set. If not, this means that the client lost track of the stream but
2136 * the data is still being streamed on our side. In this case, we inform
2137 * the client that data is in flight.
2138 *
2139 * Return to the client if there is data in flight or not with a ret_code.
2140 */
2141 static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
2142 struct relay_connection *conn,
2143 const struct lttng_buffer_view *payload)
2144 {
2145 int ret;
2146 ssize_t send_ret;
2147 struct lttng_ht_iter iter;
2148 struct lttcomm_relayd_end_data_pending msg;
2149 struct lttcomm_relayd_generic_reply reply;
2150 struct relay_stream *stream;
2151 uint32_t is_data_inflight = 0;
2152
2153 DBG("End data pending command");
2154
2155 if (!conn->session || !conn->version_check_done) {
2156 ERR("Trying to check for data before version check");
2157 ret = -1;
2158 goto end_no_session;
2159 }
2160
2161 if (payload->size < sizeof(msg)) {
2162 ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes",
2163 sizeof(msg), payload->size);
2164 ret = -1;
2165 goto end_no_session;
2166 }
2167 memcpy(&msg, payload->data, sizeof(msg));
2168 msg.session_id = be64toh(msg.session_id);
2169
2170 /*
2171 * Iterate over all streams to see if the begin data pending
2172 * flag is set.
2173 */
2174 rcu_read_lock();
2175 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
2176 node.node) {
2177 if (!stream_get(stream)) {
2178 continue;
2179 }
2180 if (stream->trace->session->id != msg.session_id) {
2181 stream_put(stream);
2182 continue;
2183 }
2184 pthread_mutex_lock(&stream->lock);
2185 if (!stream->data_pending_check_done) {
2186 uint64_t stream_seq;
2187
2188 if (session_streams_have_index(conn->session)) {
2189 /*
2190 * Ensure that both the index and stream data have been
2191 * flushed up to the requested point.
2192 */
2193 stream_seq = min(stream->prev_data_seq, stream->prev_index_seq);
2194 } else {
2195 stream_seq = stream->prev_data_seq;
2196 }
2197 if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
2198 is_data_inflight = 1;
2199 DBG("Data is still in flight for stream %" PRIu64,
2200 stream->stream_handle);
2201 pthread_mutex_unlock(&stream->lock);
2202 stream_put(stream);
2203 break;
2204 }
2205 }
2206 pthread_mutex_unlock(&stream->lock);
2207 stream_put(stream);
2208 }
2209 rcu_read_unlock();
2210
2211 memset(&reply, 0, sizeof(reply));
2212 /* All good, send back reply. */
2213 reply.ret_code = htobe32(is_data_inflight);
2214
2215 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2216 if (send_ret < (ssize_t) sizeof(reply)) {
2217 ERR("Failed to send \"end data pending\" command reply (ret = %zd)",
2218 send_ret);
2219 ret = -1;
2220 } else {
2221 ret = 0;
2222 }
2223
2224 end_no_session:
2225 return ret;
2226 }
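
/*
 * Taken together, the three handlers above implement a handshake driven by
 * the peer: BEGIN_DATA_PENDING clears the data_pending_check_done flag of
 * every stream of a session, DATA_PENDING is then issued per stream (and
 * sets the flag back), and END_DATA_PENDING reports whether any stream
 * still has data in flight. A rough sketch of that ordering from the
 * peer's side; send_command() and recv_reply() are hypothetical stand-ins,
 * not functions of this file.
 */
#if 0	/* Illustrative sketch only. */
static int peer_check_session_quiescent(uint64_t session_id,
		const uint64_t *stream_ids, size_t stream_count)
{
	size_t i;

	send_command(RELAYD_BEGIN_DATA_PENDING, &session_id);
	(void) recv_reply();	/* Expect LTTNG_OK. */

	for (i = 0; i < stream_count; i++) {
		send_command(RELAYD_DATA_PENDING, &stream_ids[i]);
		(void) recv_reply();	/* 0 = written, 1 = still pending. */
	}

	send_command(RELAYD_END_DATA_PENDING, &session_id);
	/* Non-zero means data is still in flight for at least one stream. */
	return recv_reply();
}
#endif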
2227
2228 /*
2229 * Receive an index for a specific stream.
2230 *
2231 * Return 0 on success else a negative value.
2232 */
2233 static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr,
2234 struct relay_connection *conn,
2235 const struct lttng_buffer_view *payload)
2236 {
2237 int ret;
2238 ssize_t send_ret;
2239 struct relay_session *session = conn->session;
2240 struct lttcomm_relayd_index index_info;
2241 struct lttcomm_relayd_generic_reply reply;
2242 struct relay_stream *stream;
2243 size_t msg_len;
2244
2245 assert(conn);
2246
2247 DBG("Relay receiving index");
2248
2249 if (!session || !conn->version_check_done) {
2250 ERR("Trying to close a stream before version check");
2251 ret = -1;
2252 goto end_no_session;
2253 }
2254
2255 msg_len = lttcomm_relayd_index_len(
2256 lttng_to_index_major(conn->major, conn->minor),
2257 lttng_to_index_minor(conn->major, conn->minor));
2258 if (payload->size < msg_len) {
2259 ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes",
2260 msg_len, payload->size);
2261 ret = -1;
2262 goto end_no_session;
2263 }
2264 memcpy(&index_info, payload->data, msg_len);
2265 index_info.relay_stream_id = be64toh(index_info.relay_stream_id);
2266 index_info.net_seq_num = be64toh(index_info.net_seq_num);
2267 index_info.packet_size = be64toh(index_info.packet_size);
2268 index_info.content_size = be64toh(index_info.content_size);
2269 index_info.timestamp_begin = be64toh(index_info.timestamp_begin);
2270 index_info.timestamp_end = be64toh(index_info.timestamp_end);
2271 index_info.events_discarded = be64toh(index_info.events_discarded);
2272 index_info.stream_id = be64toh(index_info.stream_id);
2273
2274 if (conn->minor >= 8) {
2275 index_info.stream_instance_id =
2276 be64toh(index_info.stream_instance_id);
2277 index_info.packet_seq_num = be64toh(index_info.packet_seq_num);
2278 } else {
2279 index_info.stream_instance_id = -1ULL;
2280 index_info.packet_seq_num = -1ULL;
2281 }
2282
2283 stream = stream_get_by_id(index_info.relay_stream_id);
2284 if (!stream) {
2285 ERR("stream_get_by_id not found");
2286 ret = -1;
2287 goto end;
2288 }
2289
2290 pthread_mutex_lock(&stream->lock);
2291 ret = stream_add_index(stream, &index_info);
2292 pthread_mutex_unlock(&stream->lock);
2293 if (ret) {
2294 goto end_stream_put;
2295 }
2296
2297 end_stream_put:
2298 stream_put(stream);
2299 end:
2300 memset(&reply, 0, sizeof(reply));
2301 if (ret < 0) {
2302 reply.ret_code = htobe32(LTTNG_ERR_UNK);
2303 } else {
2304 reply.ret_code = htobe32(LTTNG_OK);
2305 }
2306 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2307 if (send_ret < (ssize_t) sizeof(reply)) {
2308 ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret);
2309 ret = -1;
2310 }
2311
2312 end_no_session:
2313 return ret;
2314 }
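
/*
 * The conversions above are the receive-side half of the index exchange. A
 * sketch of the mirror image, as a sender would byte-swap the same fields
 * before writing them on the wire (the real sender lives in the consumer
 * daemon, not here). Fields added in 2.8 (stream_instance_id,
 * packet_seq_num) are only present for >= 2.8 peers, which is why the
 * receive path defaults them to -1ULL for older ones.
 */
#if 0	/* Illustrative sketch only. */
static void index_fields_to_be(struct lttcomm_relayd_index *idx)
{
	idx->relay_stream_id = htobe64(idx->relay_stream_id);
	idx->net_seq_num = htobe64(idx->net_seq_num);
	idx->packet_size = htobe64(idx->packet_size);
	idx->content_size = htobe64(idx->content_size);
	idx->timestamp_begin = htobe64(idx->timestamp_begin);
	idx->timestamp_end = htobe64(idx->timestamp_end);
	idx->events_discarded = htobe64(idx->events_discarded);
	idx->stream_id = htobe64(idx->stream_id);
	idx->stream_instance_id = htobe64(idx->stream_instance_id);
	idx->packet_seq_num = htobe64(idx->packet_seq_num);
}
#endif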
2315
2316 /*
2317 * Receive the streams_sent message.
2318 *
2319 * Return 0 on success else a negative value.
2320 */
2321 static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr,
2322 struct relay_connection *conn,
2323 const struct lttng_buffer_view *payload)
2324 {
2325 int ret;
2326 ssize_t send_ret;
2327 struct lttcomm_relayd_generic_reply reply;
2328
2329 assert(conn);
2330
2331 DBG("Relay receiving streams_sent");
2332
2333 if (!conn->session || !conn->version_check_done) {
2334 ERR("Trying to close a stream before version check");
2335 ret = -1;
2336 goto end_no_session;
2337 }
2338
2339 /*
2340 * Publish every pending stream in the connection's recv list; they are
2341 * now ready to be used by the viewer.
2342 */
2343 publish_connection_local_streams(conn);
2344
2345 memset(&reply, 0, sizeof(reply));
2346 reply.ret_code = htobe32(LTTNG_OK);
2347 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
2348 if (send_ret < (ssize_t) sizeof(reply)) {
2349 ERR("Failed to send \"streams sent\" command reply (ret = %zd)",
2350 send_ret);
2351 ret = -1;
2352 } else {
2353 /* Success. */
2354 ret = 0;
2355 }
2356
2357 end_no_session:
2358 return ret;
2359 }
2360
2361 /*
2362 * relay_rotate_session_streams: rotate the streams of a session to a new
2363 * trace chunk (session rotation feature, not the tracefile rotation feature).
2364 */
2365 static int relay_rotate_session_streams(
2366 const struct lttcomm_relayd_hdr *recv_hdr,
2367 struct relay_connection *conn,
2368 const struct lttng_buffer_view *payload)
2369 {
2370 int ret = 0;
2371 uint32_t i;
2372 ssize_t send_ret;
2373 enum lttng_error_code reply_code = LTTNG_ERR_UNK;
2374 struct relay_session *session = conn->session;
2375 struct lttcomm_relayd_rotate_streams rotate_streams;
2376 struct lttcomm_relayd_generic_reply reply = {};
2377 struct relay_stream *stream = NULL;
2378 const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams);
2379 struct lttng_trace_chunk *next_trace_chunk = NULL;
2380 struct lttng_buffer_view stream_positions;
2381 char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
2382 const char *chunk_id_str = "none";
2383
2384 if (!session || !conn->version_check_done) {
2385 ERR("Trying to rotate a stream before version check");
2386 ret = -1;
2387 goto end_no_reply;
2388 }
2389
2390 if (session->major == 2 && session->minor < 11) {
2391 ERR("Unsupported feature before 2.11");
2392 ret = -1;
2393 goto end_no_reply;
2394 }
2395
2396 if (payload->size < header_len) {
2397 ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes",
2398 header_len, payload->size);
2399 ret = -1;
2400 goto end_no_reply;
2401 }
2402
2403 memcpy(&rotate_streams, payload->data, header_len);
2404
2405 /* Convert header to host endianness. */
2406 rotate_streams = (typeof(rotate_streams)) {
2407 .stream_count = be32toh(rotate_streams.stream_count),
2408 .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) {
2409 .is_set = !!rotate_streams.new_chunk_id.is_set,
2410 .value = be64toh(rotate_streams.new_chunk_id.value),
2411 }
2412 };
2413
2414 if (rotate_streams.new_chunk_id.is_set) {
2415 /*
2416 * Retrieve the trace chunk the stream must transition to. As
2417 * per the protocol, this chunk should have been created
2418 * before this command is received.
2419 */
2420 next_trace_chunk = sessiond_trace_chunk_registry_get_chunk(
2421 sessiond_trace_chunk_registry,
2422 session->sessiond_uuid, session->id,
2423 rotate_streams.new_chunk_id.value);
2424 if (!next_trace_chunk) {
2425 char uuid_str[LTTNG_UUID_STR_LEN];
2426
2427 lttng_uuid_to_str(session->sessiond_uuid, uuid_str);
2428 ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64
2429 ", trace_chunk_id = %" PRIu64,
2430 uuid_str, session->id,
2431 rotate_streams.new_chunk_id.value);
2432 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2433 ret = -1;
2434 goto end;
2435 }
2436
2437 ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64,
2438 rotate_streams.new_chunk_id.value);
2439 if (ret < 0 || ret >= sizeof(chunk_id_buf)) {
2440 chunk_id_str = "formatting error";
2441 } else {
2442 chunk_id_str = chunk_id_buf;
2443 }
2444 }
2445
2446 DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"",
2447 rotate_streams.stream_count, session->session_name,
2448 chunk_id_str);
2449
2450 stream_positions = lttng_buffer_view_from_view(payload,
2451 sizeof(rotate_streams), -1);
2452 if (!stream_positions.data ||
2453 stream_positions.size <
2454 (rotate_streams.stream_count *
2455 sizeof(struct lttcomm_relayd_stream_rotation_position))) {
2456 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2457 ret = -1;
2458 goto end;
2459 }
2460
2461 for (i = 0; i < rotate_streams.stream_count; i++) {
2462 struct lttcomm_relayd_stream_rotation_position *position_comm =
2463 &((typeof(position_comm)) stream_positions.data)[i];
2464 const struct lttcomm_relayd_stream_rotation_position pos = {
2465 .stream_id = be64toh(position_comm->stream_id),
2466 .rotate_at_seq_num = be64toh(
2467 position_comm->rotate_at_seq_num),
2468 };
2469
2470 stream = stream_get_by_id(pos.stream_id);
2471 if (!stream) {
2472 reply_code = LTTNG_ERR_INVALID;
2473 ret = -1;
2474 goto end;
2475 }
2476
2477 pthread_mutex_lock(&stream->lock);
2478 ret = stream_set_pending_rotation(stream, next_trace_chunk,
2479 pos.rotate_at_seq_num);
2480 pthread_mutex_unlock(&stream->lock);
2481 if (ret) {
2482 reply_code = LTTNG_ERR_FILE_CREATION_ERROR;
2483 goto end;
2484 }
2485
2486 stream_put(stream);
2487 stream = NULL;
2488 }
2489
2490 reply_code = LTTNG_OK;
2491 ret = 0;
2492 end:
2493 if (stream) {
2494 stream_put(stream);
2495 }
2496
2497 reply.ret_code = htobe32((uint32_t) reply_code);
2498 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
2499 sizeof(struct lttcomm_relayd_generic_reply), 0);
2500 if (send_ret < (ssize_t) sizeof(reply)) {
2501 ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)",
2502 send_ret);
2503 ret = -1;
2504 }
2505 end_no_reply:
2506 lttng_trace_chunk_put(next_trace_chunk);
2507 return ret;
2508 }
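
/*
 * Wire layout of the ROTATE_STREAMS payload handled above: a fixed-size
 * struct lttcomm_relayd_rotate_streams header immediately followed by
 * stream_count entries of struct lttcomm_relayd_stream_rotation_position.
 * The two-step size validation performed with buffer views boils down to
 * the following sketch (not compiled into the daemon).
 */
#if 0	/* Illustrative sketch only. */
static bool rotate_streams_payload_is_sane(size_t payload_size,
		uint32_t stream_count)
{
	const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams);
	const size_t positions_len = (size_t) stream_count *
			sizeof(struct lttcomm_relayd_stream_rotation_position);

	return payload_size >= header_len + positions_len;
}
#endif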
2509
2510
2511
2512 /*
2513 * relay_create_trace_chunk: create a new trace chunk
2514 */
2515 static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2516 struct relay_connection *conn,
2517 const struct lttng_buffer_view *payload)
2518 {
2519 int ret = 0;
2520 ssize_t send_ret;
2521 struct relay_session *session = conn->session;
2522 struct lttcomm_relayd_create_trace_chunk *msg;
2523 struct lttcomm_relayd_generic_reply reply = {};
2524 struct lttng_buffer_view header_view;
2525 struct lttng_buffer_view chunk_name_view;
2526 struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL;
2527 enum lttng_error_code reply_code = LTTNG_OK;
2528 enum lttng_trace_chunk_status chunk_status;
2529 struct lttng_directory_handle *session_output = NULL;
2530 const char *new_path;
2531
2532 if (!session || !conn->version_check_done) {
2533 ERR("Trying to create a trace chunk before version check");
2534 ret = -1;
2535 goto end_no_reply;
2536 }
2537
2538 if (session->major == 2 && session->minor < 11) {
2539 ERR("Chunk creation command is unsupported before 2.11");
2540 ret = -1;
2541 goto end_no_reply;
2542 }
2543
2544 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2545 if (!header_view.data) {
2546 ERR("Failed to receive payload of chunk creation command");
2547 ret = -1;
2548 goto end_no_reply;
2549 }
2550
2551 /* Convert to host endianness. */
2552 msg = (typeof(msg)) header_view.data;
2553 msg->chunk_id = be64toh(msg->chunk_id);
2554 msg->creation_timestamp = be64toh(msg->creation_timestamp);
2555 msg->override_name_length = be32toh(msg->override_name_length);
2556
2557 if (session->current_trace_chunk &&
2558 !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
2559 chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
2560 DEFAULT_CHUNK_TMP_OLD_DIRECTORY);
2561 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2562 ERR("Failed to rename old chunk");
2563 ret = -1;
2564 reply_code = LTTNG_ERR_UNK;
2565 goto end;
2566 }
2567 }
2568 session->ongoing_rotation = true;
2569 if (!session->current_trace_chunk) {
2570 if (!session->has_rotated) {
2571 new_path = "";
2572 } else {
2573 new_path = NULL;
2574 }
2575 } else {
2576 new_path = DEFAULT_CHUNK_TMP_NEW_DIRECTORY;
2577 }
2578 chunk = lttng_trace_chunk_create(
2579 msg->chunk_id, msg->creation_timestamp, new_path);
2580 if (!chunk) {
2581 ERR("Failed to create trace chunk in trace chunk creation command");
2582 ret = -1;
2583 reply_code = LTTNG_ERR_NOMEM;
2584 goto end;
2585 }
2586
2587 if (msg->override_name_length) {
2588 const char *name;
2589
2590 chunk_name_view = lttng_buffer_view_from_view(payload,
2591 sizeof(*msg),
2592 msg->override_name_length);
2593 name = chunk_name_view.data;
2594 if (!name || name[msg->override_name_length - 1]) {
2595 ERR("Failed to receive payload of chunk creation command");
2596 ret = -1;
2597 reply_code = LTTNG_ERR_INVALID;
2598 goto end;
2599 }
2600
2601 chunk_status = lttng_trace_chunk_override_name(
2602 chunk, chunk_name_view.data);
2603 switch (chunk_status) {
2604 case LTTNG_TRACE_CHUNK_STATUS_OK:
2605 break;
2606 case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT:
2607 ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)");
2608 reply_code = LTTNG_ERR_INVALID;
2609 ret = -1;
2610 goto end;
2611 default:
2612 ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)");
2613 reply_code = LTTNG_ERR_UNK;
2614 ret = -1;
2615 goto end;
2616 }
2617 }
2618
2619 chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk);
2620 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2621 reply_code = LTTNG_ERR_UNK;
2622 ret = -1;
2623 goto end;
2624 }
2625
2626 session_output = session_create_output_directory_handle(
2627 conn->session);
2628 if (!session_output) {
2629 reply_code = LTTNG_ERR_CREATE_DIR_FAIL;
2630 goto end;
2631 }
2632 chunk_status = lttng_trace_chunk_set_as_owner(chunk, session_output);
2633 lttng_directory_handle_put(session_output);
2634 session_output = NULL;
2635 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2636 reply_code = LTTNG_ERR_UNK;
2637 ret = -1;
2638 goto end;
2639 }
2640
2641 published_chunk = sessiond_trace_chunk_registry_publish_chunk(
2642 sessiond_trace_chunk_registry,
2643 conn->session->sessiond_uuid,
2644 conn->session->id,
2645 chunk);
2646 if (!published_chunk) {
2647 char uuid_str[LTTNG_UUID_STR_LEN];
2648
2649 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2650 ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2651 uuid_str,
2652 conn->session->id,
2653 msg->chunk_id);
2654 ret = -1;
2655 reply_code = LTTNG_ERR_NOMEM;
2656 goto end;
2657 }
2658
2659 pthread_mutex_lock(&conn->session->lock);
2660 if (conn->session->pending_closure_trace_chunk) {
2661 /*
2662 * Invalid; this means a second create_trace_chunk command was
2663 * received before a close_trace_chunk.
2664 */
2665 ERR("Invalid trace chunk close command received; a trace chunk is already waiting for a trace chunk close command");
2666 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2667 ret = -1;
2668 goto end_unlock_session;
2669 }
2670 conn->session->pending_closure_trace_chunk =
2671 conn->session->current_trace_chunk;
2672 conn->session->current_trace_chunk = published_chunk;
2673 published_chunk = NULL;
2674 if (!conn->session->pending_closure_trace_chunk) {
2675 session->ongoing_rotation = false;
2676 }
2677 end_unlock_session:
2678 pthread_mutex_unlock(&conn->session->lock);
2679 end:
2680 reply.ret_code = htobe32((uint32_t) reply_code);
2681 send_ret = conn->sock->ops->sendmsg(conn->sock,
2682 &reply,
2683 sizeof(struct lttcomm_relayd_generic_reply),
2684 0);
2685 if (send_ret < (ssize_t) sizeof(reply)) {
2686 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
2687 send_ret);
2688 ret = -1;
2689 }
2690 end_no_reply:
2691 lttng_trace_chunk_put(chunk);
2692 lttng_trace_chunk_put(published_chunk);
2693 lttng_directory_handle_put(session_output);
2694 return ret;
2695 }
2696
2697 /*
2698 * relay_close_trace_chunk: close a trace chunk
2699 */
2700 static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr,
2701 struct relay_connection *conn,
2702 const struct lttng_buffer_view *payload)
2703 {
2704 int ret = 0, buf_ret;
2705 ssize_t send_ret;
2706 struct relay_session *session = conn->session;
2707 struct lttcomm_relayd_close_trace_chunk *msg;
2708 struct lttcomm_relayd_close_trace_chunk_reply reply = {};
2709 struct lttng_buffer_view header_view;
2710 struct lttng_trace_chunk *chunk = NULL;
2711 enum lttng_error_code reply_code = LTTNG_OK;
2712 enum lttng_trace_chunk_status chunk_status;
2713 uint64_t chunk_id;
2714 LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {};
2715 time_t close_timestamp;
2716 char closed_trace_chunk_path[LTTNG_PATH_MAX];
2717 size_t path_length = 0;
2718 const char *chunk_name = NULL;
2719 struct lttng_dynamic_buffer reply_payload;
2720 const char *new_path;
2721
2722 lttng_dynamic_buffer_init(&reply_payload);
2723
2724 if (!session || !conn->version_check_done) {
2725 ERR("Trying to close a trace chunk before version check");
2726 ret = -1;
2727 goto end_no_reply;
2728 }
2729
2730 if (session->major == 2 && session->minor < 11) {
2731 ERR("Chunk close command is unsupported before 2.11");
2732 ret = -1;
2733 goto end_no_reply;
2734 }
2735
2736 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2737 if (!header_view.data) {
2738 ERR("Failed to receive payload of chunk close command");
2739 ret = -1;
2740 goto end_no_reply;
2741 }
2742
2743 /* Convert to host endianness. */
2744 msg = (typeof(msg)) header_view.data;
2745 chunk_id = be64toh(msg->chunk_id);
2746 close_timestamp = (time_t) be64toh(msg->close_timestamp);
2747 close_command = (typeof(close_command)){
2748 .value = be32toh(msg->close_command.value),
2749 .is_set = msg->close_command.is_set,
2750 };
2751
2752 chunk = sessiond_trace_chunk_registry_get_chunk(
2753 sessiond_trace_chunk_registry,
2754 conn->session->sessiond_uuid,
2755 conn->session->id,
2756 chunk_id);
2757 if (!chunk) {
2758 char uuid_str[LTTNG_UUID_STR_LEN];
2759
2760 lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str);
2761 ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64,
2762 uuid_str,
2763 conn->session->id,
2764 msg->chunk_id);
2765 ret = -1;
2766 reply_code = LTTNG_ERR_NOMEM;
2767 goto end;
2768 }
2769
2770 pthread_mutex_lock(&session->lock);
2771 if (close_command.is_set &&
2772 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE) {
2773 /*
2774 * Clear command. It is a protocol error to ask for a
2775 * clear on a relay which does not allow it. Querying
2776 * the configuration allows figuring out whether
2777 * clearing is allowed before doing the clear.
2778 */
2779 if (!opt_allow_clear) {
2780 ret = -1;
2781 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2782 goto end_unlock_session;
2783 }
2784 }
2785 if (session->pending_closure_trace_chunk &&
2786 session->pending_closure_trace_chunk != chunk) {
2787 ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure",
2788 session->session_name);
2789 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2790 ret = -1;
2791 goto end_unlock_session;
2792 }
2793
2794 if (session->current_trace_chunk && session->current_trace_chunk != chunk &&
2795 !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) {
2796 if (close_command.is_set &&
2797 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE &&
2798 !session->has_rotated) {
2799 /* New chunk stays in session output directory. */
2800 new_path = "";
2801 } else {
2802 /* Use chunk name for new chunk. */
2803 new_path = NULL;
2804 }
2805 /* Rename new chunk path. */
2806 chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk,
2807 new_path);
2808 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2809 ret = -1;
2810 goto end_unlock_session;
2811 }
2812 session->ongoing_rotation = false;
2813 }
2814 if ((!close_command.is_set ||
2815 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION) &&
2816 !lttng_trace_chunk_get_name_overridden(chunk)) {
2817 const char *old_path;
2818
2819 if (!session->has_rotated) {
2820 old_path = "";
2821 } else {
2822 old_path = NULL;
2823 }
2824 /* We need to move back the .tmp_old_chunk to its rightful place. */
2825 chunk_status = lttng_trace_chunk_rename_path(chunk, old_path);
2826 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2827 ret = -1;
2828 goto end_unlock_session;
2829 }
2830 }
2831 chunk_status = lttng_trace_chunk_set_close_timestamp(
2832 chunk, close_timestamp);
2833 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2834 ERR("Failed to set trace chunk close timestamp");
2835 ret = -1;
2836 reply_code = LTTNG_ERR_UNK;
2837 goto end_unlock_session;
2838 }
2839
2840 if (close_command.is_set) {
2841 chunk_status = lttng_trace_chunk_set_close_command(
2842 chunk, close_command.value);
2843 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2844 ret = -1;
2845 reply_code = LTTNG_ERR_INVALID;
2846 goto end_unlock_session;
2847 }
2848 }
2849 chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL);
2850 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
2851 ERR("Failed to get chunk name");
2852 ret = -1;
2853 reply_code = LTTNG_ERR_UNK;
2854 goto end_unlock_session;
2855 }
2856 if (!session->has_rotated && !session->snapshot) {
2857 ret = lttng_strncpy(closed_trace_chunk_path,
2858 session->output_path,
2859 sizeof(closed_trace_chunk_path));
2860 if (ret) {
2861 ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes",
2862 strlen(session->output_path),
2863 sizeof(closed_trace_chunk_path));
2864 reply_code = LTTNG_ERR_NOMEM;
2865 ret = -1;
2866 goto end_unlock_session;
2867 }
2868 } else {
2869 if (session->snapshot) {
2870 ret = snprintf(closed_trace_chunk_path,
2871 sizeof(closed_trace_chunk_path),
2872 "%s/%s", session->output_path,
2873 chunk_name);
2874 } else {
2875 ret = snprintf(closed_trace_chunk_path,
2876 sizeof(closed_trace_chunk_path),
2877 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY
2878 "/%s",
2879 session->output_path, chunk_name);
2880 }
2881 if (ret < 0 || ret >= sizeof(closed_trace_chunk_path)) {
2882 ERR("Failed to format closed trace chunk resulting path");
2883 reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM;
2884 ret = -1;
2885 goto end_unlock_session;
2886 }
2887 }
2888 if (close_command.is_set &&
2889 close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED) {
2890 session->has_rotated = true;
2891 }
2892 DBG("Reply chunk path on close: %s", closed_trace_chunk_path);
2893 path_length = strlen(closed_trace_chunk_path) + 1;
2894 if (path_length > UINT32_MAX) {
2895 ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol");
2896 ret = -1;
2897 reply_code = LTTNG_ERR_INVALID_PROTOCOL;
2898 goto end_unlock_session;
2899 }
2900
2901 if (session->current_trace_chunk == chunk) {
2902 /*
2903 * After a trace chunk close command, no new streams
2904 * referencing the chunk may be created. Hence, in the
2905 * event that no new trace chunk has been created for
2906 * the session, the reference to the current trace chunk
2907 * is released in order to allow it to be reclaimed when
2908 * the last stream releases its reference to it.
2909 */
2910 lttng_trace_chunk_put(session->current_trace_chunk);
2911 session->current_trace_chunk = NULL;
2912 }
2913 lttng_trace_chunk_put(session->pending_closure_trace_chunk);
2914 session->pending_closure_trace_chunk = NULL;
2915 end_unlock_session:
2916 pthread_mutex_unlock(&session->lock);
2917
2918 end:
2919 reply.generic.ret_code = htobe32((uint32_t) reply_code);
2920 reply.path_length = htobe32((uint32_t) path_length);
2921 buf_ret = lttng_dynamic_buffer_append(
2922 &reply_payload, &reply, sizeof(reply));
2923 if (buf_ret) {
2924 ERR("Failed to append \"close trace chunk\" command reply header to payload buffer");
2925 goto end_no_reply;
2926 }
2927
2928 if (reply_code == LTTNG_OK) {
2929 buf_ret = lttng_dynamic_buffer_append(&reply_payload,
2930 closed_trace_chunk_path, path_length);
2931 if (buf_ret) {
2932 ERR("Failed to append \"close trace chunk\" command reply path to payload buffer");
2933 goto end_no_reply;
2934 }
2935 }
2936
2937 send_ret = conn->sock->ops->sendmsg(conn->sock,
2938 reply_payload.data,
2939 reply_payload.size,
2940 0);
2941 if (send_ret < reply_payload.size) {
2942 ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)",
2943 reply_payload.size, send_ret);
2944 ret = -1;
2945 goto end_no_reply;
2946 }
2947 end_no_reply:
2948 lttng_trace_chunk_put(chunk);
2949 lttng_dynamic_buffer_reset(&reply_payload);
2950 return ret;
2951 }
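
/*
 * The close-chunk reply built above is variable-sized: a
 * lttcomm_relayd_close_trace_chunk_reply header (generic ret_code and
 * path_length) followed, on success, by path_length bytes of
 * null-terminated path. A sketch of the matching read on the peer side;
 * recv_exact() is a hypothetical helper, not part of this file.
 */
#if 0	/* Illustrative sketch only. */
static int read_close_chunk_reply(int fd, char *path, size_t path_capacity)
{
	struct lttcomm_relayd_close_trace_chunk_reply reply;
	uint32_t path_length;

	if (recv_exact(fd, &reply, sizeof(reply))) {
		return -1;
	}
	if (be32toh(reply.generic.ret_code) != LTTNG_OK) {
		return -1;
	}
	path_length = be32toh(reply.path_length);
	if (path_length == 0 || path_length > path_capacity) {
		return -1;
	}
	/* path_length includes the terminating null (strlen() + 1 above). */
	return recv_exact(fd, path, path_length);
}
#endif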
2952
2953 /*
2954 * relay_trace_chunk_exists: check if a trace chunk exists
2955 */
2956 static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr,
2957 struct relay_connection *conn,
2958 const struct lttng_buffer_view *payload)
2959 {
2960 int ret = 0;
2961 ssize_t send_ret;
2962 struct relay_session *session = conn->session;
2963 struct lttcomm_relayd_trace_chunk_exists *msg;
2964 struct lttcomm_relayd_trace_chunk_exists_reply reply = {};
2965 struct lttng_buffer_view header_view;
2966 uint64_t chunk_id;
2967 bool chunk_exists;
2968
2969 if (!session || !conn->version_check_done) {
2970 ERR("Trying to close a trace chunk before version check");
2971 ret = -1;
2972 goto end_no_reply;
2973 }
2974
2975 if (session->major == 2 && session->minor < 11) {
2976 ERR("Chunk close command is unsupported before 2.11");
2977 ret = -1;
2978 goto end_no_reply;
2979 }
2980
2981 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
2982 if (!header_view.data) {
2983 ERR("Failed to receive payload of chunk close command");
2984 ret = -1;
2985 goto end_no_reply;
2986 }
2987
2988 /* Convert to host endianness. */
2989 msg = (typeof(msg)) header_view.data;
2990 chunk_id = be64toh(msg->chunk_id);
2991
2992 ret = sessiond_trace_chunk_registry_chunk_exists(
2993 sessiond_trace_chunk_registry,
2994 conn->session->sessiond_uuid,
2995 conn->session->id,
2996 chunk_id, &chunk_exists);
2997 /*
2998 * If ret is not 0, send the reply and report the error to the caller.
2999 * It is a protocol (or internal) error and the session/connection
3000 * should be torn down.
3001 */
3002 reply = (typeof(reply)){
3003 .generic.ret_code = htobe32((uint32_t)
3004 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
3005 .trace_chunk_exists = ret == 0 ? chunk_exists : 0,
3006 };
3007 send_ret = conn->sock->ops->sendmsg(
3008 conn->sock, &reply, sizeof(reply), 0);
3009 if (send_ret < (ssize_t) sizeof(reply)) {
3010 ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)",
3011 send_ret);
3012 ret = -1;
3013 }
3014 end_no_reply:
3015 return ret;
3016 }
3017
3018 /*
3019 * relay_get_configuration: query whether feature is available
3020 */
3021 static int relay_get_configuration(const struct lttcomm_relayd_hdr *recv_hdr,
3022 struct relay_connection *conn,
3023 const struct lttng_buffer_view *payload)
3024 {
3025 int ret = 0;
3026 ssize_t send_ret;
3027 struct lttcomm_relayd_get_configuration *msg;
3028 struct lttcomm_relayd_get_configuration_reply reply = {};
3029 struct lttng_buffer_view header_view;
3030 uint64_t query_flags = 0;
3031 uint64_t result_flags = 0;
3032
3033 header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg));
3034 if (!header_view.data) {
3035 ERR("Failed to receive payload of chunk close command");
3036 ret = -1;
3037 goto end_no_reply;
3038 }
3039
3040 /* Convert to host endianness. */
3041 msg = (typeof(msg)) header_view.data;
3042 query_flags = be64toh(msg->query_flags);
3043
3044 if (query_flags) {
3045 ret = LTTNG_ERR_INVALID_PROTOCOL;
3046 goto reply;
3047 }
3048 if (opt_allow_clear) {
3049 result_flags |= LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED;
3050 }
3051 ret = 0;
3052 reply:
3053 reply = (typeof(reply)){
3054 .generic.ret_code = htobe32((uint32_t)
3055 (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)),
3056 .relayd_configuration_flags = htobe64(result_flags),
3057 };
3058 send_ret = conn->sock->ops->sendmsg(
3059 conn->sock, &reply, sizeof(reply), 0);
3060 if (send_ret < (ssize_t) sizeof(reply)) {
3061 ERR("Failed to send \"get configuration\" command reply (ret = %zd)",
3062 send_ret);
3063 ret = -1;
3064 }
3065 end_no_reply:
3066 return ret;
3067 }
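
/*
 * Sketch of how a peer can use the reply above to learn whether this relay
 * allows clearing before issuing a clear (relay_close_trace_chunk()
 * otherwise rejects the DELETE close command as a protocol error). How the
 * reply bytes are obtained from the socket is left out.
 */
#if 0	/* Illustrative sketch only. */
static bool relay_allows_clear(
		const struct lttcomm_relayd_get_configuration_reply *reply)
{
	const uint64_t flags = be64toh(reply->relayd_configuration_flags);

	return be32toh(reply->generic.ret_code) == LTTNG_OK &&
			(flags & LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED);
}
#endif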
3068
3069 #define DBG_CMD(cmd_name, conn) \
3070 DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd);
3071
3072 static int relay_process_control_command(struct relay_connection *conn,
3073 const struct lttcomm_relayd_hdr *header,
3074 const struct lttng_buffer_view *payload)
3075 {
3076 int ret = 0;
3077
3078 switch (header->cmd) {
3079 case RELAYD_CREATE_SESSION:
3080 DBG_CMD("RELAYD_CREATE_SESSION", conn);
3081 ret = relay_create_session(header, conn, payload);
3082 break;
3083 case RELAYD_ADD_STREAM:
3084 DBG_CMD("RELAYD_ADD_STREAM", conn);
3085 ret = relay_add_stream(header, conn, payload);
3086 break;
3087 case RELAYD_START_DATA:
3088 DBG_CMD("RELAYD_START_DATA", conn);
3089 ret = relay_start(header, conn, payload);
3090 break;
3091 case RELAYD_SEND_METADATA:
3092 DBG_CMD("RELAYD_SEND_METADATA", conn);
3093 ret = relay_recv_metadata(header, conn, payload);
3094 break;
3095 case RELAYD_VERSION:
3096 DBG_CMD("RELAYD_VERSION", conn);
3097 ret = relay_send_version(header, conn, payload);
3098 break;
3099 case RELAYD_CLOSE_STREAM:
3100 DBG_CMD("RELAYD_CLOSE_STREAM", conn);
3101 ret = relay_close_stream(header, conn, payload);
3102 break;
3103 case RELAYD_DATA_PENDING:
3104 DBG_CMD("RELAYD_DATA_PENDING", conn);
3105 ret = relay_data_pending(header, conn, payload);
3106 break;
3107 case RELAYD_QUIESCENT_CONTROL:
3108 DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn);
3109 ret = relay_quiescent_control(header, conn, payload);
3110 break;
3111 case RELAYD_BEGIN_DATA_PENDING:
3112 DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn);
3113 ret = relay_begin_data_pending(header, conn, payload);
3114 break;
3115 case RELAYD_END_DATA_PENDING:
3116 DBG_CMD("RELAYD_END_DATA_PENDING", conn);
3117 ret = relay_end_data_pending(header, conn, payload);
3118 break;
3119 case RELAYD_SEND_INDEX:
3120 DBG_CMD("RELAYD_SEND_INDEX", conn);
3121 ret = relay_recv_index(header, conn, payload);
3122 break;
3123 case RELAYD_STREAMS_SENT:
3124 DBG_CMD("RELAYD_STREAMS_SENT", conn);
3125 ret = relay_streams_sent(header, conn, payload);
3126 break;
3127 case RELAYD_RESET_METADATA:
3128 DBG_CMD("RELAYD_RESET_METADATA", conn);
3129 ret = relay_reset_metadata(header, conn, payload);
3130 break;
3131 case RELAYD_ROTATE_STREAMS:
3132 DBG_CMD("RELAYD_ROTATE_STREAMS", conn);
3133 ret = relay_rotate_session_streams(header, conn, payload);
3134 break;
3135 case RELAYD_CREATE_TRACE_CHUNK:
3136 DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn);
3137 ret = relay_create_trace_chunk(header, conn, payload);
3138 break;
3139 case RELAYD_CLOSE_TRACE_CHUNK:
3140 DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn);
3141 ret = relay_close_trace_chunk(header, conn, payload);
3142 break;
3143 case RELAYD_TRACE_CHUNK_EXISTS:
3144 DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn);
3145 ret = relay_trace_chunk_exists(header, conn, payload);
3146 break;
3147 case RELAYD_GET_CONFIGURATION:
3148 DBG_CMD("RELAYD_GET_CONFIGURATION", conn);
3149 ret = relay_get_configuration(header, conn, payload);
3150 break;
3151 case RELAYD_UPDATE_SYNC_INFO:
3152 default:
3153 ERR("Received unknown command (%u)", header->cmd);
3154 relay_unknown_command(conn);
3155 ret = -1;
3156 goto end;
3157 }
3158
3159 end:
3160 return ret;
3161 }
3162
3163 static enum relay_connection_status relay_process_control_receive_payload(
3164 struct relay_connection *conn)
3165 {
3166 int ret = 0;
3167 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3168 struct lttng_dynamic_buffer *reception_buffer =
3169 &conn->protocol.ctrl.reception_buffer;
3170 struct ctrl_connection_state_receive_payload *state =
3171 &conn->protocol.ctrl.state.receive_payload;
3172 struct lttng_buffer_view payload_view;
3173
3174 if (state->left_to_receive == 0) {
3175 /* Short-circuit for payload-less commands. */
3176 goto reception_complete;
3177 }
3178
3179 ret = conn->sock->ops->recvmsg(conn->sock,
3180 reception_buffer->data + state->received,
3181 state->left_to_receive, MSG_DONTWAIT);
3182 if (ret < 0) {
3183 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3184 PERROR("Unable to receive command payload on sock %d",
3185 conn->sock->fd);
3186 status = RELAY_CONNECTION_STATUS_ERROR;
3187 }
3188 goto end;
3189 } else if (ret == 0) {
3190 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3191 status = RELAY_CONNECTION_STATUS_CLOSED;
3192 goto end;
3193 }
3194
3195 assert(ret > 0);
3196 assert(ret <= state->left_to_receive);
3197
3198 state->left_to_receive -= ret;
3199 state->received += ret;
3200
3201 if (state->left_to_receive > 0) {
3202 /*
3203 * Can't transition to the protocol's next state, wait to
3204 * receive the rest of the payload.
3205 */
3206 DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3207 state->received, state->left_to_receive,
3208 conn->sock->fd);
3209 goto end;
3210 }
3211
3212 reception_complete:
3213 DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes",
3214 conn->sock->fd, state->received);
3215 /*
3216 * The payload required to process the command has been received.
3217 * A view to the reception buffer is forwarded to the various
3218 * commands and the state of the control is reset on success.
3219 *
3220 * Commands are responsible for sending their reply to the peer.
3221 */
3222 payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer,
3223 0, -1);
3224 ret = relay_process_control_command(conn,
3225 &state->header, &payload_view);
3226 if (ret < 0) {
3227 status = RELAY_CONNECTION_STATUS_ERROR;
3228 goto end;
3229 }
3230
3231 ret = connection_reset_protocol_state(conn);
3232 if (ret) {
3233 status = RELAY_CONNECTION_STATUS_ERROR;
3234 }
3235 end:
3236 return status;
3237 }
3238
3239 static enum relay_connection_status relay_process_control_receive_header(
3240 struct relay_connection *conn)
3241 {
3242 int ret = 0;
3243 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3244 struct lttcomm_relayd_hdr header;
3245 struct lttng_dynamic_buffer *reception_buffer =
3246 &conn->protocol.ctrl.reception_buffer;
3247 struct ctrl_connection_state_receive_header *state =
3248 &conn->protocol.ctrl.state.receive_header;
3249
3250 assert(state->left_to_receive != 0);
3251
3252 ret = conn->sock->ops->recvmsg(conn->sock,
3253 reception_buffer->data + state->received,
3254 state->left_to_receive, MSG_DONTWAIT);
3255 if (ret < 0) {
3256 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3257 PERROR("Unable to receive control command header on sock %d",
3258 conn->sock->fd);
3259 status = RELAY_CONNECTION_STATUS_ERROR;
3260 }
3261 goto end;
3262 } else if (ret == 0) {
3263 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3264 status = RELAY_CONNECTION_STATUS_CLOSED;
3265 goto end;
3266 }
3267
3268 assert(ret > 0);
3269 assert(ret <= state->left_to_receive);
3270
3271 state->left_to_receive -= ret;
3272 state->received += ret;
3273
3274 if (state->left_to_receive > 0) {
3275 /*
3276 * Can't transition to the protocol's next state, wait to
3277 * receive the rest of the header.
3278 */
3279 DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3280 state->received, state->left_to_receive,
3281 conn->sock->fd);
3282 goto end;
3283 }
3284
3285 /* Transition to next state: receiving the command's payload. */
3286 conn->protocol.ctrl.state_id =
3287 CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD;
3288 memcpy(&header, reception_buffer->data, sizeof(header));
3289 header.circuit_id = be64toh(header.circuit_id);
3290 header.data_size = be64toh(header.data_size);
3291 header.cmd = be32toh(header.cmd);
3292 header.cmd_version = be32toh(header.cmd_version);
3293 memcpy(&conn->protocol.ctrl.state.receive_payload.header,
3294 &header, sizeof(header));
3295
3296 DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes",
3297 conn->sock->fd, header.cmd, header.cmd_version,
3298 header.data_size);
3299
3300 if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) {
3301 ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.",
3302 header.data_size);
3303 status = RELAY_CONNECTION_STATUS_ERROR;
3304 goto end;
3305 }
3306
3307 conn->protocol.ctrl.state.receive_payload.left_to_receive =
3308 header.data_size;
3309 conn->protocol.ctrl.state.receive_payload.received = 0;
3310 ret = lttng_dynamic_buffer_set_size(reception_buffer,
3311 header.data_size);
3312 if (ret) {
3313 status = RELAY_CONNECTION_STATUS_ERROR;
3314 goto end;
3315 }
3316
3317 if (header.data_size == 0) {
3318 /*
3319 * Manually invoke the next state as the poll loop
3320 * will not wake-up to allow us to proceed further.
3321 */
3322 status = relay_process_control_receive_payload(conn);
3323 }
3324 end:
3325 return status;
3326 }
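
/*
 * Both control-connection receive states above follow the same
 * non-blocking accumulation pattern: read at most left_to_receive bytes,
 * tolerate EAGAIN/EWOULDBLOCK, treat a return of 0 as an orderly shutdown
 * and only transition once the counter reaches zero. Distilled below;
 * generic_recv() is a hypothetical stand-in for conn->sock->ops->recvmsg().
 */
#if 0	/* Illustrative sketch only. */
static enum relay_connection_status accumulate(int fd, char *buf,
		uint64_t *received, uint64_t *left_to_receive)
{
	const ssize_t ret = generic_recv(fd, buf + *received,
			*left_to_receive, MSG_DONTWAIT);

	if (ret < 0) {
		return (errno == EAGAIN || errno == EWOULDBLOCK) ?
				RELAY_CONNECTION_STATUS_OK :
				RELAY_CONNECTION_STATUS_ERROR;
	}
	if (ret == 0) {
		/* Orderly shutdown (EOF). */
		return RELAY_CONNECTION_STATUS_CLOSED;
	}
	*received += ret;
	*left_to_receive -= ret;
	/* Reception is complete when *left_to_receive reaches 0. */
	return RELAY_CONNECTION_STATUS_OK;
}
#endif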
3327
3328 /*
3329 * Process the commands received on the control socket
3330 */
3331 static enum relay_connection_status relay_process_control(
3332 struct relay_connection *conn)
3333 {
3334 enum relay_connection_status status;
3335
3336 switch (conn->protocol.ctrl.state_id) {
3337 case CTRL_CONNECTION_STATE_RECEIVE_HEADER:
3338 status = relay_process_control_receive_header(conn);
3339 break;
3340 case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD:
3341 status = relay_process_control_receive_payload(conn);
3342 break;
3343 default:
3344 ERR("Unknown control connection protocol state encountered.");
3345 abort();
3346 }
3347
3348 return status;
3349 }
3350
3351 static enum relay_connection_status relay_process_data_receive_header(
3352 struct relay_connection *conn)
3353 {
3354 int ret;
3355 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3356 struct data_connection_state_receive_header *state =
3357 &conn->protocol.data.state.receive_header;
3358 struct lttcomm_relayd_data_hdr header;
3359 struct relay_stream *stream;
3360
3361 assert(state->left_to_receive != 0);
3362
3363 ret = conn->sock->ops->recvmsg(conn->sock,
3364 state->header_reception_buffer + state->received,
3365 state->left_to_receive, MSG_DONTWAIT);
3366 if (ret < 0) {
3367 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3368 PERROR("Unable to receive data header on sock %d", conn->sock->fd);
3369 status = RELAY_CONNECTION_STATUS_ERROR;
3370 }
3371 goto end;
3372 } else if (ret == 0) {
3373 /* Orderly shutdown. Not necessary to print an error. */
3374 DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd);
3375 status = RELAY_CONNECTION_STATUS_CLOSED;
3376 goto end;
3377 }
3378
3379 assert(ret > 0);
3380 assert(ret <= state->left_to_receive);
3381
3382 state->left_to_receive -= ret;
3383 state->received += ret;
3384
3385 if (state->left_to_receive > 0) {
3386 /*
3387 * Can't transition to the protocol's next state, wait to
3388 * receive the rest of the header.
3389 */
3390 DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)",
3391 state->received, state->left_to_receive,
3392 conn->sock->fd);
3393 goto end;
3394 }
3395
3396 /* Transition to next state: receiving the payload. */
3397 conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD;
3398
3399 memcpy(&header, state->header_reception_buffer, sizeof(header));
3400 header.circuit_id = be64toh(header.circuit_id);
3401 header.stream_id = be64toh(header.stream_id);
3402 header.data_size = be32toh(header.data_size);
3403 header.net_seq_num = be64toh(header.net_seq_num);
3404 header.padding_size = be32toh(header.padding_size);
3405 memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header));
3406
3407 conn->protocol.data.state.receive_payload.left_to_receive =
3408 header.data_size;
3409 conn->protocol.data.state.receive_payload.received = 0;
3410 conn->protocol.data.state.receive_payload.rotate_index = false;
3411
3412 DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32,
3413 conn->sock->fd, header.circuit_id,
3414 header.stream_id, header.data_size,
3415 header.net_seq_num, header.padding_size);
3416
3417 stream = stream_get_by_id(header.stream_id);
3418 if (!stream) {
3419 DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64,
3420 header.stream_id);
3421 /* Protocol error. */
3422 status = RELAY_CONNECTION_STATUS_ERROR;
3423 goto end;
3424 }
3425
3426 pthread_mutex_lock(&stream->lock);
3427 /* Prepare stream for the reception of a new packet. */
3428 ret = stream_init_packet(stream, header.data_size,
3429 &conn->protocol.data.state.receive_payload.rotate_index);
3430 pthread_mutex_unlock(&stream->lock);
3431 if (ret) {
3432 ERR("Failed to rotate stream output file");
3433 status = RELAY_CONNECTION_STATUS_ERROR;
3434 goto end_stream_unlock;
3435 }
3436
3437 end_stream_unlock:
3438 stream_put(stream);
3439 end:
3440 return status;
3441 }
3442
3443 static enum relay_connection_status relay_process_data_receive_payload(
3444 struct relay_connection *conn)
3445 {
3446 int ret;
3447 enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK;
3448 struct relay_stream *stream;
3449 struct data_connection_state_receive_payload *state =
3450 &conn->protocol.data.state.receive_payload;
3451 const size_t chunk_size = RECV_DATA_BUFFER_SIZE;
3452 char data_buffer[chunk_size];
3453 bool partial_recv = false;
3454 bool new_stream = false, close_requested = false, index_flushed = false;
3455 uint64_t left_to_receive = state->left_to_receive;
3456 struct relay_session *session;
3457
3458 DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive",
3459 state->header.stream_id, state->header.net_seq_num,
3460 state->received, left_to_receive);
3461
3462 stream = stream_get_by_id(state->header.stream_id);
3463 if (!stream) {
3464 /* Protocol error. */
3465 ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64,
3466 state->header.stream_id);
3467 status = RELAY_CONNECTION_STATUS_ERROR;
3468 goto end;
3469 }
3470
3471 pthread_mutex_lock(&stream->lock);
3472 session = stream->trace->session;
3473 if (!conn->session) {
3474 ret = connection_set_session(conn, session);
3475 if (ret) {
3476 status = RELAY_CONNECTION_STATUS_ERROR;
3477 goto end_stream_unlock;
3478 }
3479 }
3480
3481 /*
3482 * The size of the "chunk" received on any iteration is bounded by:
3483 * - the data left to receive,
3484 * - the data immediately available on the socket,
3485 * - the on-stack data buffer
3486 */
3487 while (left_to_receive > 0 && !partial_recv) {
3488 size_t recv_size = min(left_to_receive, chunk_size);
3489 struct lttng_buffer_view packet_chunk;
3490
3491 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer,
3492 recv_size, MSG_DONTWAIT);
3493 if (ret < 0) {
3494 if (errno != EAGAIN && errno != EWOULDBLOCK) {
3495 PERROR("Socket %d error", conn->sock->fd);
3496 status = RELAY_CONNECTION_STATUS_ERROR;
3497 }
3498 goto end_stream_unlock;
3499 } else if (ret == 0) {
3500 /* Orderly shutdown; no more data will arrive on this socket. */
3501 DBG3("Data socket of stream id %" PRIu64 " performed an orderly shutdown (received EOF)",
3502 state->header.stream_id);
3503 status = RELAY_CONNECTION_STATUS_CLOSED;
3504 break;
3505 } else if (ret < (int) recv_size) {
3506 /*
3507 * All the data available on the socket has been
3508 * consumed.
3509 */
3510 partial_recv = true;
3511 recv_size = ret;
3512 }
3513
3514 packet_chunk = lttng_buffer_view_init(data_buffer,
3515 0, recv_size);
3516 assert(packet_chunk.data);
3517
3518 ret = stream_write(stream, &packet_chunk, 0);
3519 if (ret) {
3520 ERR("Relay error writing data to file");
3521 status = RELAY_CONNECTION_STATUS_ERROR;
3522 goto end_stream_unlock;
3523 }
3524
3525 left_to_receive -= recv_size;
3526 state->received += recv_size;
3527 state->left_to_receive = left_to_receive;
3528 }
3529
3530 if (state->left_to_receive > 0) {
3531 /*
3532 * Did not receive all the data expected, wait for more data to
3533 * become available on the socket.
3534 */
3535 DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive",
3536 state->header.stream_id, state->received,
3537 state->left_to_receive);
3538 goto end_stream_unlock;
3539 }
3540
3541 ret = stream_write(stream, NULL, state->header.padding_size);
3542 if (ret) {
3543 status = RELAY_CONNECTION_STATUS_ERROR;
3544 goto end_stream_unlock;
3545 }
3546
3547 if (session_streams_have_index(session)) {
3548 ret = stream_update_index(stream, state->header.net_seq_num,
3549 state->rotate_index, &index_flushed,
3550 state->header.data_size + state->header.padding_size);
3551 if (ret < 0) {
3552 ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d",
3553 stream->stream_handle,
3554 state->header.net_seq_num, ret);
3555 status = RELAY_CONNECTION_STATUS_ERROR;
3556 goto end_stream_unlock;
3557 }
3558 }
3559
3560 if (stream->prev_data_seq == -1ULL) {
3561 new_stream = true;
3562 }
3563
3564 ret = stream_complete_packet(stream, state->header.data_size +
3565 state->header.padding_size, state->header.net_seq_num,
3566 index_flushed);
3567 if (ret) {
3568 status = RELAY_CONNECTION_STATUS_ERROR;
3569 goto end_stream_unlock;
3570 }
3571
3572 /*
3573 * Resetting the protocol state (to RECEIVE_HEADER) will trash the
3574 * contents of *state which are aliased (union) to the same location as
3575 * the new state. Don't use it beyond this point.
3576 */
3577 connection_reset_protocol_state(conn);
3578 state = NULL;
3579
3580 end_stream_unlock:
3581 close_requested = stream->close_requested;
3582 pthread_mutex_unlock(&stream->lock);
3583 if (close_requested && left_to_receive == 0) {
3584 try_stream_close(stream);
3585 }
3586
3587 if (new_stream) {
3588 pthread_mutex_lock(&session->lock);
3589 uatomic_set(&session->new_streams, 1);
3590 pthread_mutex_unlock(&session->lock);
3591 }
3592
3593 stream_put(stream);
3594 end:
3595 return status;
3596 }
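
/*
 * The payload loop above bounds every read by the smallest of the bytes
 * still expected and the on-stack buffer size, and stops as soon as the
 * socket returns less than requested (nothing more is immediately
 * available). The core of that loop in isolation; write_chunk() is a
 * hypothetical stand-in for stream_write() and recv() for the socket ops.
 */
#if 0	/* Illustrative sketch only. */
static int drain_available_data(int fd, uint64_t *left_to_receive)
{
	char buf[RECV_DATA_BUFFER_SIZE];

	while (*left_to_receive > 0) {
		const size_t recv_size = min(*left_to_receive, sizeof(buf));
		const ssize_t ret = recv(fd, buf, recv_size, MSG_DONTWAIT);

		if (ret <= 0) {
			/* Error, EAGAIN/EWOULDBLOCK, or orderly shutdown. */
			return (int) ret;
		}
		if (write_chunk(buf, (size_t) ret)) {
			return -1;
		}
		*left_to_receive -= (uint64_t) ret;
		if ((size_t) ret < recv_size) {
			/* Partial read: nothing more available right now. */
			break;
		}
	}
	return 0;
}
#endif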
3597
3598 /*
3599 * relay_process_data: Process the data received on the data socket
3600 */
3601 static enum relay_connection_status relay_process_data(
3602 struct relay_connection *conn)
3603 {
3604 enum relay_connection_status status;
3605
3606 switch (conn->protocol.data.state_id) {
3607 case DATA_CONNECTION_STATE_RECEIVE_HEADER:
3608 status = relay_process_data_receive_header(conn);
3609 break;
3610 case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD:
3611 status = relay_process_data_receive_payload(conn);
3612 break;
3613 default:
3614 ERR("Unexpected data connection communication state.");
3615 abort();
3616 }
3617
3618 return status;
3619 }
3620
3621 static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
3622 {
3623 int ret;
3624
3625 (void) lttng_poll_del(events, pollfd);
3626
3627 ret = close(pollfd);
3628 if (ret < 0) {
3629 ERR("Closing pollfd %d", pollfd);
3630 }
3631 }
3632
3633 static void relay_thread_close_connection(struct lttng_poll_event *events,
3634 int pollfd, struct relay_connection *conn)
3635 {
3636 const char *type_str;
3637
3638 switch (conn->type) {
3639 case RELAY_DATA:
3640 type_str = "Data";
3641 break;
3642 case RELAY_CONTROL:
3643 type_str = "Control";
3644 break;
3645 case RELAY_VIEWER_COMMAND:
3646 type_str = "Viewer Command";
3647 break;
3648 case RELAY_VIEWER_NOTIFICATION:
3649 type_str = "Viewer Notification";
3650 break;
3651 default:
3652 type_str = "Unknown";
3653 }
3654 cleanup_connection_pollfd(events, pollfd);
3655 connection_put(conn);
3656 DBG("%s connection closed with %d", type_str, pollfd);
3657 }
3658
3659 /*
3660 * This thread does the actual work
3661 */
3662 static void *relay_thread_worker(void *data)
3663 {
3664 int ret, err = -1, last_seen_data_fd = -1;
3665 uint32_t nb_fd;
3666 struct lttng_poll_event events;
3667 struct lttng_ht *relay_connections_ht;
3668 struct lttng_ht_iter iter;
3669 struct relay_connection *destroy_conn = NULL;
3670
3671 DBG("[thread] Relay worker started");
3672
3673 rcu_register_thread();
3674
3675 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
3676
3677 if (testpoint(relayd_thread_worker)) {
3678 goto error_testpoint;
3679 }
3680
3681 health_code_update();
3682
3683 /* table of connections indexed on socket */
3684 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3685 if (!relay_connections_ht) {
3686 goto relay_connections_ht_error;
3687 }
3688
3689 ret = create_named_thread_poll_set(&events, 2, "Worker thread epoll");
3690 if (ret < 0) {
3691 goto error_poll_create;
3692 }
3693
3694 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
3695 if (ret < 0) {
3696 goto error;
3697 }
3698
3699 restart:
3700 while (1) {
3701 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
3702
3703 health_code_update();
3704
3705 /* Infinite blocking call, waiting for transmission */
3706 DBG3("Relayd worker thread polling...");
3707 health_poll_entry();
3708 ret = lttng_poll_wait(&events, -1);
3709 health_poll_exit();
3710 if (ret < 0) {
3711 /*
3712 * Restart interrupted system call.
3713 */
3714 if (errno == EINTR) {
3715 goto restart;
3716 }
3717 goto error;
3718 }
3719
3720 nb_fd = ret;
3721
3722 /*
3723 * Process control. The control connection is
3724 * prioritized so we don't starve it with high
3725 * throughput tracing data on the data connection.
3726 */
3727 for (i = 0; i < nb_fd; i++) {
3728 /* Fetch once the poll data */
3729 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3730 int pollfd = LTTNG_POLL_GETFD(&events, i);
3731
3732 health_code_update();
3733
3734 /* Thread quit pipe has been closed. Killing thread. */
3735 ret = check_thread_quit_pipe(pollfd, revents);
3736 if (ret) {
3737 err = 0;
3738 goto exit;
3739 }
3740
3741 /* Inspect the relay conn pipe for new connection */
3742 if (pollfd == relay_conn_pipe[0]) {
3743 if (revents & LPOLLIN) {
3744 struct relay_connection *conn;
3745
3746 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
3747 if (ret < 0) {
3748 goto error;
3749 }
3750 ret = lttng_poll_add(&events,
3751 conn->sock->fd,
3752 LPOLLIN | LPOLLRDHUP);
3753 if (ret) {
3754 ERR("Failed to add new connection file descriptor to poll set");
3755 goto error;
3756 }
3757 connection_ht_add(relay_connections_ht, conn);
3758 DBG("Connection socket %d added", conn->sock->fd);
3759 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3760 ERR("Relay connection pipe error");
3761 goto error;
3762 } else {
3763 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
3764 goto error;
3765 }
3766 } else {
3767 struct relay_connection *ctrl_conn;
3768
3769 ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3770 /* If not found, there is a synchronization issue. */
3771 assert(ctrl_conn);
3772
3773 if (ctrl_conn->type == RELAY_DATA) {
3774 if (revents & LPOLLIN) {
3775 /*
3776 * Flag the last seen data fd not deleted. It will be
3777 * used as the last seen fd if any fd gets deleted in
3778 * this first loop.
3779 */
3780 last_notdel_data_fd = pollfd;
3781 }
3782 goto put_ctrl_connection;
3783 }
3784 assert(ctrl_conn->type == RELAY_CONTROL);
3785
3786 if (revents & LPOLLIN) {
3787 enum relay_connection_status status;
3788
3789 status = relay_process_control(ctrl_conn);
3790 if (status != RELAY_CONNECTION_STATUS_OK) {
3791 /*
 3792 					 * On socket error, flag the session as aborted to force
 3793 					 * the cleanup of its streams; otherwise they can leak
 3794 					 * for the lifetime of the relayd.
 3795 					 *
 3796 					 * This prevents situations in which streams are left
 3797 					 * open because an index was received, the control
 3798 					 * connection is closed, and the data connection is
 3799 					 * closed (uncleanly) before the packet's data is
 3800 					 * received.
3801 *
3802 * Since the control connection encountered an error,
3803 * it is okay to be conservative and close the
3804 * session right now as we can't rely on the protocol
3805 * being respected anymore.
3806 */
3807 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3808 session_abort(ctrl_conn->session);
3809 }
3810
3811 /* Clear the connection on error or close. */
3812 relay_thread_close_connection(&events,
3813 pollfd,
3814 ctrl_conn);
3815 }
3816 seen_control = 1;
3817 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3818 relay_thread_close_connection(&events,
3819 pollfd, ctrl_conn);
3820 if (last_seen_data_fd == pollfd) {
3821 last_seen_data_fd = last_notdel_data_fd;
3822 }
3823 } else {
3824 ERR("Unexpected poll events %u for control sock %d",
3825 revents, pollfd);
3826 connection_put(ctrl_conn);
3827 goto error;
3828 }
3829 put_ctrl_connection:
3830 connection_put(ctrl_conn);
3831 }
3832 }
3833
3834 /*
 3835 		 * The last loop handled a control request; go back to poll to make
 3836 		 * sure we prioritize the control socket.
3837 */
3838 if (seen_control) {
3839 continue;
3840 }
3841
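		/*
		 * Locate the last processed data fd in this poll result so the
		 * data loop below can resume right after it; this keeps one busy
		 * data connection from starving the others.
		 */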
3842 if (last_seen_data_fd >= 0) {
3843 for (i = 0; i < nb_fd; i++) {
3844 int pollfd = LTTNG_POLL_GETFD(&events, i);
3845
3846 health_code_update();
3847
3848 if (last_seen_data_fd == pollfd) {
3849 idx = i;
3850 break;
3851 }
3852 }
3853 }
3854
3855 /* Process data connection. */
3856 for (i = idx + 1; i < nb_fd; i++) {
3857 /* Fetch the poll data. */
3858 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
3859 int pollfd = LTTNG_POLL_GETFD(&events, i);
3860 struct relay_connection *data_conn;
3861
3862 health_code_update();
3863
3864 if (!revents) {
 3865 			/* No activity on this fd; can happen with the poll() compat implementation. */
3866 continue;
3867 }
3868
 3869 			/* Skip the connection pipe; it is handled in the first loop. */
3870 if (pollfd == relay_conn_pipe[0]) {
3871 continue;
3872 }
3873
3874 data_conn = connection_get_by_sock(relay_connections_ht, pollfd);
3875 if (!data_conn) {
 3876 				/* Skip it; it might have been removed already. */
3877 continue;
3878 }
3879 if (data_conn->type == RELAY_CONTROL) {
3880 goto put_data_connection;
3881 }
3882 assert(data_conn->type == RELAY_DATA);
3883
3884 if (revents & LPOLLIN) {
3885 enum relay_connection_status status;
3886
3887 status = relay_process_data(data_conn);
3888 /* Connection closed or error. */
3889 if (status != RELAY_CONNECTION_STATUS_OK) {
3890 /*
 3891 					 * On socket error, flag the session as aborted to force
 3892 					 * the cleanup of its streams; otherwise they can leak
 3893 					 * for the lifetime of the relayd.
 3894 					 *
 3895 					 * This prevents situations in which streams are left
 3896 					 * open because an index was received, the control
 3897 					 * connection is closed, and the data connection is
 3898 					 * closed (uncleanly) before the packet's data is
 3899 					 * received.
3900 *
3901 * Since the data connection encountered an error,
3902 * it is okay to be conservative and close the
3903 * session right now as we can't rely on the protocol
3904 * being respected anymore.
3905 */
3906 if (status == RELAY_CONNECTION_STATUS_ERROR) {
3907 session_abort(data_conn->session);
3908 }
3909 relay_thread_close_connection(&events, pollfd,
3910 data_conn);
3911 /*
 3912 					 * Every 'goto restart' records the last seen fd, but
 3913 					 * here we don't really care since we gracefully
 3914 					 * continue the loop after the connection is deleted.
3915 */
3916 } else {
 3917 					/* Keep track of the last seen data fd. */
3918 last_seen_data_fd = pollfd;
3919 connection_put(data_conn);
3920 goto restart;
3921 }
3922 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3923 relay_thread_close_connection(&events, pollfd,
3924 data_conn);
3925 } else {
3926 ERR("Unknown poll events %u for data sock %d",
3927 revents, pollfd);
3928 }
3929 put_data_connection:
3930 connection_put(data_conn);
3931 }
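		/* All returned fds were processed without restarting; reset the data fd cursor. */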
3932 last_seen_data_fd = -1;
3933 }
3934
3935 /* Normal exit, no error */
3936 ret = 0;
3937
3938 exit:
3939 error:
 3940 	/* Clean up the remaining connection objects. */
3941 rcu_read_lock();
3942 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
3943 destroy_conn,
3944 sock_n.node) {
3945 health_code_update();
3946
3947 session_abort(destroy_conn->session);
3948
3949 /*
3950 * No need to grab another ref, because we own
3951 * destroy_conn.
3952 */
3953 relay_thread_close_connection(&events, destroy_conn->sock->fd,
3954 destroy_conn);
3955 }
3956 rcu_read_unlock();
3957
3958 (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
3959 error_poll_create:
3960 lttng_ht_destroy(relay_connections_ht);
3961 relay_connections_ht_error:
3962 /* Close relay conn pipes */
3963 (void) fd_tracker_util_pipe_close(the_fd_tracker,
3964 relay_conn_pipe);
3965 if (err) {
3966 DBG("Thread exited with error");
3967 }
3968 DBG("Worker thread cleanup complete");
3969 error_testpoint:
3970 if (err) {
3971 health_error();
3972 ERR("Health error occurred in %s", __func__);
3973 }
3974 health_unregister(health_relayd);
3975 rcu_unregister_thread();
3976 lttng_relay_stop_threads();
3977 return NULL;
3978 }
3979
3980 /*
 3981  * Create the relay connection pipe used to wake the worker thread.
 3982  * Closed by the worker thread during its cleanup.
3983 */
3984 static int create_relay_conn_pipe(void)
3985 {
3986 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
3987 "Relayd connection pipe", relay_conn_pipe);
3988 }
3989
3990 /*
3991 * main
3992 */
3993 int main(int argc, char **argv)
3994 {
3995 bool thread_is_rcu_registered = false;
3996 int ret = 0, retval = 0;
3997 void *status;
3998
3999 /* Parse environment variables */
4000 ret = parse_env_options();
4001 if (ret) {
4002 retval = -1;
4003 goto exit_options;
4004 }
4005
4006 /*
4007 * Parse arguments.
 4008 	 * Command line arguments override environment variables.
4009 */
4010 progname = argv[0];
4011 if (set_options(argc, argv)) {
4012 retval = -1;
4013 goto exit_options;
4014 }
4015
4016 if (set_signal_handler()) {
4017 retval = -1;
4018 goto exit_options;
4019 }
4020
4021 relayd_config_log();
4022
4023 if (opt_print_version) {
4024 print_version();
4025 retval = 0;
4026 goto exit_options;
4027 }
4028
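	/* The relay daemon never reads from stdin; close it right away. */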
4029 ret = fclose(stdin);
4030 if (ret) {
4031 PERROR("Failed to close stdin");
4032 goto exit_options;
4033 }
4034
4035 DBG("Clear command %s", opt_allow_clear ? "allowed" : "disallowed");
4036
4037 /* Try to create directory if -o, --output is specified. */
4038 if (opt_output_path) {
4039 if (*opt_output_path != '/') {
4040 ERR("Please specify an absolute path for -o, --output PATH");
4041 retval = -1;
4042 goto exit_options;
4043 }
4044
4045 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG,
4046 -1, -1);
4047 if (ret < 0) {
4048 ERR("Unable to create %s", opt_output_path);
4049 retval = -1;
4050 goto exit_options;
4051 }
4052 }
4053
4054 /* Daemonize */
4055 if (opt_daemon || opt_background) {
4056 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
4057 !opt_background);
4058 if (ret < 0) {
4059 retval = -1;
4060 goto exit_options;
4061 }
4062 }
4063
4064 if (opt_working_directory) {
4065 ret = utils_change_working_directory(opt_working_directory);
4066 if (ret) {
4067 /* All errors are already logged. */
4068 goto exit_options;
4069 }
4070 }
4071
4072 sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create();
4073 if (!sessiond_trace_chunk_registry) {
4074 ERR("Failed to initialize session daemon trace chunk registry");
4075 retval = -1;
4076 goto exit_options;
4077 }
4078
4079 /*
 4080 	 * The RCU thread registration (and its use, through the fd-tracker's
 4081 	 * creation) is done after daemonization so that we do not have to
 4082 	 * deal with liburcu's fork() handling: the call_rcu machinery would
 4083 	 * otherwise need to be restored after the fork.
4084 */
4085 rcu_register_thread();
4086 thread_is_rcu_registered = true;
4087
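	/*
	 * The fd-tracker enforces a cap (lttng_opt_fd_cap) on the number of
	 * file descriptors the daemon keeps open at any given time.
	 */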
4088 the_fd_tracker = fd_tracker_create(lttng_opt_fd_cap);
4089 if (!the_fd_tracker) {
4090 retval = -1;
4091 goto exit_options;
4092 }
4093
4094 /* Initialize thread health monitoring */
4095 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
4096 if (!health_relayd) {
4097 PERROR("health_app_create error");
4098 retval = -1;
4099 goto exit_options;
4100 }
4101
4102 /* Create thread quit pipe */
4103 if (init_thread_quit_pipe()) {
4104 retval = -1;
4105 goto exit_options;
4106 }
4107
 4108 	/* Set up the relay connection pipe (dispatcher to worker). */
4109 if (create_relay_conn_pipe()) {
4110 retval = -1;
4111 goto exit_options;
4112 }
4113
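	/*
	 * Connections accepted by the listener thread are enqueued on this
	 * queue; the dispatcher thread dequeues them and hands them to the
	 * worker thread through the relay connection pipe.
	 */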
 4114 	/* Init the relay connection queue. */
4115 cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail);
4116
4117 /* Initialize communication library */
4118 lttcomm_init();
4119 lttcomm_inet_init();
4120
 4121 	/* table of sessions indexed by session ID */
4122 sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4123 if (!sessions_ht) {
4124 retval = -1;
4125 goto exit_options;
4126 }
4127
 4128 	/* table of relay streams indexed by stream ID */
4129 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4130 if (!relay_streams_ht) {
4131 retval = -1;
4132 goto exit_options;
4133 }
4134
 4135 	/* table of viewer streams indexed by stream ID */
4136 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4137 if (!viewer_streams_ht) {
4138 retval = -1;
4139 goto exit_options;
4140 }
4141
4142 ret = init_health_quit_pipe();
4143 if (ret) {
4144 retval = -1;
4145 goto exit_options;
4146 }
4147
 4148 	/* Create thread to manage the health check socket. */
4149 ret = pthread_create(&health_thread, default_pthread_attr(),
4150 thread_manage_health, (void *) NULL);
4151 if (ret) {
4152 errno = ret;
4153 PERROR("pthread_create health");
4154 retval = -1;
4155 goto exit_options;
4156 }
4157
4158 /* Setup the dispatcher thread */
4159 ret = pthread_create(&dispatcher_thread, default_pthread_attr(),
4160 relay_thread_dispatcher, (void *) NULL);
4161 if (ret) {
4162 errno = ret;
4163 PERROR("pthread_create dispatcher");
4164 retval = -1;
4165 goto exit_dispatcher_thread;
4166 }
4167
4168 /* Setup the worker thread */
4169 ret = pthread_create(&worker_thread, default_pthread_attr(),
4170 relay_thread_worker, NULL);
4171 if (ret) {
4172 errno = ret;
4173 PERROR("pthread_create worker");
4174 retval = -1;
4175 goto exit_worker_thread;
4176 }
4177
4178 /* Setup the listener thread */
4179 ret = pthread_create(&listener_thread, default_pthread_attr(),
4180 relay_thread_listener, (void *) NULL);
4181 if (ret) {
4182 errno = ret;
4183 PERROR("pthread_create listener");
4184 retval = -1;
4185 goto exit_listener_thread;
4186 }
4187
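	/* Start the live-viewer threads (see live.c). */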
4188 ret = relayd_live_create(live_uri);
4189 if (ret) {
 4190 		ERR("Failed to start live viewer threads");
4191 retval = -1;
4192 goto exit_live;
4193 }
4194
4195 /*
4196 * This is where we start awaiting program completion (e.g. through
 4197 	 * a signal that asks the threads to tear down).
4198 */
4199
4200 ret = relayd_live_join();
4201 if (ret) {
4202 retval = -1;
4203 }
4204 exit_live:
4205
4206 ret = pthread_join(listener_thread, &status);
4207 if (ret) {
4208 errno = ret;
4209 PERROR("pthread_join listener_thread");
4210 retval = -1;
4211 }
4212
4213 exit_listener_thread:
4214 ret = pthread_join(worker_thread, &status);
4215 if (ret) {
4216 errno = ret;
4217 PERROR("pthread_join worker_thread");
4218 retval = -1;
4219 }
4220
4221 exit_worker_thread:
4222 ret = pthread_join(dispatcher_thread, &status);
4223 if (ret) {
4224 errno = ret;
4225 PERROR("pthread_join dispatcher_thread");
4226 retval = -1;
4227 }
4228 exit_dispatcher_thread:
4229
4230 ret = pthread_join(health_thread, &status);
4231 if (ret) {
4232 errno = ret;
4233 PERROR("pthread_join health_thread");
4234 retval = -1;
4235 }
4236 exit_options:
4237 /*
4238 * Wait for all pending call_rcu work to complete before tearing
4239 * down data structures. call_rcu worker may be trying to
4240 * perform lookups in those structures.
4241 */
4242 rcu_barrier();
4243 relayd_cleanup();
4244
 4245 	/* Ensure all prior call_rcu callbacks are done. */
4246 rcu_barrier();
4247
4248 if (thread_is_rcu_registered) {
4249 rcu_unregister_thread();
4250 }
4251
4252 if (!retval) {
4253 exit(EXIT_SUCCESS);
4254 } else {
4255 exit(EXIT_FAILURE);
4256 }
4257 }