Commit | Line | Data |
---|---|---|
b8aa1682 | 1 | /* |
ab5be9fa MJ |
2 | * Copyright (C) 2012 Julien Desfossez <jdesfossez@efficios.com> |
3 | * Copyright (C) 2012 David Goulet <dgoulet@efficios.com> | |
4 | * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com> | |
5 | * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
b8aa1682 | 6 | * |
ab5be9fa | 7 | * SPDX-License-Identifier: GPL-2.0-only |
b8aa1682 | 8 | * |
b8aa1682 JD |
9 | */ |
10 | ||
6c1c0768 | 11 | #define _LGPL_SOURCE |
b8aa1682 JD |
12 | #include <getopt.h> |
13 | #include <grp.h> | |
14 | #include <limits.h> | |
15 | #include <pthread.h> | |
16 | #include <signal.h> | |
17 | #include <stdio.h> | |
18 | #include <stdlib.h> | |
19 | #include <string.h> | |
20 | #include <sys/mman.h> | |
21 | #include <sys/mount.h> | |
22 | #include <sys/resource.h> | |
23 | #include <sys/socket.h> | |
24 | #include <sys/stat.h> | |
25 | #include <sys/types.h> | |
26 | #include <sys/wait.h> | |
896010e3 | 27 | #include <sys/resource.h> |
173af62f | 28 | #include <inttypes.h> |
b8aa1682 JD |
29 | #include <urcu/futex.h> |
30 | #include <urcu/uatomic.h> | |
70626904 | 31 | #include <urcu/rculist.h> |
b8aa1682 JD |
32 | #include <unistd.h> |
33 | #include <fcntl.h> | |
f8be1183 | 34 | #include <strings.h> |
896010e3 | 35 | #include <ctype.h> |
ac497a37 | 36 | #include <algorithm> |
b8aa1682 JD |
37 | |
38 | #include <lttng/lttng.h> | |
39 | #include <common/common.h> | |
40 | #include <common/compat/poll.h> | |
41 | #include <common/compat/socket.h> | |
f263b7fd | 42 | #include <common/compat/endian.h> |
e8fa9fb0 | 43 | #include <common/compat/getenv.h> |
b8aa1682 | 44 | #include <common/defaults.h> |
3fd27398 | 45 | #include <common/daemonize.h> |
b8aa1682 JD |
46 | #include <common/futex.h> |
47 | #include <common/sessiond-comm/sessiond-comm.h> | |
48 | #include <common/sessiond-comm/inet.h> | |
b8aa1682 JD |
49 | #include <common/sessiond-comm/relayd.h> |
50 | #include <common/uri.h> | |
a02de639 | 51 | #include <common/utils.h> |
4971b7f0 | 52 | #include <common/path.h> |
d3ecc550 | 53 | #include <common/align.h> |
3299fd31 | 54 | #include <common/ini-config/ini-config.h> |
5312a3ed JG |
55 | #include <common/dynamic-buffer.h> |
56 | #include <common/buffer-view.h> | |
70626904 | 57 | #include <common/string-utils/format.h> |
00e3b7f1 | 58 | #include <common/fd-tracker/fd-tracker.h> |
67609994 | 59 | #include <common/fd-tracker/utils.h> |
b8aa1682 | 60 | |
2a635488 | 61 | #include "backward-compatibility-group-by.h" |
0f907de1 | 62 | #include "cmd.h" |
2a635488 | 63 | #include "connection.h" |
d3e2ba59 | 64 | #include "ctf-trace.h" |
2a635488 | 65 | #include "health-relayd.h" |
1c20f0e2 | 66 | #include "index.h" |
d3e2ba59 | 67 | #include "live.h" |
2a635488 | 68 | #include "lttng-relayd.h" |
2a174661 | 69 | #include "session.h" |
2a635488 | 70 | #include "sessiond-trace-chunks.h" |
2a174661 | 71 | #include "stream.h" |
f056029c | 72 | #include "tcp_keep_alive.h" |
2a635488 JR |
73 | #include "testpoint.h" |
74 | #include "tracefile-array.h" | |
75 | #include "utils.h" | |
76 | #include "version.h" | |
77 | #include "viewer-stream.h" | |
b8aa1682 | 78 | |
4fc83d94 PP |
79 | static const char *help_msg = |
80 | #ifdef LTTNG_EMBED_HELP | |
81 | #include <lttng-relayd.8.h> | |
82 | #else | |
83 | NULL | |
84 | #endif | |
85 | ; | |
86 | ||
5569b118 JG |
87 | enum relay_connection_status { |
88 | RELAY_CONNECTION_STATUS_OK, | |
a9577b76 | 89 | /* An error occurred while processing an event on the connection. */ |
5569b118 JG |
90 | RELAY_CONNECTION_STATUS_ERROR, |
91 | /* Connection closed/shutdown cleanly. */ | |
92 | RELAY_CONNECTION_STATUS_CLOSED, | |
93 | }; | |
94 | ||
b8aa1682 | 95 | /* command line options */ |
ce9ee1fb | 96 | char *opt_output_path, *opt_working_directory; |
35ab25e5 | 97 | static int opt_daemon, opt_background, opt_print_version, opt_allow_clear = 1; |
a8b66566 | 98 | enum relay_group_output_by opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_UNKNOWN; |
3fd27398 | 99 | |
e56e5792 MJ |
100 | /* Argument variables */ |
101 | int lttng_opt_quiet; /* not static in error.h */ | |
102 | int lttng_opt_verbose; /* not static in error.h */ | |
103 | int lttng_opt_mi; /* not static in error.h */ | |
104 | ||
3fd27398 MD |
105 | /* |
106 | * We need to wait for listener and live listener threads, as well as | |
107 | * health check thread, before being ready to signal readiness. | |
108 | */ | |
109 | #define NR_LTTNG_RELAY_READY 3 | |
110 | static int lttng_relay_ready = NR_LTTNG_RELAY_READY; | |
0848dba7 MD |
111 | |
112 | /* Size of receive buffer. */ | |
113 | #define RECV_DATA_BUFFER_SIZE 65536 | |
114 | ||
3fd27398 MD |
115 | static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */ |
116 | static pid_t child_ppid; /* Internal parent PID use with daemonize. */ | |
117 | ||
095a4ae5 MD |
118 | static struct lttng_uri *control_uri; |
119 | static struct lttng_uri *data_uri; | |
d3e2ba59 | 120 | static struct lttng_uri *live_uri; |
b8aa1682 JD |
121 | |
122 | const char *progname; | |
b8aa1682 | 123 | |
65931c8b | 124 | const char *tracing_group_name = DEFAULT_TRACING_GROUP; |
cd60b05a JG |
125 | static int tracing_group_name_override; |
126 | ||
127 | const char * const config_section_name = "relayd"; | |
65931c8b | 128 | |
b8aa1682 JD |
129 | /* |
130 | * Quit pipe for all threads. This permits a single cancellation point | |
131 | * for all threads when receiving an event on the pipe. | |
132 | */ | |
0b242f62 | 133 | int thread_quit_pipe[2] = { -1, -1 }; |
b8aa1682 JD |
134 | |
135 | /* | |
136 | * This pipe is used to inform the worker thread that a command is queued and | |
137 | * ready to be processed. | |
138 | */ | |
58eb9381 | 139 | static int relay_conn_pipe[2] = { -1, -1 }; |
b8aa1682 | 140 | |
26c9d55e | 141 | /* Shared between threads */ |
b8aa1682 JD |
142 | static int dispatch_thread_exit; |
143 | ||
144 | static pthread_t listener_thread; | |
145 | static pthread_t dispatcher_thread; | |
146 | static pthread_t worker_thread; | |
65931c8b | 147 | static pthread_t health_thread; |
b8aa1682 | 148 | |
7591bab1 MD |
149 | /* |
150 | * last_relay_stream_id_lock protects last_relay_stream_id increment | |
151 | * atomicity on 32-bit architectures. | |
152 | */ | |
153 | static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER; | |
095a4ae5 | 154 | static uint64_t last_relay_stream_id; |
b8aa1682 JD |
155 | |
156 | /* | |
157 | * Relay command queue. | |
158 | * | |
159 | * The relay_thread_listener and relay_thread_dispatcher communicate with this | |
160 | * queue. | |
161 | */ | |
58eb9381 | 162 | static struct relay_conn_queue relay_conn_queue; |
b8aa1682 | 163 | |
896010e3 | 164 | /* Cap of file desriptors to be in simultaneous use by the relay daemon. */ |
5c0551f9 | 165 | static unsigned int lttng_opt_fd_pool_size = -1; |
896010e3 | 166 | |
d3e2ba59 JD |
167 | /* Global relay stream hash table. */ |
168 | struct lttng_ht *relay_streams_ht; | |
169 | ||
92c6ca54 DG |
170 | /* Global relay viewer stream hash table. */ |
171 | struct lttng_ht *viewer_streams_ht; | |
172 | ||
7591bab1 MD |
173 | /* Global relay sessions hash table. */ |
174 | struct lttng_ht *sessions_ht; | |
0a6518b0 | 175 | |
55706a7d | 176 | /* Relayd health monitoring */ |
eea7556c | 177 | struct health_app *health_relayd; |
55706a7d | 178 | |
23c8ff50 JG |
179 | struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry; |
180 | ||
00e3b7f1 JG |
181 | /* Global fd tracker. */ |
182 | struct fd_tracker *the_fd_tracker; | |
183 | ||
cd60b05a JG |
184 | static struct option long_options[] = { |
185 | { "control-port", 1, 0, 'C', }, | |
186 | { "data-port", 1, 0, 'D', }, | |
8d5c808e | 187 | { "live-port", 1, 0, 'L', }, |
cd60b05a | 188 | { "daemonize", 0, 0, 'd', }, |
b5218ffb | 189 | { "background", 0, 0, 'b', }, |
cd60b05a | 190 | { "group", 1, 0, 'g', }, |
5c0551f9 | 191 | { "fd-pool-size", 1, 0, '\0', }, |
cd60b05a JG |
192 | { "help", 0, 0, 'h', }, |
193 | { "output", 1, 0, 'o', }, | |
194 | { "verbose", 0, 0, 'v', }, | |
195 | { "config", 1, 0, 'f' }, | |
3a904098 | 196 | { "version", 0, 0, 'V' }, |
ce9ee1fb | 197 | { "working-directory", 1, 0, 'w', }, |
a8b66566 JR |
198 | { "group-output-by-session", 0, 0, 's', }, |
199 | { "group-output-by-host", 0, 0, 'p', }, | |
35ab25e5 | 200 | { "disallow-clear", 0, 0, 'x' }, |
cd60b05a JG |
201 | { NULL, 0, 0, 0, }, |
202 | }; | |
203 | ||
3a904098 | 204 | static const char *config_ignore_options[] = { "help", "config", "version" }; |
cd60b05a | 205 | |
a3bc3918 JR |
206 | static void print_version(void) { |
207 | fprintf(stdout, "%s\n", VERSION); | |
208 | } | |
209 | ||
210 | static void relayd_config_log(void) | |
211 | { | |
212 | DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s", | |
213 | GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION, | |
214 | EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME); | |
215 | if (EXTRA_VERSION_DESCRIPTION[0] != '\0') { | |
216 | DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n"); | |
217 | } | |
7f5ed73a JR |
218 | if (EXTRA_VERSION_PATCHES[0] != '\0') { |
219 | DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n"); | |
220 | } | |
a3bc3918 JR |
221 | } |
222 | ||
cd60b05a JG |
223 | /* |
224 | * Take an option from the getopt output and set it in the right variable to be | |
225 | * used later. | |
226 | * | |
227 | * Return 0 on success else a negative value. | |
228 | */ | |
7591bab1 | 229 | static int set_option(int opt, const char *arg, const char *optname) |
b8aa1682 | 230 | { |
cd60b05a JG |
231 | int ret; |
232 | ||
233 | switch (opt) { | |
234 | case 0: | |
5c0551f9 | 235 | if (!strcmp(optname, "fd-pool-size")) { |
896010e3 JG |
236 | unsigned long v; |
237 | ||
238 | errno = 0; | |
239 | v = strtoul(arg, NULL, 0); | |
60b7e1f8 | 240 | if (errno != 0 || !isdigit((unsigned char) arg[0])) { |
5c0551f9 | 241 | ERR("Wrong value in --fd-pool-size parameter: %s", arg); |
896010e3 JG |
242 | ret = -1; |
243 | goto end; | |
244 | } | |
896010e3 | 245 | if (v >= UINT_MAX) { |
5c0551f9 | 246 | ERR("File descriptor cap overflow in --fd-pool-size parameter: %s", arg); |
896010e3 JG |
247 | ret = -1; |
248 | goto end; | |
249 | } | |
5c0551f9 | 250 | lttng_opt_fd_pool_size = (unsigned int) v; |
896010e3 JG |
251 | } else { |
252 | fprintf(stderr, "unknown option %s", optname); | |
253 | if (arg) { | |
254 | fprintf(stderr, " with arg %s\n", arg); | |
255 | } | |
cd60b05a JG |
256 | } |
257 | break; | |
258 | case 'C': | |
e8fa9fb0 MD |
259 | if (lttng_is_setuid_setgid()) { |
260 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
261 | "-C, --control-port"); | |
262 | } else { | |
263 | ret = uri_parse(arg, &control_uri); | |
264 | if (ret < 0) { | |
265 | ERR("Invalid control URI specified"); | |
266 | goto end; | |
267 | } | |
268 | if (control_uri->port == 0) { | |
269 | control_uri->port = DEFAULT_NETWORK_CONTROL_PORT; | |
270 | } | |
cd60b05a JG |
271 | } |
272 | break; | |
273 | case 'D': | |
e8fa9fb0 MD |
274 | if (lttng_is_setuid_setgid()) { |
275 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
276 | "-D, -data-port"); | |
277 | } else { | |
278 | ret = uri_parse(arg, &data_uri); | |
279 | if (ret < 0) { | |
280 | ERR("Invalid data URI specified"); | |
281 | goto end; | |
282 | } | |
283 | if (data_uri->port == 0) { | |
284 | data_uri->port = DEFAULT_NETWORK_DATA_PORT; | |
285 | } | |
cd60b05a JG |
286 | } |
287 | break; | |
8d5c808e | 288 | case 'L': |
e8fa9fb0 MD |
289 | if (lttng_is_setuid_setgid()) { |
290 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
291 | "-L, -live-port"); | |
292 | } else { | |
293 | ret = uri_parse(arg, &live_uri); | |
294 | if (ret < 0) { | |
295 | ERR("Invalid live URI specified"); | |
296 | goto end; | |
297 | } | |
298 | if (live_uri->port == 0) { | |
299 | live_uri->port = DEFAULT_NETWORK_VIEWER_PORT; | |
300 | } | |
8d5c808e AM |
301 | } |
302 | break; | |
cd60b05a JG |
303 | case 'd': |
304 | opt_daemon = 1; | |
305 | break; | |
b5218ffb MD |
306 | case 'b': |
307 | opt_background = 1; | |
308 | break; | |
cd60b05a | 309 | case 'g': |
e8fa9fb0 MD |
310 | if (lttng_is_setuid_setgid()) { |
311 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
312 | "-g, --group"); | |
313 | } else { | |
314 | tracing_group_name = strdup(arg); | |
315 | if (tracing_group_name == NULL) { | |
316 | ret = -errno; | |
317 | PERROR("strdup"); | |
318 | goto end; | |
319 | } | |
320 | tracing_group_name_override = 1; | |
330a40bb | 321 | } |
cd60b05a JG |
322 | break; |
323 | case 'h': | |
4fc83d94 | 324 | ret = utils_show_help(8, "lttng-relayd", help_msg); |
655b5cc1 | 325 | if (ret) { |
4fc83d94 | 326 | ERR("Cannot show --help for `lttng-relayd`"); |
655b5cc1 PP |
327 | perror("exec"); |
328 | } | |
cd60b05a | 329 | exit(EXIT_FAILURE); |
3a904098 | 330 | case 'V': |
a3bc3918 JR |
331 | opt_print_version = 1; |
332 | break; | |
cd60b05a | 333 | case 'o': |
e8fa9fb0 MD |
334 | if (lttng_is_setuid_setgid()) { |
335 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
336 | "-o, --output"); | |
337 | } else { | |
338 | ret = asprintf(&opt_output_path, "%s", arg); | |
339 | if (ret < 0) { | |
340 | ret = -errno; | |
341 | PERROR("asprintf opt_output_path"); | |
342 | goto end; | |
343 | } | |
cd60b05a JG |
344 | } |
345 | break; | |
ce9ee1fb JR |
346 | case 'w': |
347 | if (lttng_is_setuid_setgid()) { | |
348 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
349 | "-w, --working-directory"); | |
350 | } else { | |
351 | ret = asprintf(&opt_working_directory, "%s", arg); | |
352 | if (ret < 0) { | |
353 | ret = -errno; | |
354 | PERROR("asprintf opt_working_directory"); | |
355 | goto end; | |
356 | } | |
357 | } | |
358 | break; | |
359 | ||
cd60b05a JG |
360 | case 'v': |
361 | /* Verbose level can increase using multiple -v */ | |
362 | if (arg) { | |
363 | lttng_opt_verbose = config_parse_value(arg); | |
364 | } else { | |
849e5b7b DG |
365 | /* Only 3 level of verbosity (-vvv). */ |
366 | if (lttng_opt_verbose < 3) { | |
367 | lttng_opt_verbose += 1; | |
368 | } | |
cd60b05a JG |
369 | } |
370 | break; | |
a8b66566 JR |
371 | case 's': |
372 | if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) { | |
373 | ERR("Cannot set --group-output-by-session, another --group-output-by argument is present"); | |
374 | exit(EXIT_FAILURE); | |
375 | } | |
376 | opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_SESSION; | |
377 | break; | |
378 | case 'p': | |
379 | if (opt_group_output_by != RELAYD_GROUP_OUTPUT_BY_UNKNOWN) { | |
380 | ERR("Cannot set --group-output-by-host, another --group-output-by argument is present"); | |
381 | exit(EXIT_FAILURE); | |
382 | } | |
383 | opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST; | |
384 | break; | |
35ab25e5 MD |
385 | case 'x': |
386 | /* Disallow clear */ | |
387 | opt_allow_clear = 0; | |
388 | break; | |
cd60b05a JG |
389 | default: |
390 | /* Unknown option or other error. | |
391 | * Error is printed by getopt, just return */ | |
392 | ret = -1; | |
393 | goto end; | |
394 | } | |
395 | ||
396 | /* All good. */ | |
397 | ret = 0; | |
398 | ||
399 | end: | |
400 | return ret; | |
401 | } | |
402 | ||
403 | /* | |
404 | * config_entry_handler_cb used to handle options read from a config file. | |
f40ef1d5 | 405 | * See config_entry_handler_cb comment in common/config/session-config.h for the |
cd60b05a JG |
406 | * return value conventions. |
407 | */ | |
7591bab1 | 408 | static int config_entry_handler(const struct config_entry *entry, void *unused) |
cd60b05a JG |
409 | { |
410 | int ret = 0, i; | |
411 | ||
412 | if (!entry || !entry->name || !entry->value) { | |
413 | ret = -EINVAL; | |
414 | goto end; | |
415 | } | |
416 | ||
417 | /* Check if the option is to be ignored */ | |
418 | for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) { | |
419 | if (!strcmp(entry->name, config_ignore_options[i])) { | |
420 | goto end; | |
421 | } | |
422 | } | |
423 | ||
424 | for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) { | |
425 | /* Ignore if entry name is not fully matched. */ | |
426 | if (strcmp(entry->name, long_options[i].name)) { | |
427 | continue; | |
428 | } | |
429 | ||
430 | /* | |
7591bab1 MD |
431 | * If the option takes no argument on the command line, |
432 | * we have to check if the value is "true". We support | |
433 | * non-zero numeric values, true, on and yes. | |
cd60b05a JG |
434 | */ |
435 | if (!long_options[i].has_arg) { | |
436 | ret = config_parse_value(entry->value); | |
437 | if (ret <= 0) { | |
438 | if (ret) { | |
439 | WARN("Invalid configuration value \"%s\" for option %s", | |
440 | entry->value, entry->name); | |
441 | } | |
442 | /* False, skip boolean config option. */ | |
443 | goto end; | |
444 | } | |
445 | } | |
446 | ||
447 | ret = set_option(long_options[i].val, entry->value, entry->name); | |
448 | goto end; | |
449 | } | |
450 | ||
451 | WARN("Unrecognized option \"%s\" in daemon configuration file.", | |
452 | entry->name); | |
453 | ||
454 | end: | |
455 | return ret; | |
456 | } | |
457 | ||
2a10de3b JR |
458 | static int parse_env_options(void) |
459 | { | |
460 | int ret = 0; | |
461 | char *value = NULL; | |
462 | ||
463 | value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_WORKING_DIRECTORY_ENV); | |
464 | if (value) { | |
465 | opt_working_directory = strdup(value); | |
466 | if (!opt_working_directory) { | |
467 | ERR("Failed to allocate working directory string (\"%s\")", | |
468 | value); | |
469 | ret = -1; | |
470 | } | |
471 | } | |
472 | return ret; | |
473 | } | |
474 | ||
5c0551f9 JG |
475 | static int set_fd_pool_size(void) |
476 | { | |
477 | int ret = 0; | |
478 | struct rlimit rlimit; | |
479 | ||
480 | ret = getrlimit(RLIMIT_NOFILE, &rlimit); | |
481 | if (ret) { | |
482 | PERROR("Failed to get file descriptor limit"); | |
483 | ret = -1; | |
484 | goto end; | |
485 | } | |
486 | ||
487 | DBG("File descriptor count limits are %" PRIu64 " (soft) and %" PRIu64 " (hard)", | |
488 | (uint64_t) rlimit.rlim_cur, | |
489 | (uint64_t) rlimit.rlim_max); | |
490 | if (lttng_opt_fd_pool_size == -1) { | |
491 | /* Use default value (soft limit - reserve). */ | |
492 | if (rlimit.rlim_cur < DEFAULT_RELAYD_MIN_FD_POOL_SIZE) { | |
493 | ERR("The process' file number limit is too low (%" PRIu64 "). The process' file number limit must be set to at least %i.", | |
494 | (uint64_t) rlimit.rlim_cur, DEFAULT_RELAYD_MIN_FD_POOL_SIZE); | |
495 | ret = -1; | |
496 | goto end; | |
497 | } | |
498 | lttng_opt_fd_pool_size = rlimit.rlim_cur - | |
499 | DEFAULT_RELAYD_FD_POOL_SIZE_RESERVE; | |
500 | goto end; | |
501 | } | |
502 | ||
503 | if (lttng_opt_fd_pool_size < DEFAULT_RELAYD_MIN_FD_POOL_SIZE) { | |
504 | ERR("File descriptor pool size must be set to at least %d", | |
505 | DEFAULT_RELAYD_MIN_FD_POOL_SIZE); | |
506 | ret = -1; | |
507 | goto end; | |
508 | } | |
509 | ||
510 | if (lttng_opt_fd_pool_size > rlimit.rlim_cur) { | |
511 | ERR("File descriptor pool size argument (%u) exceeds the process' soft limit (%" PRIu64 ").", | |
512 | lttng_opt_fd_pool_size, (uint64_t) rlimit.rlim_cur); | |
513 | ret = -1; | |
514 | goto end; | |
515 | } | |
516 | ||
d49487dc | 517 | DBG("File descriptor pool size argument (%u) adjusted to %u to accommodates transient fd uses", |
5c0551f9 JG |
518 | lttng_opt_fd_pool_size, |
519 | lttng_opt_fd_pool_size - DEFAULT_RELAYD_FD_POOL_SIZE_RESERVE); | |
520 | lttng_opt_fd_pool_size -= DEFAULT_RELAYD_FD_POOL_SIZE_RESERVE; | |
521 | end: | |
522 | return ret; | |
523 | } | |
524 | ||
7591bab1 | 525 | static int set_options(int argc, char **argv) |
cd60b05a | 526 | { |
178a0557 | 527 | int c, ret = 0, option_index = 0, retval = 0; |
cd60b05a JG |
528 | int orig_optopt = optopt, orig_optind = optind; |
529 | char *default_address, *optstring; | |
3a9e5d16 | 530 | char *config_path = NULL; |
cd60b05a JG |
531 | |
532 | optstring = utils_generate_optstring(long_options, | |
533 | sizeof(long_options) / sizeof(struct option)); | |
534 | if (!optstring) { | |
178a0557 | 535 | retval = -ENOMEM; |
cd60b05a JG |
536 | goto exit; |
537 | } | |
538 | ||
539 | /* Check for the --config option */ | |
540 | ||
541 | while ((c = getopt_long(argc, argv, optstring, long_options, | |
542 | &option_index)) != -1) { | |
543 | if (c == '?') { | |
178a0557 | 544 | retval = -EINVAL; |
cd60b05a JG |
545 | goto exit; |
546 | } else if (c != 'f') { | |
547 | continue; | |
548 | } | |
549 | ||
e8fa9fb0 MD |
550 | if (lttng_is_setuid_setgid()) { |
551 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
552 | "-f, --config"); | |
553 | } else { | |
3a9e5d16 | 554 | free(config_path); |
e8fa9fb0 MD |
555 | config_path = utils_expand_path(optarg); |
556 | if (!config_path) { | |
557 | ERR("Failed to resolve path: %s", optarg); | |
558 | } | |
cd60b05a JG |
559 | } |
560 | } | |
561 | ||
562 | ret = config_get_section_entries(config_path, config_section_name, | |
563 | config_entry_handler, NULL); | |
564 | if (ret) { | |
565 | if (ret > 0) { | |
566 | ERR("Invalid configuration option at line %i", ret); | |
cd60b05a | 567 | } |
178a0557 | 568 | retval = -1; |
cd60b05a JG |
569 | goto exit; |
570 | } | |
b8aa1682 | 571 | |
cd60b05a JG |
572 | /* Reset getopt's global state */ |
573 | optopt = orig_optopt; | |
574 | optind = orig_optind; | |
b8aa1682 | 575 | while (1) { |
cd60b05a | 576 | c = getopt_long(argc, argv, optstring, long_options, &option_index); |
b8aa1682 JD |
577 | if (c == -1) { |
578 | break; | |
579 | } | |
580 | ||
cd60b05a JG |
581 | ret = set_option(c, optarg, long_options[option_index].name); |
582 | if (ret < 0) { | |
178a0557 | 583 | retval = -1; |
b8aa1682 JD |
584 | goto exit; |
585 | } | |
586 | } | |
587 | ||
588 | /* assign default values */ | |
589 | if (control_uri == NULL) { | |
fa91dc52 MD |
590 | ret = asprintf(&default_address, |
591 | "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d", | |
592 | DEFAULT_NETWORK_CONTROL_PORT); | |
b8aa1682 JD |
593 | if (ret < 0) { |
594 | PERROR("asprintf default data address"); | |
178a0557 | 595 | retval = -1; |
b8aa1682 JD |
596 | goto exit; |
597 | } | |
598 | ||
599 | ret = uri_parse(default_address, &control_uri); | |
600 | free(default_address); | |
601 | if (ret < 0) { | |
602 | ERR("Invalid control URI specified"); | |
178a0557 | 603 | retval = -1; |
b8aa1682 JD |
604 | goto exit; |
605 | } | |
606 | } | |
607 | if (data_uri == NULL) { | |
fa91dc52 MD |
608 | ret = asprintf(&default_address, |
609 | "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d", | |
610 | DEFAULT_NETWORK_DATA_PORT); | |
b8aa1682 JD |
611 | if (ret < 0) { |
612 | PERROR("asprintf default data address"); | |
178a0557 | 613 | retval = -1; |
b8aa1682 JD |
614 | goto exit; |
615 | } | |
616 | ||
617 | ret = uri_parse(default_address, &data_uri); | |
618 | free(default_address); | |
619 | if (ret < 0) { | |
620 | ERR("Invalid data URI specified"); | |
178a0557 | 621 | retval = -1; |
b8aa1682 JD |
622 | goto exit; |
623 | } | |
624 | } | |
d3e2ba59 | 625 | if (live_uri == NULL) { |
fa91dc52 MD |
626 | ret = asprintf(&default_address, |
627 | "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d", | |
628 | DEFAULT_NETWORK_VIEWER_PORT); | |
d3e2ba59 JD |
629 | if (ret < 0) { |
630 | PERROR("asprintf default viewer control address"); | |
178a0557 | 631 | retval = -1; |
d3e2ba59 JD |
632 | goto exit; |
633 | } | |
634 | ||
635 | ret = uri_parse(default_address, &live_uri); | |
636 | free(default_address); | |
637 | if (ret < 0) { | |
638 | ERR("Invalid viewer control URI specified"); | |
178a0557 | 639 | retval = -1; |
d3e2ba59 JD |
640 | goto exit; |
641 | } | |
642 | } | |
5c0551f9 JG |
643 | ret = set_fd_pool_size(); |
644 | if (ret) { | |
645 | retval = -1; | |
646 | goto exit; | |
896010e3 | 647 | } |
b8aa1682 | 648 | |
a8b66566 JR |
649 | if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_UNKNOWN) { |
650 | opt_group_output_by = RELAYD_GROUP_OUTPUT_BY_HOST; | |
651 | } | |
35ab25e5 MD |
652 | if (opt_allow_clear) { |
653 | /* Check if env variable exists. */ | |
654 | const char *value = lttng_secure_getenv(DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV); | |
655 | if (value) { | |
656 | ret = config_parse_value(value); | |
657 | if (ret < 0) { | |
658 | ERR("Invalid value for %s specified", DEFAULT_LTTNG_RELAYD_DISALLOW_CLEAR_ENV); | |
659 | retval = -1; | |
660 | goto exit; | |
661 | } | |
662 | opt_allow_clear = !ret; | |
663 | } | |
664 | } | |
a8b66566 | 665 | |
b8aa1682 | 666 | exit: |
3a9e5d16 | 667 | free(config_path); |
cd60b05a | 668 | free(optstring); |
178a0557 | 669 | return retval; |
b8aa1682 JD |
670 | } |
671 | ||
7591bab1 MD |
672 | static void print_global_objects(void) |
673 | { | |
7591bab1 MD |
674 | print_viewer_streams(); |
675 | print_relay_streams(); | |
676 | print_sessions(); | |
7591bab1 MD |
677 | } |
678 | ||
5c0551f9 JG |
679 | static int noop_close(void *data, int *fds) |
680 | { | |
681 | return 0; | |
682 | } | |
683 | ||
684 | static void untrack_stdio(void) | |
685 | { | |
686 | int fds[] = { fileno(stdout), fileno(stderr) }; | |
687 | ||
688 | /* | |
689 | * noop_close is used since we don't really want to close | |
690 | * the stdio output fds; we merely want to stop tracking them. | |
691 | */ | |
692 | (void) fd_tracker_close_unsuspendable_fd(the_fd_tracker, | |
693 | fds, 2, noop_close, NULL); | |
694 | } | |
695 | ||
b8aa1682 JD |
696 | /* |
697 | * Cleanup the daemon | |
698 | */ | |
7591bab1 | 699 | static void relayd_cleanup(void) |
b8aa1682 | 700 | { |
7591bab1 MD |
701 | print_global_objects(); |
702 | ||
b8aa1682 JD |
703 | DBG("Cleaning up"); |
704 | ||
178a0557 MD |
705 | if (viewer_streams_ht) |
706 | lttng_ht_destroy(viewer_streams_ht); | |
707 | if (relay_streams_ht) | |
708 | lttng_ht_destroy(relay_streams_ht); | |
7591bab1 MD |
709 | if (sessions_ht) |
710 | lttng_ht_destroy(sessions_ht); | |
178a0557 | 711 | |
095a4ae5 | 712 | free(opt_output_path); |
ce9ee1fb | 713 | free(opt_working_directory); |
095a4ae5 | 714 | |
794e2e5f JG |
715 | if (health_relayd) { |
716 | health_app_destroy(health_relayd); | |
717 | } | |
a02de639 | 718 | /* Close thread quit pipes */ |
bcee2b96 JG |
719 | if (health_quit_pipe[0] != -1) { |
720 | (void) fd_tracker_util_pipe_close( | |
721 | the_fd_tracker, health_quit_pipe); | |
722 | } | |
67609994 JG |
723 | if (thread_quit_pipe[0] != -1) { |
724 | (void) fd_tracker_util_pipe_close( | |
725 | the_fd_tracker, thread_quit_pipe); | |
726 | } | |
794e2e5f JG |
727 | if (sessiond_trace_chunk_registry) { |
728 | sessiond_trace_chunk_registry_destroy( | |
729 | sessiond_trace_chunk_registry); | |
730 | } | |
00e3b7f1 | 731 | if (the_fd_tracker) { |
9c256b01 JG |
732 | untrack_stdio(); |
733 | /* | |
734 | * fd_tracker_destroy() will log the contents of the fd-tracker | |
735 | * if a leak is detected. | |
736 | */ | |
00e3b7f1 JG |
737 | fd_tracker_destroy(the_fd_tracker); |
738 | } | |
794e2e5f | 739 | |
710c1f73 DG |
740 | uri_free(control_uri); |
741 | uri_free(data_uri); | |
8d5c808e | 742 | /* Live URI is freed in the live thread. */ |
cd60b05a JG |
743 | |
744 | if (tracing_group_name_override) { | |
745 | free((void *) tracing_group_name); | |
746 | } | |
b8aa1682 JD |
747 | } |
748 | ||
749 | /* | |
750 | * Write to writable pipe used to notify a thread. | |
751 | */ | |
7591bab1 | 752 | static int notify_thread_pipe(int wpipe) |
b8aa1682 | 753 | { |
6cd525e8 | 754 | ssize_t ret; |
b8aa1682 | 755 | |
6cd525e8 MD |
756 | ret = lttng_write(wpipe, "!", 1); |
757 | if (ret < 1) { | |
b8aa1682 | 758 | PERROR("write poll pipe"); |
b4aacfdc | 759 | goto end; |
b8aa1682 | 760 | } |
b4aacfdc MD |
761 | ret = 0; |
762 | end: | |
b8aa1682 JD |
763 | return ret; |
764 | } | |
765 | ||
7591bab1 | 766 | static int notify_health_quit_pipe(int *pipe) |
65931c8b | 767 | { |
6cd525e8 | 768 | ssize_t ret; |
65931c8b | 769 | |
6cd525e8 MD |
770 | ret = lttng_write(pipe[1], "4", 1); |
771 | if (ret < 1) { | |
65931c8b | 772 | PERROR("write relay health quit"); |
b4aacfdc | 773 | goto end; |
65931c8b | 774 | } |
b4aacfdc MD |
775 | ret = 0; |
776 | end: | |
777 | return ret; | |
65931c8b MD |
778 | } |
779 | ||
b8aa1682 | 780 | /* |
b4aacfdc | 781 | * Stop all relayd and relayd-live threads. |
b8aa1682 | 782 | */ |
b4aacfdc | 783 | int lttng_relay_stop_threads(void) |
b8aa1682 | 784 | { |
b4aacfdc | 785 | int retval = 0; |
b8aa1682 JD |
786 | |
787 | /* Stopping all threads */ | |
788 | DBG("Terminating all threads"); | |
b4aacfdc | 789 | if (notify_thread_pipe(thread_quit_pipe[1])) { |
b8aa1682 | 790 | ERR("write error on thread quit pipe"); |
b4aacfdc | 791 | retval = -1; |
b8aa1682 JD |
792 | } |
793 | ||
b4aacfdc MD |
794 | if (notify_health_quit_pipe(health_quit_pipe)) { |
795 | ERR("write error on health quit pipe"); | |
796 | } | |
65931c8b | 797 | |
b8aa1682 | 798 | /* Dispatch thread */ |
26c9d55e | 799 | CMM_STORE_SHARED(dispatch_thread_exit, 1); |
58eb9381 | 800 | futex_nto1_wake(&relay_conn_queue.futex); |
178a0557 | 801 | |
b4aacfdc | 802 | if (relayd_live_stop()) { |
178a0557 | 803 | ERR("Error stopping live threads"); |
b4aacfdc | 804 | retval = -1; |
178a0557 | 805 | } |
b4aacfdc | 806 | return retval; |
b8aa1682 JD |
807 | } |
808 | ||
809 | /* | |
810 | * Signal handler for the daemon | |
811 | * | |
812 | * Simply stop all worker threads, leaving main() return gracefully after | |
813 | * joining all threads and calling cleanup(). | |
814 | */ | |
7591bab1 | 815 | static void sighandler(int sig) |
b8aa1682 JD |
816 | { |
817 | switch (sig) { | |
b8aa1682 JD |
818 | case SIGINT: |
819 | DBG("SIGINT caught"); | |
b4aacfdc MD |
820 | if (lttng_relay_stop_threads()) { |
821 | ERR("Error stopping threads"); | |
822 | } | |
b8aa1682 JD |
823 | break; |
824 | case SIGTERM: | |
825 | DBG("SIGTERM caught"); | |
b4aacfdc MD |
826 | if (lttng_relay_stop_threads()) { |
827 | ERR("Error stopping threads"); | |
828 | } | |
b8aa1682 | 829 | break; |
3fd27398 MD |
830 | case SIGUSR1: |
831 | CMM_STORE_SHARED(recv_child_signal, 1); | |
832 | break; | |
b8aa1682 JD |
833 | default: |
834 | break; | |
835 | } | |
836 | } | |
837 | ||
838 | /* | |
839 | * Setup signal handler for : | |
840 | * SIGINT, SIGTERM, SIGPIPE | |
841 | */ | |
7591bab1 | 842 | static int set_signal_handler(void) |
b8aa1682 JD |
843 | { |
844 | int ret = 0; | |
845 | struct sigaction sa; | |
846 | sigset_t sigset; | |
847 | ||
848 | if ((ret = sigemptyset(&sigset)) < 0) { | |
849 | PERROR("sigemptyset"); | |
850 | return ret; | |
851 | } | |
852 | ||
b8aa1682 JD |
853 | sa.sa_mask = sigset; |
854 | sa.sa_flags = 0; | |
0072e5e2 MD |
855 | |
856 | sa.sa_handler = sighandler; | |
b8aa1682 JD |
857 | if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) { |
858 | PERROR("sigaction"); | |
859 | return ret; | |
860 | } | |
861 | ||
862 | if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) { | |
863 | PERROR("sigaction"); | |
864 | return ret; | |
865 | } | |
866 | ||
0072e5e2 | 867 | if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) { |
b8aa1682 JD |
868 | PERROR("sigaction"); |
869 | return ret; | |
870 | } | |
871 | ||
0072e5e2 MD |
872 | sa.sa_handler = SIG_IGN; |
873 | if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) { | |
3fd27398 MD |
874 | PERROR("sigaction"); |
875 | return ret; | |
876 | } | |
877 | ||
878 | DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT"); | |
b8aa1682 JD |
879 | |
880 | return ret; | |
881 | } | |
882 | ||
3fd27398 MD |
883 | void lttng_relay_notify_ready(void) |
884 | { | |
885 | /* Notify the parent of the fork() process that we are ready. */ | |
886 | if (opt_daemon || opt_background) { | |
887 | if (uatomic_sub_return(<tng_relay_ready, 1) == 0) { | |
888 | kill(child_ppid, SIGUSR1); | |
889 | } | |
890 | } | |
891 | } | |
892 | ||
b8aa1682 JD |
893 | /* |
894 | * Init thread quit pipe. | |
895 | * | |
896 | * Return -1 on error or 0 if all pipes are created. | |
897 | */ | |
7591bab1 | 898 | static int init_thread_quit_pipe(void) |
b8aa1682 | 899 | { |
67609994 JG |
900 | return fd_tracker_util_pipe_open_cloexec( |
901 | the_fd_tracker, "Quit pipe", thread_quit_pipe); | |
b8aa1682 JD |
902 | } |
903 | ||
bcee2b96 JG |
904 | /* |
905 | * Init health quit pipe. | |
906 | * | |
907 | * Return -1 on error or 0 if all pipes are created. | |
908 | */ | |
909 | static int init_health_quit_pipe(void) | |
910 | { | |
911 | return fd_tracker_util_pipe_open_cloexec(the_fd_tracker, | |
912 | "Health quit pipe", health_quit_pipe); | |
913 | } | |
914 | ||
b8aa1682 JD |
915 | /* |
916 | * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set. | |
917 | */ | |
e32a0864 JG |
918 | static int create_named_thread_poll_set(struct lttng_poll_event *events, |
919 | int size, const char *name) | |
b8aa1682 JD |
920 | { |
921 | int ret; | |
922 | ||
923 | if (events == NULL || size == 0) { | |
924 | ret = -1; | |
925 | goto error; | |
926 | } | |
927 | ||
e32a0864 | 928 | ret = fd_tracker_util_poll_create(the_fd_tracker, |
f118099a | 929 | name, events, 1, LTTNG_CLOEXEC); |
64e2c0ed JG |
930 | if (ret) { |
931 | PERROR("Failed to create \"%s\" poll file descriptor", name); | |
932 | goto error; | |
933 | } | |
b8aa1682 JD |
934 | |
935 | /* Add quit pipe */ | |
c7759e6a | 936 | ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR); |
b8aa1682 JD |
937 | if (ret < 0) { |
938 | goto error; | |
939 | } | |
940 | ||
941 | return 0; | |
942 | ||
943 | error: | |
944 | return ret; | |
945 | } | |
946 | ||
947 | /* | |
948 | * Check if the thread quit pipe was triggered. | |
949 | * | |
950 | * Return 1 if it was triggered else 0; | |
951 | */ | |
7591bab1 | 952 | static int check_thread_quit_pipe(int fd, uint32_t events) |
b8aa1682 JD |
953 | { |
954 | if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) { | |
955 | return 1; | |
956 | } | |
957 | ||
958 | return 0; | |
959 | } | |
960 | ||
40212d87 JG |
961 | static int create_sock(void *data, int *out_fd) |
962 | { | |
963 | int ret; | |
ac497a37 | 964 | struct lttcomm_sock *sock = (lttcomm_sock *) data; |
40212d87 JG |
965 | |
966 | ret = lttcomm_create_sock(sock); | |
967 | if (ret < 0) { | |
968 | goto end; | |
969 | } | |
970 | ||
971 | *out_fd = sock->fd; | |
972 | end: | |
973 | return ret; | |
974 | } | |
975 | ||
976 | static int close_sock(void *data, int *in_fd) | |
977 | { | |
ac497a37 | 978 | struct lttcomm_sock *sock = (lttcomm_sock *) data; |
40212d87 JG |
979 | |
980 | return sock->ops->close(sock); | |
981 | } | |
982 | ||
f355467e JG |
983 | static int accept_sock(void *data, int *out_fd) |
984 | { | |
985 | int ret = 0; | |
986 | /* Socks is an array of in_sock, out_sock. */ | |
ac497a37 | 987 | struct lttcomm_sock **socks = (lttcomm_sock **) data; |
f355467e JG |
988 | struct lttcomm_sock *in_sock = socks[0]; |
989 | ||
f118099a | 990 | socks[1] = in_sock->ops->accept(in_sock); |
f355467e JG |
991 | if (!socks[1]) { |
992 | ret = -1; | |
993 | goto end; | |
994 | } | |
995 | *out_fd = socks[1]->fd; | |
996 | end: | |
997 | return ret; | |
998 | } | |
999 | ||
b8aa1682 JD |
1000 | /* |
1001 | * Create and init socket from uri. | |
1002 | */ | |
40212d87 JG |
1003 | static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri, |
1004 | const char *name) | |
b8aa1682 | 1005 | { |
40212d87 | 1006 | int ret, sock_fd; |
b8aa1682 | 1007 | struct lttcomm_sock *sock = NULL; |
40212d87 JG |
1008 | char uri_str[PATH_MAX]; |
1009 | char *formated_name = NULL; | |
b8aa1682 JD |
1010 | |
1011 | sock = lttcomm_alloc_sock_from_uri(uri); | |
1012 | if (sock == NULL) { | |
1013 | ERR("Allocating socket"); | |
1014 | goto error; | |
1015 | } | |
1016 | ||
40212d87 JG |
1017 | /* |
1018 | * Don't fail to create the socket if the name can't be built as it is | |
1019 | * only used for debugging purposes. | |
1020 | */ | |
1021 | ret = uri_to_str_url(uri, uri_str, sizeof(uri_str)); | |
1022 | uri_str[sizeof(uri_str) - 1] = '\0'; | |
1023 | if (ret >= 0) { | |
1024 | ret = asprintf(&formated_name, "%s socket @ %s", name, | |
1025 | uri_str); | |
1026 | if (ret < 0) { | |
1027 | formated_name = NULL; | |
1028 | } | |
b8aa1682 | 1029 | } |
40212d87 JG |
1030 | |
1031 | ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &sock_fd, | |
1032 | (const char **) (formated_name ? &formated_name : NULL), | |
1033 | 1, create_sock, sock); | |
6016eb62 JG |
1034 | if (ret) { |
1035 | PERROR("Failed to open \"%s\" relay socket", | |
1036 | formated_name ?: "Unknown"); | |
1037 | goto error; | |
1038 | } | |
40212d87 | 1039 | DBG("Listening on %s socket %d", name, sock->fd); |
b8aa1682 JD |
1040 | |
1041 | ret = sock->ops->bind(sock); | |
1042 | if (ret < 0) { | |
2288467f | 1043 | PERROR("Failed to bind socket"); |
b8aa1682 JD |
1044 | goto error; |
1045 | } | |
1046 | ||
1047 | ret = sock->ops->listen(sock, -1); | |
1048 | if (ret < 0) { | |
1049 | goto error; | |
1050 | ||
1051 | } | |
1052 | ||
6016eb62 | 1053 | free(formated_name); |
b8aa1682 JD |
1054 | return sock; |
1055 | ||
1056 | error: | |
1057 | if (sock) { | |
1058 | lttcomm_destroy_sock(sock); | |
1059 | } | |
6016eb62 | 1060 | free(formated_name); |
b8aa1682 JD |
1061 | return NULL; |
1062 | } | |
1063 | ||
f355467e JG |
1064 | static |
1065 | struct lttcomm_sock *accept_relayd_sock(struct lttcomm_sock *listening_sock, | |
1066 | const char *name) | |
1067 | { | |
1068 | int out_fd, ret; | |
1069 | struct lttcomm_sock *socks[2] = { listening_sock, NULL }; | |
1070 | struct lttcomm_sock *new_sock = NULL; | |
1071 | ||
f118099a | 1072 | ret = fd_tracker_open_unsuspendable_fd( |
f355467e JG |
1073 | the_fd_tracker, &out_fd, |
1074 | (const char **) &name, | |
1075 | 1, accept_sock, &socks); | |
1076 | if (ret) { | |
1077 | goto end; | |
1078 | } | |
1079 | new_sock = socks[1]; | |
1080 | DBG("%s accepted, socket %d", name, new_sock->fd); | |
1081 | end: | |
1082 | return new_sock; | |
1083 | } | |
1084 | ||
b8aa1682 JD |
1085 | /* |
1086 | * This thread manages the listening for new connections on the network | |
1087 | */ | |
7591bab1 | 1088 | static void *relay_thread_listener(void *data) |
b8aa1682 | 1089 | { |
095a4ae5 | 1090 | int i, ret, pollfd, err = -1; |
b8aa1682 JD |
1091 | uint32_t revents, nb_fd; |
1092 | struct lttng_poll_event events; | |
1093 | struct lttcomm_sock *control_sock, *data_sock; | |
1094 | ||
b8aa1682 JD |
1095 | DBG("[thread] Relay listener started"); |
1096 | ||
8fba2b8d | 1097 | rcu_register_thread(); |
55706a7d MD |
1098 | health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER); |
1099 | ||
f385ae0a MD |
1100 | health_code_update(); |
1101 | ||
40212d87 | 1102 | control_sock = relay_socket_create(control_uri, "Control listener"); |
b8aa1682 | 1103 | if (!control_sock) { |
095a4ae5 | 1104 | goto error_sock_control; |
b8aa1682 JD |
1105 | } |
1106 | ||
40212d87 | 1107 | data_sock = relay_socket_create(data_uri, "Data listener"); |
b8aa1682 | 1108 | if (!data_sock) { |
095a4ae5 | 1109 | goto error_sock_relay; |
b8aa1682 JD |
1110 | } |
1111 | ||
1112 | /* | |
7591bab1 MD |
1113 | * Pass 3 as size here for the thread quit pipe, control and |
1114 | * data socket. | |
b8aa1682 | 1115 | */ |
ba9cf8e1 | 1116 | ret = create_named_thread_poll_set(&events, 3, "Listener thread epoll"); |
b8aa1682 JD |
1117 | if (ret < 0) { |
1118 | goto error_create_poll; | |
1119 | } | |
1120 | ||
1121 | /* Add the control socket */ | |
1122 | ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP); | |
1123 | if (ret < 0) { | |
1124 | goto error_poll_add; | |
1125 | } | |
1126 | ||
1127 | /* Add the data socket */ | |
1128 | ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP); | |
1129 | if (ret < 0) { | |
1130 | goto error_poll_add; | |
1131 | } | |
1132 | ||
3fd27398 MD |
1133 | lttng_relay_notify_ready(); |
1134 | ||
9b5e0863 MD |
1135 | if (testpoint(relayd_thread_listener)) { |
1136 | goto error_testpoint; | |
1137 | } | |
1138 | ||
b8aa1682 | 1139 | while (1) { |
f385ae0a MD |
1140 | health_code_update(); |
1141 | ||
b8aa1682 JD |
1142 | DBG("Listener accepting connections"); |
1143 | ||
b8aa1682 | 1144 | restart: |
f385ae0a | 1145 | health_poll_entry(); |
b8aa1682 | 1146 | ret = lttng_poll_wait(&events, -1); |
f385ae0a | 1147 | health_poll_exit(); |
b8aa1682 JD |
1148 | if (ret < 0) { |
1149 | /* | |
1150 | * Restart interrupted system call. | |
1151 | */ | |
1152 | if (errno == EINTR) { | |
1153 | goto restart; | |
1154 | } | |
1155 | goto error; | |
1156 | } | |
1157 | ||
0d9c5d77 DG |
1158 | nb_fd = ret; |
1159 | ||
b8aa1682 JD |
1160 | DBG("Relay new connection received"); |
1161 | for (i = 0; i < nb_fd; i++) { | |
f385ae0a MD |
1162 | health_code_update(); |
1163 | ||
b8aa1682 JD |
1164 | /* Fetch once the poll data */ |
1165 | revents = LTTNG_POLL_GETEV(&events, i); | |
1166 | pollfd = LTTNG_POLL_GETFD(&events, i); | |
1167 | ||
1168 | /* Thread quit pipe has been closed. Killing thread. */ | |
1169 | ret = check_thread_quit_pipe(pollfd, revents); | |
1170 | if (ret) { | |
095a4ae5 MD |
1171 | err = 0; |
1172 | goto exit; | |
b8aa1682 JD |
1173 | } |
1174 | ||
03e43155 | 1175 | if (revents & LPOLLIN) { |
4b7f17b2 | 1176 | /* |
7591bab1 MD |
1177 | * A new connection is requested, therefore a |
1178 | * sessiond/consumerd connection is allocated in | |
1179 | * this thread, enqueued to a global queue and | |
1180 | * dequeued (and freed) in the worker thread. | |
4b7f17b2 | 1181 | */ |
58eb9381 DG |
1182 | int val = 1; |
1183 | struct relay_connection *new_conn; | |
f355467e | 1184 | struct lttcomm_sock *newsock = NULL; |
7591bab1 | 1185 | enum connection_type type; |
b8aa1682 JD |
1186 | |
1187 | if (pollfd == data_sock->fd) { | |
7591bab1 | 1188 | type = RELAY_DATA; |
f355467e JG |
1189 | newsock = accept_relayd_sock(data_sock, |
1190 | "Data socket to relayd"); | |
4b7f17b2 | 1191 | } else { |
a0377dfe | 1192 | LTTNG_ASSERT(pollfd == control_sock->fd); |
7591bab1 | 1193 | type = RELAY_CONTROL; |
875e3164 JG |
1194 | newsock = accept_relayd_sock(control_sock, |
1195 | "Control socket to relayd"); | |
b8aa1682 | 1196 | } |
58eb9381 DG |
1197 | if (!newsock) { |
1198 | PERROR("accepting sock"); | |
58eb9381 DG |
1199 | goto error; |
1200 | } | |
1201 | ||
1202 | ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val, | |
1203 | sizeof(val)); | |
b8aa1682 JD |
1204 | if (ret < 0) { |
1205 | PERROR("setsockopt inet"); | |
4b7f17b2 | 1206 | lttcomm_destroy_sock(newsock); |
b8aa1682 JD |
1207 | goto error; |
1208 | } | |
f056029c JR |
1209 | |
1210 | ret = socket_apply_keep_alive_config(newsock->fd); | |
1211 | if (ret < 0) { | |
1212 | ERR("Failed to apply TCP keep-alive configuration on socket (%i)", | |
1213 | newsock->fd); | |
1214 | lttcomm_destroy_sock(newsock); | |
1215 | goto error; | |
1216 | } | |
1217 | ||
7591bab1 MD |
1218 | new_conn = connection_create(newsock, type); |
1219 | if (!new_conn) { | |
1220 | lttcomm_destroy_sock(newsock); | |
1221 | goto error; | |
1222 | } | |
58eb9381 DG |
1223 | |
1224 | /* Enqueue request for the dispatcher thread. */ | |
ac497a37 SM |
1225 | cds_wfcq_head_ptr_t head; |
1226 | head.h = &relay_conn_queue.head; | |
1227 | cds_wfcq_enqueue(head, &relay_conn_queue.tail, | |
8bdee6e2 | 1228 | &new_conn->qnode); |
b8aa1682 JD |
1229 | |
1230 | /* | |
7591bab1 MD |
1231 | * Wake the dispatch queue futex. |
1232 | * Implicit memory barrier with the | |
1233 | * exchange in cds_wfcq_enqueue. | |
b8aa1682 | 1234 | */ |
58eb9381 | 1235 | futex_nto1_wake(&relay_conn_queue.futex); |
03e43155 MD |
1236 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
1237 | ERR("socket poll error"); | |
1238 | goto error; | |
1239 | } else { | |
1240 | ERR("Unexpected poll events %u for sock %d", revents, pollfd); | |
1241 | goto error; | |
b8aa1682 JD |
1242 | } |
1243 | } | |
1244 | } | |
1245 | ||
095a4ae5 | 1246 | exit: |
b8aa1682 JD |
1247 | error: |
1248 | error_poll_add: | |
9b5e0863 | 1249 | error_testpoint: |
ba9cf8e1 | 1250 | (void) fd_tracker_util_poll_clean(the_fd_tracker, &events); |
b8aa1682 | 1251 | error_create_poll: |
095a4ae5 | 1252 | if (data_sock->fd >= 0) { |
40212d87 JG |
1253 | int data_sock_fd = data_sock->fd; |
1254 | ||
1255 | ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, | |
1256 | &data_sock_fd, 1, close_sock, | |
1257 | data_sock); | |
b8aa1682 | 1258 | if (ret) { |
40212d87 | 1259 | PERROR("Failed to close the data listener socket file descriptor"); |
b8aa1682 | 1260 | } |
40212d87 | 1261 | data_sock->fd = -1; |
b8aa1682 | 1262 | } |
095a4ae5 MD |
1263 | lttcomm_destroy_sock(data_sock); |
1264 | error_sock_relay: | |
1265 | if (control_sock->fd >= 0) { | |
40212d87 JG |
1266 | int control_sock_fd = control_sock->fd; |
1267 | ||
1268 | ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, | |
1269 | &control_sock_fd, 1, close_sock, | |
1270 | control_sock); | |
b8aa1682 | 1271 | if (ret) { |
40212d87 | 1272 | PERROR("Failed to close the control listener socket file descriptor"); |
b8aa1682 | 1273 | } |
40212d87 | 1274 | control_sock->fd = -1; |
b8aa1682 | 1275 | } |
095a4ae5 MD |
1276 | lttcomm_destroy_sock(control_sock); |
1277 | error_sock_control: | |
1278 | if (err) { | |
f385ae0a MD |
1279 | health_error(); |
1280 | ERR("Health error occurred in %s", __func__); | |
095a4ae5 | 1281 | } |
55706a7d | 1282 | health_unregister(health_relayd); |
8fba2b8d | 1283 | rcu_unregister_thread(); |
b8aa1682 | 1284 | DBG("Relay listener thread cleanup complete"); |
b4aacfdc | 1285 | lttng_relay_stop_threads(); |
b8aa1682 JD |
1286 | return NULL; |
1287 | } | |
1288 | ||
1289 | /* | |
1290 | * This thread manages the dispatching of the requests to worker threads | |
1291 | */ | |
7591bab1 | 1292 | static void *relay_thread_dispatcher(void *data) |
b8aa1682 | 1293 | { |
6cd525e8 MD |
1294 | int err = -1; |
1295 | ssize_t ret; | |
8bdee6e2 | 1296 | struct cds_wfcq_node *node; |
58eb9381 | 1297 | struct relay_connection *new_conn = NULL; |
b8aa1682 JD |
1298 | |
1299 | DBG("[thread] Relay dispatcher started"); | |
1300 | ||
55706a7d MD |
1301 | health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER); |
1302 | ||
9b5e0863 MD |
1303 | if (testpoint(relayd_thread_dispatcher)) { |
1304 | goto error_testpoint; | |
1305 | } | |
1306 | ||
f385ae0a MD |
1307 | health_code_update(); |
1308 | ||
0ed3b1a8 | 1309 | for (;;) { |
f385ae0a MD |
1310 | health_code_update(); |
1311 | ||
b8aa1682 | 1312 | /* Atomically prepare the queue futex */ |
58eb9381 | 1313 | futex_nto1_prepare(&relay_conn_queue.futex); |
b8aa1682 | 1314 | |
0ed3b1a8 MD |
1315 | if (CMM_LOAD_SHARED(dispatch_thread_exit)) { |
1316 | break; | |
1317 | } | |
1318 | ||
b8aa1682 | 1319 | do { |
f385ae0a MD |
1320 | health_code_update(); |
1321 | ||
b8aa1682 | 1322 | /* Dequeue commands */ |
8bdee6e2 SM |
1323 | node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head, |
1324 | &relay_conn_queue.tail); | |
b8aa1682 JD |
1325 | if (node == NULL) { |
1326 | DBG("Woken up but nothing in the relay command queue"); | |
1327 | /* Continue thread execution */ | |
1328 | break; | |
1329 | } | |
58eb9381 | 1330 | new_conn = caa_container_of(node, struct relay_connection, qnode); |
b8aa1682 | 1331 | |
58eb9381 | 1332 | DBG("Dispatching request waiting on sock %d", new_conn->sock->fd); |
b8aa1682 JD |
1333 | |
1334 | /* | |
7591bab1 MD |
1335 | * Inform worker thread of the new request. This |
1336 | * call is blocking so we can be assured that | |
1337 | * the data will be read at some point in time | |
1338 | * or wait to the end of the world :) | |
b8aa1682 | 1339 | */ |
58eb9381 DG |
1340 | ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn)); |
1341 | if (ret < 0) { | |
1342 | PERROR("write connection pipe"); | |
7591bab1 | 1343 | connection_put(new_conn); |
b8aa1682 JD |
1344 | goto error; |
1345 | } | |
1346 | } while (node != NULL); | |
1347 | ||
1348 | /* Futex wait on queue. Blocking call on futex() */ | |
f385ae0a | 1349 | health_poll_entry(); |
58eb9381 | 1350 | futex_nto1_wait(&relay_conn_queue.futex); |
f385ae0a | 1351 | health_poll_exit(); |
b8aa1682 JD |
1352 | } |
1353 | ||
f385ae0a MD |
1354 | /* Normal exit, no error */ |
1355 | err = 0; | |
1356 | ||
b8aa1682 | 1357 | error: |
9b5e0863 | 1358 | error_testpoint: |
f385ae0a MD |
1359 | if (err) { |
1360 | health_error(); | |
1361 | ERR("Health error occurred in %s", __func__); | |
1362 | } | |
55706a7d | 1363 | health_unregister(health_relayd); |
b8aa1682 | 1364 | DBG("Dispatch thread dying"); |
b4aacfdc | 1365 | lttng_relay_stop_threads(); |
b8aa1682 JD |
1366 | return NULL; |
1367 | } | |
1368 | ||
298a25ca JG |
1369 | static bool session_streams_have_index(const struct relay_session *session) |
1370 | { | |
1371 | return session->minor >= 4 && !session->snapshot; | |
1372 | } | |
1373 | ||
c5b6f4f0 DG |
1374 | /* |
1375 | * Handle the RELAYD_CREATE_SESSION command. | |
1376 | * | |
1377 | * On success, send back the session id or else return a negative value. | |
1378 | */ | |
5312a3ed JG |
1379 | static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr, |
1380 | struct relay_connection *conn, | |
1381 | const struct lttng_buffer_view *payload) | |
c5b6f4f0 | 1382 | { |
5312a3ed JG |
1383 | int ret = 0; |
1384 | ssize_t send_ret; | |
4c6885d2 | 1385 | struct relay_session *session = NULL; |
ecd1a12f | 1386 | struct lttcomm_relayd_create_session_reply_2_11 reply = {}; |
1e791a74 JG |
1387 | char session_name[LTTNG_NAME_MAX] = {}; |
1388 | char hostname[LTTNG_HOST_NAME_MAX] = {}; | |
7591bab1 MD |
1389 | uint32_t live_timer = 0; |
1390 | bool snapshot = false; | |
46ef2188 | 1391 | bool session_name_contains_creation_timestamp = false; |
23c8ff50 | 1392 | /* Left nil for peers < 2.11. */ |
6fa5fe7c | 1393 | char base_path[LTTNG_PATH_MAX] = {}; |
23c8ff50 | 1394 | lttng_uuid sessiond_uuid = {}; |
1e791a74 JG |
1395 | LTTNG_OPTIONAL(uint64_t) id_sessiond = {}; |
1396 | LTTNG_OPTIONAL(uint64_t) current_chunk_id = {}; | |
db1da059 | 1397 | LTTNG_OPTIONAL(time_t) creation_time = {}; |
ecd1a12f MD |
1398 | struct lttng_dynamic_buffer reply_payload; |
1399 | ||
1400 | lttng_dynamic_buffer_init(&reply_payload); | |
c5b6f4f0 | 1401 | |
f86f6389 JR |
1402 | if (conn->minor < 4) { |
1403 | /* From 2.1 to 2.3 */ | |
1404 | ret = 0; | |
1405 | } else if (conn->minor >= 4 && conn->minor < 11) { | |
1406 | /* From 2.4 to 2.10 */ | |
5312a3ed | 1407 | ret = cmd_create_session_2_4(payload, session_name, |
7591bab1 | 1408 | hostname, &live_timer, &snapshot); |
f86f6389 | 1409 | } else { |
84fa4db5 | 1410 | bool has_current_chunk; |
db1da059 JG |
1411 | uint64_t current_chunk_id_value; |
1412 | time_t creation_time_value; | |
1413 | uint64_t id_sessiond_value; | |
84fa4db5 | 1414 | |
f86f6389 | 1415 | /* From 2.11 to ... */ |
db1da059 | 1416 | ret = cmd_create_session_2_11(payload, session_name, hostname, |
6fa5fe7c | 1417 | base_path, &live_timer, &snapshot, &id_sessiond_value, |
db1da059 | 1418 | sessiond_uuid, &has_current_chunk, |
46ef2188 MD |
1419 | ¤t_chunk_id_value, &creation_time_value, |
1420 | &session_name_contains_creation_timestamp); | |
23c8ff50 JG |
1421 | if (lttng_uuid_is_nil(sessiond_uuid)) { |
1422 | /* The nil UUID is reserved for pre-2.11 clients. */ | |
1423 | ERR("Illegal nil UUID announced by peer in create session command"); | |
1424 | ret = -1; | |
1425 | goto send_reply; | |
1426 | } | |
db1da059 JG |
1427 | LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value); |
1428 | LTTNG_OPTIONAL_SET(&creation_time, creation_time_value); | |
1429 | if (has_current_chunk) { | |
1430 | LTTNG_OPTIONAL_SET(¤t_chunk_id, | |
1431 | current_chunk_id_value); | |
1432 | } | |
7591bab1 | 1433 | } |
f86f6389 | 1434 | |
7591bab1 MD |
1435 | if (ret < 0) { |
1436 | goto send_reply; | |
d3e2ba59 JD |
1437 | } |
1438 | ||
6fa5fe7c | 1439 | session = session_create(session_name, hostname, base_path, live_timer, |
1e791a74 JG |
1440 | snapshot, sessiond_uuid, |
1441 | id_sessiond.is_set ? &id_sessiond.value : NULL, | |
1442 | current_chunk_id.is_set ? ¤t_chunk_id.value : NULL, | |
db1da059 | 1443 | creation_time.is_set ? &creation_time.value : NULL, |
46ef2188 MD |
1444 | conn->major, conn->minor, |
1445 | session_name_contains_creation_timestamp); | |
7591bab1 MD |
1446 | if (!session) { |
1447 | ret = -1; | |
1448 | goto send_reply; | |
1449 | } | |
a0377dfe | 1450 | LTTNG_ASSERT(!conn->session); |
7591bab1 | 1451 | conn->session = session; |
c5b6f4f0 DG |
1452 | DBG("Created session %" PRIu64, session->id); |
1453 | ||
ecd1a12f | 1454 | reply.generic.session_id = htobe64(session->id); |
7591bab1 MD |
1455 | |
1456 | send_reply: | |
c5b6f4f0 | 1457 | if (ret < 0) { |
ecd1a12f | 1458 | reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL); |
c5b6f4f0 | 1459 | } else { |
ecd1a12f | 1460 | reply.generic.ret_code = htobe32(LTTNG_OK); |
c5b6f4f0 DG |
1461 | } |
1462 | ||
ecd1a12f MD |
1463 | if (conn->minor < 11) { |
1464 | /* From 2.1 to 2.10 */ | |
1465 | ret = lttng_dynamic_buffer_append(&reply_payload, | |
1466 | &reply.generic, sizeof(reply.generic)); | |
1467 | if (ret) { | |
1468 | ERR("Failed to append \"create session\" command reply header to payload buffer"); | |
1469 | ret = -1; | |
1470 | goto end; | |
1471 | } | |
1472 | } else { | |
1473 | const uint32_t output_path_length = | |
8d382dd4 | 1474 | session ? strlen(session->output_path) + 1 : 0; |
ecd1a12f MD |
1475 | |
1476 | reply.output_path_length = htobe32(output_path_length); | |
8d382dd4 JG |
1477 | ret = lttng_dynamic_buffer_append( |
1478 | &reply_payload, &reply, sizeof(reply)); | |
ecd1a12f MD |
1479 | if (ret) { |
1480 | ERR("Failed to append \"create session\" command reply header to payload buffer"); | |
1481 | goto end; | |
1482 | } | |
1483 | ||
8d382dd4 JG |
1484 | if (output_path_length) { |
1485 | ret = lttng_dynamic_buffer_append(&reply_payload, | |
1486 | session->output_path, | |
1487 | output_path_length); | |
1488 | if (ret) { | |
1489 | ERR("Failed to append \"create session\" command reply path to payload buffer"); | |
1490 | goto end; | |
1491 | } | |
ecd1a12f MD |
1492 | } |
1493 | } | |
1494 | ||
1495 | send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data, | |
1496 | reply_payload.size, 0); | |
1497 | if (send_ret < (ssize_t) reply_payload.size) { | |
1498 | ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)", | |
1499 | reply_payload.size, send_ret); | |
5312a3ed | 1500 | ret = -1; |
c5b6f4f0 | 1501 | } |
ecd1a12f | 1502 | end: |
4c6885d2 JG |
1503 | if (ret < 0 && session) { |
1504 | session_put(session); | |
1505 | } | |
ecd1a12f | 1506 | lttng_dynamic_buffer_reset(&reply_payload); |
c5b6f4f0 DG |
1507 | return ret; |
1508 | } | |
1509 | ||
a4baae1b JD |
1510 | /* |
1511 | * When we have received all the streams and the metadata for a channel, | |
1512 | * we make them visible to the viewer threads. | |
1513 | */ | |
7591bab1 | 1514 | static void publish_connection_local_streams(struct relay_connection *conn) |
a4baae1b | 1515 | { |
7591bab1 MD |
1516 | struct relay_stream *stream; |
1517 | struct relay_session *session = conn->session; | |
a4baae1b | 1518 | |
7591bab1 MD |
1519 | /* |
1520 | * We publish all streams belonging to a session atomically wrt | |
1521 | * session lock. | |
1522 | */ | |
1523 | pthread_mutex_lock(&session->lock); | |
1524 | rcu_read_lock(); | |
1525 | cds_list_for_each_entry_rcu(stream, &session->recv_list, | |
1526 | recv_node) { | |
1527 | stream_publish(stream); | |
a4baae1b | 1528 | } |
7591bab1 | 1529 | rcu_read_unlock(); |
a4baae1b | 1530 | |
7591bab1 MD |
1531 | /* |
1532 | * Inform the viewer that there are new streams in the session. | |
1533 | */ | |
1534 | if (session->viewer_attached) { | |
1535 | uatomic_set(&session->new_streams, 1); | |
1536 | } | |
1537 | pthread_mutex_unlock(&session->lock); | |
a4baae1b JD |
1538 | } |
1539 | ||
348a81dc JG |
1540 | static int conform_channel_path(char *channel_path) |
1541 | { | |
1542 | int ret = 0; | |
1543 | ||
1544 | if (strstr("../", channel_path)) { | |
1545 | ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"", | |
1546 | channel_path); | |
1547 | ret = -1; | |
1548 | goto end; | |
1549 | } | |
1550 | ||
1551 | if (*channel_path == '/') { | |
1552 | const size_t len = strlen(channel_path); | |
1553 | ||
1554 | /* | |
1555 | * Channel paths from peers prior to 2.11 are expressed as an | |
1556 | * absolute path that is, in reality, relative to the relay | |
1557 | * daemon's output directory. Remove the leading slash so it | |
1558 | * is correctly interpreted as a relative path later on. | |
1559 | * | |
1560 | * len (and not len - 1) is used to copy the trailing NULL. | |
1561 | */ | |
1562 | bcopy(channel_path + 1, channel_path, len); | |
1563 | } | |
1564 | end: | |
1565 | return ret; | |
1566 | } | |
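/*
 * Editor's note (hypothetical example, names made up for illustration): a
 * pre-2.11 peer may announce a channel path such as
 * "/my-hostname/my-session-20201110-120000/ust/uid/1000/64-bit"; after
 * conform_channel_path() it becomes the relative path
 * "my-hostname/my-session-20201110-120000/ust/uid/1000/64-bit", rooted in
 * the relay daemon's output directory. Paths containing "../" are rejected
 * outright to prevent directory traversal.
 */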
1567 | ||
b8aa1682 JD |
1568 | /* |
1569 | * relay_add_stream: allocate a new stream for a session | |
1570 | */ | |
5312a3ed JG |
1571 | static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr, |
1572 | struct relay_connection *conn, | |
1573 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1574 | { |
7591bab1 MD |
1575 | int ret; |
1576 | ssize_t send_ret; | |
58eb9381 | 1577 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1578 | struct relay_stream *stream = NULL; |
1579 | struct lttcomm_relayd_status_stream reply; | |
4030a636 | 1580 | struct ctf_trace *trace = NULL; |
7591bab1 MD |
1581 | uint64_t stream_handle = -1ULL; |
1582 | char *path_name = NULL, *channel_name = NULL; | |
1583 | uint64_t tracefile_size = 0, tracefile_count = 0; | |
348a81dc | 1584 | LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {}; |
b8aa1682 | 1585 | |
5312a3ed | 1586 | if (!session || !conn->version_check_done) { |
b8aa1682 JD |
1587 | ERR("Trying to add a stream before version check"); |
1588 | ret = -1; | |
1589 | goto end_no_session; | |
1590 | } | |
1591 | ||
2f21a469 JR |
1592 | if (session->minor == 1) { |
1593 | /* For 2.1 */ | |
5312a3ed | 1594 | ret = cmd_recv_stream_2_1(payload, &path_name, |
7591bab1 | 1595 | &channel_name); |
2f21a469 JR |
1596 | } else if (session->minor > 1 && session->minor < 11) { |
1597 | /* From 2.2 to 2.10 */ | |
5312a3ed | 1598 | ret = cmd_recv_stream_2_2(payload, &path_name, |
7591bab1 | 1599 | &channel_name, &tracefile_size, &tracefile_count); |
2f21a469 JR |
1600 | } else { |
1601 | /* From 2.11 to ... */ | |
1602 | ret = cmd_recv_stream_2_11(payload, &path_name, | |
0b50e4b3 JG |
1603 | &channel_name, &tracefile_size, &tracefile_count, |
1604 | &stream_chunk_id.value); | |
1605 | stream_chunk_id.is_set = true; | |
0f907de1 | 1606 | } |
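/*
 * Summary of the receive variants above (as visible from their arguments):
 * the 2.1 variant only carries the stream path and channel name, the
 * 2.2-2.10 variant adds the tracefile size and count used for tracefile
 * rotation, and the 2.11+ variant additionally carries the id of the trace
 * chunk to which the stream belongs.
 */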
2f21a469 | 1607 | |
0f907de1 | 1608 | if (ret < 0) { |
7591bab1 | 1609 | goto send_reply; |
b8aa1682 JD |
1610 | } |
1611 | ||
348a81dc JG |
1612 | if (conform_channel_path(path_name)) { |
1613 | goto send_reply; | |
1614 | } | |
1615 | ||
2a635488 JR |
1616 | /* |
1617 | * Backward compatibility for --group-output-by-session. | |
1618 | * Prior to lttng 2.11, the complete path is passed by the stream. | |
1619 | * Starting with 2.11, lttng-relayd uses trace chunks: with a producer | |
1620 | * >= 2.11, the chunk is responsible for the output path, while with a | |
1621 | * producer < 2.11 the chunk's output_path is the root output path and | |
1622 | * the stream carries the complete path (path_name). | |
1623 | * To support --group-output-by-session with an older producer (< 2.11), | |
1624 | * we need to craft the path based on the stream path. | |
1625 | */ | |
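/*
 * Hypothetical illustration (names made up for this example): a 2.7
 * producer sends a stream path such as
 * "my-hostname/my-session-20201110-120000/ust/uid/1000/64-bit";
 * backward_compat_group_by_session() is expected to rewrite it so that the
 * session name becomes the top-level directory, e.g.
 * "my-session/my-hostname-20201110-120000/ust/uid/1000/64-bit".
 */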
1626 | if (opt_group_output_by == RELAYD_GROUP_OUTPUT_BY_SESSION) { | |
1627 | if (conn->minor < 4) { | |
1628 | /* | |
1629 | * From 2.1 to 2.3, the session_name is not passed on | |
1630 | * the RELAYD_CREATE_SESSION command. The session name | |
1631 | * is necessary to detect the presence of a base_path | |
1632 | * inside the stream path. Without it we cannot perform | |
1633 | * a valid group-output-by-session transformation. | |
1634 | */ | |
1635 | WARN("Unable to perform a --group-by-session transformation for session %" PRIu64 | |
1636 | " for stream with path \"%s\" as it is produced by a peer using a protocol older than v2.4", | |
1637 | session->id, path_name); | |
1638 | } else if (conn->minor >= 4 && conn->minor < 11) { | |
1639 | char *group_by_session_path_name; | |
1640 | ||
a0377dfe | 1641 | LTTNG_ASSERT(session->session_name[0] != '\0'); |
2a635488 JR |
1642 | |
1643 | group_by_session_path_name = | |
1644 | backward_compat_group_by_session( | |
1645 | path_name, | |
d2cb4a90 JG |
1646 | session->session_name, |
1647 | session->creation_time.value); | |
2a635488 JR |
1648 | if (!group_by_session_path_name) { |
1649 | ERR("Failed to apply group by session to stream of session %" PRIu64, | |
1650 | session->id); | |
1651 | goto send_reply; | |
1652 | } | |
1653 | ||
1654 | DBG("Transformed session path from \"%s\" to \"%s\" to honor per-session name grouping", | |
1655 | path_name, group_by_session_path_name); | |
1656 | ||
1657 | free(path_name); | |
1658 | path_name = group_by_session_path_name; | |
1659 | } | |
1660 | } | |
1661 | ||
7591bab1 | 1662 | trace = ctf_trace_get_by_path_or_create(session, path_name); |
2a174661 | 1663 | if (!trace) { |
7591bab1 | 1664 | goto send_reply; |
2a174661 | 1665 | } |
2a174661 | 1666 | |
2a635488 | 1667 | /* This stream here has one reference on the trace. */ |
7591bab1 MD |
1668 | pthread_mutex_lock(&last_relay_stream_id_lock); |
1669 | stream_handle = ++last_relay_stream_id; | |
1670 | pthread_mutex_unlock(&last_relay_stream_id_lock); | |
d3e2ba59 | 1671 | |
7591bab1 MD |
1672 | /* We pass ownership of path_name and channel_name. */ |
1673 | stream = stream_create(trace, stream_handle, path_name, | |
348a81dc | 1674 | channel_name, tracefile_size, tracefile_count); |
7591bab1 MD |
1675 | path_name = NULL; |
1676 | channel_name = NULL; | |
a4baae1b | 1677 | |
2a174661 | 1678 | /* |
7591bab1 MD |
1679 | * Streams are the owners of their trace. Reference to trace is |
1680 | * kept within stream_create(). | |
2a174661 | 1681 | */ |
7591bab1 | 1682 | ctf_trace_put(trace); |
d3e2ba59 | 1683 | |
7591bab1 | 1684 | send_reply: |
53efb85a | 1685 | memset(&reply, 0, sizeof(reply)); |
7591bab1 MD |
1686 | reply.handle = htobe64(stream_handle); |
1687 | if (!stream) { | |
f73fabfd | 1688 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
b8aa1682 | 1689 | } else { |
f73fabfd | 1690 | reply.ret_code = htobe32(LTTNG_OK); |
b8aa1682 | 1691 | } |
5af40280 | 1692 | |
58eb9381 | 1693 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
b8aa1682 | 1694 | sizeof(struct lttcomm_relayd_status_stream), 0); |
5312a3ed JG |
1695 | if (send_ret < (ssize_t) sizeof(reply)) { |
1696 | ERR("Failed to send \"add stream\" command reply (ret = %zd)", | |
1697 | send_ret); | |
1698 | ret = -1; | |
b8aa1682 JD |
1699 | } |
1700 | ||
1701 | end_no_session: | |
7591bab1 MD |
1702 | free(path_name); |
1703 | free(channel_name); | |
0f907de1 | 1704 | return ret; |
b8aa1682 JD |
1705 | } |
1706 | ||
173af62f DG |
1707 | /* |
1708 | * relay_close_stream: close a specific stream | |
1709 | */ | |
5312a3ed JG |
1710 | static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr, |
1711 | struct relay_connection *conn, | |
1712 | const struct lttng_buffer_view *payload) | |
173af62f | 1713 | { |
5312a3ed JG |
1714 | int ret; |
1715 | ssize_t send_ret; | |
58eb9381 | 1716 | struct relay_session *session = conn->session; |
173af62f DG |
1717 | struct lttcomm_relayd_close_stream stream_info; |
1718 | struct lttcomm_relayd_generic_reply reply; | |
1719 | struct relay_stream *stream; | |
173af62f DG |
1720 | |
1721 | DBG("Close stream received"); | |
1722 | ||
5312a3ed | 1723 | if (!session || !conn->version_check_done) { |
173af62f DG |
1724 | ERR("Trying to close a stream before version check"); |
1725 | ret = -1; | |
1726 | goto end_no_session; | |
1727 | } | |
1728 | ||
5312a3ed JG |
1729 | if (payload->size < sizeof(stream_info)) { |
1730 | ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes", | |
1731 | sizeof(stream_info), payload->size); | |
173af62f DG |
1732 | ret = -1; |
1733 | goto end_no_session; | |
1734 | } | |
5312a3ed JG |
1735 | memcpy(&stream_info, payload->data, sizeof(stream_info)); |
1736 | stream_info.stream_id = be64toh(stream_info.stream_id); | |
1737 | stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num); | |
173af62f | 1738 | |
5312a3ed | 1739 | stream = stream_get_by_id(stream_info.stream_id); |
173af62f DG |
1740 | if (!stream) { |
1741 | ret = -1; | |
7591bab1 | 1742 | goto end; |
173af62f | 1743 | } |
77f7bd85 MD |
1744 | |
1745 | /* | |
1746 | * Set last_net_seq_num before the close flag. Required by data | |
1747 | * pending check. | |
1748 | */ | |
7591bab1 | 1749 | pthread_mutex_lock(&stream->lock); |
5312a3ed | 1750 | stream->last_net_seq_num = stream_info.last_net_seq_num; |
77f7bd85 MD |
1751 | pthread_mutex_unlock(&stream->lock); |
1752 | ||
bda7c7b9 JG |
1753 | /* |
1754 | * This is one of the conditions which may trigger a stream close | |
1755 | * with the others being: | |
1756 | * 1) A close command is received for a stream | |
1757 | * 2) The control connection owning the stream is closed | |
1758 | * 3) We have received all of the stream's data _after_ a close | |
1759 | * request. | |
1760 | */ | |
1761 | try_stream_close(stream); | |
7591bab1 | 1762 | stream_put(stream); |
5312a3ed | 1763 | ret = 0; |
173af62f | 1764 | |
7591bab1 | 1765 | end: |
53efb85a | 1766 | memset(&reply, 0, sizeof(reply)); |
173af62f | 1767 | if (ret < 0) { |
f73fabfd | 1768 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
173af62f | 1769 | } else { |
f73fabfd | 1770 | reply.ret_code = htobe32(LTTNG_OK); |
173af62f | 1771 | } |
58eb9381 | 1772 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
173af62f | 1773 | sizeof(struct lttcomm_relayd_generic_reply), 0); |
5312a3ed JG |
1774 | if (send_ret < (ssize_t) sizeof(reply)) { |
1775 | ERR("Failed to send \"close stream\" command reply (ret = %zd)", | |
1776 | send_ret); | |
1777 | ret = -1; | |
173af62f DG |
1778 | } |
1779 | ||
1780 | end_no_session: | |
1781 | return ret; | |
1782 | } | |
1783 | ||
93ec662e JD |
1784 | /* |
1785 | * relay_reset_metadata: reset a metadata stream | |
1786 | */ | |
1787 | static | |
5312a3ed JG |
1788 | int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr, |
1789 | struct relay_connection *conn, | |
1790 | const struct lttng_buffer_view *payload) | |
93ec662e | 1791 | { |
5312a3ed JG |
1792 | int ret; |
1793 | ssize_t send_ret; | |
93ec662e JD |
1794 | struct relay_session *session = conn->session; |
1795 | struct lttcomm_relayd_reset_metadata stream_info; | |
1796 | struct lttcomm_relayd_generic_reply reply; | |
1797 | struct relay_stream *stream; | |
1798 | ||
1799 | DBG("Reset metadata received"); | |
1800 | ||
5312a3ed | 1801 | if (!session || !conn->version_check_done) { |
93ec662e JD |
1802 | ERR("Trying to reset a metadata stream before version check"); |
1803 | ret = -1; | |
1804 | goto end_no_session; | |
1805 | } | |
1806 | ||
5312a3ed JG |
1807 | if (payload->size < sizeof(stream_info)) { |
1808 | ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes", | |
1809 | sizeof(stream_info), payload->size); | |
93ec662e JD |
1810 | ret = -1; |
1811 | goto end_no_session; | |
1812 | } | |
5312a3ed JG |
1813 | memcpy(&stream_info, payload->data, sizeof(stream_info)); |
1814 | stream_info.stream_id = be64toh(stream_info.stream_id); | |
1815 | stream_info.version = be64toh(stream_info.version); | |
1816 | ||
1817 | DBG("Update metadata to version %" PRIu64, stream_info.version); | |
93ec662e JD |
1818 | |
1819 | /* Unsupported for live sessions for now. */ | |
1820 | if (session->live_timer != 0) { | |
1821 | ret = -1; | |
1822 | goto end; | |
1823 | } | |
1824 | ||
5312a3ed | 1825 | stream = stream_get_by_id(stream_info.stream_id); |
93ec662e JD |
1826 | if (!stream) { |
1827 | ret = -1; | |
1828 | goto end; | |
1829 | } | |
1830 | pthread_mutex_lock(&stream->lock); | |
1831 | if (!stream->is_metadata) { | |
1832 | ret = -1; | |
1833 | goto end_unlock; | |
1834 | } | |
1835 | ||
c35f9726 | 1836 | ret = stream_reset_file(stream); |
93ec662e | 1837 | if (ret < 0) { |
c35f9726 JG |
1838 | ERR("Failed to reset metadata stream %" PRIu64 |
1839 | ": stream_path = %s, channel = %s", | |
1840 | stream->stream_handle, stream->path_name, | |
1841 | stream->channel_name); | |
93ec662e JD |
1842 | goto end_unlock; |
1843 | } | |
93ec662e JD |
1844 | end_unlock: |
1845 | pthread_mutex_unlock(&stream->lock); | |
1846 | stream_put(stream); | |
1847 | ||
1848 | end: | |
1849 | memset(&reply, 0, sizeof(reply)); | |
1850 | if (ret < 0) { | |
1851 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
1852 | } else { | |
1853 | reply.ret_code = htobe32(LTTNG_OK); | |
1854 | } | |
1855 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, | |
1856 | sizeof(struct lttcomm_relayd_generic_reply), 0); | |
5312a3ed JG |
1857 | if (send_ret < (ssize_t) sizeof(reply)) { |
1858 | ERR("Failed to send \"reset metadata\" command reply (ret = %zd)", | |
1859 | send_ret); | |
1860 | ret = -1; | |
93ec662e JD |
1861 | } |
1862 | ||
1863 | end_no_session: | |
1864 | return ret; | |
1865 | } | |
1866 | ||
b8aa1682 JD |
1867 | /* |
1868 | * relay_unknown_command: send -1 if received unknown command | |
1869 | */ | |
7591bab1 | 1870 | static void relay_unknown_command(struct relay_connection *conn) |
b8aa1682 JD |
1871 | { |
1872 | struct lttcomm_relayd_generic_reply reply; | |
5312a3ed | 1873 | ssize_t send_ret; |
b8aa1682 | 1874 | |
53efb85a | 1875 | memset(&reply, 0, sizeof(reply)); |
f73fabfd | 1876 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
5312a3ed JG |
1877 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1878 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1879 | ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret); | |
b8aa1682 JD |
1880 | } |
1881 | } | |
1882 | ||
1883 | /* | |
1884 | * relay_start: send an acknowledgment to the client to tell if we are | |
1885 | * ready to receive data. We are ready if a session is established. | |
1886 | */ | |
5312a3ed JG |
1887 | static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr, |
1888 | struct relay_connection *conn, | |
1889 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1890 | { |
5312a3ed JG |
1891 | int ret = 0; |
1892 | ssize_t send_ret; | |
b8aa1682 | 1893 | struct lttcomm_relayd_generic_reply reply; |
58eb9381 | 1894 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1895 | |
1896 | memset(&reply, 0, sizeof(reply)); | |
1897 | if (!session) { | |
1898 | DBG("Trying to start the streaming without a session established"); | |
1899 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
1900 | } else { | |
1901 | reply.ret_code = htobe32(LTTNG_OK); | |
1902 | } | |
1903 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, | |
1904 | sizeof(reply), 0); | |
1905 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1906 | ERR("Failed to send \"relay_start\" command reply (ret = %zd)", | |
1907 | send_ret); | |
1908 | ret = -1; | |
b8aa1682 JD |
1909 | } |
1910 | ||
1911 | return ret; | |
1912 | } | |
1913 | ||
b8aa1682 | 1914 | /* |
7591bab1 | 1915 | * relay_recv_metadata: receive the metadata for the session. |
b8aa1682 | 1916 | */ |
5312a3ed JG |
1917 | static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr, |
1918 | struct relay_connection *conn, | |
1919 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1920 | { |
32d1569c | 1921 | int ret = 0; |
58eb9381 | 1922 | struct relay_session *session = conn->session; |
5312a3ed | 1923 | struct lttcomm_relayd_metadata_payload metadata_payload_header; |
b8aa1682 | 1924 | struct relay_stream *metadata_stream; |
5312a3ed | 1925 | uint64_t metadata_payload_size; |
c35f9726 | 1926 | struct lttng_buffer_view packet_view; |
b8aa1682 JD |
1927 | |
1928 | if (!session) { | |
1929 | ERR("Metadata sent before version check"); | |
1930 | ret = -1; | |
1931 | goto end; | |
1932 | } | |
1933 | ||
5312a3ed | 1934 | if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) { |
f6416125 MD |
1935 | ERR("Incorrect data size"); |
1936 | ret = -1; | |
1937 | goto end; | |
1938 | } | |
5312a3ed JG |
1939 | metadata_payload_size = recv_hdr->data_size - |
1940 | sizeof(struct lttcomm_relayd_metadata_payload); | |
f6416125 | 1941 | |
5312a3ed JG |
1942 | memcpy(&metadata_payload_header, payload->data, |
1943 | sizeof(metadata_payload_header)); | |
1944 | metadata_payload_header.stream_id = be64toh( | |
1945 | metadata_payload_header.stream_id); | |
1946 | metadata_payload_header.padding_size = be32toh( | |
1947 | metadata_payload_header.padding_size); | |
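/*
 * Assumed wire layout of a RELAYD_SEND_METADATA payload (inferred from the
 * fields used here): a lttcomm_relayd_metadata_payload header carrying the
 * metadata stream id and a padding size, followed by
 * `data_size - sizeof(header)` bytes of metadata packet content. The
 * padding_size is handed to stream_write(), which presumably accounts for
 * the packet's padding when writing it to disk.
 */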
9d1bbf21 | 1948 | |
5312a3ed | 1949 | metadata_stream = stream_get_by_id(metadata_payload_header.stream_id); |
b8aa1682 JD |
1950 | if (!metadata_stream) { |
1951 | ret = -1; | |
7591bab1 | 1952 | goto end; |
b8aa1682 JD |
1953 | } |
1954 | ||
c35f9726 JG |
1955 | packet_view = lttng_buffer_view_from_view(payload, |
1956 | sizeof(metadata_payload_header), metadata_payload_size); | |
3e6e0df2 | 1957 | if (!lttng_buffer_view_is_valid(&packet_view)) { |
c35f9726 | 1958 | ERR("Invalid metadata packet length announced by header"); |
b8aa1682 | 1959 | ret = -1; |
7591bab1 | 1960 | goto end_put; |
b8aa1682 | 1961 | } |
1d4dfdef | 1962 | |
c35f9726 JG |
1963 | pthread_mutex_lock(&metadata_stream->lock); |
1964 | ret = stream_write(metadata_stream, &packet_view, | |
5312a3ed | 1965 | metadata_payload_header.padding_size); |
c35f9726 JG |
1966 | pthread_mutex_unlock(&metadata_stream->lock); |
1967 | if (ret) { | |
5312a3ed | 1968 | ret = -1; |
7591bab1 | 1969 | goto end_put; |
1d4dfdef | 1970 | } |
7591bab1 | 1971 | end_put: |
7591bab1 | 1972 | stream_put(metadata_stream); |
b8aa1682 JD |
1973 | end: |
1974 | return ret; | |
1975 | } | |
1976 | ||
1977 | /* | |
1978 | * relay_send_version: send relayd version number | |
1979 | */ | |
5312a3ed JG |
1980 | static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr, |
1981 | struct relay_connection *conn, | |
1982 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1983 | { |
7f51dcba | 1984 | int ret; |
5312a3ed | 1985 | ssize_t send_ret; |
092b6259 | 1986 | struct lttcomm_relayd_version reply, msg; |
87cb6359 | 1987 | bool compatible = true; |
b8aa1682 | 1988 | |
5312a3ed | 1989 | conn->version_check_done = true; |
b8aa1682 | 1990 | |
092b6259 | 1991 | /* Get version from the other side. */ |
5312a3ed JG |
1992 | if (payload->size < sizeof(msg)) { |
1993 | ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes", | |
1994 | sizeof(msg), payload->size); | |
092b6259 | 1995 | ret = -1; |
092b6259 DG |
1996 | goto end; |
1997 | } | |
1998 | ||
5312a3ed JG |
1999 | memcpy(&msg, payload->data, sizeof(msg)); |
2000 | msg.major = be32toh(msg.major); | |
2001 | msg.minor = be32toh(msg.minor); | |
2002 | ||
53efb85a | 2003 | memset(&reply, 0, sizeof(reply)); |
d83a952c MD |
2004 | reply.major = RELAYD_VERSION_COMM_MAJOR; |
2005 | reply.minor = RELAYD_VERSION_COMM_MINOR; | |
d4519fa3 JD |
2006 | |
2007 | /* Major versions must be the same */ | |
5312a3ed | 2008 | if (reply.major != msg.major) { |
6151a90f | 2009 | DBG("Incompatible major versions (%u vs %u), deleting session", |
5312a3ed | 2010 | reply.major, msg.major); |
87cb6359 | 2011 | compatible = false; |
d4519fa3 JD |
2012 | } |
2013 | ||
58eb9381 | 2014 | conn->major = reply.major; |
0f907de1 | 2015 | /* We adapt to the lowest compatible version */ |
5312a3ed | 2016 | if (reply.minor <= msg.minor) { |
58eb9381 | 2017 | conn->minor = reply.minor; |
0f907de1 | 2018 | } else { |
5312a3ed | 2019 | conn->minor = msg.minor; |
0f907de1 JD |
2020 | } |
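/*
 * Illustration of the negotiation above: major versions must match
 * exactly, while the effective minor is the lower of the two. For example,
 * if this relay daemon implements protocol 2.11 and the peer announces
 * 2.4, the connection proceeds using the 2.4 command set.
 */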
2021 | ||
6151a90f JD |
2022 | reply.major = htobe32(reply.major); |
2023 | reply.minor = htobe32(reply.minor); | |
5312a3ed JG |
2024 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
2025 | sizeof(reply), 0); | |
2026 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2027 | ERR("Failed to send \"send version\" command reply (ret = %zd)", | |
2028 | send_ret); | |
2029 | ret = -1; | |
2030 | goto end; | |
2031 | } else { | |
2032 | ret = 0; | |
6151a90f JD |
2033 | } |
2034 | ||
87cb6359 JD |
2035 | if (!compatible) { |
2036 | ret = -1; | |
2037 | goto end; | |
2038 | } | |
2039 | ||
58eb9381 DG |
2040 | DBG("Version check done using protocol %u.%u", conn->major, |
2041 | conn->minor); | |
b8aa1682 JD |
2042 | |
2043 | end: | |
2044 | return ret; | |
2045 | } | |
2046 | ||
c8f59ee5 | 2047 | /* |
6d805429 | 2048 | * Check for data pending for a given stream id from the session daemon. |
c8f59ee5 | 2049 | */ |
5312a3ed JG |
2050 | static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
2051 | struct relay_connection *conn, | |
2052 | const struct lttng_buffer_view *payload) | |
c8f59ee5 | 2053 | { |
58eb9381 | 2054 | struct relay_session *session = conn->session; |
6d805429 | 2055 | struct lttcomm_relayd_data_pending msg; |
c8f59ee5 DG |
2056 | struct lttcomm_relayd_generic_reply reply; |
2057 | struct relay_stream *stream; | |
5312a3ed | 2058 | ssize_t send_ret; |
c8f59ee5 | 2059 | int ret; |
298a25ca | 2060 | uint64_t stream_seq; |
c8f59ee5 | 2061 | |
6d805429 | 2062 | DBG("Data pending command received"); |
c8f59ee5 | 2063 | |
5312a3ed | 2064 | if (!session || !conn->version_check_done) { |
c8f59ee5 DG |
2065 | ERR("Trying to check for data before version check"); |
2066 | ret = -1; | |
2067 | goto end_no_session; | |
2068 | } | |
2069 | ||
5312a3ed JG |
2070 | if (payload->size < sizeof(msg)) { |
2071 | ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes", | |
2072 | sizeof(msg), payload->size); | |
c8f59ee5 DG |
2073 | ret = -1; |
2074 | goto end_no_session; | |
2075 | } | |
5312a3ed JG |
2076 | memcpy(&msg, payload->data, sizeof(msg)); |
2077 | msg.stream_id = be64toh(msg.stream_id); | |
2078 | msg.last_net_seq_num = be64toh(msg.last_net_seq_num); | |
c8f59ee5 | 2079 | |
5312a3ed | 2080 | stream = stream_get_by_id(msg.stream_id); |
de91f48a | 2081 | if (stream == NULL) { |
c8f59ee5 | 2082 | ret = -1; |
7591bab1 | 2083 | goto end; |
c8f59ee5 DG |
2084 | } |
2085 | ||
7591bab1 MD |
2086 | pthread_mutex_lock(&stream->lock); |
2087 | ||
298a25ca JG |
2088 | if (session_streams_have_index(session)) { |
2089 | /* | |
2090 | * Ensure that both the index and stream data have been | |
2091 | * flushed up to the requested point. | |
2092 | */ | |
ac497a37 | 2093 | stream_seq = std::min(stream->prev_data_seq, stream->prev_index_seq); |
298a25ca | 2094 | } else { |
a8f9f353 | 2095 | stream_seq = stream->prev_data_seq; |
298a25ca | 2096 | } |
a8f9f353 | 2097 | DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64 |
298a25ca JG |
2098 | ", prev_index_seq %" PRIu64 |
2099 | ", and last_seq %" PRIu64, msg.stream_id, | |
a8f9f353 | 2100 | stream->prev_data_seq, stream->prev_index_seq, |
298a25ca | 2101 | msg.last_net_seq_num); |
c8f59ee5 | 2102 | |
33832e64 | 2103 | /* Avoid wrapping issue */ |
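/*
 * The check below is a wraparound-tolerant sequence number comparison: the
 * unsigned subtraction is performed modulo 2^64 and the result is
 * reinterpreted as a signed value. For example, with stream_seq = 5 and
 * last_net_seq_num = UINT64_MAX - 1, the difference wraps to 7, which is
 * >= 0 as an int64_t, so the requested position is considered reached.
 */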
298a25ca | 2104 | if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) { |
6d805429 | 2105 | /* Data has in fact been written and is NOT pending */ |
c8f59ee5 | 2106 | ret = 0; |
6d805429 DG |
2107 | } else { |
2108 | /* Data still being streamed thus pending */ | |
2109 | ret = 1; | |
c8f59ee5 DG |
2110 | } |
2111 | ||
7591bab1 MD |
2112 | stream->data_pending_check_done = true; |
2113 | pthread_mutex_unlock(&stream->lock); | |
f7079f67 | 2114 | |
7591bab1 MD |
2115 | stream_put(stream); |
2116 | end: | |
c8f59ee5 | 2117 | |
53efb85a | 2118 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 2119 | reply.ret_code = htobe32(ret); |
5312a3ed JG |
2120 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
2121 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2122 | ERR("Failed to send \"data pending\" command reply (ret = %zd)", | |
2123 | send_ret); | |
2124 | ret = -1; | |
c8f59ee5 DG |
2125 | } |
2126 | ||
2127 | end_no_session: | |
2128 | return ret; | |
2129 | } | |
2130 | ||
2131 | /* | |
2132 | * Wait for the control socket to reach a quiescent state. | |
2133 | * | |
7591bab1 MD |
2134 | * Note that, for now, receiving this command from the session | |
2135 | * daemon means that every command or data packet received before it on | |
2136 | * the control socket has already been handled, which is why we simply | |
2137 | * return OK here. | |
c8f59ee5 | 2138 | */ |
5312a3ed JG |
2139 | static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr, |
2140 | struct relay_connection *conn, | |
2141 | const struct lttng_buffer_view *payload) | |
c8f59ee5 DG |
2142 | { |
2143 | int ret; | |
5312a3ed | 2144 | ssize_t send_ret; |
ad7051c0 | 2145 | struct relay_stream *stream; |
ad7051c0 | 2146 | struct lttcomm_relayd_quiescent_control msg; |
c8f59ee5 DG |
2147 | struct lttcomm_relayd_generic_reply reply; |
2148 | ||
2149 | DBG("Checking quiescent state on control socket"); | |
2150 | ||
5312a3ed | 2151 | if (!conn->session || !conn->version_check_done) { |
ad7051c0 DG |
2152 | ERR("Trying to check for data before version check"); |
2153 | ret = -1; | |
2154 | goto end_no_session; | |
2155 | } | |
2156 | ||
5312a3ed JG |
2157 | if (payload->size < sizeof(msg)) { |
2158 | ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes", | |
2159 | sizeof(msg), payload->size); | |
ad7051c0 DG |
2160 | ret = -1; |
2161 | goto end_no_session; | |
2162 | } | |
5312a3ed JG |
2163 | memcpy(&msg, payload->data, sizeof(msg)); |
2164 | msg.stream_id = be64toh(msg.stream_id); | |
ad7051c0 | 2165 | |
5312a3ed | 2166 | stream = stream_get_by_id(msg.stream_id); |
7591bab1 MD |
2167 | if (!stream) { |
2168 | goto reply; | |
2169 | } | |
2170 | pthread_mutex_lock(&stream->lock); | |
2171 | stream->data_pending_check_done = true; | |
2172 | pthread_mutex_unlock(&stream->lock); | |
5312a3ed JG |
2173 | |
2174 | DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id); | |
7591bab1 MD |
2175 | stream_put(stream); |
2176 | reply: | |
53efb85a | 2177 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 2178 | reply.ret_code = htobe32(LTTNG_OK); |
5312a3ed JG |
2179 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
2180 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2181 | ERR("Failed to send \"quiescent control\" command reply (ret = %zd)", | |
2182 | send_ret); | |
2183 | ret = -1; | |
2184 | } else { | |
2185 | ret = 0; | |
c8f59ee5 DG |
2186 | } |
2187 | ||
ad7051c0 | 2188 | end_no_session: |
c8f59ee5 DG |
2189 | return ret; |
2190 | } | |
2191 | ||
f7079f67 | 2192 | /* |
7591bab1 MD |
2193 | * Initialize a data pending command. This means that a consumer is about |
2194 | * to ask for data pending for each stream it holds. Simply iterate over | |
2195 | * all streams of the session and clear their data_pending_check_done flag. | |
f7079f67 DG |
2196 | * |
2197 | * This command returns to the client a LTTNG_OK code. | |
2198 | */ | |
5312a3ed JG |
2199 | static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
2200 | struct relay_connection *conn, | |
2201 | const struct lttng_buffer_view *payload) | |
f7079f67 DG |
2202 | { |
2203 | int ret; | |
5312a3ed | 2204 | ssize_t send_ret; |
f7079f67 DG |
2205 | struct lttng_ht_iter iter; |
2206 | struct lttcomm_relayd_begin_data_pending msg; | |
2207 | struct lttcomm_relayd_generic_reply reply; | |
2208 | struct relay_stream *stream; | |
f7079f67 | 2209 | |
a0377dfe FD |
2210 | LTTNG_ASSERT(recv_hdr); |
2211 | LTTNG_ASSERT(conn); | |
f7079f67 DG |
2212 | |
2213 | DBG("Init streams for data pending"); | |
2214 | ||
5312a3ed | 2215 | if (!conn->session || !conn->version_check_done) { |
f7079f67 DG |
2216 | ERR("Trying to check for data before version check"); |
2217 | ret = -1; | |
2218 | goto end_no_session; | |
2219 | } | |
2220 | ||
5312a3ed JG |
2221 | if (payload->size < sizeof(msg)) { |
2222 | ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes", | |
2223 | sizeof(msg), payload->size); | |
f7079f67 DG |
2224 | ret = -1; |
2225 | goto end_no_session; | |
2226 | } | |
5312a3ed JG |
2227 | memcpy(&msg, payload->data, sizeof(msg)); |
2228 | msg.session_id = be64toh(msg.session_id); | |
f7079f67 DG |
2229 | |
2230 | /* | |
7591bab1 MD |
2231 | * Iterate over all streams to set the begin data pending flag. |
2232 | * For now, the streams are indexed by stream handle so we have | |
2233 | * to iterate over all streams to find the one associated with | |
2234 | * the right session_id. | |
f7079f67 DG |
2235 | */ |
2236 | rcu_read_lock(); | |
d3e2ba59 | 2237 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
2a174661 | 2238 | node.node) { |
7591bab1 MD |
2239 | if (!stream_get(stream)) { |
2240 | continue; | |
2241 | } | |
5312a3ed | 2242 | if (stream->trace->session->id == msg.session_id) { |
7591bab1 MD |
2243 | pthread_mutex_lock(&stream->lock); |
2244 | stream->data_pending_check_done = false; | |
2245 | pthread_mutex_unlock(&stream->lock); | |
f7079f67 DG |
2246 | DBG("Set begin data pending flag to stream %" PRIu64, |
2247 | stream->stream_handle); | |
2248 | } | |
7591bab1 | 2249 | stream_put(stream); |
f7079f67 DG |
2250 | } |
2251 | rcu_read_unlock(); | |
2252 | ||
53efb85a | 2253 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
2254 | /* All good, send back reply. */ |
2255 | reply.ret_code = htobe32(LTTNG_OK); | |
2256 | ||
5312a3ed JG |
2257 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
2258 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2259 | ERR("Failed to send \"begin data pending\" command reply (ret = %zd)", | |
2260 | send_ret); | |
2261 | ret = -1; | |
2262 | } else { | |
2263 | ret = 0; | |
f7079f67 DG |
2264 | } |
2265 | ||
2266 | end_no_session: | |
2267 | return ret; | |
2268 | } | |
2269 | ||
2270 | /* | |
7591bab1 MD |
2271 | * End data pending command. This will check, for a given session id, if |
2272 | * each stream associated with it has its data_pending_check_done flag | |
2273 | * set. If not, this means that the client lost track of the stream but | |
2274 | * the data is still being streamed on our side. In this case, we inform | |
2275 | * the client that data is in flight. | |
f7079f67 DG |
2276 | * |
2277 | * Return to the client if there is data in flight or not with a ret_code. | |
2278 | */ | |
5312a3ed JG |
2279 | static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
2280 | struct relay_connection *conn, | |
2281 | const struct lttng_buffer_view *payload) | |
f7079f67 DG |
2282 | { |
2283 | int ret; | |
5312a3ed | 2284 | ssize_t send_ret; |
f7079f67 DG |
2285 | struct lttng_ht_iter iter; |
2286 | struct lttcomm_relayd_end_data_pending msg; | |
2287 | struct lttcomm_relayd_generic_reply reply; | |
2288 | struct relay_stream *stream; | |
f7079f67 DG |
2289 | uint32_t is_data_inflight = 0; |
2290 | ||
f7079f67 DG |
2291 | DBG("End data pending command"); |
2292 | ||
5312a3ed | 2293 | if (!conn->session || !conn->version_check_done) { |
f7079f67 DG |
2294 | ERR("Trying to check for data before version check"); |
2295 | ret = -1; | |
2296 | goto end_no_session; | |
2297 | } | |
2298 | ||
5312a3ed JG |
2299 | if (payload->size < sizeof(msg)) { |
2300 | ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes", | |
2301 | sizeof(msg), payload->size); | |
f7079f67 DG |
2302 | ret = -1; |
2303 | goto end_no_session; | |
2304 | } | |
5312a3ed JG |
2305 | memcpy(&msg, payload->data, sizeof(msg)); |
2306 | msg.session_id = be64toh(msg.session_id); | |
f7079f67 | 2307 | |
7591bab1 MD |
2308 | /* |
2309 | * Iterate over all streams of the session to check whether any of | |
2310 | * them still has its data_pending_check_done flag cleared. | |
2311 | */ | |
f7079f67 | 2312 | rcu_read_lock(); |
d3e2ba59 | 2313 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
2a174661 | 2314 | node.node) { |
7591bab1 MD |
2315 | if (!stream_get(stream)) { |
2316 | continue; | |
2317 | } | |
5312a3ed | 2318 | if (stream->trace->session->id != msg.session_id) { |
7591bab1 MD |
2319 | stream_put(stream); |
2320 | continue; | |
2321 | } | |
2322 | pthread_mutex_lock(&stream->lock); | |
2323 | if (!stream->data_pending_check_done) { | |
298a25ca JG |
2324 | uint64_t stream_seq; |
2325 | ||
2326 | if (session_streams_have_index(conn->session)) { | |
2327 | /* | |
2328 | * Ensure that both the index and stream data have been | |
2329 | * flushed up to the requested point. | |
2330 | */ | |
ac497a37 | 2331 | stream_seq = std::min(stream->prev_data_seq, stream->prev_index_seq); |
298a25ca | 2332 | } else { |
a8f9f353 | 2333 | stream_seq = stream->prev_data_seq; |
298a25ca JG |
2334 | } |
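/*
 * A stream still has data in flight if it has not been closed yet, or if
 * the relay daemon has not caught up to the last sequence number announced
 * when the stream was closed (same wraparound-tolerant comparison as in
 * relay_data_pending()).
 */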
2335 | if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) { | |
7591bab1 MD |
2336 | is_data_inflight = 1; |
2337 | DBG("Data is still in flight for stream %" PRIu64, | |
2338 | stream->stream_handle); | |
2339 | pthread_mutex_unlock(&stream->lock); | |
2340 | stream_put(stream); | |
2341 | break; | |
2342 | } | |
f7079f67 | 2343 | } |
7591bab1 MD |
2344 | pthread_mutex_unlock(&stream->lock); |
2345 | stream_put(stream); | |
f7079f67 DG |
2346 | } |
2347 | rcu_read_unlock(); | |
2348 | ||
53efb85a | 2349 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
2350 | /* All good, send back reply. */ |
2351 | reply.ret_code = htobe32(is_data_inflight); | |
2352 | ||
5312a3ed JG |
2353 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
2354 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2355 | ERR("Failed to send \"end data pending\" command reply (ret = %zd)", | |
2356 | send_ret); | |
2357 | ret = -1; | |
2358 | } else { | |
2359 | ret = 0; | |
f7079f67 DG |
2360 | } |
2361 | ||
2362 | end_no_session: | |
2363 | return ret; | |
2364 | } | |
2365 | ||
1c20f0e2 JD |
2366 | /* |
2367 | * Receive an index for a specific stream. | |
2368 | * | |
2369 | * Return 0 on success else a negative value. | |
2370 | */ | |
5312a3ed JG |
2371 | static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr, |
2372 | struct relay_connection *conn, | |
2373 | const struct lttng_buffer_view *payload) | |
1c20f0e2 | 2374 | { |
5312a3ed JG |
2375 | int ret; |
2376 | ssize_t send_ret; | |
58eb9381 | 2377 | struct relay_session *session = conn->session; |
1c20f0e2 | 2378 | struct lttcomm_relayd_index index_info; |
1c20f0e2 JD |
2379 | struct lttcomm_relayd_generic_reply reply; |
2380 | struct relay_stream *stream; | |
f8f3885c | 2381 | size_t msg_len; |
1c20f0e2 | 2382 | |
a0377dfe | 2383 | LTTNG_ASSERT(conn); |
1c20f0e2 JD |
2384 | |
2385 | DBG("Relay receiving index"); | |
2386 | ||
5312a3ed | 2387 | if (!session || !conn->version_check_done) { |
1c20f0e2 JD |
2388 | ERR("Trying to close a stream before version check"); |
2389 | ret = -1; | |
2390 | goto end_no_session; | |
2391 | } | |
2392 | ||
f8f3885c MD |
2393 | msg_len = lttcomm_relayd_index_len( |
2394 | lttng_to_index_major(conn->major, conn->minor), | |
2395 | lttng_to_index_minor(conn->major, conn->minor)); | |
5312a3ed JG |
2396 | if (payload->size < msg_len) { |
2397 | ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes", | |
2398 | msg_len, payload->size); | |
1c20f0e2 JD |
2399 | ret = -1; |
2400 | goto end_no_session; | |
2401 | } | |
5312a3ed JG |
2402 | memcpy(&index_info, payload->data, msg_len); |
2403 | index_info.relay_stream_id = be64toh(index_info.relay_stream_id); | |
2404 | index_info.net_seq_num = be64toh(index_info.net_seq_num); | |
2405 | index_info.packet_size = be64toh(index_info.packet_size); | |
2406 | index_info.content_size = be64toh(index_info.content_size); | |
2407 | index_info.timestamp_begin = be64toh(index_info.timestamp_begin); | |
2408 | index_info.timestamp_end = be64toh(index_info.timestamp_end); | |
2409 | index_info.events_discarded = be64toh(index_info.events_discarded); | |
2410 | index_info.stream_id = be64toh(index_info.stream_id); | |
81df238b JR |
2411 | |
2412 | if (conn->minor >= 8) { | |
2413 | index_info.stream_instance_id = | |
2414 | be64toh(index_info.stream_instance_id); | |
2415 | index_info.packet_seq_num = be64toh(index_info.packet_seq_num); | |
0f83d1cc MD |
2416 | } else { |
2417 | index_info.stream_instance_id = -1ULL; | |
2418 | index_info.packet_seq_num = -1ULL; | |
81df238b | 2419 | } |
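/*
 * Peers speaking a protocol older than 2.8 do not transmit the stream
 * instance id and packet sequence number fields; -1ULL is presumably used
 * here as the "unset" marker expected by the newer index format.
 */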
5312a3ed JG |
2420 | |
2421 | stream = stream_get_by_id(index_info.relay_stream_id); | |
1c20f0e2 | 2422 | if (!stream) { |
7591bab1 | 2423 | ERR("stream_get_by_id not found"); |
1c20f0e2 | 2424 | ret = -1; |
7591bab1 | 2425 | goto end; |
1c20f0e2 | 2426 | } |
d3e2ba59 | 2427 | |
c35f9726 JG |
2428 | pthread_mutex_lock(&stream->lock); |
2429 | ret = stream_add_index(stream, &index_info); | |
2430 | pthread_mutex_unlock(&stream->lock); | |
2431 | if (ret) { | |
7591bab1 MD |
2432 | goto end_stream_put; |
2433 | } | |
1c20f0e2 | 2434 | |
7591bab1 | 2435 | end_stream_put: |
7591bab1 | 2436 | stream_put(stream); |
7591bab1 | 2437 | end: |
53efb85a | 2438 | memset(&reply, 0, sizeof(reply)); |
1c20f0e2 JD |
2439 | if (ret < 0) { |
2440 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
2441 | } else { | |
2442 | reply.ret_code = htobe32(LTTNG_OK); | |
2443 | } | |
58eb9381 | 2444 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
5312a3ed JG |
2445 | if (send_ret < (ssize_t) sizeof(reply)) { |
2446 | ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret); | |
2447 | ret = -1; | |
1c20f0e2 JD |
2448 | } |
2449 | ||
2450 | end_no_session: | |
2451 | return ret; | |
2452 | } | |
2453 | ||
a4baae1b JD |
2454 | /* |
2455 | * Receive the streams_sent message. | |
2456 | * | |
2457 | * Return 0 on success else a negative value. | |
2458 | */ | |
5312a3ed JG |
2459 | static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr, |
2460 | struct relay_connection *conn, | |
2461 | const struct lttng_buffer_view *payload) | |
a4baae1b | 2462 | { |
5312a3ed JG |
2463 | int ret; |
2464 | ssize_t send_ret; | |
a4baae1b JD |
2465 | struct lttcomm_relayd_generic_reply reply; |
2466 | ||
a0377dfe | 2467 | LTTNG_ASSERT(conn); |
a4baae1b JD |
2468 | |
2469 | DBG("Relay receiving streams_sent"); | |
2470 | ||
5312a3ed | 2471 | if (!conn->session || !conn->version_check_done) { |
a4baae1b JD |
2472 | ERR("Trying to close a stream before version check"); |
2473 | ret = -1; | |
2474 | goto end_no_session; | |
2475 | } | |
2476 | ||
2477 | /* | |
7591bab1 MD |
2478 | * Publish every pending stream in the connection recv list which are |
2479 | * now ready to be used by the viewer. | |
4a9daf17 | 2480 | */ |
7591bab1 | 2481 | publish_connection_local_streams(conn); |
4a9daf17 | 2482 | |
53efb85a | 2483 | memset(&reply, 0, sizeof(reply)); |
a4baae1b | 2484 | reply.ret_code = htobe32(LTTNG_OK); |
58eb9381 | 2485 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
5312a3ed JG |
2486 | if (send_ret < (ssize_t) sizeof(reply)) { |
2487 | ERR("Failed to send \"streams sent\" command reply (ret = %zd)", | |
2488 | send_ret); | |
2489 | ret = -1; | |
a4baae1b JD |
2490 | } else { |
2491 | /* Success. */ | |
2492 | ret = 0; | |
2493 | } | |
2494 | ||
2495 | end_no_session: | |
2496 | return ret; | |
2497 | } | |
2498 | ||
ce9dbd47 JG |
2499 | static ssize_t relay_unpack_rotate_streams_header( |
2500 | const struct lttng_buffer_view *payload, | |
2501 | struct lttcomm_relayd_rotate_streams *_rotate_streams) | |
2502 | { | |
2503 | struct lttcomm_relayd_rotate_streams rotate_streams; | |
2504 | /* | |
2505 | * Initialized to the size of the smallest (packed) version of | |
2506 | * `lttcomm_relayd_rotate_streams`. The peer may have sent a larger, padded | |
2507 | * variant; this variable is updated once the actual header size is known. | |
2508 | * | |
2509 | * See comment at the declaration of this structure for more information. | |
2510 | */ | |
2511 | ssize_t header_len = sizeof(struct lttcomm_relayd_rotate_streams_packed); | |
2512 | size_t expected_payload_size_no_padding, | |
2513 | expected_payload_size_3_bytes_padding, | |
2514 | expected_payload_size_7_bytes_padding; | |
2515 | ||
2516 | if (payload->size < header_len) { | |
2517 | ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes", | |
2518 | header_len, payload->size); | |
2519 | goto error; | |
2520 | } | |
2521 | ||
2522 | /* | |
2523 | * Some versions incorrectly omitted the LTTNG_PACKED annotation on the | |
2524 | * `new_chunk_id` optional field of struct lttcomm_relayd_rotate_streams. | |
2525 | * | |
2526 | * We start by "unpacking" `stream_count` to figure out the padding length | |
2527 | * emitted by our peer. | |
2528 | */ | |
2529 | memcpy(&rotate_streams.stream_count, payload->data, | |
2530 | sizeof(rotate_streams.stream_count)); | |
2531 | rotate_streams = (typeof(rotate_streams)) { | |
2532 | .stream_count = be32toh(rotate_streams.stream_count), | |
2533 | }; | |
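/*
 * Only `stream_count` is read at this point: it is a 32-bit field at
 * offset 0 in every variant of the structure, so it can be safely decoded
 * before the peer's padding/alignment layout is known.
 */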
2534 | ||
2535 | /* | |
2536 | * Payload size expected given the possible padding lengths in | |
2537 | * `struct lttcomm_relayd_rotate_streams`. | |
2538 | */ | |
2539 | expected_payload_size_no_padding = (rotate_streams.stream_count * | |
2540 | sizeof(*rotate_streams.rotation_positions)) + | |
2541 | sizeof(lttcomm_relayd_rotate_streams_packed); | |
2542 | expected_payload_size_3_bytes_padding = (rotate_streams.stream_count * | |
2543 | sizeof(*rotate_streams.rotation_positions)) + | |
2544 | sizeof(lttcomm_relayd_rotate_streams_3_bytes_padding); | |
2545 | expected_payload_size_7_bytes_padding = (rotate_streams.stream_count * | |
2546 | sizeof(*rotate_streams.rotation_positions)) + | |
2547 | sizeof(lttcomm_relayd_rotate_streams_7_bytes_padding); | |
2548 | ||
2549 | if (payload->size == expected_payload_size_no_padding) { | |
2550 | struct lttcomm_relayd_rotate_streams_packed packed_rotate_streams; | |
2551 | ||
2552 | /* | |
2553 | * This handles cases where someone might build with | |
2554 | * -fpack-struct or any other toolchain that wouldn't produce | |
2555 | * padding to align `value`. | |
2556 | */ | |
2557 | DBG("Received `struct lttcomm_relayd_rotate_streams` with no padding"); | |
2558 | ||
2559 | header_len = sizeof(packed_rotate_streams); | |
2560 | memcpy(&packed_rotate_streams, payload->data, header_len); | |
2561 | ||
2562 | /* Unpack the packed structure to the natively-packed version. */ | |
2563 | *_rotate_streams = (typeof(*_rotate_streams)) { | |
2564 | .stream_count = be32toh(packed_rotate_streams.stream_count), | |
2565 | .new_chunk_id = (typeof(_rotate_streams->new_chunk_id)) { | |
2566 | .is_set = !!packed_rotate_streams.new_chunk_id.is_set, | |
2567 | .value = be64toh(packed_rotate_streams.new_chunk_id.value), | |
2568 | } | |
2569 | }; | |
2570 | } else if (payload->size == expected_payload_size_3_bytes_padding) { | |
2571 | struct lttcomm_relayd_rotate_streams_3_bytes_padding padded_rotate_streams; | |
2572 | ||
2573 | DBG("Received `struct lttcomm_relayd_rotate_streams` with 3 bytes of padding (4-byte aligned peer)"); | |
2574 | ||
2575 | header_len = sizeof(padded_rotate_streams); | |
2576 | memcpy(&padded_rotate_streams, payload->data, header_len); | |
2577 | ||
2578 | /* Unpack the 3-byte padded structure to the natively-packed version. */ | |
2579 | *_rotate_streams = (typeof(*_rotate_streams)) { | |
2580 | .stream_count = be32toh(padded_rotate_streams.stream_count), | |
2581 | .new_chunk_id = (typeof(_rotate_streams->new_chunk_id)) { | |
2582 | .is_set = !!padded_rotate_streams.new_chunk_id.is_set, | |
2583 | .value = be64toh(padded_rotate_streams.new_chunk_id.value), | |
2584 | } | |
2585 | }; | |
2586 | } else if (payload->size == expected_payload_size_7_bytes_padding) { | |
2587 | struct lttcomm_relayd_rotate_streams_7_bytes_padding padded_rotate_streams; | |
2588 | ||
2589 | DBG("Received `struct lttcomm_relayd_rotate_streams` with 7 bytes of padding (8-byte aligned peer)"); | |
2590 | ||
2591 | header_len = sizeof(padded_rotate_streams); | |
2592 | memcpy(&padded_rotate_streams, payload->data, header_len); | |
2593 | ||
2594 | /* Unpack the 7-byte padded structure to the natively-packed version. */ | |
2595 | *_rotate_streams = (typeof(*_rotate_streams)) { | |
2596 | .stream_count = be32toh(padded_rotate_streams.stream_count), | |
2597 | .new_chunk_id = (typeof(_rotate_streams->new_chunk_id)) { | |
2598 | .is_set = !!padded_rotate_streams.new_chunk_id.is_set, | |
2599 | .value = be64toh(padded_rotate_streams.new_chunk_id.value), | |
2600 | } | |
2601 | }; | |
2604 | } else { | |
2605 | ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected %zu, %zu or %zu bytes, got %zu bytes", | |
2606 | expected_payload_size_no_padding, | |
2607 | expected_payload_size_3_bytes_padding, | |
2608 | expected_payload_size_7_bytes_padding, | |
2609 | payload->size); | |
2610 | goto error; | |
2611 | } | |
2612 | ||
2613 | return header_len; | |
2614 | error: | |
2615 | return -1; | |
2616 | } | |
2617 | ||
d3ecc550 | 2618 | /* |
c35f9726 JG |
2619 | * relay_rotate_session_stream: rotate a stream to a new tracefile for the |
2620 | * session rotation feature (not the tracefile rotation feature). | |
d3ecc550 | 2621 | */ |
c35f9726 JG |
2622 | static int relay_rotate_session_streams( |
2623 | const struct lttcomm_relayd_hdr *recv_hdr, | |
5312a3ed JG |
2624 | struct relay_connection *conn, |
2625 | const struct lttng_buffer_view *payload) | |
d3ecc550 | 2626 | { |
30b9d5ab | 2627 | int ret = 0; |
c35f9726 | 2628 | uint32_t i; |
5312a3ed | 2629 | ssize_t send_ret; |
c35f9726 | 2630 | enum lttng_error_code reply_code = LTTNG_ERR_UNK; |
d3ecc550 | 2631 | struct relay_session *session = conn->session; |
c35f9726 JG |
2632 | struct lttcomm_relayd_rotate_streams rotate_streams; |
2633 | struct lttcomm_relayd_generic_reply reply = {}; | |
2634 | struct relay_stream *stream = NULL; | |
c35f9726 JG |
2635 | struct lttng_trace_chunk *next_trace_chunk = NULL; |
2636 | struct lttng_buffer_view stream_positions; | |
70626904 JG |
2637 | char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)]; |
2638 | const char *chunk_id_str = "none"; | |
ce9dbd47 | 2639 | ssize_t header_len; |
d3ecc550 | 2640 | |
d3ecc550 JD |
2641 | if (!session || !conn->version_check_done) { |
2642 | ERR("Trying to rotate a stream before version check"); | |
2643 | ret = -1; | |
2644 | goto end_no_reply; | |
2645 | } | |
2646 | ||
2647 | if (session->major == 2 && session->minor < 11) { | |
2648 | ERR("Unsupported feature before 2.11"); | |
2649 | ret = -1; | |
2650 | goto end_no_reply; | |
2651 | } | |
2652 | ||
ce9dbd47 JG |
2653 | header_len = relay_unpack_rotate_streams_header(payload, &rotate_streams); |
2654 | if (header_len < 0) { | |
d3ecc550 JD |
2655 | ret = -1; |
2656 | goto end_no_reply; | |
2657 | } | |
2658 | ||
c35f9726 JG |
2659 | if (rotate_streams.new_chunk_id.is_set) { |
2660 | /* | |
2661 | * Retrieve the trace chunk the stream must transition to. As | |
2662 | * per the protocol, this chunk should have been created | |
2663 | * before this command is received. | |
2664 | */ | |
2665 | next_trace_chunk = sessiond_trace_chunk_registry_get_chunk( | |
2666 | sessiond_trace_chunk_registry, | |
c5c79321 JG |
2667 | session->sessiond_uuid, |
2668 | conn->session->id_sessiond.is_set ? | |
2669 | conn->session->id_sessiond.value : | |
2670 | conn->session->id, | |
c35f9726 JG |
2671 | rotate_streams.new_chunk_id.value); |
2672 | if (!next_trace_chunk) { | |
c70636a7 | 2673 | char uuid_str[LTTNG_UUID_STR_LEN]; |
c35f9726 JG |
2674 | |
2675 | lttng_uuid_to_str(session->sessiond_uuid, uuid_str); | |
2676 | ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64 | |
2677 | ", trace_chunk_id = %" PRIu64, | |
2678 | uuid_str, session->id, | |
2679 | rotate_streams.new_chunk_id.value); | |
2680 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2681 | ret = -1; | |
2682 | goto end; | |
2683 | } | |
70626904 JG |
2684 | |
2685 | ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64, | |
2686 | rotate_streams.new_chunk_id.value); | |
2687 | if (ret < 0 || ret >= sizeof(chunk_id_buf)) { | |
2688 | chunk_id_str = "formatting error"; | |
2689 | } else { | |
2690 | chunk_id_str = chunk_id_buf; | |
2691 | } | |
d3ecc550 JD |
2692 | } |
2693 | ||
70626904 JG |
2694 | DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"", |
2695 | rotate_streams.stream_count, session->session_name, | |
2696 | chunk_id_str); | |
2697 | ||
c35f9726 | 2698 | stream_positions = lttng_buffer_view_from_view(payload, |
ce9dbd47 | 2699 | header_len, -1); |
c35f9726 JG |
2700 | if (!stream_positions.data || |
2701 | stream_positions.size < | |
2702 | (rotate_streams.stream_count * | |
2703 | sizeof(struct lttcomm_relayd_stream_rotation_position))) { | |
2704 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
d3ecc550 | 2705 | ret = -1; |
5312a3ed | 2706 | goto end; |
d3ecc550 JD |
2707 | } |
2708 | ||
c35f9726 JG |
2709 | for (i = 0; i < rotate_streams.stream_count; i++) { |
2710 | struct lttcomm_relayd_stream_rotation_position *position_comm = | |
2711 | &((typeof(position_comm)) stream_positions.data)[i]; | |
2712 | const struct lttcomm_relayd_stream_rotation_position pos = { | |
2713 | .stream_id = be64toh(position_comm->stream_id), | |
2714 | .rotate_at_seq_num = be64toh( | |
2715 | position_comm->rotate_at_seq_num), | |
2716 | }; | |
5312a3ed | 2717 | |
c35f9726 JG |
2718 | stream = stream_get_by_id(pos.stream_id); |
2719 | if (!stream) { | |
2720 | reply_code = LTTNG_ERR_INVALID; | |
2721 | ret = -1; | |
2722 | goto end; | |
c6db3843 JG |
2723 | } |
2724 | ||
c35f9726 JG |
2725 | pthread_mutex_lock(&stream->lock); |
2726 | ret = stream_set_pending_rotation(stream, next_trace_chunk, | |
2727 | pos.rotate_at_seq_num); | |
2728 | pthread_mutex_unlock(&stream->lock); | |
2729 | if (ret) { | |
2730 | reply_code = LTTNG_ERR_FILE_CREATION_ERROR; | |
2731 | goto end; | |
c6db3843 | 2732 | } |
c35f9726 JG |
2733 | |
2734 | stream_put(stream); | |
2735 | stream = NULL; | |
d3ecc550 JD |
2736 | } |
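/*
 * Note that no file is rotated here: as its name suggests,
 * stream_set_pending_rotation() presumably only records the target chunk
 * and the sequence number at which each stream must switch over; the
 * actual rotation happens later, once the stream has received data up to
 * that position.
 */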
2737 | ||
c35f9726 | 2738 | reply_code = LTTNG_OK; |
eaeb64a9 | 2739 | ret = 0; |
d3ecc550 | 2740 | end: |
c35f9726 JG |
2741 | if (stream) { |
2742 | stream_put(stream); | |
d3ecc550 | 2743 | } |
c35f9726 JG |
2744 | |
2745 | reply.ret_code = htobe32((uint32_t) reply_code); | |
d3ecc550 JD |
2746 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
2747 | sizeof(struct lttcomm_relayd_generic_reply), 0); | |
5312a3ed JG |
2748 | if (send_ret < (ssize_t) sizeof(reply)) { |
2749 | ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)", | |
2750 | send_ret); | |
2751 | ret = -1; | |
d3ecc550 | 2752 | } |
d3ecc550 | 2753 | end_no_reply: |
c35f9726 | 2754 | lttng_trace_chunk_put(next_trace_chunk); |
d3ecc550 JD |
2755 | return ret; |
2756 | } | |
2757 | ||
e5add6d0 JG |
2758 | /* |
2759 | * relay_create_trace_chunk: create a new trace chunk | |
2760 | */ | |
2761 | static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr, | |
2762 | struct relay_connection *conn, | |
2763 | const struct lttng_buffer_view *payload) | |
2764 | { | |
2765 | int ret = 0; | |
2766 | ssize_t send_ret; | |
2767 | struct relay_session *session = conn->session; | |
2768 | struct lttcomm_relayd_create_trace_chunk *msg; | |
2769 | struct lttcomm_relayd_generic_reply reply = {}; | |
2770 | struct lttng_buffer_view header_view; | |
e5add6d0 JG |
2771 | struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL; |
2772 | enum lttng_error_code reply_code = LTTNG_OK; | |
2773 | enum lttng_trace_chunk_status chunk_status; | |
a7ceb342 | 2774 | const char *new_path; |
e5add6d0 JG |
2775 | |
2776 | if (!session || !conn->version_check_done) { | |
2777 | ERR("Trying to create a trace chunk before version check"); | |
2778 | ret = -1; | |
2779 | goto end_no_reply; | |
2780 | } | |
2781 | ||
2782 | if (session->major == 2 && session->minor < 11) { | |
2783 | ERR("Chunk creation command is unsupported before 2.11"); | |
2784 | ret = -1; | |
2785 | goto end_no_reply; | |
2786 | } | |
2787 | ||
2788 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
3e6e0df2 | 2789 | if (!lttng_buffer_view_is_valid(&header_view)) { |
e5add6d0 JG |
2790 | ERR("Failed to receive payload of chunk creation command"); |
2791 | ret = -1; | |
2792 | goto end_no_reply; | |
2793 | } | |
2794 | ||
2795 | /* Convert to host endianness. */ | |
2796 | msg = (typeof(msg)) header_view.data; | |
2797 | msg->chunk_id = be64toh(msg->chunk_id); | |
2798 | msg->creation_timestamp = be64toh(msg->creation_timestamp); | |
2799 | msg->override_name_length = be32toh(msg->override_name_length); | |
2800 | ||
8cd15f6a MD |
2801 | pthread_mutex_lock(&conn->session->lock); |
2802 | session->ongoing_rotation = true; | |
a7ceb342 MD |
2803 | if (session->current_trace_chunk && |
2804 | !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) { | |
2805 | chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk, | |
2806 | DEFAULT_CHUNK_TMP_OLD_DIRECTORY); | |
2807 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2808 | ERR("Failed to rename old chunk"); | |
2809 | ret = -1; | |
2810 | reply_code = LTTNG_ERR_UNK; | |
2811 | goto end; | |
2812 | } | |
2813 | } | |
a7ceb342 MD |
2814 | if (!session->current_trace_chunk) { |
2815 | if (!session->has_rotated) { | |
2816 | new_path = ""; | |
2817 | } else { | |
2818 | new_path = NULL; | |
2819 | } | |
2820 | } else { | |
2821 | new_path = DEFAULT_CHUNK_TMP_NEW_DIRECTORY; | |
2822 | } | |
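/*
 * Editor's note on the path selection above (assumption based on the
 * surrounding logic): "" keeps the very first chunk of a never-rotated
 * session at the root of the session output directory, NULL lets a
 * previously-rotated session fall back to the chunk's default directory
 * name, and DEFAULT_CHUNK_TMP_NEW_DIRECTORY stages the new chunk in a
 * temporary directory while the previous chunk is still being closed.
 */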
e5add6d0 | 2823 | chunk = lttng_trace_chunk_create( |
a7ceb342 | 2824 | msg->chunk_id, msg->creation_timestamp, new_path); |
e5add6d0 JG |
2825 | if (!chunk) { |
2826 | ERR("Failed to create trace chunk in trace chunk creation command"); | |
2827 | ret = -1; | |
2828 | reply_code = LTTNG_ERR_NOMEM; | |
2829 | goto end; | |
2830 | } | |
7145f5e9 | 2831 | lttng_trace_chunk_set_fd_tracker(chunk, the_fd_tracker); |
e5add6d0 JG |
2832 | |
2833 | if (msg->override_name_length) { | |
2834 | const char *name; | |
3e6e0df2 JG |
2835 | const struct lttng_buffer_view chunk_name_view = |
2836 | lttng_buffer_view_from_view(payload, | |
2837 | sizeof(*msg), | |
2838 | msg->override_name_length); | |
2839 | ||
2840 | if (!lttng_buffer_view_is_valid(&chunk_name_view)) { | |
2841 | ERR("Invalid payload of chunk creation command (protocol error): buffer too short for expected name length"); | |
2842 | ret = -1; | |
2843 | reply_code = LTTNG_ERR_INVALID; | |
2844 | goto end; | |
2845 | } | |
e5add6d0 | 2846 | |
e5add6d0 | 2847 | name = chunk_name_view.data; |
3e6e0df2 JG |
2848 | if (name[msg->override_name_length - 1]) { |
2849 | ERR("Invalid payload of chunk creation command (protocol error): name is not null-terminated"); | |
e5add6d0 JG |
2850 | ret = -1; |
2851 | reply_code = LTTNG_ERR_INVALID; | |
2852 | goto end; | |
2853 | } | |
2854 | ||
2855 | chunk_status = lttng_trace_chunk_override_name( | |
2856 | chunk, chunk_name_view.data); | |
2857 | switch (chunk_status) { | |
2858 | case LTTNG_TRACE_CHUNK_STATUS_OK: | |
2859 | break; | |
2860 | case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT: | |
2861 | ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)"); | |
2862 | reply_code = LTTNG_ERR_INVALID; | |
2863 | ret = -1; | |
2864 | goto end; | |
2865 | default: | |
2866 | ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)"); | |
2867 | reply_code = LTTNG_ERR_UNK; | |
2868 | ret = -1; | |
2869 | goto end; | |
2870 | } | |
2871 | } | |
2872 | ||
e5add6d0 JG |
2873 | chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk); |
2874 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2875 | reply_code = LTTNG_ERR_UNK; | |
2876 | ret = -1; | |
2877 | goto end; | |
2878 | } | |
2879 | ||
a0377dfe | 2880 | LTTNG_ASSERT(conn->session->output_directory); |
7ceefac4 JG |
2881 | chunk_status = lttng_trace_chunk_set_as_owner(chunk, |
2882 | conn->session->output_directory); | |
e5add6d0 JG |
2883 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { |
2884 | reply_code = LTTNG_ERR_UNK; | |
2885 | ret = -1; | |
2886 | goto end; | |
2887 | } | |
2888 | ||
2889 | published_chunk = sessiond_trace_chunk_registry_publish_chunk( | |
2890 | sessiond_trace_chunk_registry, | |
2891 | conn->session->sessiond_uuid, | |
c5c79321 JG |
2892 | conn->session->id_sessiond.is_set ? |
2893 | conn->session->id_sessiond.value : | |
2894 | conn->session->id, | |
e5add6d0 JG |
2895 | chunk); |
2896 | if (!published_chunk) { | |
c70636a7 | 2897 | char uuid_str[LTTNG_UUID_STR_LEN]; |
e5add6d0 JG |
2898 | |
2899 | lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str); | |
2900 | ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64, | |
2901 | uuid_str, | |
2902 | conn->session->id, | |
2903 | msg->chunk_id); | |
2904 | ret = -1; | |
2905 | reply_code = LTTNG_ERR_NOMEM; | |
2906 | goto end; | |
2907 | } | |
2908 | ||
62bad3bf JG |
2909 | if (conn->session->pending_closure_trace_chunk) { |
2910 | /* | |
2911 | * Invalid; this means a second create_trace_chunk command was | |
2912 | * received before a close_trace_chunk. | |
2913 | */ | |
2914 | ERR("Invalid trace chunk close command received; a trace chunk is already waiting for a trace chunk close command"); | |
2915 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2916 | ret = -1; | |
8cd15f6a | 2917 | goto end; |
62bad3bf JG |
2918 | } |
2919 | conn->session->pending_closure_trace_chunk = | |
2920 | conn->session->current_trace_chunk; | |
e5add6d0 | 2921 | conn->session->current_trace_chunk = published_chunk; |
e5add6d0 | 2922 | published_chunk = NULL; |
a7ceb342 MD |
2923 | if (!conn->session->pending_closure_trace_chunk) { |
2924 | session->ongoing_rotation = false; | |
2925 | } | |
e5add6d0 | 2926 | end: |
8cd15f6a | 2927 | pthread_mutex_unlock(&conn->session->lock); |
e5add6d0 JG |
2928 | reply.ret_code = htobe32((uint32_t) reply_code); |
2929 | send_ret = conn->sock->ops->sendmsg(conn->sock, | |
2930 | &reply, | |
2931 | sizeof(struct lttcomm_relayd_generic_reply), | |
2932 | 0); | |
2933 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2934 | ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)", | |
2935 | send_ret); | |
2936 | ret = -1; | |
2937 | } | |
2938 | end_no_reply: | |
2939 | lttng_trace_chunk_put(chunk); | |
2940 | lttng_trace_chunk_put(published_chunk); | |
e5add6d0 JG |
2941 | return ret; |
2942 | } | |
2943 | ||
bbc4768c JG |
2944 | /* |
2945 | * relay_close_trace_chunk: close a trace chunk | |
2946 | */ | |
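/*
 * A close records the chunk's close timestamp and optional close command,
 * renames the on-disk chunk directories when required (e.g. moving the
 * ".tmp_old_chunk" back into place), and replies to the peer with the path
 * under which the closed chunk now resides.
 */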
2947 | static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr, | |
2948 | struct relay_connection *conn, | |
2949 | const struct lttng_buffer_view *payload) | |
2950 | { | |
9898f786 | 2951 | int ret = 0, buf_ret; |
bbc4768c JG |
2952 | ssize_t send_ret; |
2953 | struct relay_session *session = conn->session; | |
2954 | struct lttcomm_relayd_close_trace_chunk *msg; | |
ecd1a12f | 2955 | struct lttcomm_relayd_close_trace_chunk_reply reply = {}; |
bbc4768c JG |
2956 | struct lttng_buffer_view header_view; |
2957 | struct lttng_trace_chunk *chunk = NULL; | |
2958 | enum lttng_error_code reply_code = LTTNG_OK; | |
2959 | enum lttng_trace_chunk_status chunk_status; | |
2960 | uint64_t chunk_id; | |
c35f9726 | 2961 | LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {}; |
bbc4768c | 2962 | time_t close_timestamp; |
ecd1a12f MD |
2963 | char closed_trace_chunk_path[LTTNG_PATH_MAX]; |
2964 | size_t path_length = 0; | |
2965 | const char *chunk_name = NULL; | |
2966 | struct lttng_dynamic_buffer reply_payload; | |
a7ceb342 | 2967 | const char *new_path; |
ecd1a12f MD |
2968 | |
2969 | lttng_dynamic_buffer_init(&reply_payload); | |
bbc4768c JG |
2970 | |
2971 | if (!session || !conn->version_check_done) { | |
2972 | ERR("Trying to close a trace chunk before version check"); | |
2973 | ret = -1; | |
2974 | goto end_no_reply; | |
2975 | } | |
2976 | ||
2977 | if (session->major == 2 && session->minor < 11) { | |
2978 | ERR("Chunk close command is unsupported before 2.11"); | |
2979 | ret = -1; | |
2980 | goto end_no_reply; | |
2981 | } | |
2982 | ||
2983 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
3e6e0df2 | 2984 | if (!lttng_buffer_view_is_valid(&header_view)) { |
bbc4768c JG |
2985 | ERR("Failed to receive payload of chunk close command"); |
2986 | ret = -1; | |
2987 | goto end_no_reply; | |
2988 | } | |
2989 | ||
2990 | /* Convert to host endianness. */ | |
2991 | msg = (typeof(msg)) header_view.data; | |
2992 | chunk_id = be64toh(msg->chunk_id); | |
2993 | close_timestamp = (time_t) be64toh(msg->close_timestamp); | |
ac497a37 SM |
2994 | close_command.value = (lttng_trace_chunk_command_type) be32toh(msg->close_command.value); |
2995 | close_command.is_set = msg->close_command.is_set; | |
bbc4768c JG |
2996 | |
2997 | chunk = sessiond_trace_chunk_registry_get_chunk( | |
2998 | sessiond_trace_chunk_registry, | |
2999 | conn->session->sessiond_uuid, | |
c5c79321 JG |
3000 | conn->session->id_sessiond.is_set ? |
3001 | conn->session->id_sessiond.value : | |
3002 | conn->session->id, | |
bbc4768c JG |
3003 | chunk_id); |
3004 | if (!chunk) { | |
c70636a7 | 3005 | char uuid_str[LTTNG_UUID_STR_LEN]; |
bbc4768c JG |
3006 | |
3007 | lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str); | |
3008 | ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64, | |
3009 | uuid_str, | |
3010 | conn->session->id, | |
3011 | msg->chunk_id); | |
3012 | ret = -1; | |
3013 | reply_code = LTTNG_ERR_NOMEM; | |
3014 | goto end; | |
3015 | } | |
3016 | ||
62bad3bf | 3017 | pthread_mutex_lock(&session->lock); |
f77a6ec2 MD |
3018 | if (close_command.is_set && |
3019 | close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE) { | |
3020 | /* | |
3021 | * Clear command. It is a protocol error to ask for a | |
3022 | * clear on a relay which does not allow it. Querying | |
3023 | * the configuration allows figuring out whether | |
3024 | * clearing is allowed before doing the clear. | |
3025 | */ | |
3026 | if (!opt_allow_clear) { | |
3027 | ret = -1; | |
3028 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
3029 | goto end_unlock_session; | |
3030 | } | |
3031 | } | |
62bad3bf JG |
3032 | if (session->pending_closure_trace_chunk && |
3033 | session->pending_closure_trace_chunk != chunk) { | |
3034 | ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure", | |
3035 | session->session_name); | |
3036 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
3037 | ret = -1; | |
3038 | goto end_unlock_session; | |
3039 | } | |
3040 | ||
a7ceb342 MD |
3041 | if (session->current_trace_chunk && session->current_trace_chunk != chunk && |
3042 | !lttng_trace_chunk_get_name_overridden(session->current_trace_chunk)) { | |
3043 | if (close_command.is_set && | |
3044 | close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_DELETE && | |
3045 | !session->has_rotated) { | |
3046 | /* New chunk stays in session output directory. */ | |
3047 | new_path = ""; | |
3048 | } else { | |
3049 | /* Use chunk name for new chunk. */ | |
3050 | new_path = NULL; | |
3051 | } | |
3052 | /* Rename new chunk path. */ | |
3053 | chunk_status = lttng_trace_chunk_rename_path(session->current_trace_chunk, | |
3054 | new_path); | |
3055 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
3056 | ret = -1; | |
88188199 | 3057 | goto end_unlock_session; |
a7ceb342 MD |
3058 | } |
3059 | session->ongoing_rotation = false; | |
3060 | } | |
3061 | if ((!close_command.is_set || | |
3062 | close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION) && | |
3063 | !lttng_trace_chunk_get_name_overridden(chunk)) { | |
3064 | const char *old_path; | |
3065 | ||
3066 | if (!session->has_rotated) { | |
3067 | old_path = ""; | |
3068 | } else { | |
3069 | old_path = NULL; | |
3070 | } | |
3071 | /* We need to move back the .tmp_old_chunk to its rightful place. */ | |
3072 | chunk_status = lttng_trace_chunk_rename_path(chunk, old_path); | |
3073 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
3074 | ret = -1; | |
88188199 | 3075 | goto end_unlock_session; |
a7ceb342 MD |
3076 | } |
3077 | } | |
bbc4768c JG |
3078 | chunk_status = lttng_trace_chunk_set_close_timestamp( |
3079 | chunk, close_timestamp); | |
3080 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
3081 | ERR("Failed to set trace chunk close timestamp"); | |
3082 | ret = -1; | |
3083 | reply_code = LTTNG_ERR_UNK; | |
62bad3bf | 3084 | goto end_unlock_session; |
bbc4768c JG |
3085 | } |
3086 | ||
3087 | if (close_command.is_set) { | |
3088 | chunk_status = lttng_trace_chunk_set_close_command( | |
3089 | chunk, close_command.value); | |
3090 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
3091 | ret = -1; | |
3092 | reply_code = LTTNG_ERR_INVALID; | |
62bad3bf | 3093 | goto end_unlock_session; |
bbc4768c JG |
3094 | } |
3095 | } | |
ecd1a12f MD |
3096 | chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL); |
3097 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
3098 | ERR("Failed to get chunk name"); | |
3099 | ret = -1; | |
3100 | reply_code = LTTNG_ERR_UNK; | |
3101 | goto end_unlock_session; | |
3102 | } | |
3103 | if (!session->has_rotated && !session->snapshot) { | |
3104 | ret = lttng_strncpy(closed_trace_chunk_path, | |
3105 | session->output_path, | |
3106 | sizeof(closed_trace_chunk_path)); | |
3107 | if (ret) { | |
3108 | ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes", | |
3109 | strlen(session->output_path), | |
3110 | sizeof(closed_trace_chunk_path)); | |
3111 | reply_code = LTTNG_ERR_NOMEM; | |
3112 | ret = -1; | |
3113 | goto end_unlock_session; | |
3114 | } | |
3115 | } else { | |
3116 | if (session->snapshot) { | |
3117 | ret = snprintf(closed_trace_chunk_path, | |
3118 | sizeof(closed_trace_chunk_path), | |
3119 | "%s/%s", session->output_path, | |
3120 | chunk_name); | |
3121 | } else { | |
3122 | ret = snprintf(closed_trace_chunk_path, | |
3123 | sizeof(closed_trace_chunk_path), | |
3124 | "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY | |
3125 | "/%s", | |
3126 | session->output_path, chunk_name); | |
3127 | } | |
3128 | if (ret < 0 || ret == sizeof(closed_trace_chunk_path)) { | |
3129 | ERR("Failed to format closed trace chunk resulting path"); | |
3130 | reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM; | |
3131 | ret = -1; | |
3132 | goto end_unlock_session; | |
3133 | } | |
3134 | } | |
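/*
 * For illustration only (hypothetical session and chunk names): a snapshot
 * session would reply with "my-session/snapshot-1", while a rotated session
 * would reply with "my-session/archives/<chunk name>", assuming
 * DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY expands to "archives".
 */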
489ea154 MD |
3135 | if (close_command.is_set && |
3136 | close_command.value == LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED) { | |
3137 | session->has_rotated = true; | |
3138 | } | |
ecd1a12f MD |
3139 | DBG("Reply chunk path on close: %s", closed_trace_chunk_path); |
3140 | path_length = strlen(closed_trace_chunk_path) + 1; | |
3141 | if (path_length > UINT32_MAX) { | |
3142 | ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol"); | |
3143 | ret = -1; | |
3144 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
3145 | goto end_unlock_session; | |
3146 | } | |
bbc4768c | 3147 | |
c35f9726 JG |
3148 | if (session->current_trace_chunk == chunk) { |
3149 | /* | |
3150 | * After a trace chunk close command, no new streams | |
3151 | * referencing the chunk may be created. Hence, in the | |
3152 | * event that no new trace chunk has been created for | |
3153 | * the session, the reference to the current trace chunk | |
3154 | * is released in order to allow it to be reclaimed when | |
3155 | * the last stream releases its reference to it. | |
3156 | */ | |
3157 | lttng_trace_chunk_put(session->current_trace_chunk); | |
3158 | session->current_trace_chunk = NULL; | |
3159 | } | |
62bad3bf JG |
3160 | lttng_trace_chunk_put(session->pending_closure_trace_chunk); |
3161 | session->pending_closure_trace_chunk = NULL; | |
3162 | end_unlock_session: | |
c35f9726 JG |
3163 | pthread_mutex_unlock(&session->lock); |
3164 | ||
bbc4768c | 3165 | end: |
ecd1a12f MD |
3166 | reply.generic.ret_code = htobe32((uint32_t) reply_code); |
3167 | reply.path_length = htobe32((uint32_t) path_length); | |
9898f786 | 3168 | buf_ret = lttng_dynamic_buffer_append( |
ecd1a12f | 3169 | &reply_payload, &reply, sizeof(reply)); |
9898f786 | 3170 | if (buf_ret) { |
ecd1a12f MD |
3171 | ERR("Failed to append \"close trace chunk\" command reply header to payload buffer"); |
3172 | goto end_no_reply; | |
3173 | } | |
3174 | ||
3175 | if (reply_code == LTTNG_OK) { | |
9898f786 | 3176 | buf_ret = lttng_dynamic_buffer_append(&reply_payload, |
ecd1a12f | 3177 | closed_trace_chunk_path, path_length); |
9898f786 | 3178 | if (buf_ret) { |
ecd1a12f MD |
3179 | ERR("Failed to append \"close trace chunk\" command reply path to payload buffer"); |
3180 | goto end_no_reply; | |
3181 | } | |
3182 | } | |
3183 | ||
bbc4768c | 3184 | send_ret = conn->sock->ops->sendmsg(conn->sock, |
ecd1a12f MD |
3185 | reply_payload.data, |
3186 | reply_payload.size, | |
bbc4768c | 3187 | 0); |
ecd1a12f MD |
3188 | if (send_ret < reply_payload.size) { |
3189 | ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)", | |
3190 | reply_payload.size, send_ret); | |
bbc4768c | 3191 | ret = -1; |
ecd1a12f | 3192 | goto end_no_reply; |
bbc4768c JG |
3193 | } |
3194 | end_no_reply: | |
3195 | lttng_trace_chunk_put(chunk); | |
ecd1a12f | 3196 | lttng_dynamic_buffer_reset(&reply_payload); |
bbc4768c JG |
3197 | return ret; |
3198 | } | |
3199 | ||
c35f9726 JG |
3200 | /* |
3201 | * relay_trace_chunk_exists: check if a trace chunk exists | |
3202 | */ | |
3203 | static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr, | |
3204 | struct relay_connection *conn, | |
3205 | const struct lttng_buffer_view *payload) | |
3206 | { | |
3207 | int ret = 0; | |
3208 | ssize_t send_ret; | |
3209 | struct relay_session *session = conn->session; | |
3210 | struct lttcomm_relayd_trace_chunk_exists *msg; | |
3211 | struct lttcomm_relayd_trace_chunk_exists_reply reply = {}; | |
3212 | struct lttng_buffer_view header_view; | |
c35f9726 | 3213 | uint64_t chunk_id; |
6b584c2e | 3214 | bool chunk_exists; |
c35f9726 JG |
3215 | |
3216 | if (!session || !conn->version_check_done) { | |
0f1b1d25 | 3217 | ERR("Trying to check for the presence of a trace chunk before version check"); |
c35f9726 JG |
3218 | ret = -1; |
3219 | goto end_no_reply; | |
3220 | } | |
3221 | ||
3222 | if (session->major == 2 && session->minor < 11) { | |
8a82be4c | 3223 | ERR("Chunk exists command is unsupported before 2.11"); |
c35f9726 JG |
3224 | ret = -1; |
3225 | goto end_no_reply; | |
3226 | } | |
3227 | ||
3228 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
3e6e0df2 | 3229 | if (!lttng_buffer_view_is_valid(&header_view)) { |
8a82be4c | 3230 | ERR("Failed to receive payload of chunk exists command"); |
c35f9726 JG |
3231 | ret = -1; |
3232 | goto end_no_reply; | |
3233 | } | |
3234 | ||
3235 | /* Convert to host endianness. */ | |
3236 | msg = (typeof(msg)) header_view.data; | |
3237 | chunk_id = be64toh(msg->chunk_id); | |
3238 | ||
6b584c2e | 3239 | ret = sessiond_trace_chunk_registry_chunk_exists( |
c35f9726 JG |
3240 | sessiond_trace_chunk_registry, |
3241 | conn->session->sessiond_uuid, | |
3242 | conn->session->id, | |
6b584c2e JG |
3243 | chunk_id, &chunk_exists); |
3244 | /* | |
3245 | * If ret is not 0, send the reply and report the error to the caller. | |
3246 | * It is a protocol (or internal) error and the session/connection | |
3247 | * should be torn down. | |
3248 | */ | |
ac497a37 SM |
3249 | reply.generic.ret_code = htobe32((uint32_t) (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)); |
3250 | reply.trace_chunk_exists = ret == 0 ? chunk_exists : 0; | |
3251 | ||
6b584c2e JG |
3252 | send_ret = conn->sock->ops->sendmsg( |
3253 | conn->sock, &reply, sizeof(reply), 0); | |
c35f9726 JG |
3254 | if (send_ret < (ssize_t) sizeof(reply)) { |
3255 | ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)", | |
3256 | send_ret); | |
3257 | ret = -1; | |
3258 | } | |
3259 | end_no_reply: | |
c35f9726 JG |
3260 | return ret; |
3261 | } | |
3262 | ||
8614e600 MD |
3263 | /* |
3264 | * relay_get_configuration: query whether feature is available | |
3265 | */ | |
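/*
 * The peer sends a 64-bit mask of query flags; no flags are handled here, so
 * any non-zero value is reported as a protocol error. The reply carries a
 * 64-bit mask of configuration flags, currently limited to
 * LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED when clearing is allowed
 * (opt_allow_clear).
 */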
3266 | static int relay_get_configuration(const struct lttcomm_relayd_hdr *recv_hdr, | |
3267 | struct relay_connection *conn, | |
3268 | const struct lttng_buffer_view *payload) | |
3269 | { | |
3270 | int ret = 0; | |
3271 | ssize_t send_ret; | |
3272 | struct lttcomm_relayd_get_configuration *msg; | |
3273 | struct lttcomm_relayd_get_configuration_reply reply = {}; | |
3274 | struct lttng_buffer_view header_view; | |
3275 | uint64_t query_flags = 0; | |
3276 | uint64_t result_flags = 0; | |
3277 | ||
3278 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
3e6e0df2 | 3279 | if (!lttng_buffer_view_is_valid(&header_view)) { |
8614e600 MD |
3280 | ERR("Failed to receive payload of chunk close command"); |
3281 | ret = -1; | |
3282 | goto end_no_reply; | |
3283 | } | |
3284 | ||
3285 | /* Convert to host endianness. */ | |
3286 | msg = (typeof(msg)) header_view.data; | |
3287 | query_flags = be64toh(msg->query_flags); | |
3288 | ||
3289 | if (query_flags) { | |
3290 | ret = LTTNG_ERR_INVALID_PROTOCOL; | |
3291 | goto reply; | |
3292 | } | |
3293 | if (opt_allow_clear) { | |
3294 | result_flags |= LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED; | |
3295 | } | |
3296 | ret = 0; | |
3297 | reply: | |
ac497a37 SM |
3298 | reply.generic.ret_code = htobe32((uint32_t) (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)); |
3299 | reply.relayd_configuration_flags = htobe64(result_flags); | |
3300 | ||
8614e600 MD |
3301 | send_ret = conn->sock->ops->sendmsg( |
3302 | conn->sock, &reply, sizeof(reply), 0); | |
3303 | if (send_ret < (ssize_t) sizeof(reply)) { | |
3304 | ERR("Failed to send \"get configuration\" command reply (ret = %zd)", | |
3305 | send_ret); | |
3306 | ret = -1; | |
3307 | } | |
3308 | end_no_reply: | |
3309 | return ret; | |
3310 | } | |
3311 | ||
5312a3ed JG |
3312 | static int relay_process_control_command(struct relay_connection *conn, |
3313 | const struct lttcomm_relayd_hdr *header, | |
3314 | const struct lttng_buffer_view *payload) | |
b8aa1682 JD |
3315 | { |
3316 | int ret = 0; | |
3317 | ||
00e71031 FD |
3318 | DBG3("Processing \"%s\" command for socket %i", |
3319 | lttcomm_relayd_command_str((lttcomm_relayd_command) header->cmd), | |
3320 | conn->sock->fd); | |
5312a3ed | 3321 | switch (header->cmd) { |
b8aa1682 | 3322 | case RELAYD_CREATE_SESSION: |
5312a3ed | 3323 | ret = relay_create_session(header, conn, payload); |
b8aa1682 | 3324 | break; |
b8aa1682 | 3325 | case RELAYD_ADD_STREAM: |
5312a3ed | 3326 | ret = relay_add_stream(header, conn, payload); |
b8aa1682 JD |
3327 | break; |
3328 | case RELAYD_START_DATA: | |
5312a3ed | 3329 | ret = relay_start(header, conn, payload); |
b8aa1682 JD |
3330 | break; |
3331 | case RELAYD_SEND_METADATA: | |
5312a3ed | 3332 | ret = relay_recv_metadata(header, conn, payload); |
b8aa1682 JD |
3333 | break; |
3334 | case RELAYD_VERSION: | |
5312a3ed | 3335 | ret = relay_send_version(header, conn, payload); |
b8aa1682 | 3336 | break; |
173af62f | 3337 | case RELAYD_CLOSE_STREAM: |
5312a3ed | 3338 | ret = relay_close_stream(header, conn, payload); |
173af62f | 3339 | break; |
6d805429 | 3340 | case RELAYD_DATA_PENDING: |
5312a3ed | 3341 | ret = relay_data_pending(header, conn, payload); |
c8f59ee5 DG |
3342 | break; |
3343 | case RELAYD_QUIESCENT_CONTROL: | |
5312a3ed | 3344 | ret = relay_quiescent_control(header, conn, payload); |
c8f59ee5 | 3345 | break; |
f7079f67 | 3346 | case RELAYD_BEGIN_DATA_PENDING: |
5312a3ed | 3347 | ret = relay_begin_data_pending(header, conn, payload); |
f7079f67 DG |
3348 | break; |
3349 | case RELAYD_END_DATA_PENDING: | |
5312a3ed | 3350 | ret = relay_end_data_pending(header, conn, payload); |
f7079f67 | 3351 | break; |
1c20f0e2 | 3352 | case RELAYD_SEND_INDEX: |
5312a3ed | 3353 | ret = relay_recv_index(header, conn, payload); |
1c20f0e2 | 3354 | break; |
a4baae1b | 3355 | case RELAYD_STREAMS_SENT: |
5312a3ed | 3356 | ret = relay_streams_sent(header, conn, payload); |
a4baae1b | 3357 | break; |
93ec662e | 3358 | case RELAYD_RESET_METADATA: |
5312a3ed | 3359 | ret = relay_reset_metadata(header, conn, payload); |
93ec662e | 3360 | break; |
c35f9726 | 3361 | case RELAYD_ROTATE_STREAMS: |
c35f9726 | 3362 | ret = relay_rotate_session_streams(header, conn, payload); |
d3ecc550 | 3363 | break; |
e5add6d0 | 3364 | case RELAYD_CREATE_TRACE_CHUNK: |
e5add6d0 JG |
3365 | ret = relay_create_trace_chunk(header, conn, payload); |
3366 | break; | |
bbc4768c | 3367 | case RELAYD_CLOSE_TRACE_CHUNK: |
bbc4768c JG |
3368 | ret = relay_close_trace_chunk(header, conn, payload); |
3369 | break; | |
c35f9726 | 3370 | case RELAYD_TRACE_CHUNK_EXISTS: |
c35f9726 JG |
3371 | ret = relay_trace_chunk_exists(header, conn, payload); |
3372 | break; | |
8614e600 | 3373 | case RELAYD_GET_CONFIGURATION: |
8614e600 MD |
3374 | ret = relay_get_configuration(header, conn, payload); |
3375 | break; | |
b8aa1682 JD |
3376 | case RELAYD_UPDATE_SYNC_INFO: |
3377 | default: | |
5312a3ed | 3378 | ERR("Received unknown command (%u)", header->cmd); |
58eb9381 | 3379 | relay_unknown_command(conn); |
b8aa1682 JD |
3380 | ret = -1; |
3381 | goto end; | |
3382 | } | |
3383 | ||
3384 | end: | |
3385 | return ret; | |
3386 | } | |
3387 | ||
5569b118 JG |
3388 | static enum relay_connection_status relay_process_control_receive_payload( |
3389 | struct relay_connection *conn) | |
5312a3ed JG |
3390 | { |
3391 | int ret = 0; | |
5569b118 | 3392 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3393 | struct lttng_dynamic_buffer *reception_buffer = |
3394 | &conn->protocol.ctrl.reception_buffer; | |
3395 | struct ctrl_connection_state_receive_payload *state = | |
3396 | &conn->protocol.ctrl.state.receive_payload; | |
3397 | struct lttng_buffer_view payload_view; | |
3398 | ||
3399 | if (state->left_to_receive == 0) { | |
3400 | /* Short-circuit for payload-less commands. */ | |
3401 | goto reception_complete; | |
3402 | } | |
3403 | ||
3404 | ret = conn->sock->ops->recvmsg(conn->sock, | |
3405 | reception_buffer->data + state->received, | |
3406 | state->left_to_receive, MSG_DONTWAIT); | |
3407 | if (ret < 0) { | |
942003e5 MJ |
3408 | DIAGNOSTIC_PUSH |
3409 | DIAGNOSTIC_IGNORE_LOGICAL_OP | |
5569b118 | 3410 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
942003e5 | 3411 | DIAGNOSTIC_POP |
5569b118 JG |
3412 | PERROR("Unable to receive command payload on sock %d", |
3413 | conn->sock->fd); | |
3414 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3415 | } | |
5312a3ed JG |
3416 | goto end; |
3417 | } else if (ret == 0) { | |
3418 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 3419 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
3420 | goto end; |
3421 | } | |
3422 | ||
a0377dfe FD |
3423 | LTTNG_ASSERT(ret > 0); |
3424 | LTTNG_ASSERT(ret <= state->left_to_receive); | |
5312a3ed JG |
3425 | |
3426 | state->left_to_receive -= ret; | |
3427 | state->received += ret; | |
3428 | ||
3429 | if (state->left_to_receive > 0) { | |
3430 | /* | |
3431 | * Can't transition to the protocol's next state, wait to | |
3432 | * receive the rest of the payload. | |
3433 | */ | |
3434 | DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
3435 | state->received, state->left_to_receive, | |
3436 | conn->sock->fd); | |
5312a3ed JG |
3437 | goto end; |
3438 | } | |
3439 | ||
3440 | reception_complete: | |
3441 | DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes", | |
3442 | conn->sock->fd, state->received); | |
3443 | /* | |
3444 | * The payload required to process the command has been received. | |
3445 | * A view to the reception buffer is forwarded to the various | |
3446 | * commands and the state of the control is reset on success. | |
3447 | * | |
3448 | * Commands are responsible for sending their reply to the peer. | |
3449 | */ | |
3450 | payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer, | |
3451 | 0, -1); | |
3452 | ret = relay_process_control_command(conn, | |
3453 | &state->header, &payload_view); | |
3454 | if (ret < 0) { | |
5569b118 | 3455 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
3456 | goto end; |
3457 | } | |
3458 | ||
3459 | ret = connection_reset_protocol_state(conn); | |
5569b118 JG |
3460 | if (ret) { |
3461 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3462 | } | |
5312a3ed | 3463 | end: |
5569b118 | 3464 | return status; |
5312a3ed JG |
3465 | } |
3466 | ||
5569b118 JG |
3467 | static enum relay_connection_status relay_process_control_receive_header( |
3468 | struct relay_connection *conn) | |
5312a3ed JG |
3469 | { |
3470 | int ret = 0; | |
5569b118 | 3471 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3472 | struct lttcomm_relayd_hdr header; |
3473 | struct lttng_dynamic_buffer *reception_buffer = | |
3474 | &conn->protocol.ctrl.reception_buffer; | |
3475 | struct ctrl_connection_state_receive_header *state = | |
3476 | &conn->protocol.ctrl.state.receive_header; | |
3477 | ||
a0377dfe | 3478 | LTTNG_ASSERT(state->left_to_receive != 0); |
5312a3ed JG |
3479 | |
3480 | ret = conn->sock->ops->recvmsg(conn->sock, | |
3481 | reception_buffer->data + state->received, | |
3482 | state->left_to_receive, MSG_DONTWAIT); | |
3483 | if (ret < 0) { | |
942003e5 MJ |
3484 | DIAGNOSTIC_PUSH |
3485 | DIAGNOSTIC_IGNORE_LOGICAL_OP | |
5569b118 | 3486 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
942003e5 | 3487 | DIAGNOSTIC_POP |
5569b118 JG |
3488 | PERROR("Unable to receive control command header on sock %d", |
3489 | conn->sock->fd); | |
3490 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3491 | } | |
5312a3ed JG |
3492 | goto end; |
3493 | } else if (ret == 0) { | |
3494 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 3495 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
3496 | goto end; |
3497 | } | |
3498 | ||
a0377dfe FD |
3499 | LTTNG_ASSERT(ret > 0); |
3500 | LTTNG_ASSERT(ret <= state->left_to_receive); | |
5312a3ed JG |
3501 | |
3502 | state->left_to_receive -= ret; | |
3503 | state->received += ret; | |
3504 | ||
3505 | if (state->left_to_receive > 0) { | |
3506 | /* | |
3507 | * Can't transition to the protocol's next state, wait to | |
3508 | * receive the rest of the header. | |
3509 | */ | |
3510 | DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
3511 | state->received, state->left_to_receive, | |
3512 | conn->sock->fd); | |
5312a3ed JG |
3513 | goto end; |
3514 | } | |
3515 | ||
3516 | /* Transition to next state: receiving the command's payload. */ | |
3517 | conn->protocol.ctrl.state_id = | |
3518 | CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD; | |
3519 | memcpy(&header, reception_buffer->data, sizeof(header)); | |
3520 | header.circuit_id = be64toh(header.circuit_id); | |
3521 | header.data_size = be64toh(header.data_size); | |
3522 | header.cmd = be32toh(header.cmd); | |
3523 | header.cmd_version = be32toh(header.cmd_version); | |
3524 | memcpy(&conn->protocol.ctrl.state.receive_payload.header, | |
3525 | &header, sizeof(header)); | |
3526 | ||
00e71031 FD |
3527 | DBG("Done receiving control command header: fd = %i, cmd = %s, cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes", |
3528 | conn->sock->fd, lttcomm_relayd_command_str((enum lttcomm_relayd_command) header.cmd), | |
3529 | header.cmd_version, header.data_size); | |
5312a3ed | 3530 | |
715e6fb1 | 3531 | if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) { |
5312a3ed JG |
3532 | ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.", |
3533 | header.data_size); | |
5569b118 | 3534 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
3535 | goto end; |
3536 | } | |
3537 | ||
3538 | conn->protocol.ctrl.state.receive_payload.left_to_receive = | |
3539 | header.data_size; | |
3540 | conn->protocol.ctrl.state.receive_payload.received = 0; | |
3541 | ret = lttng_dynamic_buffer_set_size(reception_buffer, | |
3542 | header.data_size); | |
3543 | if (ret) { | |
5569b118 | 3544 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
3545 | goto end; |
3546 | } | |
3547 | ||
3548 | if (header.data_size == 0) { | |
3549 | /* | |
3550 | * Manually invoke the next state as the poll loop | |
3551 | * will not wake-up to allow us to proceed further. | |
3552 | */ | |
5569b118 | 3553 | status = relay_process_control_receive_payload(conn); |
5312a3ed JG |
3554 | } |
3555 | end: | |
5569b118 | 3556 | return status; |
5312a3ed JG |
3557 | } |
3558 | ||
3559 | /* | |
3560 | * Process the commands received on the control socket | |
3561 | */ | |
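/*
 * The control protocol is handled as a two-state machine: RECEIVE_HEADER
 * reads the fixed-size command header, sizes the reception buffer and
 * switches to RECEIVE_PAYLOAD, which accumulates the announced payload,
 * dispatches the command and then resets the state for the next command.
 */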
5569b118 JG |
3562 | static enum relay_connection_status relay_process_control( |
3563 | struct relay_connection *conn) | |
5312a3ed | 3564 | { |
5569b118 | 3565 | enum relay_connection_status status; |
5312a3ed JG |
3566 | |
3567 | switch (conn->protocol.ctrl.state_id) { | |
3568 | case CTRL_CONNECTION_STATE_RECEIVE_HEADER: | |
5569b118 | 3569 | status = relay_process_control_receive_header(conn); |
5312a3ed JG |
3570 | break; |
3571 | case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD: | |
5569b118 | 3572 | status = relay_process_control_receive_payload(conn); |
5312a3ed JG |
3573 | break; |
3574 | default: | |
3575 | ERR("Unknown control connection protocol state encountered."); | |
3576 | abort(); | |
3577 | } | |
3578 | ||
5569b118 | 3579 | return status; |
5312a3ed JG |
3580 | } |
3581 | ||
5569b118 JG |
3582 | static enum relay_connection_status relay_process_data_receive_header( |
3583 | struct relay_connection *conn) | |
b8aa1682 | 3584 | { |
5312a3ed | 3585 | int ret; |
5569b118 | 3586 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3587 | struct data_connection_state_receive_header *state = |
3588 | &conn->protocol.data.state.receive_header; | |
3589 | struct lttcomm_relayd_data_hdr header; | |
b8aa1682 | 3590 | struct relay_stream *stream; |
5312a3ed | 3591 | |
a0377dfe | 3592 | LTTNG_ASSERT(state->left_to_receive != 0); |
5312a3ed JG |
3593 | |
3594 | ret = conn->sock->ops->recvmsg(conn->sock, | |
3595 | state->header_reception_buffer + state->received, | |
3596 | state->left_to_receive, MSG_DONTWAIT); | |
3597 | if (ret < 0) { | |
942003e5 MJ |
3598 | DIAGNOSTIC_PUSH |
3599 | DIAGNOSTIC_IGNORE_LOGICAL_OP | |
5569b118 | 3600 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
942003e5 | 3601 | DIAGNOSTIC_POP |
5569b118 JG |
3602 | PERROR("Unable to receive data header on sock %d", conn->sock->fd); |
3603 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3604 | } | |
5312a3ed JG |
3605 | goto end; |
3606 | } else if (ret == 0) { | |
3607 | /* Orderly shutdown. Not necessary to print an error. */ | |
3608 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 3609 | status = RELAY_CONNECTION_STATUS_CLOSED; |
b8aa1682 JD |
3610 | goto end; |
3611 | } | |
3612 | ||
a0377dfe FD |
3613 | LTTNG_ASSERT(ret > 0); |
3614 | LTTNG_ASSERT(ret <= state->left_to_receive); | |
5312a3ed JG |
3615 | |
3616 | state->left_to_receive -= ret; | |
3617 | state->received += ret; | |
3618 | ||
3619 | if (state->left_to_receive > 0) { | |
3620 | /* | |
3621 | * Can't transition to the protocol's next state, wait to | |
3622 | * receive the rest of the header. | |
3623 | */ | |
3624 | DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
3625 | state->received, state->left_to_receive, | |
3626 | conn->sock->fd); | |
7591bab1 | 3627 | goto end; |
b8aa1682 | 3628 | } |
b8aa1682 | 3629 | |
5312a3ed JG |
3630 | /* Transition to next state: receiving the payload. */ |
3631 | conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD; | |
173af62f | 3632 | |
5312a3ed JG |
3633 | memcpy(&header, state->header_reception_buffer, sizeof(header)); |
3634 | header.circuit_id = be64toh(header.circuit_id); | |
3635 | header.stream_id = be64toh(header.stream_id); | |
3636 | header.data_size = be32toh(header.data_size); | |
3637 | header.net_seq_num = be64toh(header.net_seq_num); | |
3638 | header.padding_size = be32toh(header.padding_size); | |
3639 | memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header)); | |
3640 | ||
3641 | conn->protocol.data.state.receive_payload.left_to_receive = | |
3642 | header.data_size; | |
3643 | conn->protocol.data.state.receive_payload.received = 0; | |
3644 | conn->protocol.data.state.receive_payload.rotate_index = false; | |
3645 | ||
3646 | DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32, | |
3647 | conn->sock->fd, header.circuit_id, | |
3648 | header.stream_id, header.data_size, | |
3649 | header.net_seq_num, header.padding_size); | |
3650 | ||
3651 | stream = stream_get_by_id(header.stream_id); | |
3652 | if (!stream) { | |
3653 | DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64, | |
3654 | header.stream_id); | |
5569b118 JG |
3655 | /* Protocol error. */ |
3656 | status = RELAY_CONNECTION_STATUS_ERROR; | |
5312a3ed JG |
3657 | goto end; |
3658 | } | |
b8aa1682 | 3659 | |
7591bab1 | 3660 | pthread_mutex_lock(&stream->lock); |
c35f9726 JG |
3661 | /* Prepare stream for the reception of a new packet. */ |
3662 | ret = stream_init_packet(stream, header.data_size, | |
3663 | &conn->protocol.data.state.receive_payload.rotate_index); | |
3664 | pthread_mutex_unlock(&stream->lock); | |
3665 | if (ret) { | |
3666 | ERR("Failed to rotate stream output file"); | |
3667 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3668 | goto end_stream_unlock; | |
1c20f0e2 JD |
3669 | } |
3670 | ||
5312a3ed | 3671 | end_stream_unlock: |
5312a3ed JG |
3672 | stream_put(stream); |
3673 | end: | |
5569b118 | 3674 | return status; |
5312a3ed JG |
3675 | } |
3676 | ||
5569b118 JG |
3677 | static enum relay_connection_status relay_process_data_receive_payload( |
3678 | struct relay_connection *conn) | |
5312a3ed JG |
3679 | { |
3680 | int ret; | |
5569b118 | 3681 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3682 | struct relay_stream *stream; |
3683 | struct data_connection_state_receive_payload *state = | |
3684 | &conn->protocol.data.state.receive_payload; | |
3685 | const size_t chunk_size = RECV_DATA_BUFFER_SIZE; | |
3686 | char data_buffer[chunk_size]; | |
3687 | bool partial_recv = false; | |
3688 | bool new_stream = false, close_requested = false, index_flushed = false; | |
3689 | uint64_t left_to_receive = state->left_to_receive; | |
3690 | struct relay_session *session; | |
3691 | ||
fd0f1e3e JR |
3692 | DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive", |
3693 | state->header.stream_id, state->header.net_seq_num, | |
3694 | state->received, left_to_receive); | |
3695 | ||
5312a3ed JG |
3696 | stream = stream_get_by_id(state->header.stream_id); |
3697 | if (!stream) { | |
5569b118 | 3698 | /* Protocol error. */ |
fd0f1e3e | 3699 | ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64, |
5312a3ed | 3700 | state->header.stream_id); |
5569b118 | 3701 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed | 3702 | goto end; |
1c20f0e2 JD |
3703 | } |
3704 | ||
5312a3ed JG |
3705 | pthread_mutex_lock(&stream->lock); |
3706 | session = stream->trace->session; | |
fd0f1e3e JR |
3707 | if (!conn->session) { |
3708 | ret = connection_set_session(conn, session); | |
3709 | if (ret) { | |
3710 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3711 | goto end_stream_unlock; | |
3712 | } | |
3713 | } | |
5312a3ed JG |
3714 | |
3715 | /* | |
3716 | * The size of the "chunk" received on any iteration is bounded by: | |
3717 | * - the data left to receive, | |
3718 | * - the data immediately available on the socket, | |
3719 | * - the on-stack data buffer | |
3720 | */ | |
3721 | while (left_to_receive > 0 && !partial_recv) { | |
ff2aa8f0 | 3722 | size_t recv_size = std::min<uint64_t>(left_to_receive, chunk_size); |
c35f9726 | 3723 | struct lttng_buffer_view packet_chunk; |
5312a3ed JG |
3724 | |
3725 | ret = conn->sock->ops->recvmsg(conn->sock, data_buffer, | |
3726 | recv_size, MSG_DONTWAIT); | |
3727 | if (ret < 0) { | |
942003e5 MJ |
3728 | DIAGNOSTIC_PUSH |
3729 | DIAGNOSTIC_IGNORE_LOGICAL_OP | |
5569b118 | 3730 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
942003e5 | 3731 | DIAGNOSTIC_POP |
5569b118 JG |
3732 | PERROR("Socket %d error", conn->sock->fd); |
3733 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3734 | } | |
0848dba7 | 3735 | goto end_stream_unlock; |
5312a3ed JG |
3736 | } else if (ret == 0) { |
3737 | /* No more data ready to be consumed on socket. */ | |
3738 | DBG3("No more data ready for consumption on data socket of stream id %" PRIu64, | |
3739 | state->header.stream_id); | |
5569b118 | 3740 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
3741 | break; |
3742 | } else if (ret < (int) recv_size) { | |
3743 | /* | |
3744 | * All the data available on the socket has been | |
3745 | * consumed. | |
3746 | */ | |
3747 | partial_recv = true; | |
c35f9726 | 3748 | recv_size = ret; |
0848dba7 MD |
3749 | } |
3750 | ||
c35f9726 JG |
3751 | packet_chunk = lttng_buffer_view_init(data_buffer, |
3752 | 0, recv_size); | |
a0377dfe | 3753 | LTTNG_ASSERT(packet_chunk.data); |
5312a3ed | 3754 | |
c35f9726 JG |
3755 | ret = stream_write(stream, &packet_chunk, 0); |
3756 | if (ret) { | |
0848dba7 | 3757 | ERR("Relay error writing data to file"); |
5569b118 | 3758 | status = RELAY_CONNECTION_STATUS_ERROR; |
0848dba7 MD |
3759 | goto end_stream_unlock; |
3760 | } | |
3761 | ||
5312a3ed JG |
3762 | left_to_receive -= recv_size; |
3763 | state->received += recv_size; | |
3764 | state->left_to_receive = left_to_receive; | |
5312a3ed JG |
3765 | } |
3766 | ||
3767 | if (state->left_to_receive > 0) { | |
3768 | /* | |
3769 | * Did not receive all the data expected, wait for more data to | |
3770 | * become available on the socket. | |
3771 | */ | |
3772 | DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive", | |
3773 | state->header.stream_id, state->received, | |
3774 | state->left_to_receive); | |
5312a3ed | 3775 | goto end_stream_unlock; |
0848dba7 | 3776 | } |
5ab7344e | 3777 | |
c35f9726 JG |
3778 | ret = stream_write(stream, NULL, state->header.padding_size); |
3779 | if (ret) { | |
5569b118 | 3780 | status = RELAY_CONNECTION_STATUS_ERROR; |
7591bab1 | 3781 | goto end_stream_unlock; |
1d4dfdef | 3782 | } |
5312a3ed | 3783 | |
298a25ca | 3784 | if (session_streams_have_index(session)) { |
c35f9726 JG |
3785 | ret = stream_update_index(stream, state->header.net_seq_num, |
3786 | state->rotate_index, &index_flushed, | |
3787 | state->header.data_size + state->header.padding_size); | |
5312a3ed | 3788 | if (ret < 0) { |
c35f9726 | 3789 | ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d", |
5312a3ed JG |
3790 | stream->stream_handle, |
3791 | state->header.net_seq_num, ret); | |
5569b118 | 3792 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
3793 | goto end_stream_unlock; |
3794 | } | |
3795 | } | |
3796 | ||
a8f9f353 | 3797 | if (stream->prev_data_seq == -1ULL) { |
c0bae11d MD |
3798 | new_stream = true; |
3799 | } | |
3800 | ||
c35f9726 JG |
3801 | ret = stream_complete_packet(stream, state->header.data_size + |
3802 | state->header.padding_size, state->header.net_seq_num, | |
3803 | index_flushed); | |
3804 | if (ret) { | |
3805 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3806 | goto end_stream_unlock; | |
3807 | } | |
5312a3ed JG |
3808 | |
3809 | /* | |
3810 | * Resetting the protocol state (to RECEIVE_HEADER) will trash the | |
3811 | * contents of *state which are aliased (union) to the same location as | |
3812 | * the new state. Don't use it beyond this point. | |
3813 | */ | |
3814 | connection_reset_protocol_state(conn); | |
3815 | state = NULL; | |
173af62f | 3816 | |
7591bab1 | 3817 | end_stream_unlock: |
bda7c7b9 | 3818 | close_requested = stream->close_requested; |
7591bab1 | 3819 | pthread_mutex_unlock(&stream->lock); |
5312a3ed | 3820 | if (close_requested && left_to_receive == 0) { |
bda7c7b9 JG |
3821 | try_stream_close(stream); |
3822 | } | |
3823 | ||
c0bae11d MD |
3824 | if (new_stream) { |
3825 | pthread_mutex_lock(&session->lock); | |
3826 | uatomic_set(&session->new_streams, 1); | |
3827 | pthread_mutex_unlock(&session->lock); | |
3828 | } | |
5312a3ed | 3829 | |
7591bab1 | 3830 | stream_put(stream); |
b8aa1682 | 3831 | end: |
5569b118 | 3832 | return status; |
b8aa1682 JD |
3833 | } |
3834 | ||
5312a3ed JG |
3835 | /* |
3836 | * relay_process_data: Process the data received on the data socket | |
3837 | */ | |
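/*
 * Like the control path, the data connection is a two-state machine:
 * RECEIVE_HEADER parses the data header and prepares the target stream for a
 * new packet, then RECEIVE_PAYLOAD writes the packet contents (and padding)
 * to the stream in bounded chunks, updating the stream's index when the
 * session's streams use indexes.
 */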
5569b118 JG |
3838 | static enum relay_connection_status relay_process_data( |
3839 | struct relay_connection *conn) | |
5312a3ed | 3840 | { |
5569b118 | 3841 | enum relay_connection_status status; |
5312a3ed JG |
3842 | |
3843 | switch (conn->protocol.data.state_id) { | |
3844 | case DATA_CONNECTION_STATE_RECEIVE_HEADER: | |
5569b118 | 3845 | status = relay_process_data_receive_header(conn); |
5312a3ed JG |
3846 | break; |
3847 | case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD: | |
5569b118 | 3848 | status = relay_process_data_receive_payload(conn); |
5312a3ed JG |
3849 | break; |
3850 | default: | |
3851 | ERR("Unexpected data connection communication state."); | |
3852 | abort(); | |
3853 | } | |
3854 | ||
5569b118 | 3855 | return status; |
5312a3ed JG |
3856 | } |
3857 | ||
7591bab1 | 3858 | static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd) |
b8aa1682 JD |
3859 | { |
3860 | int ret; | |
3861 | ||
58eb9381 | 3862 | (void) lttng_poll_del(events, pollfd); |
b8aa1682 | 3863 | |
f355467e JG |
3864 | ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, &pollfd, 1, |
3865 | fd_tracker_util_close_fd, NULL); | |
b8aa1682 JD |
3866 | if (ret < 0) { |
3867 | ERR("Closing pollfd %d", pollfd); | |
3868 | } | |
3869 | } | |
3870 | ||
7591bab1 MD |
3871 | static void relay_thread_close_connection(struct lttng_poll_event *events, |
3872 | int pollfd, struct relay_connection *conn) | |
9d1bbf21 | 3873 | { |
7591bab1 | 3874 | const char *type_str; |
2a174661 | 3875 | |
7591bab1 MD |
3876 | switch (conn->type) { |
3877 | case RELAY_DATA: | |
3878 | type_str = "Data"; | |
3879 | break; | |
3880 | case RELAY_CONTROL: | |
3881 | type_str = "Control"; | |
3882 | break; | |
3883 | case RELAY_VIEWER_COMMAND: | |
3884 | type_str = "Viewer Command"; | |
3885 | break; | |
3886 | case RELAY_VIEWER_NOTIFICATION: | |
3887 | type_str = "Viewer Notification"; | |
3888 | break; | |
3889 | default: | |
3890 | type_str = "Unknown"; | |
9d1bbf21 | 3891 | } |
7591bab1 MD |
3892 | cleanup_connection_pollfd(events, pollfd); |
3893 | connection_put(conn); | |
3894 | DBG("%s connection closed with %d", type_str, pollfd); | |
b8aa1682 JD |
3895 | } |
3896 | ||
3897 | /* | |
3898 | * Worker thread: services the control and data connections received over the relay connection pipe. | |
3899 | */ | |
7591bab1 | 3900 | static void *relay_thread_worker(void *data) |
b8aa1682 | 3901 | { |
beaad64c DG |
3902 | int ret, err = -1, last_seen_data_fd = -1; |
3903 | uint32_t nb_fd; | |
b8aa1682 JD |
3904 | struct lttng_poll_event events; |
3905 | struct lttng_ht *relay_connections_ht; | |
b8aa1682 | 3906 | struct lttng_ht_iter iter; |
90e7d72f | 3907 | struct relay_connection *destroy_conn = NULL; |
b8aa1682 JD |
3908 | |
3909 | DBG("[thread] Relay worker started"); | |
3910 | ||
9d1bbf21 MD |
3911 | rcu_register_thread(); |
3912 | ||
55706a7d MD |
3913 | health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER); |
3914 | ||
9b5e0863 MD |
3915 | if (testpoint(relayd_thread_worker)) { |
3916 | goto error_testpoint; | |
3917 | } | |
3918 | ||
f385ae0a MD |
3919 | health_code_update(); |
3920 | ||
b8aa1682 JD |
3921 | /* table of connections indexed on socket */ |
3922 | relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); | |
095a4ae5 MD |
3923 | if (!relay_connections_ht) { |
3924 | goto relay_connections_ht_error; | |
3925 | } | |
b8aa1682 | 3926 | |
e32a0864 | 3927 | ret = create_named_thread_poll_set(&events, 2, "Worker thread epoll"); |
b8aa1682 JD |
3928 | if (ret < 0) { |
3929 | goto error_poll_create; | |
3930 | } | |
3931 | ||
58eb9381 | 3932 | ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP); |
b8aa1682 JD |
3933 | if (ret < 0) { |
3934 | goto error; | |
3935 | } | |
3936 | ||
beaad64c | 3937 | restart: |
b8aa1682 | 3938 | while (1) { |
beaad64c DG |
3939 | int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1; |
3940 | ||
f385ae0a MD |
3941 | health_code_update(); |
3942 | ||
b8aa1682 | 3943 | /* Infinite blocking call, waiting for transmission */ |
87c1611d | 3944 | DBG3("Relayd worker thread polling..."); |
f385ae0a | 3945 | health_poll_entry(); |
b8aa1682 | 3946 | ret = lttng_poll_wait(&events, -1); |
f385ae0a | 3947 | health_poll_exit(); |
b8aa1682 JD |
3948 | if (ret < 0) { |
3949 | /* | |
3950 | * Restart interrupted system call. | |
3951 | */ | |
3952 | if (errno == EINTR) { | |
3953 | goto restart; | |
3954 | } | |
3955 | goto error; | |
3956 | } | |
3957 | ||
0d9c5d77 DG |
3958 | nb_fd = ret; |
3959 | ||
beaad64c | 3960 | /* |
7591bab1 MD |
3961 | * Process control. The control connection is |
3962 | * prioritized so we don't starve it with high | |
3963 | * throughput tracing data on the data connection. | |
beaad64c | 3964 | */ |
b8aa1682 JD |
3965 | for (i = 0; i < nb_fd; i++) { |
3966 | /* Fetch once the poll data */ | |
beaad64c DG |
3967 | uint32_t revents = LTTNG_POLL_GETEV(&events, i); |
3968 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
b8aa1682 | 3969 | |
f385ae0a MD |
3970 | health_code_update(); |
3971 | ||
b8aa1682 JD |
3972 | /* Thread quit pipe has been closed. Killing thread. */ |
3973 | ret = check_thread_quit_pipe(pollfd, revents); | |
3974 | if (ret) { | |
095a4ae5 MD |
3975 | err = 0; |
3976 | goto exit; | |
b8aa1682 JD |
3977 | } |
3978 | ||
58eb9381 DG |
3979 | /* Inspect the relay conn pipe for new connection */ |
3980 | if (pollfd == relay_conn_pipe[0]) { | |
03e43155 | 3981 | if (revents & LPOLLIN) { |
90e7d72f JG |
3982 | struct relay_connection *conn; |
3983 | ||
58eb9381 | 3984 | ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn)); |
b8aa1682 JD |
3985 | if (ret < 0) { |
3986 | goto error; | |
3987 | } | |
73039936 FD |
3988 | ret = lttng_poll_add(&events, |
3989 | conn->sock->fd, | |
58eb9381 | 3990 | LPOLLIN | LPOLLRDHUP); |
73039936 FD |
3991 | if (ret) { |
3992 | ERR("Failed to add new connection file descriptor to poll set"); | |
3993 | goto error; | |
3994 | } | |
7591bab1 | 3995 | connection_ht_add(relay_connections_ht, conn); |
58eb9381 | 3996 | DBG("Connection socket %d added", conn->sock->fd); |
03e43155 MD |
3997 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
3998 | ERR("Relay connection pipe error"); | |
3999 | goto error; | |
4000 | } else { | |
4001 | ERR("Unexpected poll events %u for sock %d", revents, pollfd); | |
4002 | goto error; | |
b8aa1682 | 4003 | } |
58eb9381 | 4004 | } else { |
90e7d72f JG |
4005 | struct relay_connection *ctrl_conn; |
4006 | ||
7591bab1 | 4007 | ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd); |
58eb9381 | 4008 | /* If not found, there is a synchronization issue. */ |
a0377dfe | 4009 | LTTNG_ASSERT(ctrl_conn); |
58eb9381 | 4010 | |
03e43155 MD |
4011 | if (ctrl_conn->type == RELAY_DATA) { |
4012 | if (revents & LPOLLIN) { | |
beaad64c DG |
4013 | /* |
4014 | * Remember the last data fd that was not deleted. It will be | |
4015 | * used as the last seen fd if any fd gets deleted in | |
4016 | * this first loop. | |
4017 | */ | |
4018 | last_notdel_data_fd = pollfd; | |
4019 | } | |
03e43155 MD |
4020 | goto put_ctrl_connection; |
4021 | } | |
a0377dfe | 4022 | LTTNG_ASSERT(ctrl_conn->type == RELAY_CONTROL); |
03e43155 MD |
4023 | |
4024 | if (revents & LPOLLIN) { | |
5569b118 JG |
4025 | enum relay_connection_status status; |
4026 | ||
4027 | status = relay_process_control(ctrl_conn); | |
4028 | if (status != RELAY_CONNECTION_STATUS_OK) { | |
fd0f1e3e JR |
4029 | /* |
4030 | * On socket error, flag the session as aborted to force | |
4031 | * the cleanup of its streams; otherwise they can leak | |
4032 | * during the lifetime of the relayd. | |
4033 | * | |
4034 | * This prevents situations in which streams can be | |
4035 | * left open because an index was received, the | |
4036 | * control connection is closed, and the data | |
4037 | * connection is closed (uncleanly) before the packet's | |
4038 | * data is provided. | |
4039 | * | |
4040 | * Since the control connection encountered an error, | |
4041 | * it is okay to be conservative and close the | |
4042 | * session right now as we can't rely on the protocol | |
4043 | * being respected anymore. | |
4044 | */ | |
4045 | if (status == RELAY_CONNECTION_STATUS_ERROR) { | |
4046 | session_abort(ctrl_conn->session); | |
4047 | } | |
4048 | ||
5569b118 | 4049 | /* Clear the connection on error or close. */ |
5312a3ed JG |
4050 | relay_thread_close_connection(&events, |
4051 | pollfd, | |
03e43155 | 4052 | ctrl_conn); |
03e43155 | 4053 | } |
5312a3ed | 4054 | seen_control = 1; |
03e43155 MD |
4055 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
4056 | relay_thread_close_connection(&events, | |
4057 | pollfd, ctrl_conn); | |
4058 | if (last_seen_data_fd == pollfd) { | |
4059 | last_seen_data_fd = last_notdel_data_fd; | |
4060 | } | |
58eb9381 | 4061 | } else { |
03e43155 MD |
4062 | ERR("Unexpected poll events %u for control sock %d", |
4063 | revents, pollfd); | |
4064 | connection_put(ctrl_conn); | |
4065 | goto error; | |
beaad64c | 4066 | } |
03e43155 | 4067 | put_ctrl_connection: |
7591bab1 | 4068 | connection_put(ctrl_conn); |
beaad64c DG |
4069 | } |
4070 | } | |
4071 | ||
4072 | /* | |
4073 | * The last loop handled a control request, go back to poll to make | |
4074 | * sure we prioritise the control socket. | |
4075 | */ | |
4076 | if (seen_control) { | |
4077 | continue; | |
4078 | } | |
4079 | ||
4080 | if (last_seen_data_fd >= 0) { | |
4081 | for (i = 0; i < nb_fd; i++) { | |
4082 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
f385ae0a MD |
4083 | |
4084 | health_code_update(); | |
4085 | ||
beaad64c DG |
4086 | if (last_seen_data_fd == pollfd) { |
4087 | idx = i; | |
4088 | break; | |
4089 | } | |
4090 | } | |
4091 | } | |
4092 | ||
4093 | /* Process data connection. */ | |
4094 | for (i = idx + 1; i < nb_fd; i++) { | |
4095 | /* Fetch the poll data. */ | |
4096 | uint32_t revents = LTTNG_POLL_GETEV(&events, i); | |
4097 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
90e7d72f | 4098 | struct relay_connection *data_conn; |
beaad64c | 4099 | |
f385ae0a MD |
4100 | health_code_update(); |
4101 | ||
fd20dac9 MD |
4102 | if (!revents) { |
4103 | /* No activity for this FD (poll implementation). */ | |
4104 | continue; | |
4105 | } | |
4106 | ||
beaad64c | 4107 | /* Skip the command pipe. It's handled in the first loop. */ |
58eb9381 | 4108 | if (pollfd == relay_conn_pipe[0]) { |
beaad64c DG |
4109 | continue; |
4110 | } | |
4111 | ||
7591bab1 | 4112 | data_conn = connection_get_by_sock(relay_connections_ht, pollfd); |
90e7d72f | 4113 | if (!data_conn) { |
fd20dac9 | 4114 | /* Skip it; it might already have been removed. */ |
fd20dac9 MD |
4115 | continue; |
4116 | } | |
03e43155 MD |
4117 | if (data_conn->type == RELAY_CONTROL) { |
4118 | goto put_data_connection; | |
4119 | } | |
a0377dfe | 4120 | LTTNG_ASSERT(data_conn->type == RELAY_DATA); |
fd20dac9 MD |
4121 | |
4122 | if (revents & LPOLLIN) { | |
5569b118 JG |
4123 | enum relay_connection_status status; |
4124 | ||
4125 | status = relay_process_data(data_conn); | |
4126 | /* Connection closed or error. */ | |
4127 | if (status != RELAY_CONNECTION_STATUS_OK) { | |
fd0f1e3e JR |
4128 | /* |
4129 | * On socket error, flag the session as aborted to force | |
4130 | * the cleanup of its streams; otherwise they can leak | |
4131 | * during the lifetime of the relayd. | |
4132 | * | |
4133 | * This prevents situations in which streams can be | |
4134 | * left open because an index was received, the | |
4135 | * control connection is closed, and the data | |
4136 | * connection is closed (uncleanly) before the packet's | |
4137 | * data is provided. | |
4138 | * | |
4139 | * Since the data connection encountered an error, | |
4140 | * it is okay to be conservative and close the | |
4141 | * session right now as we can't rely on the protocol | |
4142 | * being respected anymore. | |
4143 | */ | |
4144 | if (status == RELAY_CONNECTION_STATUS_ERROR) { | |
4145 | session_abort(data_conn->session); | |
4146 | } | |
7591bab1 | 4147 | relay_thread_close_connection(&events, pollfd, |
03e43155 | 4148 | data_conn); |
fd20dac9 MD |
4149 | /* |
4150 | * Every goto restart call sets the last seen fd; here we | |
4151 | * don't really care since we gracefully continue the loop | |
4152 | * after the connection is deleted. | |
4153 | */ | |
4154 | } else { | |
4155 | /* Keep last seen fd. */ | |
4156 | last_seen_data_fd = pollfd; | |
7591bab1 | 4157 | connection_put(data_conn); |
fd20dac9 | 4158 | goto restart; |
b8aa1682 | 4159 | } |
03e43155 MD |
4160 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
4161 | relay_thread_close_connection(&events, pollfd, | |
4162 | data_conn); | |
4163 | } else { | |
4164 | ERR("Unknown poll events %u for data sock %d", | |
4165 | revents, pollfd); | |
b8aa1682 | 4166 | } |
03e43155 | 4167 | put_data_connection: |
7591bab1 | 4168 | connection_put(data_conn); |
b8aa1682 | 4169 | } |
beaad64c | 4170 | last_seen_data_fd = -1; |
b8aa1682 JD |
4171 | } |
4172 | ||
f385ae0a MD |
4173 | /* Normal exit, no error */ |
4174 | ret = 0; | |
4175 | ||
095a4ae5 | 4176 | exit: |
b8aa1682 | 4177 | error: |
71efa8ef | 4178 | /* Clean up the remaining connection objects. */ |
9d1bbf21 | 4179 | rcu_read_lock(); |
90e7d72f JG |
4180 | cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter, |
4181 | destroy_conn, | |
58eb9381 | 4182 | sock_n.node) { |
f385ae0a | 4183 | health_code_update(); |
98ba050e | 4184 | |
fd0f1e3e | 4185 | session_abort(destroy_conn->session); |
98ba050e | 4186 | |
7591bab1 MD |
4187 | /* |
4188 | * No need to grab another ref, because we own | |
4189 | * destroy_conn. | |
4190 | */ | |
4191 | relay_thread_close_connection(&events, destroy_conn->sock->fd, | |
4192 | destroy_conn); | |
b8aa1682 | 4193 | } |
94d49140 | 4194 | rcu_read_unlock(); |
7591bab1 | 4195 | |
e32a0864 | 4196 | (void) fd_tracker_util_poll_clean(the_fd_tracker, &events); |
7d2f7452 | 4197 | error_poll_create: |
b8aa1682 | 4198 | lttng_ht_destroy(relay_connections_ht); |
095a4ae5 | 4199 | relay_connections_ht_error: |
58eb9381 | 4200 | /* Close relay conn pipes */ |
726b2396 JG |
4201 | (void) fd_tracker_util_pipe_close(the_fd_tracker, |
4202 | relay_conn_pipe); | |
095a4ae5 MD |
4203 | if (err) { |
4204 | DBG("Thread exited with error"); | |
4205 | } | |
b8aa1682 | 4206 | DBG("Worker thread cleanup complete"); |
9b5e0863 | 4207 | error_testpoint: |
f385ae0a MD |
4208 | if (err) { |
4209 | health_error(); | |
4210 | ERR("Health error occurred in %s", __func__); | |
4211 | } | |
4212 | health_unregister(health_relayd); | |
9d1bbf21 | 4213 | rcu_unregister_thread(); |
b4aacfdc | 4214 | lttng_relay_stop_threads(); |
b8aa1682 JD |
4215 | return NULL; |
4216 | } | |
4217 | ||
4218 | /* | |
4219 | * Create the relay connection pipe used to hand new connections over to | |
4220 | * the worker thread. Closed by the worker thread on exit. | |
4221 | */ | |
58eb9381 | 4222 | static int create_relay_conn_pipe(void) |
b8aa1682 | 4223 | { |
726b2396 JG |
4224 | return fd_tracker_util_pipe_open_cloexec(the_fd_tracker, |
4225 | "Relayd connection pipe", relay_conn_pipe); | |
b8aa1682 JD |
4226 | } |
4227 | ||
9c256b01 JG |
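/*
 * Callback used with fd_tracker_open_unsuspendable_fd(): report the
 * already-open stdout/stderr file descriptors so the fd-tracker accounts for
 * them without opening anything new.
 */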
4228 | static int stdio_open(void *data, int *fds) |
4229 | { | |
4230 | fds[0] = fileno(stdout); | |
4231 | fds[1] = fileno(stderr); | |
4232 | return 0; | |
4233 | } | |
4234 | ||
9c256b01 JG |
4235 | static int track_stdio(void) |
4236 | { | |
4237 | int fds[2]; | |
4238 | const char *names[] = { "stdout", "stderr" }; | |
4239 | ||
4240 | return fd_tracker_open_unsuspendable_fd(the_fd_tracker, fds, | |
4241 | names, 2, stdio_open, NULL); | |
4242 | } | |
4243 | ||
b8aa1682 JD |
4244 | /* |
4245 | * main | |
4246 | */ | |
4247 | int main(int argc, char **argv) | |
4248 | { | |
00e3b7f1 | 4249 | bool thread_is_rcu_registered = false; |
178a0557 | 4250 | int ret = 0, retval = 0; |
b8aa1682 | 4251 | void *status; |
f7c3ffd7 | 4252 | char *unlinked_file_directory_path = NULL, *output_path = NULL; |
b8aa1682 | 4253 | |
2a10de3b JR |
4254 | /* Parse environment variables */ |
4255 | ret = parse_env_options(); | |
4256 | if (ret) { | |
4257 | retval = -1; | |
4258 | goto exit_options; | |
4259 | } | |
4260 | ||
4261 | /* | |
4262 | * Parse arguments. | |
4263 | * Command line arguments override the environment. | |
4264 | */ | |
b8aa1682 | 4265 | progname = argv[0]; |
178a0557 MD |
4266 | if (set_options(argc, argv)) { |
4267 | retval = -1; | |
4268 | goto exit_options; | |
b8aa1682 JD |
4269 | } |
4270 | ||
178a0557 MD |
4271 | if (set_signal_handler()) { |
4272 | retval = -1; | |
4273 | goto exit_options; | |
b8aa1682 JD |
4274 | } |
4275 | ||
a3bc3918 JR |
4276 | relayd_config_log(); |
4277 | ||
4278 | if (opt_print_version) { | |
4279 | print_version(); | |
4280 | retval = 0; | |
4281 | goto exit_options; | |
4282 | } | |
4283 | ||
c0407718 JG |
4284 | ret = fclose(stdin); |
4285 | if (ret) { | |
4286 | PERROR("Failed to close stdin"); | |
4287 | goto exit_options; | |
4288 | } | |
4289 | ||
35ab25e5 MD |
4290 | DBG("Clear command %s", opt_allow_clear ? "allowed" : "disallowed"); |
4291 | ||
4d513a50 DG |
4292 | /* Try to create the output directory if -o, --output is specified. */
4293 | if (opt_output_path) { | |
994fa64f DG |
4294 | if (*opt_output_path != '/') { |
4295 | ERR("Please specify an absolute path for -o, --output PATH"); | |
178a0557 MD |
4296 | retval = -1; |
4297 | goto exit_options; | |
994fa64f DG |
4298 | } |
4299 | ||
d77dded2 JG |
4300 | ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG, |
4301 | -1, -1); | |
4d513a50 DG |
4302 | if (ret < 0) { |
4303 | ERR("Unable to create %s", opt_output_path); | |
178a0557 MD |
4304 | retval = -1; |
4305 | goto exit_options; | |
4d513a50 DG |
4306 | } |
4307 | } | |
4308 | ||
b8aa1682 | 4309 | /* Daemonize */ |
b5218ffb | 4310 | if (opt_daemon || opt_background) { |
3fd27398 MD |
4311 | ret = lttng_daemonize(&child_ppid, &recv_child_signal, |
4312 | !opt_background); | |
b8aa1682 | 4313 | if (ret < 0) { |
178a0557 MD |
4314 | retval = -1; |
4315 | goto exit_options; | |
b8aa1682 | 4316 | } |
3fd27398 MD |
4317 | } |
4318 | ||
ce9ee1fb JR |
4319 | if (opt_working_directory) { |
4320 | ret = utils_change_working_directory(opt_working_directory); | |
4321 | if (ret) { | |
4322 | /* All errors are already logged. */ | |
4323 | goto exit_options; | |
4324 | } | |
4325 | } | |
4326 | ||
23c8ff50 JG |
4327 | sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create(); |
4328 | if (!sessiond_trace_chunk_registry) { | |
4329 | ERR("Failed to initialize session daemon trace chunk registry"); | |
4330 | retval = -1; | |
794e2e5f | 4331 | goto exit_options; |
23c8ff50 JG |
4332 | } |
4333 | ||
00e3b7f1 JG |
4334 | /* |
4335 | * The RCU thread registration (and its first use, through the | |
4336 | * fd-tracker's creation) is done after daemonization so that we do | |
4337 | * not need liburcu's fork() handling to restore the call_rcu | |
4338 | * machinery in the child (see the ordering sketch after main()). | |
4339 | */ | |
4340 | rcu_register_thread(); | |
4341 | thread_is_rcu_registered = true; | |
4342 | ||
f7c3ffd7 JG |
4343 | output_path = create_output_path(""); |
4344 | if (!output_path) { | |
4345 | ERR("Failed to get output path"); | |
4346 | retval = -1; | |
4347 | goto exit_options; | |
4348 | } | |
4349 | ret = asprintf(&unlinked_file_directory_path, "%s/%s", output_path, | |
4350 | DEFAULT_UNLINKED_FILES_DIRECTORY); | |
4351 | free(output_path); | |
4352 | if (ret < 0) { | |
4353 | ERR("Failed to format unlinked file directory path"); | |
4354 | retval = -1; | |
4355 | goto exit_options; | |
4356 | } | |
4357 | the_fd_tracker = fd_tracker_create( | |
4358 | unlinked_file_directory_path, lttng_opt_fd_pool_size); | |
4359 | free(unlinked_file_directory_path); | |
00e3b7f1 JG |
4360 | if (!the_fd_tracker) { |
4361 | retval = -1; | |
4362 | goto exit_options; | |
4363 | } | |
4364 | ||
9c256b01 JG |
4365 | ret = track_stdio(); |
4366 | if (ret) { | |
4367 | retval = -1; | |
4368 | goto exit_options; | |
4369 | } | |
4370 | ||
178a0557 MD |
4371 | /* Initialize thread health monitoring */ |
4372 | health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES); | |
4373 | if (!health_relayd) { | |
4374 | PERROR("health_app_create error"); | |
4375 | retval = -1; | |
794e2e5f | 4376 | goto exit_options; |
178a0557 MD |
4377 | } |
4378 | ||
3fd27398 | 4379 | /* Create thread quit pipe */ |
178a0557 MD |
4380 | if (init_thread_quit_pipe()) { |
4381 | retval = -1; | |
794e2e5f | 4382 | goto exit_options; |
b8aa1682 JD |
4383 | } |
4384 | ||
b8aa1682 | 4385 | /* Set up the relay connection pipe used to pass new connections to the worker thread. */
178a0557 MD |
4386 | if (create_relay_conn_pipe()) { |
4387 | retval = -1; | |
794e2e5f | 4388 | goto exit_options; |
b8aa1682 JD |
4389 | } |
4390 | ||
4391 | /* Initialize the relay connection queue. */ | |
8bdee6e2 | 4392 | cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail); |
b8aa1682 | 4393 | |
554831e7 MD |
4394 | /* Initialize communication library */ |
4395 | lttcomm_init(); | |
87e45c13 | 4396 | lttcomm_inet_init(); |
554831e7 | 4397 | |
d3e2ba59 | 4398 | /* tables of sessions indexed by session ID */ |
7591bab1 MD |
4399 | sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
4400 | if (!sessions_ht) { | |
178a0557 | 4401 | retval = -1; |
794e2e5f | 4402 | goto exit_options; |
d3e2ba59 JD |
4403 | } |
4404 | ||
4405 | /* tables of streams indexed by stream ID */ | |
2a174661 | 4406 | relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
d3e2ba59 | 4407 | if (!relay_streams_ht) { |
178a0557 | 4408 | retval = -1; |
794e2e5f | 4409 | goto exit_options; |
d3e2ba59 JD |
4410 | } |
4411 | ||
4412 | /* tables of viewer streams indexed by stream ID */ | |
92c6ca54 DG |
4413 | viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
4414 | if (!viewer_streams_ht) { | |
178a0557 | 4415 | retval = -1; |
794e2e5f | 4416 | goto exit_options; |
55706a7d MD |
4417 | } |
4418 | ||
bcee2b96 | 4419 | ret = init_health_quit_pipe(); |
178a0557 MD |
4420 | if (ret) { |
4421 | retval = -1; | |
794e2e5f | 4422 | goto exit_options; |
65931c8b MD |
4423 | } |
4424 | ||
4425 | /* Create the thread that manages the health check socket. */ | |
1a1a34b4 | 4426 | ret = pthread_create(&health_thread, default_pthread_attr(), |
65931c8b | 4427 | thread_manage_health, (void *) NULL); |
178a0557 MD |
4428 | if (ret) { |
4429 | errno = ret; | |
65931c8b | 4430 | PERROR("pthread_create health"); |
178a0557 | 4431 | retval = -1; |
794e2e5f | 4432 | goto exit_options; |
65931c8b MD |
4433 | } |
4434 | ||
b8aa1682 | 4435 | /* Setup the dispatcher thread */ |
1a1a34b4 | 4436 | ret = pthread_create(&dispatcher_thread, default_pthread_attr(), |
b8aa1682 | 4437 | relay_thread_dispatcher, (void *) NULL); |
178a0557 MD |
4438 | if (ret) { |
4439 | errno = ret; | |
b8aa1682 | 4440 | PERROR("pthread_create dispatcher"); |
178a0557 MD |
4441 | retval = -1; |
4442 | goto exit_dispatcher_thread; | |
b8aa1682 JD |
4443 | } |
4444 | ||
4445 | /* Setup the worker thread */ | |
1a1a34b4 | 4446 | ret = pthread_create(&worker_thread, default_pthread_attr(), |
7591bab1 | 4447 | relay_thread_worker, NULL); |
178a0557 MD |
4448 | if (ret) { |
4449 | errno = ret; | |
b8aa1682 | 4450 | PERROR("pthread_create worker"); |
178a0557 MD |
4451 | retval = -1; |
4452 | goto exit_worker_thread; | |
b8aa1682 JD |
4453 | } |
4454 | ||
4455 | /* Setup the listener thread */ | |
1a1a34b4 | 4456 | ret = pthread_create(&listener_thread, default_pthread_attr(), |
b8aa1682 | 4457 | relay_thread_listener, (void *) NULL); |
178a0557 MD |
4458 | if (ret) { |
4459 | errno = ret; | |
b8aa1682 | 4460 | PERROR("pthread_create listener"); |
178a0557 MD |
4461 | retval = -1; |
4462 | goto exit_listener_thread; | |
b8aa1682 JD |
4463 | } |
4464 | ||
7591bab1 | 4465 | ret = relayd_live_create(live_uri); |
178a0557 | 4466 | if (ret) { |
d3e2ba59 | 4467 | ERR("Starting live viewer threads"); |
178a0557 | 4468 | retval = -1; |
50138f51 | 4469 | goto exit_live; |
d3e2ba59 JD |
4470 | } |
4471 | ||
178a0557 MD |
4472 | /* |
4473 | * This is where we start awaiting program completion (e.g. through a | |
4474 | * signal that asks the threads to tear down). | |
4475 | */ | |
4476 | ||
4477 | ret = relayd_live_join(); | |
4478 | if (ret) { | |
4479 | retval = -1; | |
4480 | } | |
50138f51 | 4481 | exit_live: |
178a0557 | 4482 | |
b8aa1682 | 4483 | ret = pthread_join(listener_thread, &status); |
178a0557 MD |
4484 | if (ret) { |
4485 | errno = ret; | |
4486 | PERROR("pthread_join listener_thread"); | |
4487 | retval = -1; | |
b8aa1682 JD |
4488 | } |
4489 | ||
178a0557 | 4490 | exit_listener_thread: |
b8aa1682 | 4491 | ret = pthread_join(worker_thread, &status); |
178a0557 MD |
4492 | if (ret) { |
4493 | errno = ret; | |
4494 | PERROR("pthread_join worker_thread"); | |
4495 | retval = -1; | |
b8aa1682 JD |
4496 | } |
4497 | ||
178a0557 | 4498 | exit_worker_thread: |
b8aa1682 | 4499 | ret = pthread_join(dispatcher_thread, &status); |
178a0557 MD |
4500 | if (ret) { |
4501 | errno = ret; | |
4502 | PERROR("pthread_join dispatcher_thread"); | |
4503 | retval = -1; | |
b8aa1682 | 4504 | } |
178a0557 | 4505 | exit_dispatcher_thread: |
42415026 | 4506 | |
65931c8b | 4507 | ret = pthread_join(health_thread, &status); |
178a0557 MD |
4508 | if (ret) { |
4509 | errno = ret; | |
4510 | PERROR("pthread_join health_thread"); | |
4511 | retval = -1; | |
65931c8b | 4512 | } |
178a0557 | 4513 | exit_options: |
4d62fbf8 MD |
4514 | /* |
4515 | * Wait for all pending call_rcu work to complete before tearing | |
4516 | * down data structures. The call_rcu worker may be trying to | |
4517 | * perform lookups in those structures. | |
4518 | */ | |
4519 | rcu_barrier(); | |
7591bab1 MD |
4520 | relayd_cleanup(); |
4521 | ||
4522 | /* Ensure all call_rcu work queued by relayd_cleanup() has completed. */ | |
4523 | rcu_barrier(); | |
d3e2ba59 | 4524 | |
00e3b7f1 JG |
4525 | if (thread_is_rcu_registered) { |
4526 | rcu_unregister_thread(); | |
4527 | } | |
4528 | ||
178a0557 | 4529 | if (!retval) { |
b8aa1682 | 4530 | exit(EXIT_SUCCESS); |
178a0557 MD |
4531 | } else { |
4532 | exit(EXIT_FAILURE); | |
b8aa1682 | 4533 | } |
b8aa1682 | 4534 | } |
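As referenced in the comment above rcu_register_thread() in main(), the sketch below condenses the startup and teardown ordering that main() relies on: fork-based daemonization first, RCU thread registration and RCU-using facilities afterwards, and call_rcu work drained around cleanup. It is a hypothetical illustration, not a replacement for main(); error handling and most setup steps are omitted, and startup_teardown_sketch is an invented name.

static int startup_teardown_sketch(void)
{
	/* 1) fork()-based daemonization happens before any RCU use... */
	if (opt_daemon || opt_background) {
		if (lttng_daemonize(&child_ppid, &recv_child_signal,
				!opt_background) < 0) {
			return -1;
		}
	}

	/* 2) ...so the surviving process registers with liburcu afresh. */
	rcu_register_thread();

	/* 3) RCU-using facilities (fd-tracker, hash tables, threads) follow. */

	/* ... threads run until a signal asks them to tear down ... */

	/* 4) Drain call_rcu work before and after tearing down structures. */
	rcu_barrier();
	relayd_cleanup();
	rcu_barrier();

	rcu_unregister_thread();
	return 0;
}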