Commit | Line | Data |
---|---|---|
b8aa1682 JD |
1 | /* |
2 | * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com> | |
3 | * David Goulet <dgoulet@efficios.com> | |
cd60b05a | 4 | * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com> |
7591bab1 | 5 | * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
b8aa1682 JD |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License, version 2 only, | |
9 | * as published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | * more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License along | |
17 | * with this program; if not, write to the Free Software Foundation, Inc., | |
18 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
19 | */ | |
20 | ||
6c1c0768 | 21 | #define _LGPL_SOURCE |
b8aa1682 JD |
22 | #include <getopt.h> |
23 | #include <grp.h> | |
24 | #include <limits.h> | |
25 | #include <pthread.h> | |
26 | #include <signal.h> | |
27 | #include <stdio.h> | |
28 | #include <stdlib.h> | |
29 | #include <string.h> | |
30 | #include <sys/mman.h> | |
31 | #include <sys/mount.h> | |
32 | #include <sys/resource.h> | |
33 | #include <sys/socket.h> | |
34 | #include <sys/stat.h> | |
35 | #include <sys/types.h> | |
36 | #include <sys/wait.h> | |
173af62f | 37 | #include <inttypes.h> |
b8aa1682 JD |
38 | #include <urcu/futex.h> |
39 | #include <urcu/uatomic.h> | |
70626904 | 40 | #include <urcu/rculist.h> |
b8aa1682 JD |
41 | #include <unistd.h> |
42 | #include <fcntl.h> | |
f8be1183 | 43 | #include <strings.h> |
b8aa1682 JD |
44 | |
45 | #include <lttng/lttng.h> | |
46 | #include <common/common.h> | |
47 | #include <common/compat/poll.h> | |
48 | #include <common/compat/socket.h> | |
f263b7fd | 49 | #include <common/compat/endian.h> |
e8fa9fb0 | 50 | #include <common/compat/getenv.h> |
b8aa1682 | 51 | #include <common/defaults.h> |
3fd27398 | 52 | #include <common/daemonize.h> |
b8aa1682 JD |
53 | #include <common/futex.h> |
54 | #include <common/sessiond-comm/sessiond-comm.h> | |
55 | #include <common/sessiond-comm/inet.h> | |
b8aa1682 JD |
56 | #include <common/sessiond-comm/relayd.h> |
57 | #include <common/uri.h> | |
a02de639 | 58 | #include <common/utils.h> |
d3ecc550 | 59 | #include <common/align.h> |
f40ef1d5 | 60 | #include <common/config/session-config.h> |
5312a3ed JG |
61 | #include <common/dynamic-buffer.h> |
62 | #include <common/buffer-view.h> | |
70626904 | 63 | #include <common/string-utils/format.h> |
b8aa1682 | 64 | |
a3bc3918 | 65 | #include "version.h" |
0f907de1 | 66 | #include "cmd.h" |
d3e2ba59 | 67 | #include "ctf-trace.h" |
1c20f0e2 | 68 | #include "index.h" |
0f907de1 | 69 | #include "utils.h" |
b8aa1682 | 70 | #include "lttng-relayd.h" |
d3e2ba59 | 71 | #include "live.h" |
55706a7d | 72 | #include "health-relayd.h" |
9b5e0863 | 73 | #include "testpoint.h" |
2f8f53af | 74 | #include "viewer-stream.h" |
2a174661 DG |
75 | #include "session.h" |
76 | #include "stream.h" | |
58eb9381 | 77 | #include "connection.h" |
a44ca2ca | 78 | #include "tracefile-array.h" |
f056029c | 79 | #include "tcp_keep_alive.h" |
23c8ff50 | 80 | #include "sessiond-trace-chunks.h" |
b8aa1682 | 81 | |
4fc83d94 PP |
82 | static const char *help_msg = |
83 | #ifdef LTTNG_EMBED_HELP | |
84 | #include <lttng-relayd.8.h> | |
85 | #else | |
86 | NULL | |
87 | #endif | |
88 | ; | |
89 | ||
5569b118 JG |
90 | enum relay_connection_status { |
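	/* Connection is healthy; keep processing events on it. */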
91 | RELAY_CONNECTION_STATUS_OK, | |
a9577b76 | 92 | /* An error occurred while processing an event on the connection. */ |
5569b118 JG |
93 | RELAY_CONNECTION_STATUS_ERROR, |
94 | /* Connection closed/shutdown cleanly. */ | |
95 | RELAY_CONNECTION_STATUS_CLOSED, | |
96 | }; | |
97 | ||
b8aa1682 | 98 | /* command line options */ |
0f907de1 | 99 | char *opt_output_path; |
a3bc3918 | 100 | static int opt_daemon, opt_background, opt_print_version; |
3fd27398 MD |
101 | |
102 | /* | |
103 | * We need to wait for the listener, live listener and health check
104 | * threads to be ready before signaling readiness to the parent process.
105 | */ | |
106 | #define NR_LTTNG_RELAY_READY 3 | |
107 | static int lttng_relay_ready = NR_LTTNG_RELAY_READY; | |
0848dba7 MD |
108 | |
109 | /* Size of receive buffer. */ | |
110 | #define RECV_DATA_BUFFER_SIZE 65536 | |
111 | ||
3fd27398 MD |
112 | static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */ |
113 | static pid_t child_ppid; /* Internal parent PID used with daemonize. */
114 | ||
095a4ae5 MD |
115 | static struct lttng_uri *control_uri; |
116 | static struct lttng_uri *data_uri; | |
d3e2ba59 | 117 | static struct lttng_uri *live_uri; |
b8aa1682 JD |
118 | |
119 | const char *progname; | |
b8aa1682 | 120 | |
65931c8b | 121 | const char *tracing_group_name = DEFAULT_TRACING_GROUP; |
cd60b05a JG |
122 | static int tracing_group_name_override; |
123 | ||
124 | const char * const config_section_name = "relayd"; | |
65931c8b | 125 | |
b8aa1682 JD |
126 | /* |
127 | * Quit pipe for all threads. This permits a single cancellation point | |
128 | * for all threads when receiving an event on the pipe. | |
129 | */ | |
0b242f62 | 130 | int thread_quit_pipe[2] = { -1, -1 }; |
b8aa1682 JD |
131 | |
132 | /* | |
133 | * This pipe is used to inform the worker thread that a command is queued and | |
134 | * ready to be processed. | |
135 | */ | |
58eb9381 | 136 | static int relay_conn_pipe[2] = { -1, -1 }; |
b8aa1682 | 137 | |
26c9d55e | 138 | /* Shared between threads */ |
b8aa1682 JD |
139 | static int dispatch_thread_exit; |
140 | ||
141 | static pthread_t listener_thread; | |
142 | static pthread_t dispatcher_thread; | |
143 | static pthread_t worker_thread; | |
65931c8b | 144 | static pthread_t health_thread; |
b8aa1682 | 145 | |
7591bab1 MD |
146 | /* |
147 | * last_relay_stream_id_lock protects last_relay_stream_id increment | |
148 | * atomicity on 32-bit architectures. | |
149 | */ | |
150 | static pthread_mutex_t last_relay_stream_id_lock = PTHREAD_MUTEX_INITIALIZER; | |
095a4ae5 | 151 | static uint64_t last_relay_stream_id; |
b8aa1682 JD |
152 | |
153 | /* | |
154 | * Relay command queue. | |
155 | * | |
156 | * The relay_thread_listener and relay_thread_dispatcher communicate with this | |
157 | * queue. | |
158 | */ | |
58eb9381 | 159 | static struct relay_conn_queue relay_conn_queue; |
b8aa1682 | 160 | |
d3e2ba59 JD |
161 | /* Global relay stream hash table. */ |
162 | struct lttng_ht *relay_streams_ht; | |
163 | ||
92c6ca54 DG |
164 | /* Global relay viewer stream hash table. */ |
165 | struct lttng_ht *viewer_streams_ht; | |
166 | ||
7591bab1 MD |
167 | /* Global relay sessions hash table. */ |
168 | struct lttng_ht *sessions_ht; | |
0a6518b0 | 169 | |
55706a7d | 170 | /* Relayd health monitoring */ |
eea7556c | 171 | struct health_app *health_relayd; |
55706a7d | 172 | |
23c8ff50 JG |
173 | struct sessiond_trace_chunk_registry *sessiond_trace_chunk_registry; |
174 | ||
cd60b05a JG |
175 | static struct option long_options[] = { |
176 | { "control-port", 1, 0, 'C', }, | |
177 | { "data-port", 1, 0, 'D', }, | |
8d5c808e | 178 | { "live-port", 1, 0, 'L', }, |
cd60b05a | 179 | { "daemonize", 0, 0, 'd', }, |
b5218ffb | 180 | { "background", 0, 0, 'b', }, |
cd60b05a JG |
181 | { "group", 1, 0, 'g', }, |
182 | { "help", 0, 0, 'h', }, | |
183 | { "output", 1, 0, 'o', }, | |
184 | { "verbose", 0, 0, 'v', }, | |
185 | { "config", 1, 0, 'f' }, | |
3a904098 | 186 | { "version", 0, 0, 'V' }, |
cd60b05a JG |
187 | { NULL, 0, 0, 0, }, |
188 | }; | |
189 | ||
3a904098 | 190 | static const char *config_ignore_options[] = { "help", "config", "version" }; |
cd60b05a | 191 | |
a3bc3918 JR |
192 | static void print_version(void) { |
193 | fprintf(stdout, "%s\n", VERSION); | |
194 | } | |
195 | ||
196 | static void relayd_config_log(void) | |
197 | { | |
198 | DBG("LTTng-relayd " VERSION " - " VERSION_NAME "%s%s", | |
199 | GIT_VERSION[0] == '\0' ? "" : " - " GIT_VERSION, | |
200 | EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " EXTRA_VERSION_NAME); | |
201 | if (EXTRA_VERSION_DESCRIPTION[0] != '\0') { | |
202 | DBG("LTTng-relayd extra version description:\n\t" EXTRA_VERSION_DESCRIPTION "\n"); | |
203 | } | |
7f5ed73a JR |
204 | if (EXTRA_VERSION_PATCHES[0] != '\0') { |
205 | DBG("LTTng-relayd extra patches:\n\t" EXTRA_VERSION_PATCHES "\n"); | |
206 | } | |
a3bc3918 JR |
207 | } |
208 | ||
cd60b05a JG |
209 | /* |
210 | * Take an option from the getopt output and set it in the right variable to be | |
211 | * used later. | |
212 | * | |
213 | * Return 0 on success else a negative value. | |
214 | */ | |
7591bab1 | 215 | static int set_option(int opt, const char *arg, const char *optname) |
b8aa1682 | 216 | { |
cd60b05a JG |
217 | int ret; |
218 | ||
219 | switch (opt) { | |
220 | case 0: | |
221 | fprintf(stderr, "option %s", optname); | |
222 | if (arg) { | |
223 | fprintf(stderr, " with arg %s\n", arg); | |
224 | } | |
225 | break; | |
226 | case 'C': | |
e8fa9fb0 MD |
227 | if (lttng_is_setuid_setgid()) { |
228 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
229 | "-C, --control-port"); | |
230 | } else { | |
231 | ret = uri_parse(arg, &control_uri); | |
232 | if (ret < 0) { | |
233 | ERR("Invalid control URI specified"); | |
234 | goto end; | |
235 | } | |
236 | if (control_uri->port == 0) { | |
237 | control_uri->port = DEFAULT_NETWORK_CONTROL_PORT; | |
238 | } | |
cd60b05a JG |
239 | } |
240 | break; | |
241 | case 'D': | |
e8fa9fb0 MD |
242 | if (lttng_is_setuid_setgid()) { |
243 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
244 | "-D, -data-port"); | |
245 | } else { | |
246 | ret = uri_parse(arg, &data_uri); | |
247 | if (ret < 0) { | |
248 | ERR("Invalid data URI specified"); | |
249 | goto end; | |
250 | } | |
251 | if (data_uri->port == 0) { | |
252 | data_uri->port = DEFAULT_NETWORK_DATA_PORT; | |
253 | } | |
cd60b05a JG |
254 | } |
255 | break; | |
8d5c808e | 256 | case 'L': |
e8fa9fb0 MD |
257 | if (lttng_is_setuid_setgid()) { |
258 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
259 | "-L, -live-port"); | |
260 | } else { | |
261 | ret = uri_parse(arg, &live_uri); | |
262 | if (ret < 0) { | |
263 | ERR("Invalid live URI specified"); | |
264 | goto end; | |
265 | } | |
266 | if (live_uri->port == 0) { | |
267 | live_uri->port = DEFAULT_NETWORK_VIEWER_PORT; | |
268 | } | |
8d5c808e AM |
269 | } |
270 | break; | |
cd60b05a JG |
271 | case 'd': |
272 | opt_daemon = 1; | |
273 | break; | |
b5218ffb MD |
274 | case 'b': |
275 | opt_background = 1; | |
276 | break; | |
cd60b05a | 277 | case 'g': |
e8fa9fb0 MD |
278 | if (lttng_is_setuid_setgid()) { |
279 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
280 | "-g, --group"); | |
281 | } else { | |
282 | tracing_group_name = strdup(arg); | |
283 | if (tracing_group_name == NULL) { | |
284 | ret = -errno; | |
285 | PERROR("strdup"); | |
286 | goto end; | |
287 | } | |
288 | tracing_group_name_override = 1; | |
330a40bb | 289 | } |
cd60b05a JG |
290 | break; |
291 | case 'h': | |
4fc83d94 | 292 | ret = utils_show_help(8, "lttng-relayd", help_msg); |
655b5cc1 | 293 | if (ret) { |
4fc83d94 | 294 | ERR("Cannot show --help for `lttng-relayd`"); |
655b5cc1 PP |
295 | perror("exec"); |
296 | } | |
cd60b05a | 297 | exit(EXIT_FAILURE); |
3a904098 | 298 | case 'V': |
a3bc3918 JR |
299 | opt_print_version = 1; |
300 | break; | |
cd60b05a | 301 | case 'o': |
e8fa9fb0 MD |
302 | if (lttng_is_setuid_setgid()) { |
303 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
304 | "-o, --output"); | |
305 | } else { | |
306 | ret = asprintf(&opt_output_path, "%s", arg); | |
307 | if (ret < 0) { | |
308 | ret = -errno; | |
309 | PERROR("asprintf opt_output_path"); | |
310 | goto end; | |
311 | } | |
cd60b05a JG |
312 | } |
313 | break; | |
314 | case 'v': | |
315 | /* Verbose level can increase using multiple -v */ | |
316 | if (arg) { | |
317 | lttng_opt_verbose = config_parse_value(arg); | |
318 | } else { | |
849e5b7b DG |
319 | /* Only 3 levels of verbosity (-vvv). */
320 | if (lttng_opt_verbose < 3) { | |
321 | lttng_opt_verbose += 1; | |
322 | } | |
cd60b05a JG |
323 | } |
324 | break; | |
325 | default: | |
326 | /* Unknown option or other error. | |
327 | * Error is printed by getopt, just return */ | |
328 | ret = -1; | |
329 | goto end; | |
330 | } | |
331 | ||
332 | /* All good. */ | |
333 | ret = 0; | |
334 | ||
335 | end: | |
336 | return ret; | |
337 | } | |
338 | ||
339 | /* | |
340 | * config_entry_handler_cb used to handle options read from a config file. | |
f40ef1d5 | 341 | * See config_entry_handler_cb comment in common/config/session-config.h for the |
cd60b05a JG |
342 | * return value conventions. |
343 | */ | |
7591bab1 | 344 | static int config_entry_handler(const struct config_entry *entry, void *unused) |
cd60b05a JG |
345 | { |
346 | int ret = 0, i; | |
347 | ||
348 | if (!entry || !entry->name || !entry->value) { | |
349 | ret = -EINVAL; | |
350 | goto end; | |
351 | } | |
352 | ||
353 | /* Check if the option is to be ignored */ | |
354 | for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) { | |
355 | if (!strcmp(entry->name, config_ignore_options[i])) { | |
356 | goto end; | |
357 | } | |
358 | } | |
359 | ||
360 | for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1; i++) { | |
361 | /* Ignore if entry name is not fully matched. */ | |
362 | if (strcmp(entry->name, long_options[i].name)) { | |
363 | continue; | |
364 | } | |
365 | ||
366 | /* | |
7591bab1 MD |
367 | * If the option takes no argument on the command line, |
368 | * we have to check if the value is "true". We support | |
369 | * non-zero numeric values, true, on and yes. | |
cd60b05a JG |
370 | */ |
371 | if (!long_options[i].has_arg) { | |
372 | ret = config_parse_value(entry->value); | |
373 | if (ret <= 0) { | |
374 | if (ret) { | |
375 | WARN("Invalid configuration value \"%s\" for option %s", | |
376 | entry->value, entry->name); | |
377 | } | |
378 | /* False, skip boolean config option. */ | |
379 | goto end; | |
380 | } | |
381 | } | |
382 | ||
383 | ret = set_option(long_options[i].val, entry->value, entry->name); | |
384 | goto end; | |
385 | } | |
386 | ||
387 | WARN("Unrecognized option \"%s\" in daemon configuration file.", | |
388 | entry->name); | |
389 | ||
390 | end: | |
391 | return ret; | |
392 | } | |
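/*
 * For illustration only: a hypothetical lttng-relayd.conf excerpt that
 * config_entry_handler() would accept, assuming the INI-style format of the
 * common config parser. Keys reuse the long option names; "help", "config"
 * and "version" are skipped via config_ignore_options.
 *
 *   [relayd]
 *   daemonize=yes
 *   control-port=tcp://0.0.0.0:5342
 *   data-port=tcp://0.0.0.0:5343
 *   output=/var/lib/lttng-relayd/traces
 */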
393 | ||
7591bab1 | 394 | static int set_options(int argc, char **argv) |
cd60b05a | 395 | { |
178a0557 | 396 | int c, ret = 0, option_index = 0, retval = 0; |
cd60b05a JG |
397 | int orig_optopt = optopt, orig_optind = optind; |
398 | char *default_address, *optstring; | |
399 | const char *config_path = NULL; | |
400 | ||
401 | optstring = utils_generate_optstring(long_options, | |
402 | sizeof(long_options) / sizeof(struct option)); | |
403 | if (!optstring) { | |
178a0557 | 404 | retval = -ENOMEM; |
cd60b05a JG |
405 | goto exit; |
406 | } | |
407 | ||
408 | /* Check for the --config option */ | |
409 | ||
410 | while ((c = getopt_long(argc, argv, optstring, long_options, | |
411 | &option_index)) != -1) { | |
412 | if (c == '?') { | |
178a0557 | 413 | retval = -EINVAL; |
cd60b05a JG |
414 | goto exit; |
415 | } else if (c != 'f') { | |
416 | continue; | |
417 | } | |
418 | ||
e8fa9fb0 MD |
419 | if (lttng_is_setuid_setgid()) { |
420 | WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.", | |
421 | "-f, --config"); | |
422 | } else { | |
423 | config_path = utils_expand_path(optarg); | |
424 | if (!config_path) { | |
425 | ERR("Failed to resolve path: %s", optarg); | |
426 | } | |
cd60b05a JG |
427 | } |
428 | } | |
429 | ||
430 | ret = config_get_section_entries(config_path, config_section_name, | |
431 | config_entry_handler, NULL); | |
432 | if (ret) { | |
433 | if (ret > 0) { | |
434 | ERR("Invalid configuration option at line %i", ret); | |
cd60b05a | 435 | } |
178a0557 | 436 | retval = -1; |
cd60b05a JG |
437 | goto exit; |
438 | } | |
b8aa1682 | 439 | |
cd60b05a JG |
440 | /* Reset getopt's global state */ |
441 | optopt = orig_optopt; | |
442 | optind = orig_optind; | |
b8aa1682 | 443 | while (1) { |
cd60b05a | 444 | c = getopt_long(argc, argv, optstring, long_options, &option_index); |
b8aa1682 JD |
445 | if (c == -1) { |
446 | break; | |
447 | } | |
448 | ||
cd60b05a JG |
449 | ret = set_option(c, optarg, long_options[option_index].name); |
450 | if (ret < 0) { | |
178a0557 | 451 | retval = -1; |
b8aa1682 JD |
452 | goto exit; |
453 | } | |
454 | } | |
455 | ||
456 | /* assign default values */ | |
457 | if (control_uri == NULL) { | |
fa91dc52 MD |
458 | ret = asprintf(&default_address, |
459 | "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d", | |
460 | DEFAULT_NETWORK_CONTROL_PORT); | |
b8aa1682 JD |
461 | if (ret < 0) { |
462 | PERROR("asprintf default data address"); | |
178a0557 | 463 | retval = -1; |
b8aa1682 JD |
464 | goto exit; |
465 | } | |
466 | ||
467 | ret = uri_parse(default_address, &control_uri); | |
468 | free(default_address); | |
469 | if (ret < 0) { | |
470 | ERR("Invalid control URI specified"); | |
178a0557 | 471 | retval = -1; |
b8aa1682 JD |
472 | goto exit; |
473 | } | |
474 | } | |
475 | if (data_uri == NULL) { | |
fa91dc52 MD |
476 | ret = asprintf(&default_address, |
477 | "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d", | |
478 | DEFAULT_NETWORK_DATA_PORT); | |
b8aa1682 JD |
479 | if (ret < 0) { |
480 | PERROR("asprintf default data address"); | |
178a0557 | 481 | retval = -1; |
b8aa1682 JD |
482 | goto exit; |
483 | } | |
484 | ||
485 | ret = uri_parse(default_address, &data_uri); | |
486 | free(default_address); | |
487 | if (ret < 0) { | |
488 | ERR("Invalid data URI specified"); | |
178a0557 | 489 | retval = -1; |
b8aa1682 JD |
490 | goto exit; |
491 | } | |
492 | } | |
d3e2ba59 | 493 | if (live_uri == NULL) { |
fa91dc52 MD |
494 | ret = asprintf(&default_address, |
495 | "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d", | |
496 | DEFAULT_NETWORK_VIEWER_PORT); | |
d3e2ba59 JD |
497 | if (ret < 0) { |
498 | PERROR("asprintf default viewer control address"); | |
178a0557 | 499 | retval = -1; |
d3e2ba59 JD |
500 | goto exit; |
501 | } | |
502 | ||
503 | ret = uri_parse(default_address, &live_uri); | |
504 | free(default_address); | |
505 | if (ret < 0) { | |
506 | ERR("Invalid viewer control URI specified"); | |
178a0557 | 507 | retval = -1; |
d3e2ba59 JD |
508 | goto exit; |
509 | } | |
510 | } | |
b8aa1682 JD |
511 | |
512 | exit: | |
cd60b05a | 513 | free(optstring); |
178a0557 | 514 | return retval; |
b8aa1682 JD |
515 | } |
516 | ||
7591bab1 MD |
517 | static void print_global_objects(void) |
518 | { | |
519 | rcu_register_thread(); | |
520 | ||
521 | print_viewer_streams(); | |
522 | print_relay_streams(); | |
523 | print_sessions(); | |
524 | ||
525 | rcu_unregister_thread(); | |
526 | } | |
527 | ||
b8aa1682 JD |
528 | /* |
529 | * Cleanup the daemon | |
530 | */ | |
7591bab1 | 531 | static void relayd_cleanup(void) |
b8aa1682 | 532 | { |
7591bab1 MD |
533 | print_global_objects(); |
534 | ||
b8aa1682 JD |
535 | DBG("Cleaning up"); |
536 | ||
178a0557 MD |
537 | if (viewer_streams_ht) |
538 | lttng_ht_destroy(viewer_streams_ht); | |
539 | if (relay_streams_ht) | |
540 | lttng_ht_destroy(relay_streams_ht); | |
7591bab1 MD |
541 | if (sessions_ht) |
542 | lttng_ht_destroy(sessions_ht); | |
178a0557 | 543 | |
095a4ae5 MD |
544 | /* free the dynamically allocated opt_output_path */ |
545 | free(opt_output_path); | |
546 | ||
a02de639 CB |
547 | /* Close thread quit pipes */ |
548 | utils_close_pipe(thread_quit_pipe); | |
549 | ||
710c1f73 DG |
550 | uri_free(control_uri); |
551 | uri_free(data_uri); | |
8d5c808e | 552 | /* Live URI is freed in the live thread. */ |
cd60b05a JG |
553 | |
554 | if (tracing_group_name_override) { | |
555 | free((void *) tracing_group_name); | |
556 | } | |
b8aa1682 JD |
557 | } |
558 | ||
559 | /* | |
560 | * Write to writable pipe used to notify a thread. | |
561 | */ | |
7591bab1 | 562 | static int notify_thread_pipe(int wpipe) |
b8aa1682 | 563 | { |
6cd525e8 | 564 | ssize_t ret; |
b8aa1682 | 565 | |
6cd525e8 MD |
566 | ret = lttng_write(wpipe, "!", 1); |
567 | if (ret < 1) { | |
b8aa1682 | 568 | PERROR("write poll pipe"); |
b4aacfdc | 569 | goto end; |
b8aa1682 | 570 | } |
b4aacfdc MD |
571 | ret = 0; |
572 | end: | |
b8aa1682 JD |
573 | return ret; |
574 | } | |
575 | ||
7591bab1 | 576 | static int notify_health_quit_pipe(int *pipe) |
65931c8b | 577 | { |
6cd525e8 | 578 | ssize_t ret; |
65931c8b | 579 | |
6cd525e8 MD |
580 | ret = lttng_write(pipe[1], "4", 1); |
581 | if (ret < 1) { | |
65931c8b | 582 | PERROR("write relay health quit"); |
b4aacfdc | 583 | goto end; |
65931c8b | 584 | } |
b4aacfdc MD |
585 | ret = 0; |
586 | end: | |
587 | return ret; | |
65931c8b MD |
588 | } |
589 | ||
b8aa1682 | 590 | /* |
b4aacfdc | 591 | * Stop all relayd and relayd-live threads. |
b8aa1682 | 592 | */ |
b4aacfdc | 593 | int lttng_relay_stop_threads(void) |
b8aa1682 | 594 | { |
b4aacfdc | 595 | int retval = 0; |
b8aa1682 JD |
596 | |
597 | /* Stopping all threads */ | |
598 | DBG("Terminating all threads"); | |
b4aacfdc | 599 | if (notify_thread_pipe(thread_quit_pipe[1])) { |
b8aa1682 | 600 | ERR("write error on thread quit pipe"); |
b4aacfdc | 601 | retval = -1; |
b8aa1682 JD |
602 | } |
603 | ||
b4aacfdc MD |
604 | if (notify_health_quit_pipe(health_quit_pipe)) { |
605 | ERR("write error on health quit pipe"); | |
606 | } | |
65931c8b | 607 | |
b8aa1682 | 608 | /* Dispatch thread */ |
26c9d55e | 609 | CMM_STORE_SHARED(dispatch_thread_exit, 1); |
58eb9381 | 610 | futex_nto1_wake(&relay_conn_queue.futex); |
178a0557 | 611 | |
b4aacfdc | 612 | if (relayd_live_stop()) { |
178a0557 | 613 | ERR("Error stopping live threads"); |
b4aacfdc | 614 | retval = -1; |
178a0557 | 615 | } |
b4aacfdc | 616 | return retval; |
b8aa1682 JD |
617 | } |
618 | ||
619 | /* | |
620 | * Signal handler for the daemon | |
621 | * | |
622 | * Simply stop all worker threads, letting main() return gracefully after
623 | * joining all threads and calling cleanup(). | |
624 | */ | |
7591bab1 | 625 | static void sighandler(int sig) |
b8aa1682 JD |
626 | { |
627 | switch (sig) { | |
b8aa1682 JD |
628 | case SIGINT: |
629 | DBG("SIGINT caught"); | |
b4aacfdc MD |
630 | if (lttng_relay_stop_threads()) { |
631 | ERR("Error stopping threads"); | |
632 | } | |
b8aa1682 JD |
633 | break; |
634 | case SIGTERM: | |
635 | DBG("SIGTERM caught"); | |
b4aacfdc MD |
636 | if (lttng_relay_stop_threads()) { |
637 | ERR("Error stopping threads"); | |
638 | } | |
b8aa1682 | 639 | break; |
3fd27398 MD |
640 | case SIGUSR1: |
641 | CMM_STORE_SHARED(recv_child_signal, 1); | |
642 | break; | |
b8aa1682 JD |
643 | default: |
644 | break; | |
645 | } | |
646 | } | |
647 | ||
648 | /* | |
649 | * Set up the signal handlers for:
650 | * SIGINT, SIGTERM, SIGPIPE and SIGUSR1
651 | */ | |
7591bab1 | 652 | static int set_signal_handler(void) |
b8aa1682 JD |
653 | { |
654 | int ret = 0; | |
655 | struct sigaction sa; | |
656 | sigset_t sigset; | |
657 | ||
658 | if ((ret = sigemptyset(&sigset)) < 0) { | |
659 | PERROR("sigemptyset"); | |
660 | return ret; | |
661 | } | |
662 | ||
b8aa1682 JD |
663 | sa.sa_mask = sigset; |
664 | sa.sa_flags = 0; | |
0072e5e2 MD |
665 | |
666 | sa.sa_handler = sighandler; | |
b8aa1682 JD |
667 | if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) { |
668 | PERROR("sigaction"); | |
669 | return ret; | |
670 | } | |
671 | ||
672 | if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) { | |
673 | PERROR("sigaction"); | |
674 | return ret; | |
675 | } | |
676 | ||
0072e5e2 | 677 | if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) { |
b8aa1682 JD |
678 | PERROR("sigaction"); |
679 | return ret; | |
680 | } | |
681 | ||
0072e5e2 MD |
682 | sa.sa_handler = SIG_IGN; |
683 | if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) { | |
3fd27398 MD |
684 | PERROR("sigaction"); |
685 | return ret; | |
686 | } | |
687 | ||
688 | DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT"); | |
b8aa1682 JD |
689 | |
690 | return ret; | |
691 | } | |
692 | ||
3fd27398 MD |
693 | void lttng_relay_notify_ready(void) |
694 | { | |
695 | /* Notify the parent of the fork() process that we are ready. */ | |
696 | if (opt_daemon || opt_background) { | |
697 | if (uatomic_sub_return(<tng_relay_ready, 1) == 0) { | |
698 | kill(child_ppid, SIGUSR1); | |
699 | } | |
700 | } | |
701 | } | |
702 | ||
b8aa1682 JD |
703 | /* |
704 | * Init thread quit pipe. | |
705 | * | |
706 | * Return -1 on error or 0 if all pipes are created. | |
707 | */ | |
7591bab1 | 708 | static int init_thread_quit_pipe(void) |
b8aa1682 | 709 | { |
a02de639 | 710 | int ret; |
b8aa1682 | 711 | |
a02de639 | 712 | ret = utils_create_pipe_cloexec(thread_quit_pipe); |
b8aa1682 | 713 | |
b8aa1682 JD |
714 | return ret; |
715 | } | |
716 | ||
717 | /* | |
718 | * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set. | |
719 | */ | |
7591bab1 | 720 | static int create_thread_poll_set(struct lttng_poll_event *events, int size) |
b8aa1682 JD |
721 | { |
722 | int ret; | |
723 | ||
724 | if (events == NULL || size == 0) { | |
725 | ret = -1; | |
726 | goto error; | |
727 | } | |
728 | ||
729 | ret = lttng_poll_create(events, size, LTTNG_CLOEXEC); | |
730 | if (ret < 0) { | |
731 | goto error; | |
732 | } | |
733 | ||
734 | /* Add quit pipe */ | |
c7759e6a | 735 | ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR); |
b8aa1682 JD |
736 | if (ret < 0) { |
737 | goto error; | |
738 | } | |
739 | ||
740 | return 0; | |
741 | ||
742 | error: | |
743 | return ret; | |
744 | } | |
745 | ||
746 | /* | |
747 | * Check if the thread quit pipe was triggered. | |
748 | * | |
749 | * Return 1 if it was triggered, else 0.
750 | */ | |
7591bab1 | 751 | static int check_thread_quit_pipe(int fd, uint32_t events) |
b8aa1682 JD |
752 | { |
753 | if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) { | |
754 | return 1; | |
755 | } | |
756 | ||
757 | return 0; | |
758 | } | |
759 | ||
760 | /* | |
761 | * Create and init socket from uri. | |
762 | */ | |
7591bab1 | 763 | static struct lttcomm_sock *relay_socket_create(struct lttng_uri *uri) |
b8aa1682 JD |
764 | { |
765 | int ret; | |
766 | struct lttcomm_sock *sock = NULL; | |
767 | ||
768 | sock = lttcomm_alloc_sock_from_uri(uri); | |
769 | if (sock == NULL) { | |
770 | ERR("Allocating socket"); | |
771 | goto error; | |
772 | } | |
773 | ||
774 | ret = lttcomm_create_sock(sock); | |
775 | if (ret < 0) { | |
776 | goto error; | |
777 | } | |
778 | DBG("Listening on sock %d", sock->fd); | |
779 | ||
780 | ret = sock->ops->bind(sock); | |
781 | if (ret < 0) { | |
2288467f | 782 | PERROR("Failed to bind socket"); |
b8aa1682 JD |
783 | goto error; |
784 | } | |
785 | ||
786 | ret = sock->ops->listen(sock, -1); | |
787 | if (ret < 0) { | |
788 | goto error; | |
789 | ||
790 | } | |
791 | ||
792 | return sock; | |
793 | ||
794 | error: | |
795 | if (sock) { | |
796 | lttcomm_destroy_sock(sock); | |
797 | } | |
798 | return NULL; | |
799 | } | |
800 | ||
801 | /* | |
802 | * This thread listens for new connections on the network.
803 | */ | |
7591bab1 | 804 | static void *relay_thread_listener(void *data) |
b8aa1682 | 805 | { |
095a4ae5 | 806 | int i, ret, pollfd, err = -1; |
b8aa1682 JD |
807 | uint32_t revents, nb_fd; |
808 | struct lttng_poll_event events; | |
809 | struct lttcomm_sock *control_sock, *data_sock; | |
810 | ||
b8aa1682 JD |
811 | DBG("[thread] Relay listener started"); |
812 | ||
55706a7d MD |
813 | health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER); |
814 | ||
f385ae0a MD |
815 | health_code_update(); |
816 | ||
7591bab1 | 817 | control_sock = relay_socket_create(control_uri); |
b8aa1682 | 818 | if (!control_sock) { |
095a4ae5 | 819 | goto error_sock_control; |
b8aa1682 JD |
820 | } |
821 | ||
7591bab1 | 822 | data_sock = relay_socket_create(data_uri); |
b8aa1682 | 823 | if (!data_sock) { |
095a4ae5 | 824 | goto error_sock_relay; |
b8aa1682 JD |
825 | } |
826 | ||
827 | /* | |
7591bab1 MD |
828 | * Pass 3 as size here for the thread quit pipe, control and |
829 | * data socket. | |
b8aa1682 JD |
830 | */ |
831 | ret = create_thread_poll_set(&events, 3); | |
832 | if (ret < 0) { | |
833 | goto error_create_poll; | |
834 | } | |
835 | ||
836 | /* Add the control socket */ | |
837 | ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP); | |
838 | if (ret < 0) { | |
839 | goto error_poll_add; | |
840 | } | |
841 | ||
842 | /* Add the data socket */ | |
843 | ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP); | |
844 | if (ret < 0) { | |
845 | goto error_poll_add; | |
846 | } | |
847 | ||
3fd27398 MD |
848 | lttng_relay_notify_ready(); |
849 | ||
9b5e0863 MD |
850 | if (testpoint(relayd_thread_listener)) { |
851 | goto error_testpoint; | |
852 | } | |
853 | ||
b8aa1682 | 854 | while (1) { |
f385ae0a MD |
855 | health_code_update(); |
856 | ||
b8aa1682 JD |
857 | DBG("Listener accepting connections"); |
858 | ||
b8aa1682 | 859 | restart: |
f385ae0a | 860 | health_poll_entry(); |
b8aa1682 | 861 | ret = lttng_poll_wait(&events, -1); |
f385ae0a | 862 | health_poll_exit(); |
b8aa1682 JD |
863 | if (ret < 0) { |
864 | /* | |
865 | * Restart interrupted system call. | |
866 | */ | |
867 | if (errno == EINTR) { | |
868 | goto restart; | |
869 | } | |
870 | goto error; | |
871 | } | |
872 | ||
0d9c5d77 DG |
873 | nb_fd = ret; |
874 | ||
b8aa1682 JD |
875 | DBG("Relay new connection received"); |
876 | for (i = 0; i < nb_fd; i++) { | |
f385ae0a MD |
877 | health_code_update(); |
878 | ||
b8aa1682 JD |
879 | /* Fetch once the poll data */ |
880 | revents = LTTNG_POLL_GETEV(&events, i); | |
881 | pollfd = LTTNG_POLL_GETFD(&events, i); | |
882 | ||
883 | /* Thread quit pipe has been closed. Killing thread. */ | |
884 | ret = check_thread_quit_pipe(pollfd, revents); | |
885 | if (ret) { | |
095a4ae5 MD |
886 | err = 0; |
887 | goto exit; | |
b8aa1682 JD |
888 | } |
889 | ||
03e43155 | 890 | if (revents & LPOLLIN) { |
4b7f17b2 | 891 | /* |
7591bab1 MD |
892 | * A new connection is requested, therefore a |
893 | * sessiond/consumerd connection is allocated in | |
894 | * this thread, enqueued to a global queue and | |
895 | * dequeued (and freed) in the worker thread. | |
4b7f17b2 | 896 | */ |
58eb9381 DG |
897 | int val = 1; |
898 | struct relay_connection *new_conn; | |
4b7f17b2 | 899 | struct lttcomm_sock *newsock; |
7591bab1 | 900 | enum connection_type type; |
b8aa1682 JD |
901 | |
902 | if (pollfd == data_sock->fd) { | |
7591bab1 | 903 | type = RELAY_DATA; |
b8aa1682 | 904 | newsock = data_sock->ops->accept(data_sock); |
58eb9381 DG |
905 | DBG("Relay data connection accepted, socket %d", |
906 | newsock->fd); | |
4b7f17b2 MD |
907 | } else { |
908 | assert(pollfd == control_sock->fd); | |
7591bab1 | 909 | type = RELAY_CONTROL; |
b8aa1682 | 910 | newsock = control_sock->ops->accept(control_sock); |
58eb9381 DG |
911 | DBG("Relay control connection accepted, socket %d", |
912 | newsock->fd); | |
b8aa1682 | 913 | } |
58eb9381 DG |
914 | if (!newsock) { |
915 | PERROR("accepting sock"); | |
58eb9381 DG |
916 | goto error; |
917 | } | |
918 | ||
919 | ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val, | |
920 | sizeof(val)); | |
b8aa1682 JD |
921 | if (ret < 0) { |
922 | PERROR("setsockopt inet"); | |
4b7f17b2 | 923 | lttcomm_destroy_sock(newsock); |
b8aa1682 JD |
924 | goto error; |
925 | } | |
f056029c JR |
926 | |
927 | ret = socket_apply_keep_alive_config(newsock->fd); | |
928 | if (ret < 0) { | |
929 | ERR("Failed to apply TCP keep-alive configuration on socket (%i)", | |
930 | newsock->fd); | |
931 | lttcomm_destroy_sock(newsock); | |
932 | goto error; | |
933 | } | |
934 | ||
7591bab1 MD |
935 | new_conn = connection_create(newsock, type); |
936 | if (!new_conn) { | |
937 | lttcomm_destroy_sock(newsock); | |
938 | goto error; | |
939 | } | |
58eb9381 DG |
940 | |
941 | /* Enqueue request for the dispatcher thread. */ | |
8bdee6e2 SM |
942 | cds_wfcq_enqueue(&relay_conn_queue.head, &relay_conn_queue.tail, |
943 | &new_conn->qnode); | |
b8aa1682 JD |
944 | |
945 | /* | |
7591bab1 MD |
946 | * Wake the dispatch queue futex. |
947 | * Implicit memory barrier with the | |
948 | * exchange in cds_wfcq_enqueue. | |
b8aa1682 | 949 | */ |
58eb9381 | 950 | futex_nto1_wake(&relay_conn_queue.futex); |
03e43155 MD |
951 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
952 | ERR("socket poll error"); | |
953 | goto error; | |
954 | } else { | |
955 | ERR("Unexpected poll events %u for sock %d", revents, pollfd); | |
956 | goto error; | |
b8aa1682 JD |
957 | } |
958 | } | |
959 | } | |
960 | ||
095a4ae5 | 961 | exit: |
b8aa1682 JD |
962 | error: |
963 | error_poll_add: | |
9b5e0863 | 964 | error_testpoint: |
b8aa1682 JD |
965 | lttng_poll_clean(&events); |
966 | error_create_poll: | |
095a4ae5 MD |
967 | if (data_sock->fd >= 0) { |
968 | ret = data_sock->ops->close(data_sock); | |
b8aa1682 JD |
969 | if (ret) { |
970 | PERROR("close"); | |
971 | } | |
b8aa1682 | 972 | } |
095a4ae5 MD |
973 | lttcomm_destroy_sock(data_sock); |
974 | error_sock_relay: | |
975 | if (control_sock->fd >= 0) { | |
976 | ret = control_sock->ops->close(control_sock); | |
b8aa1682 JD |
977 | if (ret) { |
978 | PERROR("close"); | |
979 | } | |
b8aa1682 | 980 | } |
095a4ae5 MD |
981 | lttcomm_destroy_sock(control_sock); |
982 | error_sock_control: | |
983 | if (err) { | |
f385ae0a MD |
984 | health_error(); |
985 | ERR("Health error occurred in %s", __func__); | |
095a4ae5 | 986 | } |
55706a7d | 987 | health_unregister(health_relayd); |
b8aa1682 | 988 | DBG("Relay listener thread cleanup complete"); |
b4aacfdc | 989 | lttng_relay_stop_threads(); |
b8aa1682 JD |
990 | return NULL; |
991 | } | |
992 | ||
993 | /* | |
994 | * This thread manages the dispatching of the requests to worker threads | |
995 | */ | |
7591bab1 | 996 | static void *relay_thread_dispatcher(void *data) |
b8aa1682 | 997 | { |
6cd525e8 MD |
998 | int err = -1; |
999 | ssize_t ret; | |
8bdee6e2 | 1000 | struct cds_wfcq_node *node; |
58eb9381 | 1001 | struct relay_connection *new_conn = NULL; |
b8aa1682 JD |
1002 | |
1003 | DBG("[thread] Relay dispatcher started"); | |
1004 | ||
55706a7d MD |
1005 | health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER); |
1006 | ||
9b5e0863 MD |
1007 | if (testpoint(relayd_thread_dispatcher)) { |
1008 | goto error_testpoint; | |
1009 | } | |
1010 | ||
f385ae0a MD |
1011 | health_code_update(); |
1012 | ||
0ed3b1a8 | 1013 | for (;;) { |
f385ae0a MD |
1014 | health_code_update(); |
1015 | ||
b8aa1682 | 1016 | /* Atomically prepare the queue futex */ |
58eb9381 | 1017 | futex_nto1_prepare(&relay_conn_queue.futex); |
b8aa1682 | 1018 | |
0ed3b1a8 MD |
1019 | if (CMM_LOAD_SHARED(dispatch_thread_exit)) { |
1020 | break; | |
1021 | } | |
1022 | ||
b8aa1682 | 1023 | do { |
f385ae0a MD |
1024 | health_code_update(); |
1025 | ||
b8aa1682 | 1026 | /* Dequeue commands */ |
8bdee6e2 SM |
1027 | node = cds_wfcq_dequeue_blocking(&relay_conn_queue.head, |
1028 | &relay_conn_queue.tail); | |
b8aa1682 JD |
1029 | if (node == NULL) { |
1030 | DBG("Woken up but nothing in the relay command queue"); | |
1031 | /* Continue thread execution */ | |
1032 | break; | |
1033 | } | |
58eb9381 | 1034 | new_conn = caa_container_of(node, struct relay_connection, qnode); |
b8aa1682 | 1035 | |
58eb9381 | 1036 | DBG("Dispatching request waiting on sock %d", new_conn->sock->fd); |
b8aa1682 JD |
1037 | |
1038 | /* | |
7591bab1 MD |
1039 | * Inform worker thread of the new request. This |
1040 | * call is blocking so we can be assured that | |
1041 | * the data will be read at some point in time | |
1042 | * or wait to the end of the world :) | |
b8aa1682 | 1043 | */ |
58eb9381 DG |
1044 | ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn)); |
1045 | if (ret < 0) { | |
1046 | PERROR("write connection pipe"); | |
7591bab1 | 1047 | connection_put(new_conn); |
b8aa1682 JD |
1048 | goto error; |
1049 | } | |
1050 | } while (node != NULL); | |
1051 | ||
1052 | /* Futex wait on queue. Blocking call on futex() */ | |
f385ae0a | 1053 | health_poll_entry(); |
58eb9381 | 1054 | futex_nto1_wait(&relay_conn_queue.futex); |
f385ae0a | 1055 | health_poll_exit(); |
b8aa1682 JD |
1056 | } |
1057 | ||
f385ae0a MD |
1058 | /* Normal exit, no error */ |
1059 | err = 0; | |
1060 | ||
b8aa1682 | 1061 | error: |
9b5e0863 | 1062 | error_testpoint: |
f385ae0a MD |
1063 | if (err) { |
1064 | health_error(); | |
1065 | ERR("Health error occurred in %s", __func__); | |
1066 | } | |
55706a7d | 1067 | health_unregister(health_relayd); |
b8aa1682 | 1068 | DBG("Dispatch thread dying"); |
b4aacfdc | 1069 | lttng_relay_stop_threads(); |
b8aa1682 JD |
1070 | return NULL; |
1071 | } | |
1072 | ||
298a25ca JG |
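/*
 * Stream indexes are only produced for peers using protocol 2.4 or later
 * (session->minor >= 4) and never for snapshot sessions.
 */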
1073 | static bool session_streams_have_index(const struct relay_session *session) |
1074 | { | |
1075 | return session->minor >= 4 && !session->snapshot; | |
1076 | } | |
1077 | ||
c5b6f4f0 DG |
1078 | /* |
1079 | * Handle the RELAYD_CREATE_SESSION command. | |
1080 | * | |
1081 | * On success, send back the session id or else return a negative value. | |
1082 | */ | |
5312a3ed JG |
1083 | static int relay_create_session(const struct lttcomm_relayd_hdr *recv_hdr, |
1084 | struct relay_connection *conn, | |
1085 | const struct lttng_buffer_view *payload) | |
c5b6f4f0 | 1086 | { |
5312a3ed JG |
1087 | int ret = 0; |
1088 | ssize_t send_ret; | |
4c6885d2 | 1089 | struct relay_session *session = NULL; |
ecd1a12f | 1090 | struct lttcomm_relayd_create_session_reply_2_11 reply = {}; |
1e791a74 JG |
1091 | char session_name[LTTNG_NAME_MAX] = {}; |
1092 | char hostname[LTTNG_HOST_NAME_MAX] = {}; | |
7591bab1 MD |
1093 | uint32_t live_timer = 0; |
1094 | bool snapshot = false; | |
46ef2188 | 1095 | bool session_name_contains_creation_timestamp = false; |
23c8ff50 | 1096 | /* Left nil for peers < 2.11. */ |
6fa5fe7c | 1097 | char base_path[LTTNG_PATH_MAX] = {}; |
23c8ff50 | 1098 | lttng_uuid sessiond_uuid = {}; |
1e791a74 JG |
1099 | LTTNG_OPTIONAL(uint64_t) id_sessiond = {}; |
1100 | LTTNG_OPTIONAL(uint64_t) current_chunk_id = {}; | |
db1da059 | 1101 | LTTNG_OPTIONAL(time_t) creation_time = {}; |
ecd1a12f MD |
1102 | struct lttng_dynamic_buffer reply_payload; |
1103 | ||
1104 | lttng_dynamic_buffer_init(&reply_payload); | |
c5b6f4f0 | 1105 | |
f86f6389 JR |
1106 | if (conn->minor < 4) { |
1107 | /* From 2.1 to 2.3 */ | |
1108 | ret = 0; | |
1109 | } else if (conn->minor >= 4 && conn->minor < 11) { | |
1110 | /* From 2.4 to 2.10 */ | |
5312a3ed | 1111 | ret = cmd_create_session_2_4(payload, session_name, |
7591bab1 | 1112 | hostname, &live_timer, &snapshot); |
f86f6389 | 1113 | } else { |
84fa4db5 | 1114 | bool has_current_chunk; |
db1da059 JG |
1115 | uint64_t current_chunk_id_value; |
1116 | time_t creation_time_value; | |
1117 | uint64_t id_sessiond_value; | |
84fa4db5 | 1118 | |
f86f6389 | 1119 | /* From 2.11 to ... */ |
db1da059 | 1120 | ret = cmd_create_session_2_11(payload, session_name, hostname, |
6fa5fe7c | 1121 | base_path, &live_timer, &snapshot, &id_sessiond_value, |
db1da059 | 1122 | sessiond_uuid, &has_current_chunk, |
46ef2188 MD |
1123 | &current_chunk_id_value, &creation_time_value,
1124 | &session_name_contains_creation_timestamp); | |
23c8ff50 JG |
1125 | if (lttng_uuid_is_nil(sessiond_uuid)) { |
1126 | /* The nil UUID is reserved for pre-2.11 clients. */ | |
1127 | ERR("Illegal nil UUID announced by peer in create session command"); | |
1128 | ret = -1; | |
1129 | goto send_reply; | |
1130 | } | |
db1da059 JG |
1131 | LTTNG_OPTIONAL_SET(&id_sessiond, id_sessiond_value); |
1132 | LTTNG_OPTIONAL_SET(&creation_time, creation_time_value); | |
1133 | if (has_current_chunk) { | |
1134 | LTTNG_OPTIONAL_SET(&current_chunk_id,
1135 | current_chunk_id_value); | |
1136 | } | |
7591bab1 | 1137 | } |
f86f6389 | 1138 | |
7591bab1 MD |
1139 | if (ret < 0) { |
1140 | goto send_reply; | |
d3e2ba59 JD |
1141 | } |
1142 | ||
6fa5fe7c | 1143 | session = session_create(session_name, hostname, base_path, live_timer, |
1e791a74 JG |
1144 | snapshot, sessiond_uuid, |
1145 | id_sessiond.is_set ? &id_sessiond.value : NULL, | |
1146 | current_chunk_id.is_set ? &current_chunk_id.value : NULL,
db1da059 | 1147 | creation_time.is_set ? &creation_time.value : NULL, |
46ef2188 MD |
1148 | conn->major, conn->minor, |
1149 | session_name_contains_creation_timestamp); | |
7591bab1 MD |
1150 | if (!session) { |
1151 | ret = -1; | |
1152 | goto send_reply; | |
1153 | } | |
1154 | assert(!conn->session); | |
1155 | conn->session = session; | |
c5b6f4f0 DG |
1156 | DBG("Created session %" PRIu64, session->id); |
1157 | ||
ecd1a12f | 1158 | reply.generic.session_id = htobe64(session->id); |
7591bab1 MD |
1159 | |
1160 | send_reply: | |
c5b6f4f0 | 1161 | if (ret < 0) { |
ecd1a12f | 1162 | reply.generic.ret_code = htobe32(LTTNG_ERR_FATAL); |
c5b6f4f0 | 1163 | } else { |
ecd1a12f | 1164 | reply.generic.ret_code = htobe32(LTTNG_OK); |
c5b6f4f0 DG |
1165 | } |
1166 | ||
ecd1a12f MD |
1167 | if (conn->minor < 11) { |
1168 | /* From 2.1 to 2.10 */ | |
1169 | ret = lttng_dynamic_buffer_append(&reply_payload, | |
1170 | &reply.generic, sizeof(reply.generic)); | |
1171 | if (ret) { | |
1172 | ERR("Failed to append \"create session\" command reply header to payload buffer"); | |
1173 | ret = -1; | |
1174 | goto end; | |
1175 | } | |
1176 | } else { | |
1177 | const uint32_t output_path_length = | |
8d382dd4 | 1178 | session ? strlen(session->output_path) + 1 : 0; |
ecd1a12f MD |
1179 | |
1180 | reply.output_path_length = htobe32(output_path_length); | |
8d382dd4 JG |
1181 | ret = lttng_dynamic_buffer_append( |
1182 | &reply_payload, &reply, sizeof(reply)); | |
ecd1a12f MD |
1183 | if (ret) { |
1184 | ERR("Failed to append \"create session\" command reply header to payload buffer"); | |
1185 | goto end; | |
1186 | } | |
1187 | ||
8d382dd4 JG |
1188 | if (output_path_length) { |
1189 | ret = lttng_dynamic_buffer_append(&reply_payload, | |
1190 | session->output_path, | |
1191 | output_path_length); | |
1192 | if (ret) { | |
1193 | ERR("Failed to append \"create session\" command reply path to payload buffer"); | |
1194 | goto end; | |
1195 | } | |
ecd1a12f MD |
1196 | } |
1197 | } | |
1198 | ||
1199 | send_ret = conn->sock->ops->sendmsg(conn->sock, reply_payload.data, | |
1200 | reply_payload.size, 0); | |
1201 | if (send_ret < (ssize_t) reply_payload.size) { | |
1202 | ERR("Failed to send \"create session\" command reply of %zu bytes (ret = %zd)", | |
1203 | reply_payload.size, send_ret); | |
5312a3ed | 1204 | ret = -1; |
c5b6f4f0 | 1205 | } |
ecd1a12f | 1206 | end: |
4c6885d2 JG |
1207 | if (ret < 0 && session) { |
1208 | session_put(session); | |
1209 | } | |
ecd1a12f | 1210 | lttng_dynamic_buffer_reset(&reply_payload); |
c5b6f4f0 DG |
1211 | return ret; |
1212 | } | |
1213 | ||
a4baae1b JD |
1214 | /* |
1215 | * When we have received all the streams and the metadata for a channel, | |
1216 | * we make them visible to the viewer threads. | |
1217 | */ | |
7591bab1 | 1218 | static void publish_connection_local_streams(struct relay_connection *conn) |
a4baae1b | 1219 | { |
7591bab1 MD |
1220 | struct relay_stream *stream; |
1221 | struct relay_session *session = conn->session; | |
a4baae1b | 1222 | |
7591bab1 MD |
1223 | /* |
1224 | * We publish all streams belonging to a session atomically wrt | |
1225 | * session lock. | |
1226 | */ | |
1227 | pthread_mutex_lock(&session->lock); | |
1228 | rcu_read_lock(); | |
1229 | cds_list_for_each_entry_rcu(stream, &session->recv_list, | |
1230 | recv_node) { | |
1231 | stream_publish(stream); | |
a4baae1b | 1232 | } |
7591bab1 | 1233 | rcu_read_unlock(); |
a4baae1b | 1234 | |
7591bab1 MD |
1235 | /* |
1236 | * Inform the viewer that there are new streams in the session. | |
1237 | */ | |
1238 | if (session->viewer_attached) { | |
1239 | uatomic_set(&session->new_streams, 1); | |
1240 | } | |
1241 | pthread_mutex_unlock(&session->lock); | |
a4baae1b JD |
1242 | } |
1243 | ||
348a81dc JG |
1244 | static int conform_channel_path(char *channel_path) |
1245 | { | |
1246 | int ret = 0; | |
1247 | ||
1248 | if (strstr("../", channel_path)) { | |
1249 | ERR("Refusing channel path as it walks up the path hierarchy: \"%s\"", | |
1250 | channel_path); | |
1251 | ret = -1; | |
1252 | goto end; | |
1253 | } | |
1254 | ||
1255 | if (*channel_path == '/') { | |
1256 | const size_t len = strlen(channel_path); | |
1257 | ||
1258 | /* | |
1259 | * Channel paths from peers prior to 2.11 are expressed as an | |
1260 | * absolute path that is, in reality, relative to the relay | |
1261 | * daemon's output directory. Remove the leading slash so it | |
1262 | * is correctly interpreted as a relative path later on. | |
1263 | * | |
1264 | * len (and not len - 1) is used to copy the trailing NULL. | |
1265 | */ | |
1266 | bcopy(channel_path + 1, channel_path, len); | |
1267 | } | |
1268 | end: | |
1269 | return ret; | |
1270 | } | |
1271 | ||
b8aa1682 JD |
1272 | /* |
1273 | * relay_add_stream: allocate a new stream for a session | |
1274 | */ | |
5312a3ed JG |
1275 | static int relay_add_stream(const struct lttcomm_relayd_hdr *recv_hdr, |
1276 | struct relay_connection *conn, | |
1277 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1278 | { |
7591bab1 MD |
1279 | int ret; |
1280 | ssize_t send_ret; | |
58eb9381 | 1281 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1282 | struct relay_stream *stream = NULL; |
1283 | struct lttcomm_relayd_status_stream reply; | |
4030a636 | 1284 | struct ctf_trace *trace = NULL; |
7591bab1 MD |
1285 | uint64_t stream_handle = -1ULL; |
1286 | char *path_name = NULL, *channel_name = NULL; | |
1287 | uint64_t tracefile_size = 0, tracefile_count = 0; | |
348a81dc | 1288 | LTTNG_OPTIONAL(uint64_t) stream_chunk_id = {}; |
b8aa1682 | 1289 | |
5312a3ed | 1290 | if (!session || !conn->version_check_done) { |
b8aa1682 JD |
1291 | ERR("Trying to add a stream before version check"); |
1292 | ret = -1; | |
1293 | goto end_no_session; | |
1294 | } | |
1295 | ||
2f21a469 JR |
1296 | if (session->minor == 1) { |
1297 | /* For 2.1 */ | |
5312a3ed | 1298 | ret = cmd_recv_stream_2_1(payload, &path_name, |
7591bab1 | 1299 | &channel_name); |
2f21a469 JR |
1300 | } else if (session->minor > 1 && session->minor < 11) { |
1301 | /* From 2.2 to 2.10 */ | |
5312a3ed | 1302 | ret = cmd_recv_stream_2_2(payload, &path_name, |
7591bab1 | 1303 | &channel_name, &tracefile_size, &tracefile_count); |
2f21a469 JR |
1304 | } else { |
1305 | /* From 2.11 to ... */ | |
1306 | ret = cmd_recv_stream_2_11(payload, &path_name, | |
0b50e4b3 JG |
1307 | &channel_name, &tracefile_size, &tracefile_count, |
1308 | &stream_chunk_id.value); | |
1309 | stream_chunk_id.is_set = true; | |
0f907de1 | 1310 | } |
2f21a469 | 1311 | |
0f907de1 | 1312 | if (ret < 0) { |
7591bab1 | 1313 | goto send_reply; |
b8aa1682 JD |
1314 | } |
1315 | ||
348a81dc JG |
1316 | if (conform_channel_path(path_name)) { |
1317 | goto send_reply; | |
1318 | } | |
1319 | ||
7591bab1 | 1320 | trace = ctf_trace_get_by_path_or_create(session, path_name); |
2a174661 | 1321 | if (!trace) { |
7591bab1 | 1322 | goto send_reply; |
2a174661 | 1323 | } |
7591bab1 | 1324 | /* This stream here has one reference on the trace. */ |
2a174661 | 1325 | |
7591bab1 MD |
1326 | pthread_mutex_lock(&last_relay_stream_id_lock); |
1327 | stream_handle = ++last_relay_stream_id; | |
1328 | pthread_mutex_unlock(&last_relay_stream_id_lock); | |
d3e2ba59 | 1329 | |
7591bab1 MD |
1330 | /* We pass ownership of path_name and channel_name. */ |
1331 | stream = stream_create(trace, stream_handle, path_name, | |
348a81dc | 1332 | channel_name, tracefile_size, tracefile_count); |
7591bab1 MD |
1333 | path_name = NULL; |
1334 | channel_name = NULL; | |
a4baae1b | 1335 | |
2a174661 | 1336 | /* |
7591bab1 MD |
1337 | * Streams are the owners of their trace. Reference to trace is |
1338 | * kept within stream_create(). | |
2a174661 | 1339 | */ |
7591bab1 | 1340 | ctf_trace_put(trace); |
d3e2ba59 | 1341 | |
7591bab1 | 1342 | send_reply: |
53efb85a | 1343 | memset(&reply, 0, sizeof(reply)); |
7591bab1 MD |
1344 | reply.handle = htobe64(stream_handle); |
1345 | if (!stream) { | |
f73fabfd | 1346 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
b8aa1682 | 1347 | } else { |
f73fabfd | 1348 | reply.ret_code = htobe32(LTTNG_OK); |
b8aa1682 | 1349 | } |
5af40280 | 1350 | |
58eb9381 | 1351 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
b8aa1682 | 1352 | sizeof(struct lttcomm_relayd_status_stream), 0); |
5312a3ed JG |
1353 | if (send_ret < (ssize_t) sizeof(reply)) { |
1354 | ERR("Failed to send \"add stream\" command reply (ret = %zd)", | |
1355 | send_ret); | |
1356 | ret = -1; | |
b8aa1682 JD |
1357 | } |
1358 | ||
1359 | end_no_session: | |
7591bab1 MD |
1360 | free(path_name); |
1361 | free(channel_name); | |
0f907de1 | 1362 | return ret; |
b8aa1682 JD |
1363 | } |
1364 | ||
173af62f DG |
1365 | /* |
1366 | * relay_close_stream: close a specific stream | |
1367 | */ | |
5312a3ed JG |
1368 | static int relay_close_stream(const struct lttcomm_relayd_hdr *recv_hdr, |
1369 | struct relay_connection *conn, | |
1370 | const struct lttng_buffer_view *payload) | |
173af62f | 1371 | { |
5312a3ed JG |
1372 | int ret; |
1373 | ssize_t send_ret; | |
58eb9381 | 1374 | struct relay_session *session = conn->session; |
173af62f DG |
1375 | struct lttcomm_relayd_close_stream stream_info; |
1376 | struct lttcomm_relayd_generic_reply reply; | |
1377 | struct relay_stream *stream; | |
173af62f DG |
1378 | |
1379 | DBG("Close stream received"); | |
1380 | ||
5312a3ed | 1381 | if (!session || !conn->version_check_done) { |
173af62f DG |
1382 | ERR("Trying to close a stream before version check"); |
1383 | ret = -1; | |
1384 | goto end_no_session; | |
1385 | } | |
1386 | ||
5312a3ed JG |
1387 | if (payload->size < sizeof(stream_info)) { |
1388 | ERR("Unexpected payload size in \"relay_close_stream\": expected >= %zu bytes, got %zu bytes", | |
1389 | sizeof(stream_info), payload->size); | |
173af62f DG |
1390 | ret = -1; |
1391 | goto end_no_session; | |
1392 | } | |
5312a3ed JG |
1393 | memcpy(&stream_info, payload->data, sizeof(stream_info)); |
1394 | stream_info.stream_id = be64toh(stream_info.stream_id); | |
1395 | stream_info.last_net_seq_num = be64toh(stream_info.last_net_seq_num); | |
173af62f | 1396 | |
5312a3ed | 1397 | stream = stream_get_by_id(stream_info.stream_id); |
173af62f DG |
1398 | if (!stream) { |
1399 | ret = -1; | |
7591bab1 | 1400 | goto end; |
173af62f | 1401 | } |
77f7bd85 MD |
1402 | |
1403 | /* | |
1404 | * Set last_net_seq_num before the close flag. Required by data | |
1405 | * pending check. | |
1406 | */ | |
7591bab1 | 1407 | pthread_mutex_lock(&stream->lock); |
5312a3ed | 1408 | stream->last_net_seq_num = stream_info.last_net_seq_num; |
77f7bd85 MD |
1409 | pthread_mutex_unlock(&stream->lock); |
1410 | ||
bda7c7b9 JG |
1411 | /* |
1412 | * This is one of the conditions which may trigger a stream close | |
1413 | * with the others being: | |
1414 | * 1) A close command is received for a stream | |
1415 | * 2) The control connection owning the stream is closed | |
1416 | * 3) We have received all of the stream's data _after_ a close | |
1417 | * request. | |
1418 | */ | |
1419 | try_stream_close(stream); | |
7591bab1 MD |
1420 | if (stream->is_metadata) { |
1421 | struct relay_viewer_stream *vstream; | |
173af62f | 1422 | |
7591bab1 MD |
1423 | vstream = viewer_stream_get_by_id(stream->stream_handle); |
1424 | if (vstream) { | |
1425 | if (vstream->metadata_sent == stream->metadata_received) { | |
1426 | /* | |
1427 | * Since all the metadata has been sent to the | |
1428 | * viewer and that we have a request to close | |
1429 | * its stream, we can safely teardown the | |
1430 | * corresponding metadata viewer stream. | |
1431 | */ | |
1432 | viewer_stream_put(vstream); | |
1433 | } | |
1434 | /* Put local reference. */ | |
1435 | viewer_stream_put(vstream); | |
1436 | } | |
1437 | } | |
7591bab1 | 1438 | stream_put(stream); |
5312a3ed | 1439 | ret = 0; |
173af62f | 1440 | |
7591bab1 | 1441 | end: |
53efb85a | 1442 | memset(&reply, 0, sizeof(reply)); |
173af62f | 1443 | if (ret < 0) { |
f73fabfd | 1444 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
173af62f | 1445 | } else { |
f73fabfd | 1446 | reply.ret_code = htobe32(LTTNG_OK); |
173af62f | 1447 | } |
58eb9381 | 1448 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
173af62f | 1449 | sizeof(struct lttcomm_relayd_generic_reply), 0); |
5312a3ed JG |
1450 | if (send_ret < (ssize_t) sizeof(reply)) { |
1451 | ERR("Failed to send \"close stream\" command reply (ret = %zd)", | |
1452 | send_ret); | |
1453 | ret = -1; | |
173af62f DG |
1454 | } |
1455 | ||
1456 | end_no_session: | |
1457 | return ret; | |
1458 | } | |
1459 | ||
93ec662e JD |
1460 | /* |
1461 | * relay_reset_metadata: reset a metadata stream | |
1462 | */ | |
1463 | static | |
5312a3ed JG |
1464 | int relay_reset_metadata(const struct lttcomm_relayd_hdr *recv_hdr, |
1465 | struct relay_connection *conn, | |
1466 | const struct lttng_buffer_view *payload) | |
93ec662e | 1467 | { |
5312a3ed JG |
1468 | int ret; |
1469 | ssize_t send_ret; | |
93ec662e JD |
1470 | struct relay_session *session = conn->session; |
1471 | struct lttcomm_relayd_reset_metadata stream_info; | |
1472 | struct lttcomm_relayd_generic_reply reply; | |
1473 | struct relay_stream *stream; | |
1474 | ||
1475 | DBG("Reset metadata received"); | |
1476 | ||
5312a3ed | 1477 | if (!session || !conn->version_check_done) { |
93ec662e JD |
1478 | ERR("Trying to reset a metadata stream before version check"); |
1479 | ret = -1; | |
1480 | goto end_no_session; | |
1481 | } | |
1482 | ||
5312a3ed JG |
1483 | if (payload->size < sizeof(stream_info)) { |
1484 | ERR("Unexpected payload size in \"relay_reset_metadata\": expected >= %zu bytes, got %zu bytes", | |
1485 | sizeof(stream_info), payload->size); | |
93ec662e JD |
1486 | ret = -1; |
1487 | goto end_no_session; | |
1488 | } | |
5312a3ed JG |
1489 | memcpy(&stream_info, payload->data, sizeof(stream_info)); |
1490 | stream_info.stream_id = be64toh(stream_info.stream_id); | |
1491 | stream_info.version = be64toh(stream_info.version); | |
1492 | ||
1493 | DBG("Update metadata to version %" PRIu64, stream_info.version); | |
93ec662e JD |
1494 | |
1495 | /* Unsupported for live sessions for now. */ | |
1496 | if (session->live_timer != 0) { | |
1497 | ret = -1; | |
1498 | goto end; | |
1499 | } | |
1500 | ||
5312a3ed | 1501 | stream = stream_get_by_id(stream_info.stream_id); |
93ec662e JD |
1502 | if (!stream) { |
1503 | ret = -1; | |
1504 | goto end; | |
1505 | } | |
1506 | pthread_mutex_lock(&stream->lock); | |
1507 | if (!stream->is_metadata) { | |
1508 | ret = -1; | |
1509 | goto end_unlock; | |
1510 | } | |
1511 | ||
c35f9726 | 1512 | ret = stream_reset_file(stream); |
93ec662e | 1513 | if (ret < 0) { |
c35f9726 JG |
1514 | ERR("Failed to reset metadata stream %" PRIu64 |
1515 | ": stream_path = %s, channel = %s", | |
1516 | stream->stream_handle, stream->path_name, | |
1517 | stream->channel_name); | |
93ec662e JD |
1518 | goto end_unlock; |
1519 | } | |
93ec662e JD |
1520 | end_unlock: |
1521 | pthread_mutex_unlock(&stream->lock); | |
1522 | stream_put(stream); | |
1523 | ||
1524 | end: | |
1525 | memset(&reply, 0, sizeof(reply)); | |
1526 | if (ret < 0) { | |
1527 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
1528 | } else { | |
1529 | reply.ret_code = htobe32(LTTNG_OK); | |
1530 | } | |
1531 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, | |
1532 | sizeof(struct lttcomm_relayd_generic_reply), 0); | |
5312a3ed JG |
1533 | if (send_ret < (ssize_t) sizeof(reply)) { |
1534 | ERR("Failed to send \"reset metadata\" command reply (ret = %zd)", | |
1535 | send_ret); | |
1536 | ret = -1; | |
93ec662e JD |
1537 | } |
1538 | ||
1539 | end_no_session: | |
1540 | return ret; | |
1541 | } | |
1542 | ||
b8aa1682 JD |
1543 | /* |
1544 | * relay_unknown_command: send -1 if received unknown command | |
1545 | */ | |
7591bab1 | 1546 | static void relay_unknown_command(struct relay_connection *conn) |
b8aa1682 JD |
1547 | { |
1548 | struct lttcomm_relayd_generic_reply reply; | |
5312a3ed | 1549 | ssize_t send_ret; |
b8aa1682 | 1550 | |
53efb85a | 1551 | memset(&reply, 0, sizeof(reply)); |
f73fabfd | 1552 | reply.ret_code = htobe32(LTTNG_ERR_UNK); |
5312a3ed JG |
1553 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1554 | if (send_ret < (ssize_t) sizeof(reply)) {
1555 | ERR("Failed to send \"unknown command\" command reply (ret = %zd)", send_ret); | |
b8aa1682 JD |
1556 | } |
1557 | } | |
1558 | ||
1559 | /* | |
1560 | * relay_start: send an acknowledgment to the client to tell if we are | |
1561 | * ready to receive data. We are ready if a session is established. | |
1562 | */ | |
5312a3ed JG |
1563 | static int relay_start(const struct lttcomm_relayd_hdr *recv_hdr, |
1564 | struct relay_connection *conn, | |
1565 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1566 | { |
5312a3ed JG |
1567 | int ret = 0; |
1568 | ssize_t send_ret; | |
b8aa1682 | 1569 | struct lttcomm_relayd_generic_reply reply; |
58eb9381 | 1570 | struct relay_session *session = conn->session; |
b8aa1682 JD |
1571 | |
1572 | if (!session) { | |
1573 | DBG("Trying to start the streaming without a session established"); | |
f73fabfd | 1574 | ret = htobe32(LTTNG_ERR_UNK); |
b8aa1682 JD |
1575 | } |
1576 | ||
53efb85a | 1577 | memset(&reply, 0, sizeof(reply)); |
5312a3ed JG |
1578 | reply.ret_code = htobe32(LTTNG_OK); |
1579 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, | |
1580 | sizeof(reply), 0); | |
1581 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1582 | ERR("Failed to send \"relay_start\" command reply (ret = %zd)", | |
1583 | send_ret); | |
1584 | ret = -1; | |
b8aa1682 JD |
1585 | } |
1586 | ||
1587 | return ret; | |
1588 | } | |
1589 | ||
b8aa1682 | 1590 | /* |
7591bab1 | 1591 | * relay_recv_metadata: receive the metadata for the session. |
b8aa1682 | 1592 | */ |
5312a3ed JG |
1593 | static int relay_recv_metadata(const struct lttcomm_relayd_hdr *recv_hdr, |
1594 | struct relay_connection *conn, | |
1595 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1596 | { |
32d1569c | 1597 | int ret = 0; |
58eb9381 | 1598 | struct relay_session *session = conn->session; |
5312a3ed | 1599 | struct lttcomm_relayd_metadata_payload metadata_payload_header; |
b8aa1682 | 1600 | struct relay_stream *metadata_stream; |
5312a3ed | 1601 | uint64_t metadata_payload_size; |
c35f9726 | 1602 | struct lttng_buffer_view packet_view; |
b8aa1682 JD |
1603 | |
1604 | if (!session) { | |
1605 | ERR("Metadata sent before version check"); | |
1606 | ret = -1; | |
1607 | goto end; | |
1608 | } | |
1609 | ||
5312a3ed | 1610 | if (recv_hdr->data_size < sizeof(struct lttcomm_relayd_metadata_payload)) { |
f6416125 MD |
1611 | ERR("Unexpected data size in \"relay_recv_metadata\": expected >= %zu bytes, got %" PRIu64 " bytes", sizeof(struct lttcomm_relayd_metadata_payload), recv_hdr->data_size); | |
1612 | ret = -1; | |
1613 | goto end; | |
1614 | } | |
5312a3ed JG |
1615 | metadata_payload_size = recv_hdr->data_size - |
1616 | sizeof(struct lttcomm_relayd_metadata_payload); | |
f6416125 | 1617 | |
5312a3ed JG |
1618 | memcpy(&metadata_payload_header, payload->data, |
1619 | sizeof(metadata_payload_header)); | |
1620 | metadata_payload_header.stream_id = be64toh( | |
1621 | metadata_payload_header.stream_id); | |
1622 | metadata_payload_header.padding_size = be32toh( | |
1623 | metadata_payload_header.padding_size); | |
9d1bbf21 | 1624 | |
5312a3ed | 1625 | metadata_stream = stream_get_by_id(metadata_payload_header.stream_id); |
b8aa1682 JD |
1626 | if (!metadata_stream) { |
1627 | ret = -1; | |
7591bab1 | 1628 | goto end; |
b8aa1682 JD |
1629 | } |
1630 | ||
c35f9726 JG |
1631 | packet_view = lttng_buffer_view_from_view(payload, |
1632 | sizeof(metadata_payload_header), metadata_payload_size); | |
1633 | if (!packet_view.data) { | |
1634 | ERR("Invalid metadata packet length announced by header"); | |
b8aa1682 | 1635 | ret = -1; |
7591bab1 | 1636 | goto end_put; |
b8aa1682 | 1637 | } |
1d4dfdef | 1638 | |
c35f9726 JG |
1639 | pthread_mutex_lock(&metadata_stream->lock); |
1640 | ret = stream_write(metadata_stream, &packet_view, | |
5312a3ed | 1641 | metadata_payload_header.padding_size); |
c35f9726 JG |
1642 | pthread_mutex_unlock(&metadata_stream->lock); |
1643 | if (ret) { | |
5312a3ed | 1644 | ret = -1; |
7591bab1 | 1645 | goto end_put; |
1d4dfdef | 1646 | } |
7591bab1 | 1647 | end_put: |
7591bab1 | 1648 | stream_put(metadata_stream); |
b8aa1682 JD |
1649 | end: |
1650 | return ret; | |
1651 | } | |
1652 | ||
1653 | /* | |
1654 | * relay_send_version: send relayd version number | |
1655 | */ | |
5312a3ed JG |
1656 | static int relay_send_version(const struct lttcomm_relayd_hdr *recv_hdr, |
1657 | struct relay_connection *conn, | |
1658 | const struct lttng_buffer_view *payload) | |
b8aa1682 | 1659 | { |
7f51dcba | 1660 | int ret; |
5312a3ed | 1661 | ssize_t send_ret; |
092b6259 | 1662 | struct lttcomm_relayd_version reply, msg; |
87cb6359 | 1663 | bool compatible = true; |
b8aa1682 | 1664 | |
5312a3ed | 1665 | conn->version_check_done = true; |
b8aa1682 | 1666 | |
092b6259 | 1667 | /* Get version from the other side. */ |
5312a3ed JG |
1668 | if (payload->size < sizeof(msg)) { |
1669 | ERR("Unexpected payload size in \"relay_send_version\": expected >= %zu bytes, got %zu bytes", | |
1670 | sizeof(msg), payload->size); | |
092b6259 | 1671 | ret = -1; |
092b6259 DG |
1672 | goto end; |
1673 | } | |
1674 | ||
5312a3ed JG |
1675 | memcpy(&msg, payload->data, sizeof(msg)); |
1676 | msg.major = be32toh(msg.major); | |
1677 | msg.minor = be32toh(msg.minor); | |
1678 | ||
53efb85a | 1679 | memset(&reply, 0, sizeof(reply)); |
d83a952c MD |
1680 | reply.major = RELAYD_VERSION_COMM_MAJOR; |
1681 | reply.minor = RELAYD_VERSION_COMM_MINOR; | |
d4519fa3 JD |
1682 | |
1683 | /* Major versions must be the same */ | |
5312a3ed | 1684 | if (reply.major != msg.major) { |
6151a90f | 1685 | DBG("Incompatible major versions (%u vs %u), deleting session", |
5312a3ed | 1686 | reply.major, msg.major); |
87cb6359 | 1687 | compatible = false; |
d4519fa3 JD |
1688 | } |
1689 | ||
58eb9381 | 1690 | conn->major = reply.major; |
0f907de1 | 1691 | /* We adapt to the lowest compatible version */ |
5312a3ed | 1692 | if (reply.minor <= msg.minor) { |
58eb9381 | 1693 | conn->minor = reply.minor; |
0f907de1 | 1694 | } else { |
5312a3ed | 1695 | conn->minor = msg.minor; |
0f907de1 JD |
1696 | } |
1697 | ||
6151a90f JD |
1698 | reply.major = htobe32(reply.major); |
1699 | reply.minor = htobe32(reply.minor); | |
5312a3ed JG |
1700 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
1701 | sizeof(reply), 0); | |
1702 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1703 | ERR("Failed to send \"send version\" command reply (ret = %zd)", | |
1704 | send_ret); | |
1705 | ret = -1; | |
1706 | goto end; | |
1707 | } else { | |
1708 | ret = 0; | |
6151a90f JD |
1709 | } |
1710 | ||
87cb6359 JD |
1711 | if (!compatible) { |
1712 | ret = -1; | |
1713 | goto end; | |
1714 | } | |
1715 | ||
58eb9381 DG |
1716 | DBG("Version check done using protocol %u.%u", conn->major, |
1717 | conn->minor); | |
b8aa1682 JD |
1718 | |
1719 | end: | |
1720 | return ret; | |
1721 | } | |
1722 | ||
c8f59ee5 | 1723 | /* |
6d805429 | 1724 | * Check for data pending for a given stream id from the session daemon. |
c8f59ee5 | 1725 | */ |
5312a3ed JG |
1726 | static int relay_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
1727 | struct relay_connection *conn, | |
1728 | const struct lttng_buffer_view *payload) | |
c8f59ee5 | 1729 | { |
58eb9381 | 1730 | struct relay_session *session = conn->session; |
6d805429 | 1731 | struct lttcomm_relayd_data_pending msg; |
c8f59ee5 DG |
1732 | struct lttcomm_relayd_generic_reply reply; |
1733 | struct relay_stream *stream; | |
5312a3ed | 1734 | ssize_t send_ret; |
c8f59ee5 | 1735 | int ret; |
298a25ca | 1736 | uint64_t stream_seq; |
c8f59ee5 | 1737 | |
6d805429 | 1738 | DBG("Data pending command received"); |
c8f59ee5 | 1739 | |
5312a3ed | 1740 | if (!session || !conn->version_check_done) { |
c8f59ee5 DG |
1741 | ERR("Trying to check for data before version check"); |
1742 | ret = -1; | |
1743 | goto end_no_session; | |
1744 | } | |
1745 | ||
5312a3ed JG |
1746 | if (payload->size < sizeof(msg)) { |
1747 | ERR("Unexpected payload size in \"relay_data_pending\": expected >= %zu bytes, got %zu bytes", | |
1748 | sizeof(msg), payload->size); | |
c8f59ee5 DG |
1749 | ret = -1; |
1750 | goto end_no_session; | |
1751 | } | |
5312a3ed JG |
1752 | memcpy(&msg, payload->data, sizeof(msg)); |
1753 | msg.stream_id = be64toh(msg.stream_id); | |
1754 | msg.last_net_seq_num = be64toh(msg.last_net_seq_num); | |
c8f59ee5 | 1755 | |
5312a3ed | 1756 | stream = stream_get_by_id(msg.stream_id); |
de91f48a | 1757 | if (stream == NULL) { |
c8f59ee5 | 1758 | ret = -1; |
7591bab1 | 1759 | goto end; |
c8f59ee5 DG |
1760 | } |
1761 | ||
7591bab1 MD |
1762 | pthread_mutex_lock(&stream->lock); |
1763 | ||
298a25ca JG |
1764 | if (session_streams_have_index(session)) { |
1765 | /* | |
1766 | * Ensure that both the index and stream data have been | |
1767 | * flushed up to the requested point. | |
1768 | */ | |
a8f9f353 | 1769 | stream_seq = min(stream->prev_data_seq, stream->prev_index_seq); |
298a25ca | 1770 | } else { |
a8f9f353 | 1771 | stream_seq = stream->prev_data_seq; |
298a25ca | 1772 | } |
a8f9f353 | 1773 | DBG("Data pending for stream id %" PRIu64 ": prev_data_seq %" PRIu64 |
298a25ca JG |
1774 | ", prev_index_seq %" PRIu64 |
1775 | ", and last_seq %" PRIu64, msg.stream_id, | |
a8f9f353 | 1776 | stream->prev_data_seq, stream->prev_index_seq, |
298a25ca | 1777 | msg.last_net_seq_num); |
c8f59ee5 | 1778 | |
33832e64 | 1779 | /* Avoid wrapping issue */ |
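	/*
	 * Clarifying note (added, not part of the upstream comment): the
	 * unsigned subtraction followed by the cast to int64_t keeps this
	 * comparison correct across a 64-bit sequence number wrap-around.
	 * For example, stream_seq = 2 and last_net_seq_num = UINT64_MAX give
	 * a difference of 3 modulo 2^64, which is still >= 0 as an int64_t.
	 */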
298a25ca | 1780 | if (((int64_t) (stream_seq - msg.last_net_seq_num)) >= 0) { |
6d805429 | 1781 | /* Data has in fact been written and is NOT pending */ |
c8f59ee5 | 1782 | ret = 0; |
6d805429 DG |
1783 | } else { |
1784 | /* Data still being streamed thus pending */ | |
1785 | ret = 1; | |
c8f59ee5 DG |
1786 | } |
1787 | ||
7591bab1 MD |
1788 | stream->data_pending_check_done = true; |
1789 | pthread_mutex_unlock(&stream->lock); | |
f7079f67 | 1790 | |
7591bab1 MD |
1791 | stream_put(stream); |
1792 | end: | |
c8f59ee5 | 1793 | |
53efb85a | 1794 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 1795 | reply.ret_code = htobe32(ret); |
5312a3ed JG |
1796 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1797 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1798 | ERR("Failed to send \"data pending\" command reply (ret = %zd)", | |
1799 | send_ret); | |
1800 | ret = -1; | |
c8f59ee5 DG |
1801 | } |
1802 | ||
1803 | end_no_session: | |
1804 | return ret; | |
1805 | } | |
1806 | ||
1807 | /* | |
1808 | * Wait for the control socket to reach a quiescent state. | |
1809 | * | |
7591bab1 MD |
1810 | * Note that for now, when receiving this command from the session |
1811 | * daemon, it means that every command and all data received before it on | |
1812 | * the control socket have already been handled, which is why we simply return | |
1813 | * OK here. | |
c8f59ee5 | 1814 | */ |
5312a3ed JG |
1815 | static int relay_quiescent_control(const struct lttcomm_relayd_hdr *recv_hdr, |
1816 | struct relay_connection *conn, | |
1817 | const struct lttng_buffer_view *payload) | |
c8f59ee5 DG |
1818 | { |
1819 | int ret; | |
5312a3ed | 1820 | ssize_t send_ret; |
ad7051c0 | 1821 | struct relay_stream *stream; |
ad7051c0 | 1822 | struct lttcomm_relayd_quiescent_control msg; |
c8f59ee5 DG |
1823 | struct lttcomm_relayd_generic_reply reply; |
1824 | ||
1825 | DBG("Checking quiescent state on control socket"); | |
1826 | ||
5312a3ed | 1827 | if (!conn->session || !conn->version_check_done) { |
ad7051c0 DG |
1828 | ERR("Trying to check for data before version check"); |
1829 | ret = -1; | |
1830 | goto end_no_session; | |
1831 | } | |
1832 | ||
5312a3ed JG |
1833 | if (payload->size < sizeof(msg)) { |
1834 | ERR("Unexpected payload size in \"relay_quiescent_control\": expected >= %zu bytes, got %zu bytes", | |
1835 | sizeof(msg), payload->size); | |
ad7051c0 DG |
1836 | ret = -1; |
1837 | goto end_no_session; | |
1838 | } | |
5312a3ed JG |
1839 | memcpy(&msg, payload->data, sizeof(msg)); |
1840 | msg.stream_id = be64toh(msg.stream_id); | |
ad7051c0 | 1841 | |
5312a3ed | 1842 | stream = stream_get_by_id(msg.stream_id); |
7591bab1 MD |
1843 | if (!stream) { |
1844 | goto reply; | |
1845 | } | |
1846 | pthread_mutex_lock(&stream->lock); | |
1847 | stream->data_pending_check_done = true; | |
1848 | pthread_mutex_unlock(&stream->lock); | |
5312a3ed JG |
1849 | |
1850 | DBG("Relay quiescent control pending flag set to %" PRIu64, msg.stream_id); | |
7591bab1 MD |
1851 | stream_put(stream); |
1852 | reply: | |
53efb85a | 1853 | memset(&reply, 0, sizeof(reply)); |
c8f59ee5 | 1854 | reply.ret_code = htobe32(LTTNG_OK); |
5312a3ed JG |
1855 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1856 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1857 | ERR("Failed to send \"quiescent control\" command reply (ret = %zd)", | |
1858 | send_ret); | |
1859 | ret = -1; | |
1860 | } else { | |
1861 | ret = 0; | |
c8f59ee5 DG |
1862 | } |
1863 | ||
ad7051c0 | 1864 | end_no_session: |
c8f59ee5 DG |
1865 | return ret; |
1866 | } | |
1867 | ||
f7079f67 | 1868 | /* |
7591bab1 MD |
1869 | * Initialize a data pending command. This means that a consumer is about |
1870 | * to ask for data pending for each stream it holds. Simply iterate over | |
1871 | * all streams of a session and set the data_pending_check_done flag. | |
f7079f67 DG |
1872 | * |
1873 | * This command returns to the client a LTTNG_OK code. | |
1874 | */ | |
5312a3ed JG |
1875 | static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
1876 | struct relay_connection *conn, | |
1877 | const struct lttng_buffer_view *payload) | |
f7079f67 DG |
1878 | { |
1879 | int ret; | |
5312a3ed | 1880 | ssize_t send_ret; |
f7079f67 DG |
1881 | struct lttng_ht_iter iter; |
1882 | struct lttcomm_relayd_begin_data_pending msg; | |
1883 | struct lttcomm_relayd_generic_reply reply; | |
1884 | struct relay_stream *stream; | |
f7079f67 DG |
1885 | |
1886 | assert(recv_hdr); | |
58eb9381 | 1887 | assert(conn); |
f7079f67 DG |
1888 | |
1889 | DBG("Init streams for data pending"); | |
1890 | ||
5312a3ed | 1891 | if (!conn->session || !conn->version_check_done) { |
f7079f67 DG |
1892 | ERR("Trying to check for data before version check"); |
1893 | ret = -1; | |
1894 | goto end_no_session; | |
1895 | } | |
1896 | ||
5312a3ed JG |
1897 | if (payload->size < sizeof(msg)) { |
1898 | ERR("Unexpected payload size in \"relay_begin_data_pending\": expected >= %zu bytes, got %zu bytes", | |
1899 | sizeof(msg), payload->size); | |
f7079f67 DG |
1900 | ret = -1; |
1901 | goto end_no_session; | |
1902 | } | |
5312a3ed JG |
1903 | memcpy(&msg, payload->data, sizeof(msg)); |
1904 | msg.session_id = be64toh(msg.session_id); | |
f7079f67 DG |
1905 | |
1906 | /* | |
7591bab1 MD |
1907 | * Iterate over all streams to set the begin data pending flag. |
1908 | * For now, the streams are indexed by stream handle so we have | |
1909 | * to iterate over all streams to find the one associated with | |
1910 | * the right session_id. | |
f7079f67 DG |
1911 | */ |
1912 | rcu_read_lock(); | |
d3e2ba59 | 1913 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
2a174661 | 1914 | node.node) { |
7591bab1 MD |
1915 | if (!stream_get(stream)) { |
1916 | continue; | |
1917 | } | |
5312a3ed | 1918 | if (stream->trace->session->id == msg.session_id) { |
7591bab1 MD |
1919 | pthread_mutex_lock(&stream->lock); |
1920 | stream->data_pending_check_done = false; | |
1921 | pthread_mutex_unlock(&stream->lock); | |
f7079f67 DG |
1922 | DBG("Set begin data pending flag to stream %" PRIu64, |
1923 | stream->stream_handle); | |
1924 | } | |
7591bab1 | 1925 | stream_put(stream); |
f7079f67 DG |
1926 | } |
1927 | rcu_read_unlock(); | |
1928 | ||
53efb85a | 1929 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
1930 | /* All good, send back reply. */ |
1931 | reply.ret_code = htobe32(LTTNG_OK); | |
1932 | ||
5312a3ed JG |
1933 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
1934 | if (send_ret < (ssize_t) sizeof(reply)) { | |
1935 | ERR("Failed to send \"begin data pending\" command reply (ret = %zd)", | |
1936 | send_ret); | |
1937 | ret = -1; | |
1938 | } else { | |
1939 | ret = 0; | |
f7079f67 DG |
1940 | } |
1941 | ||
1942 | end_no_session: | |
1943 | return ret; | |
1944 | } | |
1945 | ||
1946 | /* | |
7591bab1 MD |
1947 | * End data pending command. This will check, for a given session id, if |
1948 | * each stream associated with it has its data_pending_check_done flag | |
1949 | * set. If not, this means that the client lost track of the stream but | |
1950 | * the data is still being streamed on our side. In this case, we inform | |
1951 | * the client that data is in flight. | |
f7079f67 DG |
1952 | * |
1953 | * Return to the client if there is data in flight or not with a ret_code. | |
1954 | */ | |
5312a3ed JG |
1955 | static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr, |
1956 | struct relay_connection *conn, | |
1957 | const struct lttng_buffer_view *payload) | |
f7079f67 DG |
1958 | { |
1959 | int ret; | |
5312a3ed | 1960 | ssize_t send_ret; |
f7079f67 DG |
1961 | struct lttng_ht_iter iter; |
1962 | struct lttcomm_relayd_end_data_pending msg; | |
1963 | struct lttcomm_relayd_generic_reply reply; | |
1964 | struct relay_stream *stream; | |
f7079f67 DG |
1965 | uint32_t is_data_inflight = 0; |
1966 | ||
f7079f67 DG |
1967 | DBG("End data pending command"); |
1968 | ||
5312a3ed | 1969 | if (!conn->session || !conn->version_check_done) { |
f7079f67 DG |
1970 | ERR("Trying to check for data before version check"); |
1971 | ret = -1; | |
1972 | goto end_no_session; | |
1973 | } | |
1974 | ||
5312a3ed JG |
1975 | if (payload->size < sizeof(msg)) { |
1976 | ERR("Unexpected payload size in \"relay_end_data_pending\": expected >= %zu bytes, got %zu bytes", | |
1977 | sizeof(msg), payload->size); | |
f7079f67 DG |
1978 | ret = -1; |
1979 | goto end_no_session; | |
1980 | } | |
5312a3ed JG |
1981 | memcpy(&msg, payload->data, sizeof(msg)); |
1982 | msg.session_id = be64toh(msg.session_id); | |
f7079f67 | 1983 | |
7591bab1 MD |
1984 | /* |
1985 | * Iterate over all streams to see if the begin data pending | |
1986 | * flag is set. | |
1987 | */ | |
f7079f67 | 1988 | rcu_read_lock(); |
d3e2ba59 | 1989 | cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream, |
2a174661 | 1990 | node.node) { |
7591bab1 MD |
1991 | if (!stream_get(stream)) { |
1992 | continue; | |
1993 | } | |
5312a3ed | 1994 | if (stream->trace->session->id != msg.session_id) { |
7591bab1 MD |
1995 | stream_put(stream); |
1996 | continue; | |
1997 | } | |
1998 | pthread_mutex_lock(&stream->lock); | |
1999 | if (!stream->data_pending_check_done) { | |
298a25ca JG |
2000 | uint64_t stream_seq; |
2001 | ||
2002 | if (session_streams_have_index(conn->session)) { | |
2003 | /* | |
2004 | * Ensure that both the index and stream data have been | |
2005 | * flushed up to the requested point. | |
2006 | */ | |
a8f9f353 | 2007 | stream_seq = min(stream->prev_data_seq, stream->prev_index_seq); |
298a25ca | 2008 | } else { |
a8f9f353 | 2009 | stream_seq = stream->prev_data_seq; |
298a25ca JG |
2010 | } |
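	/*
	 * Added note: data is considered "in flight" when the stream has not
	 * been closed yet, or when the last flushed position (stream_seq) has
	 * not caught up with last_net_seq_num. The int64_t cast makes the
	 * comparison wrap-around safe, as in relay_data_pending() above.
	 */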
2011 | if (!stream->closed || !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) { | |
7591bab1 MD |
2012 | is_data_inflight = 1; |
2013 | DBG("Data is still in flight for stream %" PRIu64, | |
2014 | stream->stream_handle); | |
2015 | pthread_mutex_unlock(&stream->lock); | |
2016 | stream_put(stream); | |
2017 | break; | |
2018 | } | |
f7079f67 | 2019 | } |
7591bab1 MD |
2020 | pthread_mutex_unlock(&stream->lock); |
2021 | stream_put(stream); | |
f7079f67 DG |
2022 | } |
2023 | rcu_read_unlock(); | |
2024 | ||
53efb85a | 2025 | memset(&reply, 0, sizeof(reply)); |
f7079f67 DG |
2026 | /* All good, send back reply. */ |
2027 | reply.ret_code = htobe32(is_data_inflight); | |
2028 | ||
5312a3ed JG |
2029 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
2030 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2031 | ERR("Failed to send \"end data pending\" command reply (ret = %zd)", | |
2032 | send_ret); | |
2033 | ret = -1; | |
2034 | } else { | |
2035 | ret = 0; | |
f7079f67 DG |
2036 | } |
2037 | ||
2038 | end_no_session: | |
2039 | return ret; | |
2040 | } | |
2041 | ||
1c20f0e2 JD |
2042 | /* |
2043 | * Receive an index for a specific stream. | |
2044 | * | |
2045 | * Return 0 on success else a negative value. | |
2046 | */ | |
5312a3ed JG |
2047 | static int relay_recv_index(const struct lttcomm_relayd_hdr *recv_hdr, |
2048 | struct relay_connection *conn, | |
2049 | const struct lttng_buffer_view *payload) | |
1c20f0e2 | 2050 | { |
5312a3ed JG |
2051 | int ret; |
2052 | ssize_t send_ret; | |
58eb9381 | 2053 | struct relay_session *session = conn->session; |
1c20f0e2 | 2054 | struct lttcomm_relayd_index index_info; |
1c20f0e2 JD |
2055 | struct lttcomm_relayd_generic_reply reply; |
2056 | struct relay_stream *stream; | |
f8f3885c | 2057 | size_t msg_len; |
1c20f0e2 | 2058 | |
58eb9381 | 2059 | assert(conn); |
1c20f0e2 JD |
2060 | |
2061 | DBG("Relay receiving index"); | |
2062 | ||
5312a3ed | 2063 | if (!session || !conn->version_check_done) { |
1c20f0e2 JD |
2064 | ERR("Trying to receive an index before version check"); | |
2065 | ret = -1; | |
2066 | goto end_no_session; | |
2067 | } | |
2068 | ||
f8f3885c MD |
2069 | msg_len = lttcomm_relayd_index_len( |
2070 | lttng_to_index_major(conn->major, conn->minor), | |
2071 | lttng_to_index_minor(conn->major, conn->minor)); | |
5312a3ed JG |
2072 | if (payload->size < msg_len) { |
2073 | ERR("Unexpected payload size in \"relay_recv_index\": expected >= %zu bytes, got %zu bytes", | |
2074 | msg_len, payload->size); | |
1c20f0e2 JD |
2075 | ret = -1; |
2076 | goto end_no_session; | |
2077 | } | |
5312a3ed JG |
2078 | memcpy(&index_info, payload->data, msg_len); |
2079 | index_info.relay_stream_id = be64toh(index_info.relay_stream_id); | |
2080 | index_info.net_seq_num = be64toh(index_info.net_seq_num); | |
2081 | index_info.packet_size = be64toh(index_info.packet_size); | |
2082 | index_info.content_size = be64toh(index_info.content_size); | |
2083 | index_info.timestamp_begin = be64toh(index_info.timestamp_begin); | |
2084 | index_info.timestamp_end = be64toh(index_info.timestamp_end); | |
2085 | index_info.events_discarded = be64toh(index_info.events_discarded); | |
2086 | index_info.stream_id = be64toh(index_info.stream_id); | |
81df238b JR |
2087 | |
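	/*
	 * Added note: stream_instance_id and packet_seq_num were introduced
	 * with the 2.8 relay protocol. msg_len, computed above from the
	 * negotiated version, only covers these fields when the peer speaks
	 * 2.8 or later, so they are only byte-swapped in that case.
	 */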
2088 | if (conn->minor >= 8) { | |
2089 | index_info.stream_instance_id = | |
2090 | be64toh(index_info.stream_instance_id); | |
2091 | index_info.packet_seq_num = be64toh(index_info.packet_seq_num); | |
2092 | } | |
5312a3ed JG |
2093 | |
2094 | stream = stream_get_by_id(index_info.relay_stream_id); | |
1c20f0e2 | 2095 | if (!stream) { |
7591bab1 | 2096 | ERR("Failed to find stream %" PRIu64 " referenced by received index", index_info.relay_stream_id); | |
1c20f0e2 | 2097 | ret = -1; |
7591bab1 | 2098 | goto end; |
1c20f0e2 | 2099 | } |
d3e2ba59 | 2100 | |
c35f9726 JG |
2101 | pthread_mutex_lock(&stream->lock); |
2102 | ret = stream_add_index(stream, &index_info); | |
2103 | pthread_mutex_unlock(&stream->lock); | |
2104 | if (ret) { | |
7591bab1 MD |
2105 | goto end_stream_put; |
2106 | } | |
1c20f0e2 | 2107 | |
7591bab1 | 2108 | end_stream_put: |
7591bab1 | 2109 | stream_put(stream); |
7591bab1 | 2110 | end: |
53efb85a | 2111 | memset(&reply, 0, sizeof(reply)); |
1c20f0e2 JD |
2112 | if (ret < 0) { |
2113 | reply.ret_code = htobe32(LTTNG_ERR_UNK); | |
2114 | } else { | |
2115 | reply.ret_code = htobe32(LTTNG_OK); | |
2116 | } | |
58eb9381 | 2117 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
5312a3ed JG |
2118 | if (send_ret < (ssize_t) sizeof(reply)) { |
2119 | ERR("Failed to send \"recv index\" command reply (ret = %zd)", send_ret); | |
2120 | ret = -1; | |
1c20f0e2 JD |
2121 | } |
2122 | ||
2123 | end_no_session: | |
2124 | return ret; | |
2125 | } | |
2126 | ||
a4baae1b JD |
2127 | /* |
2128 | * Receive the streams_sent message. | |
2129 | * | |
2130 | * Return 0 on success else a negative value. | |
2131 | */ | |
5312a3ed JG |
2132 | static int relay_streams_sent(const struct lttcomm_relayd_hdr *recv_hdr, |
2133 | struct relay_connection *conn, | |
2134 | const struct lttng_buffer_view *payload) | |
a4baae1b | 2135 | { |
5312a3ed JG |
2136 | int ret; |
2137 | ssize_t send_ret; | |
a4baae1b JD |
2138 | struct lttcomm_relayd_generic_reply reply; |
2139 | ||
58eb9381 | 2140 | assert(conn); |
a4baae1b JD |
2141 | |
2142 | DBG("Relay receiving streams_sent"); | |
2143 | ||
5312a3ed | 2144 | if (!conn->session || !conn->version_check_done) { |
a4baae1b JD |
2145 | ERR("Trying to process streams_sent before version check"); | |
2146 | ret = -1; | |
2147 | goto end_no_session; | |
2148 | } | |
2149 | ||
2150 | /* | |
7591bab1 MD |
2151 | * Publish every pending stream in the connection recv list which are |
2152 | * now ready to be used by the viewer. | |
4a9daf17 | 2153 | */ |
7591bab1 | 2154 | publish_connection_local_streams(conn); |
4a9daf17 | 2155 | |
53efb85a | 2156 | memset(&reply, 0, sizeof(reply)); |
a4baae1b | 2157 | reply.ret_code = htobe32(LTTNG_OK); |
58eb9381 | 2158 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0); |
5312a3ed JG |
2159 | if (send_ret < (ssize_t) sizeof(reply)) { |
2160 | ERR("Failed to send \"streams sent\" command reply (ret = %zd)", | |
2161 | send_ret); | |
2162 | ret = -1; | |
a4baae1b JD |
2163 | } else { |
2164 | /* Success. */ | |
2165 | ret = 0; | |
2166 | } | |
2167 | ||
2168 | end_no_session: | |
2169 | return ret; | |
2170 | } | |
2171 | ||
d3ecc550 | 2172 | /* |
c35f9726 JG |
2173 | * relay_rotate_session_streams: rotate a set of streams to a new trace chunk | |
2174 | * for the session rotation feature (not the tracefile rotation feature). | |
d3ecc550 | 2175 | */ |
c35f9726 JG |
2176 | static int relay_rotate_session_streams( |
2177 | const struct lttcomm_relayd_hdr *recv_hdr, | |
5312a3ed JG |
2178 | struct relay_connection *conn, |
2179 | const struct lttng_buffer_view *payload) | |
d3ecc550 | 2180 | { |
30b9d5ab | 2181 | int ret = 0; |
c35f9726 | 2182 | uint32_t i; |
5312a3ed | 2183 | ssize_t send_ret; |
c35f9726 | 2184 | enum lttng_error_code reply_code = LTTNG_ERR_UNK; |
d3ecc550 | 2185 | struct relay_session *session = conn->session; |
c35f9726 JG |
2186 | struct lttcomm_relayd_rotate_streams rotate_streams; |
2187 | struct lttcomm_relayd_generic_reply reply = {}; | |
2188 | struct relay_stream *stream = NULL; | |
2189 | const size_t header_len = sizeof(struct lttcomm_relayd_rotate_streams); | |
2190 | struct lttng_trace_chunk *next_trace_chunk = NULL; | |
2191 | struct lttng_buffer_view stream_positions; | |
70626904 JG |
2192 | char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)]; |
2193 | const char *chunk_id_str = "none"; | |
d3ecc550 | 2194 | |
d3ecc550 JD |
2195 | if (!session || !conn->version_check_done) { |
2196 | ERR("Trying to rotate a stream before version check"); | |
2197 | ret = -1; | |
2198 | goto end_no_reply; | |
2199 | } | |
2200 | ||
2201 | if (session->major == 2 && session->minor < 11) { | |
2202 | ERR("Unsupported feature before 2.11"); | |
2203 | ret = -1; | |
2204 | goto end_no_reply; | |
2205 | } | |
2206 | ||
5312a3ed JG |
2207 | if (payload->size < header_len) { |
2208 | ERR("Unexpected payload size in \"relay_rotate_session_stream\": expected >= %zu bytes, got %zu bytes", | |
2209 | header_len, payload->size); | |
d3ecc550 JD |
2210 | ret = -1; |
2211 | goto end_no_reply; | |
2212 | } | |
2213 | ||
c35f9726 | 2214 | memcpy(&rotate_streams, payload->data, header_len); |
5312a3ed | 2215 | |
c35f9726 JG |
2216 | /* Convert header to host endianness. */ |
2217 | rotate_streams = (typeof(rotate_streams)) { | |
2218 | .stream_count = be32toh(rotate_streams.stream_count), | |
2219 | .new_chunk_id = (typeof(rotate_streams.new_chunk_id)) { | |
2220 | .is_set = !!rotate_streams.new_chunk_id.is_set, | |
2221 | .value = be64toh(rotate_streams.new_chunk_id.value), | |
2222 | } | |
2223 | }; | |
d3ecc550 | 2224 | |
c35f9726 JG |
2225 | if (rotate_streams.new_chunk_id.is_set) { |
2226 | /* | |
2227 | * Retrieve the trace chunk the stream must transition to. As | |
2228 | * per the protocol, this chunk should have been created | |
2229 | * before this command is received. | |
2230 | */ | |
2231 | next_trace_chunk = sessiond_trace_chunk_registry_get_chunk( | |
2232 | sessiond_trace_chunk_registry, | |
2233 | session->sessiond_uuid, session->id, | |
2234 | rotate_streams.new_chunk_id.value); | |
2235 | if (!next_trace_chunk) { | |
2236 | char uuid_str[UUID_STR_LEN]; | |
2237 | ||
2238 | lttng_uuid_to_str(session->sessiond_uuid, uuid_str); | |
2239 | ERR("Unknown next trace chunk in ROTATE_STREAMS command: sessiond_uuid = {%s}, session_id = %" PRIu64 | |
2240 | ", trace_chunk_id = %" PRIu64, | |
2241 | uuid_str, session->id, | |
2242 | rotate_streams.new_chunk_id.value); | |
2243 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2244 | ret = -1; | |
2245 | goto end; | |
2246 | } | |
70626904 JG |
2247 | |
2248 | ret = snprintf(chunk_id_buf, sizeof(chunk_id_buf), "%" PRIu64, | |
2249 | rotate_streams.new_chunk_id.value); | |
2250 | if (ret < 0 || ret >= sizeof(chunk_id_buf)) { | |
2251 | chunk_id_str = "formatting error"; | |
2252 | } else { | |
2253 | chunk_id_str = chunk_id_buf; | |
2254 | } | |
ecd1a12f | 2255 | session->has_rotated = true; |
d3ecc550 JD |
2256 | } |
2257 | ||
70626904 JG |
2258 | DBG("Rotate %" PRIu32 " streams of session \"%s\" to chunk \"%s\"", |
2259 | rotate_streams.stream_count, session->session_name, | |
2260 | chunk_id_str); | |
2261 | ||
c35f9726 JG |
2262 | stream_positions = lttng_buffer_view_from_view(payload, |
2263 | sizeof(rotate_streams), -1); | |
2264 | if (!stream_positions.data || | |
2265 | stream_positions.size < | |
2266 | (rotate_streams.stream_count * | |
2267 | sizeof(struct lttcomm_relayd_stream_rotation_position))) { | |
2268 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
d3ecc550 | 2269 | ret = -1; |
5312a3ed | 2270 | goto end; |
d3ecc550 JD |
2271 | } |
2272 | ||
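	/*
	 * Added note: the payload following the fixed-size header is an array
	 * of 'stream_count' lttcomm_relayd_stream_rotation_position entries.
	 * Each entry is converted from network byte order below and recorded
	 * as a pending rotation of its stream at the given sequence number.
	 */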
c35f9726 JG |
2273 | for (i = 0; i < rotate_streams.stream_count; i++) { |
2274 | struct lttcomm_relayd_stream_rotation_position *position_comm = | |
2275 | &((typeof(position_comm)) stream_positions.data)[i]; | |
2276 | const struct lttcomm_relayd_stream_rotation_position pos = { | |
2277 | .stream_id = be64toh(position_comm->stream_id), | |
2278 | .rotate_at_seq_num = be64toh( | |
2279 | position_comm->rotate_at_seq_num), | |
2280 | }; | |
5312a3ed | 2281 | |
c35f9726 JG |
2282 | stream = stream_get_by_id(pos.stream_id); |
2283 | if (!stream) { | |
2284 | reply_code = LTTNG_ERR_INVALID; | |
2285 | ret = -1; | |
2286 | goto end; | |
c6db3843 JG |
2287 | } |
2288 | ||
c35f9726 JG |
2289 | pthread_mutex_lock(&stream->lock); |
2290 | ret = stream_set_pending_rotation(stream, next_trace_chunk, | |
2291 | pos.rotate_at_seq_num); | |
2292 | pthread_mutex_unlock(&stream->lock); | |
2293 | if (ret) { | |
2294 | reply_code = LTTNG_ERR_FILE_CREATION_ERROR; | |
2295 | goto end; | |
c6db3843 | 2296 | } |
c35f9726 JG |
2297 | |
2298 | stream_put(stream); | |
2299 | stream = NULL; | |
d3ecc550 JD |
2300 | } |
2301 | ||
c35f9726 | 2302 | reply_code = LTTNG_OK; |
eaeb64a9 | 2303 | ret = 0; |
d3ecc550 | 2304 | end: |
c35f9726 JG |
2305 | if (stream) { |
2306 | stream_put(stream); | |
d3ecc550 | 2307 | } |
c35f9726 JG |
2308 | |
2309 | reply.ret_code = htobe32((uint32_t) reply_code); | |
d3ecc550 JD |
2310 | send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, |
2311 | sizeof(struct lttcomm_relayd_generic_reply), 0); | |
5312a3ed JG |
2312 | if (send_ret < (ssize_t) sizeof(reply)) { |
2313 | ERR("Failed to send \"rotate session stream\" command reply (ret = %zd)", | |
2314 | send_ret); | |
2315 | ret = -1; | |
d3ecc550 | 2316 | } |
d3ecc550 | 2317 | end_no_reply: |
c35f9726 | 2318 | lttng_trace_chunk_put(next_trace_chunk); |
d3ecc550 JD |
2319 | return ret; |
2320 | } | |
2321 | ||
e5add6d0 | 2322 | |
e5add6d0 JG |
2323 | |
2324 | /* | |
2325 | * relay_create_trace_chunk: create a new trace chunk | |
2326 | */ | |
2327 | static int relay_create_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr, | |
2328 | struct relay_connection *conn, | |
2329 | const struct lttng_buffer_view *payload) | |
2330 | { | |
2331 | int ret = 0; | |
2332 | ssize_t send_ret; | |
2333 | struct relay_session *session = conn->session; | |
2334 | struct lttcomm_relayd_create_trace_chunk *msg; | |
2335 | struct lttcomm_relayd_generic_reply reply = {}; | |
2336 | struct lttng_buffer_view header_view; | |
2337 | struct lttng_buffer_view chunk_name_view; | |
2338 | struct lttng_trace_chunk *chunk = NULL, *published_chunk = NULL; | |
2339 | enum lttng_error_code reply_code = LTTNG_OK; | |
2340 | enum lttng_trace_chunk_status chunk_status; | |
2341 | struct lttng_directory_handle session_output; | |
2342 | ||
2343 | if (!session || !conn->version_check_done) { | |
2344 | ERR("Trying to create a trace chunk before version check"); | |
2345 | ret = -1; | |
2346 | goto end_no_reply; | |
2347 | } | |
2348 | ||
2349 | if (session->major == 2 && session->minor < 11) { | |
2350 | ERR("Chunk creation command is unsupported before 2.11"); | |
2351 | ret = -1; | |
2352 | goto end_no_reply; | |
2353 | } | |
2354 | ||
2355 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
2356 | if (!header_view.data) { | |
2357 | ERR("Failed to receive payload of chunk creation command"); | |
2358 | ret = -1; | |
2359 | goto end_no_reply; | |
2360 | } | |
2361 | ||
2362 | /* Convert to host endianness. */ | |
2363 | msg = (typeof(msg)) header_view.data; | |
2364 | msg->chunk_id = be64toh(msg->chunk_id); | |
2365 | msg->creation_timestamp = be64toh(msg->creation_timestamp); | |
2366 | msg->override_name_length = be32toh(msg->override_name_length); | |
2367 | ||
2368 | chunk = lttng_trace_chunk_create( | |
2369 | msg->chunk_id, msg->creation_timestamp); | |
2370 | if (!chunk) { | |
2371 | ERR("Failed to create trace chunk in trace chunk creation command"); | |
2372 | ret = -1; | |
2373 | reply_code = LTTNG_ERR_NOMEM; | |
2374 | goto end; | |
2375 | } | |
2376 | ||
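	/*
	 * Added note: when override_name_length is non-zero, the creation
	 * payload carries a null-terminated chunk name of exactly that length
	 * right after the fixed-size header; the checks below reject a name
	 * that is missing or not null-terminated.
	 */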
2377 | if (msg->override_name_length) { | |
2378 | const char *name; | |
2379 | ||
2380 | chunk_name_view = lttng_buffer_view_from_view(payload, | |
2381 | sizeof(*msg), | |
2382 | msg->override_name_length); | |
2383 | name = chunk_name_view.data; | |
2384 | if (!name || name[msg->override_name_length - 1]) { | |
2385 | ERR("Failed to receive payload of chunk creation command"); | |
2386 | ret = -1; | |
2387 | reply_code = LTTNG_ERR_INVALID; | |
2388 | goto end; | |
2389 | } | |
2390 | ||
2391 | chunk_status = lttng_trace_chunk_override_name( | |
2392 | chunk, chunk_name_view.data); | |
2393 | switch (chunk_status) { | |
2394 | case LTTNG_TRACE_CHUNK_STATUS_OK: | |
2395 | break; | |
2396 | case LTTNG_TRACE_CHUNK_STATUS_INVALID_ARGUMENT: | |
2397 | ERR("Failed to set the name of new trace chunk in trace chunk creation command (invalid name)"); | |
2398 | reply_code = LTTNG_ERR_INVALID; | |
2399 | ret = -1; | |
2400 | goto end; | |
2401 | default: | |
2402 | ERR("Failed to set the name of new trace chunk in trace chunk creation command (unknown error)"); | |
2403 | reply_code = LTTNG_ERR_UNK; | |
2404 | ret = -1; | |
2405 | goto end; | |
2406 | } | |
2407 | } | |
2408 | ||
e5add6d0 JG |
2409 | chunk_status = lttng_trace_chunk_set_credentials_current_user(chunk); |
2410 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2411 | reply_code = LTTNG_ERR_UNK; | |
2412 | ret = -1; | |
2413 | goto end; | |
2414 | } | |
2415 | ||
0ccc0411 JG |
2416 | ret = session_init_output_directory_handle( |
2417 | conn->session, &session_output); | |
2418 | if (ret) { | |
2419 | reply_code = LTTNG_ERR_CREATE_DIR_FAIL; | |
2420 | goto end; | |
2421 | } | |
e5add6d0 | 2422 | chunk_status = lttng_trace_chunk_set_as_owner(chunk, &session_output); |
0ccc0411 | 2423 | lttng_directory_handle_fini(&session_output); |
e5add6d0 JG |
2424 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { |
2425 | reply_code = LTTNG_ERR_UNK; | |
2426 | ret = -1; | |
2427 | goto end; | |
2428 | } | |
2429 | ||
2430 | published_chunk = sessiond_trace_chunk_registry_publish_chunk( | |
2431 | sessiond_trace_chunk_registry, | |
2432 | conn->session->sessiond_uuid, | |
2433 | conn->session->id, | |
2434 | chunk); | |
2435 | if (!published_chunk) { | |
2436 | char uuid_str[UUID_STR_LEN]; | |
2437 | ||
2438 | lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str); | |
2439 | ERR("Failed to publish chunk: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64, | |
2440 | uuid_str, | |
2441 | conn->session->id, | |
2442 | msg->chunk_id); | |
2443 | ret = -1; | |
2444 | reply_code = LTTNG_ERR_NOMEM; | |
2445 | goto end; | |
2446 | } | |
2447 | ||
2448 | pthread_mutex_lock(&conn->session->lock); | |
62bad3bf JG |
2449 | if (conn->session->pending_closure_trace_chunk) { |
2450 | /* | |
2451 | * Invalid; this means a second create_trace_chunk command was | |
2452 | * received before a close_trace_chunk. | |
2453 | */ | |
2454 | ERR("Invalid trace chunk creation command received; a trace chunk is already waiting for a trace chunk close command"); | |
2455 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2456 | ret = -1; | |
2457 | goto end_unlock_session; | |
2458 | } | |
2459 | conn->session->pending_closure_trace_chunk = | |
2460 | conn->session->current_trace_chunk; | |
e5add6d0 | 2461 | conn->session->current_trace_chunk = published_chunk; |
e5add6d0 | 2462 | published_chunk = NULL; |
62bad3bf | 2463 | end_unlock_session: |
c35f9726 | 2464 | pthread_mutex_unlock(&conn->session->lock); |
e5add6d0 JG |
2465 | end: |
2466 | reply.ret_code = htobe32((uint32_t) reply_code); | |
2467 | send_ret = conn->sock->ops->sendmsg(conn->sock, | |
2468 | &reply, | |
2469 | sizeof(struct lttcomm_relayd_generic_reply), | |
2470 | 0); | |
2471 | if (send_ret < (ssize_t) sizeof(reply)) { | |
2472 | ERR("Failed to send \"create trace chunk\" command reply (ret = %zd)", | |
2473 | send_ret); | |
2474 | ret = -1; | |
2475 | } | |
2476 | end_no_reply: | |
2477 | lttng_trace_chunk_put(chunk); | |
2478 | lttng_trace_chunk_put(published_chunk); | |
e5add6d0 JG |
2479 | return ret; |
2480 | } | |
2481 | ||
bbc4768c JG |
2482 | /* |
2483 | * relay_close_trace_chunk: close a trace chunk | |
2484 | */ | |
2485 | static int relay_close_trace_chunk(const struct lttcomm_relayd_hdr *recv_hdr, | |
2486 | struct relay_connection *conn, | |
2487 | const struct lttng_buffer_view *payload) | |
2488 | { | |
9898f786 | 2489 | int ret = 0, buf_ret; |
bbc4768c JG |
2490 | ssize_t send_ret; |
2491 | struct relay_session *session = conn->session; | |
2492 | struct lttcomm_relayd_close_trace_chunk *msg; | |
ecd1a12f | 2493 | struct lttcomm_relayd_close_trace_chunk_reply reply = {}; |
bbc4768c JG |
2494 | struct lttng_buffer_view header_view; |
2495 | struct lttng_trace_chunk *chunk = NULL; | |
2496 | enum lttng_error_code reply_code = LTTNG_OK; | |
2497 | enum lttng_trace_chunk_status chunk_status; | |
2498 | uint64_t chunk_id; | |
c35f9726 | 2499 | LTTNG_OPTIONAL(enum lttng_trace_chunk_command_type) close_command = {}; |
bbc4768c | 2500 | time_t close_timestamp; |
ecd1a12f MD |
2501 | char closed_trace_chunk_path[LTTNG_PATH_MAX]; |
2502 | size_t path_length = 0; | |
2503 | const char *chunk_name = NULL; | |
2504 | struct lttng_dynamic_buffer reply_payload; | |
2505 | ||
2506 | lttng_dynamic_buffer_init(&reply_payload); | |
bbc4768c JG |
2507 | |
2508 | if (!session || !conn->version_check_done) { | |
2509 | ERR("Trying to close a trace chunk before version check"); | |
2510 | ret = -1; | |
2511 | goto end_no_reply; | |
2512 | } | |
2513 | ||
2514 | if (session->major == 2 && session->minor < 11) { | |
2515 | ERR("Chunk close command is unsupported before 2.11"); | |
2516 | ret = -1; | |
2517 | goto end_no_reply; | |
2518 | } | |
2519 | ||
2520 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
2521 | if (!header_view.data) { | |
2522 | ERR("Failed to receive payload of chunk close command"); | |
2523 | ret = -1; | |
2524 | goto end_no_reply; | |
2525 | } | |
2526 | ||
2527 | /* Convert to host endianness. */ | |
2528 | msg = (typeof(msg)) header_view.data; | |
2529 | chunk_id = be64toh(msg->chunk_id); | |
2530 | close_timestamp = (time_t) be64toh(msg->close_timestamp); | |
2531 | close_command = (typeof(close_command)){ | |
2532 | .value = be32toh(msg->close_command.value), | |
2533 | .is_set = msg->close_command.is_set, | |
2534 | }; | |
2535 | ||
2536 | chunk = sessiond_trace_chunk_registry_get_chunk( | |
2537 | sessiond_trace_chunk_registry, | |
2538 | conn->session->sessiond_uuid, | |
2539 | conn->session->id, | |
2540 | chunk_id); | |
2541 | if (!chunk) { | |
2542 | char uuid_str[UUID_STR_LEN]; | |
2543 | ||
2544 | lttng_uuid_to_str(conn->session->sessiond_uuid, uuid_str); | |
2545 | ERR("Failed to find chunk to close: sessiond_uuid = %s, session_id = %" PRIu64 ", chunk_id = %" PRIu64, | |
2546 | uuid_str, | |
2547 | conn->session->id, | |
2548 | msg->chunk_id); | |
2549 | ret = -1; | |
2550 | reply_code = LTTNG_ERR_NOMEM; | |
2551 | goto end; | |
2552 | } | |
2553 | ||
62bad3bf JG |
2554 | pthread_mutex_lock(&session->lock); |
2555 | if (session->pending_closure_trace_chunk && | |
2556 | session->pending_closure_trace_chunk != chunk) { | |
2557 | ERR("Trace chunk close command for session \"%s\" does not target the trace chunk pending closure", | |
2558 | session->session_name); | |
2559 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2560 | ret = -1; | |
2561 | goto end_unlock_session; | |
2562 | } | |
2563 | ||
bbc4768c JG |
2564 | chunk_status = lttng_trace_chunk_set_close_timestamp( |
2565 | chunk, close_timestamp); | |
2566 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2567 | ERR("Failed to set trace chunk close timestamp"); | |
2568 | ret = -1; | |
2569 | reply_code = LTTNG_ERR_UNK; | |
62bad3bf | 2570 | goto end_unlock_session; |
bbc4768c JG |
2571 | } |
2572 | ||
2573 | if (close_command.is_set) { | |
2574 | chunk_status = lttng_trace_chunk_set_close_command( | |
2575 | chunk, close_command.value); | |
2576 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2577 | ret = -1; | |
2578 | reply_code = LTTNG_ERR_INVALID; | |
62bad3bf | 2579 | goto end_unlock_session; |
bbc4768c JG |
2580 | } |
2581 | } | |
ecd1a12f MD |
2582 | chunk_status = lttng_trace_chunk_get_name(chunk, &chunk_name, NULL); |
2583 | if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { | |
2584 | ERR("Failed to get chunk name"); | |
2585 | ret = -1; | |
2586 | reply_code = LTTNG_ERR_UNK; | |
2587 | goto end_unlock_session; | |
2588 | } | |
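	/*
	 * Added summary of the path reported back to the session daemon,
	 * inferred from the branches below: a session that never rotated and
	 * is not in snapshot mode reports its plain output path; a snapshot
	 * session reports <output_path>/<chunk_name>; a rotated session
	 * reports <output_path>/<archives directory>/<chunk_name>.
	 */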
2589 | if (!session->has_rotated && !session->snapshot) { | |
2590 | ret = lttng_strncpy(closed_trace_chunk_path, | |
2591 | session->output_path, | |
2592 | sizeof(closed_trace_chunk_path)); | |
2593 | if (ret) { | |
2594 | ERR("Failed to send trace chunk path: path length of %zu bytes exceeds the maximal allowed length of %zu bytes", | |
2595 | strlen(session->output_path), | |
2596 | sizeof(closed_trace_chunk_path)); | |
2597 | reply_code = LTTNG_ERR_NOMEM; | |
2598 | ret = -1; | |
2599 | goto end_unlock_session; | |
2600 | } | |
2601 | } else { | |
2602 | if (session->snapshot) { | |
2603 | ret = snprintf(closed_trace_chunk_path, | |
2604 | sizeof(closed_trace_chunk_path), | |
2605 | "%s/%s", session->output_path, | |
2606 | chunk_name); | |
2607 | } else { | |
2608 | ret = snprintf(closed_trace_chunk_path, | |
2609 | sizeof(closed_trace_chunk_path), | |
2610 | "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY | |
2611 | "/%s", | |
2612 | session->output_path, chunk_name); | |
2613 | } | |
2614 | if (ret < 0 || ret >= sizeof(closed_trace_chunk_path)) { | |
2615 | ERR("Failed to format closed trace chunk resulting path"); | |
2616 | reply_code = ret < 0 ? LTTNG_ERR_UNK : LTTNG_ERR_NOMEM; | |
2617 | ret = -1; | |
2618 | goto end_unlock_session; | |
2619 | } | |
2620 | } | |
2621 | DBG("Reply chunk path on close: %s", closed_trace_chunk_path); | |
2622 | path_length = strlen(closed_trace_chunk_path) + 1; | |
2623 | if (path_length > UINT32_MAX) { | |
2624 | ERR("Closed trace chunk path exceeds the maximal length allowed by the protocol"); | |
2625 | ret = -1; | |
2626 | reply_code = LTTNG_ERR_INVALID_PROTOCOL; | |
2627 | goto end_unlock_session; | |
2628 | } | |
bbc4768c | 2629 | |
c35f9726 JG |
2630 | if (session->current_trace_chunk == chunk) { |
2631 | /* | |
2632 | * After a trace chunk close command, no new streams | |
2633 | * referencing the chunk may be created. Hence, on the | |
2634 | * event that no new trace chunk have been created for | |
2635 | * the session, the reference to the current trace chunk | |
2636 | * is released in order to allow it to be reclaimed when | |
2637 | * the last stream releases its reference to it. | |
2638 | */ | |
2639 | lttng_trace_chunk_put(session->current_trace_chunk); | |
2640 | session->current_trace_chunk = NULL; | |
2641 | } | |
62bad3bf JG |
2642 | lttng_trace_chunk_put(session->pending_closure_trace_chunk); |
2643 | session->pending_closure_trace_chunk = NULL; | |
2644 | end_unlock_session: | |
c35f9726 JG |
2645 | pthread_mutex_unlock(&session->lock); |
2646 | ||
bbc4768c | 2647 | end: |
ecd1a12f MD |
2648 | reply.generic.ret_code = htobe32((uint32_t) reply_code); |
2649 | reply.path_length = htobe32((uint32_t) path_length); | |
9898f786 | 2650 | buf_ret = lttng_dynamic_buffer_append( |
ecd1a12f | 2651 | &reply_payload, &reply, sizeof(reply)); |
9898f786 | 2652 | if (buf_ret) { |
ecd1a12f MD |
2653 | ERR("Failed to append \"close trace chunk\" command reply header to payload buffer"); |
2654 | goto end_no_reply; | |
2655 | } | |
2656 | ||
2657 | if (reply_code == LTTNG_OK) { | |
9898f786 | 2658 | buf_ret = lttng_dynamic_buffer_append(&reply_payload, |
ecd1a12f | 2659 | closed_trace_chunk_path, path_length); |
9898f786 | 2660 | if (buf_ret) { |
ecd1a12f MD |
2661 | ERR("Failed to append \"close trace chunk\" command reply path to payload buffer"); |
2662 | goto end_no_reply; | |
2663 | } | |
2664 | } | |
2665 | ||
bbc4768c | 2666 | send_ret = conn->sock->ops->sendmsg(conn->sock, |
ecd1a12f MD |
2667 | reply_payload.data, |
2668 | reply_payload.size, | |
bbc4768c | 2669 | 0); |
ecd1a12f MD |
2670 | if (send_ret < (ssize_t) reply_payload.size) { | |
2671 | ERR("Failed to send \"close trace chunk\" command reply of %zu bytes (ret = %zd)", | |
2672 | reply_payload.size, send_ret); | |
bbc4768c | 2673 | ret = -1; |
ecd1a12f | 2674 | goto end_no_reply; |
bbc4768c JG |
2675 | } |
2676 | end_no_reply: | |
2677 | lttng_trace_chunk_put(chunk); | |
ecd1a12f | 2678 | lttng_dynamic_buffer_reset(&reply_payload); |
bbc4768c JG |
2679 | return ret; |
2680 | } | |
2681 | ||
c35f9726 JG |
2682 | /* |
2683 | * relay_trace_chunk_exists: check if a trace chunk exists | |
2684 | */ | |
2685 | static int relay_trace_chunk_exists(const struct lttcomm_relayd_hdr *recv_hdr, | |
2686 | struct relay_connection *conn, | |
2687 | const struct lttng_buffer_view *payload) | |
2688 | { | |
2689 | int ret = 0; | |
2690 | ssize_t send_ret; | |
2691 | struct relay_session *session = conn->session; | |
2692 | struct lttcomm_relayd_trace_chunk_exists *msg; | |
2693 | struct lttcomm_relayd_trace_chunk_exists_reply reply = {}; | |
2694 | struct lttng_buffer_view header_view; | |
c35f9726 | 2695 | uint64_t chunk_id; |
6b584c2e | 2696 | bool chunk_exists; |
c35f9726 JG |
2697 | |
2698 | if (!session || !conn->version_check_done) { | |
2699 | ERR("Trying to check if a trace chunk exists before version check"); | |
2700 | ret = -1; | |
2701 | goto end_no_reply; | |
2702 | } | |
2703 | ||
2704 | if (session->major == 2 && session->minor < 11) { | |
2705 | ERR("Chunk exists command is unsupported before 2.11"); | |
2706 | ret = -1; | |
2707 | goto end_no_reply; | |
2708 | } | |
2709 | ||
2710 | header_view = lttng_buffer_view_from_view(payload, 0, sizeof(*msg)); | |
2711 | if (!header_view.data) { | |
2712 | ERR("Failed to receive payload of chunk exists command"); | |
2713 | ret = -1; | |
2714 | goto end_no_reply; | |
2715 | } | |
2716 | ||
2717 | /* Convert to host endianness. */ | |
2718 | msg = (typeof(msg)) header_view.data; | |
2719 | chunk_id = be64toh(msg->chunk_id); | |
2720 | ||
6b584c2e | 2721 | ret = sessiond_trace_chunk_registry_chunk_exists( |
c35f9726 JG |
2722 | sessiond_trace_chunk_registry, |
2723 | conn->session->sessiond_uuid, | |
2724 | conn->session->id, | |
6b584c2e JG |
2725 | chunk_id, &chunk_exists); |
2726 | /* | |
2727 | * If ret is not 0, send the reply and report the error to the caller. | |
2728 | * It is a protocol (or internal) error and the session/connection | |
2729 | * should be torn down. | |
2730 | */ | |
2731 | reply = (typeof(reply)){ | |
2732 | .generic.ret_code = htobe32((uint32_t) | |
2733 | (ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID_PROTOCOL)), | |
2734 | .trace_chunk_exists = ret == 0 ? chunk_exists : 0, | |
c35f9726 | 2735 | }; |
6b584c2e JG |
2736 | send_ret = conn->sock->ops->sendmsg( |
2737 | conn->sock, &reply, sizeof(reply), 0); | |
c35f9726 JG |
2738 | if (send_ret < (ssize_t) sizeof(reply)) { |
2739 | ERR("Failed to send \"trace chunk exists\" command reply (ret = %zd)", | |
2740 | send_ret); | |
2741 | ret = -1; | |
2742 | } | |
2743 | end_no_reply: | |
c35f9726 JG |
2744 | return ret; |
2745 | } | |
2746 | ||
5312a3ed JG |
2747 | #define DBG_CMD(cmd_name, conn) \ |
2748 | DBG3("Processing \"%s\" command for socket %i", cmd_name, conn->sock->fd) | |
2749 | ||
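/*
 * Added overview (derived from the handlers above, not from upstream
 * documentation): every control command is framed as a struct
 * lttcomm_relayd_hdr sent in network byte order, followed by
 * header->data_size bytes of command-specific payload. Each handler
 * receives the converted header and a view of that payload, and is
 * responsible for sending its own reply on the control socket.
 */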
2750 | static int relay_process_control_command(struct relay_connection *conn, | |
2751 | const struct lttcomm_relayd_hdr *header, | |
2752 | const struct lttng_buffer_view *payload) | |
b8aa1682 JD |
2753 | { |
2754 | int ret = 0; | |
2755 | ||
5312a3ed | 2756 | switch (header->cmd) { |
b8aa1682 | 2757 | case RELAYD_CREATE_SESSION: |
5312a3ed JG |
2758 | DBG_CMD("RELAYD_CREATE_SESSION", conn); |
2759 | ret = relay_create_session(header, conn, payload); | |
b8aa1682 | 2760 | break; |
b8aa1682 | 2761 | case RELAYD_ADD_STREAM: |
5312a3ed JG |
2762 | DBG_CMD("RELAYD_ADD_STREAM", conn); |
2763 | ret = relay_add_stream(header, conn, payload); | |
b8aa1682 JD |
2764 | break; |
2765 | case RELAYD_START_DATA: | |
5312a3ed JG |
2766 | DBG_CMD("RELAYD_START_DATA", conn); |
2767 | ret = relay_start(header, conn, payload); | |
b8aa1682 JD |
2768 | break; |
2769 | case RELAYD_SEND_METADATA: | |
5312a3ed JG |
2770 | DBG_CMD("RELAYD_SEND_METADATA", conn); |
2771 | ret = relay_recv_metadata(header, conn, payload); | |
b8aa1682 JD |
2772 | break; |
2773 | case RELAYD_VERSION: | |
5312a3ed JG |
2774 | DBG_CMD("RELAYD_VERSION", conn); |
2775 | ret = relay_send_version(header, conn, payload); | |
b8aa1682 | 2776 | break; |
173af62f | 2777 | case RELAYD_CLOSE_STREAM: |
5312a3ed JG |
2778 | DBG_CMD("RELAYD_CLOSE_STREAM", conn); |
2779 | ret = relay_close_stream(header, conn, payload); | |
173af62f | 2780 | break; |
6d805429 | 2781 | case RELAYD_DATA_PENDING: |
5312a3ed JG |
2782 | DBG_CMD("RELAYD_DATA_PENDING", conn); |
2783 | ret = relay_data_pending(header, conn, payload); | |
c8f59ee5 DG |
2784 | break; |
2785 | case RELAYD_QUIESCENT_CONTROL: | |
5312a3ed JG |
2786 | DBG_CMD("RELAYD_QUIESCENT_CONTROL", conn); |
2787 | ret = relay_quiescent_control(header, conn, payload); | |
c8f59ee5 | 2788 | break; |
f7079f67 | 2789 | case RELAYD_BEGIN_DATA_PENDING: |
5312a3ed JG |
2790 | DBG_CMD("RELAYD_BEGIN_DATA_PENDING", conn); |
2791 | ret = relay_begin_data_pending(header, conn, payload); | |
f7079f67 DG |
2792 | break; |
2793 | case RELAYD_END_DATA_PENDING: | |
5312a3ed JG |
2794 | DBG_CMD("RELAYD_END_DATA_PENDING", conn); |
2795 | ret = relay_end_data_pending(header, conn, payload); | |
f7079f67 | 2796 | break; |
1c20f0e2 | 2797 | case RELAYD_SEND_INDEX: |
5312a3ed JG |
2798 | DBG_CMD("RELAYD_SEND_INDEX", conn); |
2799 | ret = relay_recv_index(header, conn, payload); | |
1c20f0e2 | 2800 | break; |
a4baae1b | 2801 | case RELAYD_STREAMS_SENT: |
5312a3ed JG |
2802 | DBG_CMD("RELAYD_STREAMS_SENT", conn); |
2803 | ret = relay_streams_sent(header, conn, payload); | |
a4baae1b | 2804 | break; |
93ec662e | 2805 | case RELAYD_RESET_METADATA: |
5312a3ed JG |
2806 | DBG_CMD("RELAYD_RESET_METADATA", conn); |
2807 | ret = relay_reset_metadata(header, conn, payload); | |
93ec662e | 2808 | break; |
c35f9726 JG |
2809 | case RELAYD_ROTATE_STREAMS: |
2810 | DBG_CMD("RELAYD_ROTATE_STREAMS", conn); | |
2811 | ret = relay_rotate_session_streams(header, conn, payload); | |
d3ecc550 | 2812 | break; |
e5add6d0 JG |
2813 | case RELAYD_CREATE_TRACE_CHUNK: |
2814 | DBG_CMD("RELAYD_CREATE_TRACE_CHUNK", conn); | |
2815 | ret = relay_create_trace_chunk(header, conn, payload); | |
2816 | break; | |
bbc4768c JG |
2817 | case RELAYD_CLOSE_TRACE_CHUNK: |
2818 | DBG_CMD("RELAYD_CLOSE_TRACE_CHUNK", conn); | |
2819 | ret = relay_close_trace_chunk(header, conn, payload); | |
2820 | break; | |
c35f9726 JG |
2821 | case RELAYD_TRACE_CHUNK_EXISTS: |
2822 | DBG_CMD("RELAYD_TRACE_CHUNK_EXISTS", conn); | |
2823 | ret = relay_trace_chunk_exists(header, conn, payload); | |
2824 | break; | |
b8aa1682 JD |
2825 | case RELAYD_UPDATE_SYNC_INFO: |
2826 | default: | |
5312a3ed | 2827 | ERR("Received unknown command (%u)", header->cmd); |
58eb9381 | 2828 | relay_unknown_command(conn); |
b8aa1682 JD |
2829 | ret = -1; |
2830 | goto end; | |
2831 | } | |
2832 | ||
2833 | end: | |
2834 | return ret; | |
2835 | } | |
2836 | ||
5569b118 JG |
2837 | static enum relay_connection_status relay_process_control_receive_payload( |
2838 | struct relay_connection *conn) | |
5312a3ed JG |
2839 | { |
2840 | int ret = 0; | |
5569b118 | 2841 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
2842 | struct lttng_dynamic_buffer *reception_buffer = |
2843 | &conn->protocol.ctrl.reception_buffer; | |
2844 | struct ctrl_connection_state_receive_payload *state = | |
2845 | &conn->protocol.ctrl.state.receive_payload; | |
2846 | struct lttng_buffer_view payload_view; | |
2847 | ||
2848 | if (state->left_to_receive == 0) { | |
2849 | /* Short-circuit for payload-less commands. */ | |
2850 | goto reception_complete; | |
2851 | } | |
2852 | ||
2853 | ret = conn->sock->ops->recvmsg(conn->sock, | |
2854 | reception_buffer->data + state->received, | |
2855 | state->left_to_receive, MSG_DONTWAIT); | |
2856 | if (ret < 0) { | |
5569b118 JG |
2857 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
2858 | PERROR("Unable to receive command payload on sock %d", | |
2859 | conn->sock->fd); | |
2860 | status = RELAY_CONNECTION_STATUS_ERROR; | |
2861 | } | |
5312a3ed JG |
2862 | goto end; |
2863 | } else if (ret == 0) { | |
2864 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 2865 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
2866 | goto end; |
2867 | } | |
2868 | ||
2869 | assert(ret > 0); | |
2870 | assert(ret <= state->left_to_receive); | |
2871 | ||
2872 | state->left_to_receive -= ret; | |
2873 | state->received += ret; | |
2874 | ||
2875 | if (state->left_to_receive > 0) { | |
2876 | /* | |
2877 | * Can't transition to the protocol's next state, wait to | |
2878 | * receive the rest of the payload. | |
2879 | */ | |
2880 | DBG3("Partial reception of control connection protocol payload (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
2881 | state->received, state->left_to_receive, | |
2882 | conn->sock->fd); | |
5312a3ed JG |
2883 | goto end; |
2884 | } | |
2885 | ||
2886 | reception_complete: | |
2887 | DBG("Done receiving control command payload: fd = %i, payload size = %" PRIu64 " bytes", | |
2888 | conn->sock->fd, state->received); | |
2889 | /* | |
2890 | * The payload required to process the command has been received. | |
2891 | * A view to the reception buffer is forwarded to the various | |
2892 | * commands and the state of the control is reset on success. | |
2893 | * | |
2894 | * Commands are responsible for sending their reply to the peer. | |
2895 | */ | |
2896 | payload_view = lttng_buffer_view_from_dynamic_buffer(reception_buffer, | |
2897 | 0, -1); | |
2898 | ret = relay_process_control_command(conn, | |
2899 | &state->header, &payload_view); | |
2900 | if (ret < 0) { | |
5569b118 | 2901 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
2902 | goto end; |
2903 | } | |
2904 | ||
2905 | ret = connection_reset_protocol_state(conn); | |
5569b118 JG |
2906 | if (ret) { |
2907 | status = RELAY_CONNECTION_STATUS_ERROR; | |
2908 | } | |
5312a3ed | 2909 | end: |
5569b118 | 2910 | return status; |
5312a3ed JG |
2911 | } |
2912 | ||
5569b118 JG |
2913 | static enum relay_connection_status relay_process_control_receive_header( |
2914 | struct relay_connection *conn) | |
5312a3ed JG |
2915 | { |
2916 | int ret = 0; | |
5569b118 | 2917 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
2918 | struct lttcomm_relayd_hdr header; |
2919 | struct lttng_dynamic_buffer *reception_buffer = | |
2920 | &conn->protocol.ctrl.reception_buffer; | |
2921 | struct ctrl_connection_state_receive_header *state = | |
2922 | &conn->protocol.ctrl.state.receive_header; | |
2923 | ||
2924 | assert(state->left_to_receive != 0); | |
2925 | ||
2926 | ret = conn->sock->ops->recvmsg(conn->sock, | |
2927 | reception_buffer->data + state->received, | |
2928 | state->left_to_receive, MSG_DONTWAIT); | |
2929 | if (ret < 0) { | |
5569b118 JG |
2930 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
2931 | PERROR("Unable to receive control command header on sock %d", | |
2932 | conn->sock->fd); | |
2933 | status = RELAY_CONNECTION_STATUS_ERROR; | |
2934 | } | |
5312a3ed JG |
2935 | goto end; |
2936 | } else if (ret == 0) { | |
2937 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 2938 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
2939 | goto end; |
2940 | } | |
2941 | ||
2942 | assert(ret > 0); | |
2943 | assert(ret <= state->left_to_receive); | |
2944 | ||
2945 | state->left_to_receive -= ret; | |
2946 | state->received += ret; | |
2947 | ||
2948 | if (state->left_to_receive > 0) { | |
2949 | /* | |
2950 | * Can't transition to the protocol's next state, wait to | |
2951 | * receive the rest of the header. | |
2952 | */ | |
2953 | DBG3("Partial reception of control connection protocol header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
2954 | state->received, state->left_to_receive, | |
2955 | conn->sock->fd); | |
5312a3ed JG |
2956 | goto end; |
2957 | } | |
2958 | ||
2959 | /* Transition to next state: receiving the command's payload. */ | |
2960 | conn->protocol.ctrl.state_id = | |
2961 | CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD; | |
2962 | memcpy(&header, reception_buffer->data, sizeof(header)); | |
2963 | header.circuit_id = be64toh(header.circuit_id); | |
2964 | header.data_size = be64toh(header.data_size); | |
2965 | header.cmd = be32toh(header.cmd); | |
2966 | header.cmd_version = be32toh(header.cmd_version); | |
2967 | memcpy(&conn->protocol.ctrl.state.receive_payload.header, | |
2968 | &header, sizeof(header)); | |
2969 | ||
2970 | DBG("Done receiving control command header: fd = %i, cmd = %" PRIu32 ", cmd_version = %" PRIu32 ", payload size = %" PRIu64 " bytes", | |
2971 | conn->sock->fd, header.cmd, header.cmd_version, | |
2972 | header.data_size); | |
2973 | ||
715e6fb1 | 2974 | if (header.data_size > DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE) { |
5312a3ed JG |
2975 | ERR("Command header indicates a payload (%" PRIu64 " bytes) that exceeds the maximal payload size allowed on a control connection.", |
2976 | header.data_size); | |
5569b118 | 2977 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
2978 | goto end; |
2979 | } | |
2980 | ||
2981 | conn->protocol.ctrl.state.receive_payload.left_to_receive = | |
2982 | header.data_size; | |
2983 | conn->protocol.ctrl.state.receive_payload.received = 0; | |
2984 | ret = lttng_dynamic_buffer_set_size(reception_buffer, | |
2985 | header.data_size); | |
2986 | if (ret) { | |
5569b118 | 2987 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
2988 | goto end; |
2989 | } | |
2990 | ||
2991 | if (header.data_size == 0) { | |
2992 | /* | |
2993 | * Manually invoke the next state as the poll loop | |
2994 | * will not wake-up to allow us to proceed further. | |
2995 | */ | |
5569b118 | 2996 | status = relay_process_control_receive_payload(conn); |
5312a3ed JG |
2997 | } |
2998 | end: | |
5569b118 | 2999 | return status; |
5312a3ed JG |
3000 | } |
3001 | ||
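Once the fixed-size header is fully received, it is copied out of the reception buffer and each field is converted from network byte order before data_size is trusted and the buffer resized; a zero-size payload then drives the next state by hand, since poll will not wake up again for data that was never sent. A hedged sketch of the decode-and-validate step, using a hypothetical header layout and an illustrative size bound (the real struct lttcomm_relayd_hdr and DEFAULT_NETWORK_RELAYD_CTRL_MAX_PAYLOAD_SIZE live in the common headers):

    #include <endian.h>   /* be64toh()/be32toh(); lttng-tools wraps this in compat/endian.h */
    #include <stdint.h>
    #include <string.h>

    #define MAX_CTRL_PAYLOAD (128 * 1024)   /* illustrative bound only */

    struct wire_hdr {                       /* hypothetical layout */
            uint64_t circuit_id;
            uint64_t data_size;
            uint32_t cmd;
            uint32_t cmd_version;
    } __attribute__((packed));

    /*
     * Decode a fully-received header. Returns 0 on success, -1 if the
     * announced payload exceeds what we are willing to buffer.
     */
    static int decode_ctrl_header(const char *rx_buf, struct wire_hdr *out)
    {
            memcpy(out, rx_buf, sizeof(*out));  /* copy out: no alignment games */
            out->circuit_id = be64toh(out->circuit_id);
            out->data_size = be64toh(out->data_size);
            out->cmd = be32toh(out->cmd);
            out->cmd_version = be32toh(out->cmd_version);

            return out->data_size <= MAX_CTRL_PAYLOAD ? 0 : -1;
    }

Decoding into a separate struct via memcpy leaves the reception buffer free to be resized for the payload that follows and sidesteps alignment concerns.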
3002 | /* | |
3003 | * Process the commands received on the control socket | |
3004 | */ | |
5569b118 JG |
3005 | static enum relay_connection_status relay_process_control( |
3006 | struct relay_connection *conn) | |
5312a3ed | 3007 | { |
5569b118 | 3008 | enum relay_connection_status status; |
5312a3ed JG |
3009 | |
3010 | switch (conn->protocol.ctrl.state_id) { | |
3011 | case CTRL_CONNECTION_STATE_RECEIVE_HEADER: | |
5569b118 | 3012 | status = relay_process_control_receive_header(conn); |
5312a3ed JG |
3013 | break; |
3014 | case CTRL_CONNECTION_STATE_RECEIVE_PAYLOAD: | |
5569b118 | 3015 | status = relay_process_control_receive_payload(conn); |
5312a3ed JG |
3016 | break; |
3017 | default: | |
3018 | ERR("Unknown control connection protocol state encountered."); | |
3019 | abort(); | |
3020 | } | |
3021 | ||
5569b118 | 3022 | return status; |
5312a3ed JG |
3023 | } |
3024 | ||
5569b118 JG |
3025 | static enum relay_connection_status relay_process_data_receive_header( |
3026 | struct relay_connection *conn) | |
b8aa1682 | 3027 | { |
5312a3ed | 3028 | int ret; |
5569b118 | 3029 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3030 | struct data_connection_state_receive_header *state = |
3031 | &conn->protocol.data.state.receive_header; | |
3032 | struct lttcomm_relayd_data_hdr header; | |
b8aa1682 | 3033 | struct relay_stream *stream; |
5312a3ed JG |
3034 | |
3035 | assert(state->left_to_receive != 0); | |
3036 | ||
3037 | ret = conn->sock->ops->recvmsg(conn->sock, | |
3038 | state->header_reception_buffer + state->received, | |
3039 | state->left_to_receive, MSG_DONTWAIT); | |
3040 | if (ret < 0) { | |
5569b118 JG |
3041 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
3042 | PERROR("Unable to receive data header on sock %d", conn->sock->fd); | |
3043 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3044 | } | |
5312a3ed JG |
3045 | goto end; |
3046 | } else if (ret == 0) { | |
3047 | /* Orderly shutdown. Not necessary to print an error. */ | |
3048 | DBG("Socket %d performed an orderly shutdown (received EOF)", conn->sock->fd); | |
5569b118 | 3049 | status = RELAY_CONNECTION_STATUS_CLOSED; |
b8aa1682 JD |
3050 | goto end; |
3051 | } | |
3052 | ||
5312a3ed JG |
3053 | assert(ret > 0); |
3054 | assert(ret <= state->left_to_receive); | |
3055 | ||
3056 | state->left_to_receive -= ret; | |
3057 | state->received += ret; | |
3058 | ||
3059 | if (state->left_to_receive > 0) { | |
3060 | /* | |
3061 | * Can't transition to the protocol's next state, wait to | |
3062 | * receive the rest of the header. | |
3063 | */ | |
3064 | DBG3("Partial reception of data connection header (received %" PRIu64 " bytes, %" PRIu64 " bytes left to receive, fd = %i)", | |
3065 | state->received, state->left_to_receive, | |
3066 | conn->sock->fd); | |
7591bab1 | 3067 | goto end; |
b8aa1682 | 3068 | } |
b8aa1682 | 3069 | |
5312a3ed JG |
3070 | /* Transition to next state: receiving the payload. */ |
3071 | conn->protocol.data.state_id = DATA_CONNECTION_STATE_RECEIVE_PAYLOAD; | |
173af62f | 3072 | |
5312a3ed JG |
3073 | memcpy(&header, state->header_reception_buffer, sizeof(header)); |
3074 | header.circuit_id = be64toh(header.circuit_id); | |
3075 | header.stream_id = be64toh(header.stream_id); | |
3076 | header.data_size = be32toh(header.data_size); | |
3077 | header.net_seq_num = be64toh(header.net_seq_num); | |
3078 | header.padding_size = be32toh(header.padding_size); | |
3079 | memcpy(&conn->protocol.data.state.receive_payload.header, &header, sizeof(header)); | |
3080 | ||
3081 | conn->protocol.data.state.receive_payload.left_to_receive = | |
3082 | header.data_size; | |
3083 | conn->protocol.data.state.receive_payload.received = 0; | |
3084 | conn->protocol.data.state.receive_payload.rotate_index = false; | |
3085 | ||
3086 | DBG("Received data connection header on fd %i: circuit_id = %" PRIu64 ", stream_id = %" PRIu64 ", data_size = %" PRIu32 ", net_seq_num = %" PRIu64 ", padding_size = %" PRIu32, | |
3087 | conn->sock->fd, header.circuit_id, | |
3088 | header.stream_id, header.data_size, | |
3089 | header.net_seq_num, header.padding_size); | |
3090 | ||
3091 | stream = stream_get_by_id(header.stream_id); | |
3092 | if (!stream) { | |
3093 | DBG("relay_process_data_receive_payload: Cannot find stream %" PRIu64, | |
3094 | header.stream_id); | |
5569b118 JG |
3095 | /* Protocol error. */ |
3096 | status = RELAY_CONNECTION_STATUS_ERROR; | |
5312a3ed JG |
3097 | goto end; |
3098 | } | |
b8aa1682 | 3099 | |
7591bab1 | 3100 | pthread_mutex_lock(&stream->lock); |
c35f9726 JG |
3101 | /* Prepare stream for the reception of a new packet. */ |
3102 | ret = stream_init_packet(stream, header.data_size, | |
3103 | &conn->protocol.data.state.receive_payload.rotate_index); | |
3104 | pthread_mutex_unlock(&stream->lock); | |
3105 | if (ret) { | |
3106 | ERR("Failed to rotate stream output file"); | |
3107 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3108 | goto end_stream_unlock; | |
1c20f0e2 JD |
3109 | } |
3110 | ||
5312a3ed | 3111 | end_stream_unlock: |
5312a3ed JG |
3112 | stream_put(stream); |
3113 | end: | |
5569b118 | 3114 | return status; |
5312a3ed JG |
3115 | } |
3116 | ||
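stream_get_by_id() and stream_put() above follow a get/put reference-counting discipline: the lookup takes a reference that every exit path must drop, and state changes happen under the per-stream lock while that reference is held. A small sketch of the discipline with hypothetical types (not the relayd's struct relay_stream, whose lookup goes through an RCU hash table):

    #include <pthread.h>
    #include <stdint.h>
    #include <urcu/uatomic.h>

    struct stream {
            uint64_t id;
            long refcount;
            pthread_mutex_t lock;
    };

    /* Stub lookup so the sketch stands alone. */
    static struct stream *stream_lookup(uint64_t id)
    {
            (void) id;
            return NULL;
    }

    /* Take a reference on lookup; pair every get with a put. */
    static struct stream *get_stream(uint64_t id)
    {
            struct stream *s = stream_lookup(id);

            if (s) {
                    uatomic_inc(&s->refcount);
            }
            return s;
    }

    static void put_stream(struct stream *s)
    {
            if (s && uatomic_sub_return(&s->refcount, 1) == 0) {
                    /* Last reference dropped: reclamation would happen here. */
            }
    }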
5569b118 JG |
3117 | static enum relay_connection_status relay_process_data_receive_payload( |
3118 | struct relay_connection *conn) | |
5312a3ed JG |
3119 | { |
3120 | int ret; | |
5569b118 | 3121 | enum relay_connection_status status = RELAY_CONNECTION_STATUS_OK; |
5312a3ed JG |
3122 | struct relay_stream *stream; |
3123 | struct data_connection_state_receive_payload *state = | |
3124 | &conn->protocol.data.state.receive_payload; | |
3125 | const size_t chunk_size = RECV_DATA_BUFFER_SIZE; | |
3126 | char data_buffer[chunk_size]; | |
3127 | bool partial_recv = false; | |
3128 | bool new_stream = false, close_requested = false, index_flushed = false; | |
3129 | uint64_t left_to_receive = state->left_to_receive; | |
3130 | struct relay_session *session; | |
3131 | ||
fd0f1e3e JR |
3132 | DBG3("Receiving data for stream id %" PRIu64 " seqnum %" PRIu64 ", %" PRIu64" bytes received, %" PRIu64 " bytes left to receive", |
3133 | state->header.stream_id, state->header.net_seq_num, | |
3134 | state->received, left_to_receive); | |
3135 | ||
5312a3ed JG |
3136 | stream = stream_get_by_id(state->header.stream_id); |
3137 | if (!stream) { | |
5569b118 | 3138 | /* Protocol error. */ |
fd0f1e3e | 3139 | ERR("relay_process_data_receive_payload: cannot find stream %" PRIu64, |
5312a3ed | 3140 | state->header.stream_id); |
5569b118 | 3141 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed | 3142 | goto end; |
1c20f0e2 JD |
3143 | } |
3144 | ||
5312a3ed JG |
3145 | pthread_mutex_lock(&stream->lock); |
3146 | session = stream->trace->session; | |
fd0f1e3e JR |
3147 | if (!conn->session) { |
3148 | ret = connection_set_session(conn, session); | |
3149 | if (ret) { | |
3150 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3151 | goto end_stream_unlock; | |
3152 | } | |
3153 | } | |
5312a3ed JG |
3154 | |
3155 | /* | |
3156 | * The size of the "chunk" received on any iteration is bounded by: | |
3157 | * - the data left to receive, | |
3158 | * - the data immediately available on the socket, | |
3159 | * - the on-stack data buffer | |
3160 | */ | |
3161 | while (left_to_receive > 0 && !partial_recv) { | |
5312a3ed | 3162 | size_t recv_size = min(left_to_receive, chunk_size); |
c35f9726 | 3163 | struct lttng_buffer_view packet_chunk; |
5312a3ed JG |
3164 | |
3165 | ret = conn->sock->ops->recvmsg(conn->sock, data_buffer, | |
3166 | recv_size, MSG_DONTWAIT); | |
3167 | if (ret < 0) { | |
5569b118 JG |
3168 | if (errno != EAGAIN && errno != EWOULDBLOCK) { |
3169 | PERROR("Socket %d error", conn->sock->fd); | |
3170 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3171 | } | |
0848dba7 | 3172 | goto end_stream_unlock; |
5312a3ed JG |
3173 | } else if (ret == 0) { |
3174 | /* No more data ready to be consumed on socket. */ | |
3175 | DBG3("No more data ready for consumption on data socket of stream id %" PRIu64, | |
3176 | state->header.stream_id); | |
5569b118 | 3177 | status = RELAY_CONNECTION_STATUS_CLOSED; |
5312a3ed JG |
3178 | break; |
3179 | } else if (ret < (int) recv_size) { | |
3180 | /* | |
3181 | * All the data available on the socket has been | |
3182 | * consumed. | |
3183 | */ | |
3184 | partial_recv = true; | |
c35f9726 | 3185 | recv_size = ret; |
0848dba7 MD |
3186 | } |
3187 | ||
c35f9726 JG |
3188 | packet_chunk = lttng_buffer_view_init(data_buffer, |
3189 | 0, recv_size); | |
3190 | assert(packet_chunk.data); | |
5312a3ed | 3191 | |
c35f9726 JG |
3192 | ret = stream_write(stream, &packet_chunk, 0); |
3193 | if (ret) { | |
0848dba7 | 3194 | ERR("Relay error writing data to file"); |
5569b118 | 3195 | status = RELAY_CONNECTION_STATUS_ERROR; |
0848dba7 MD |
3196 | goto end_stream_unlock; |
3197 | } | |
3198 | ||
5312a3ed JG |
3199 | left_to_receive -= recv_size; |
3200 | state->received += recv_size; | |
3201 | state->left_to_receive = left_to_receive; | |
5312a3ed JG |
3202 | } |
3203 | ||
3204 | if (state->left_to_receive > 0) { | |
3205 | /* | |
3206 | * Did not receive all the data expected, wait for more data to | |
3207 | * become available on the socket. | |
3208 | */ | |
3209 | DBG3("Partial receive on data connection of stream id %" PRIu64 ", %" PRIu64 " bytes received, %" PRIu64 " bytes left to receive", | |
3210 | state->header.stream_id, state->received, | |
3211 | state->left_to_receive); | |
5312a3ed | 3212 | goto end_stream_unlock; |
0848dba7 | 3213 | } |
5ab7344e | 3214 | |
c35f9726 JG |
3215 | ret = stream_write(stream, NULL, state->header.padding_size); |
3216 | if (ret) { | |
5569b118 | 3217 | status = RELAY_CONNECTION_STATUS_ERROR; |
7591bab1 | 3218 | goto end_stream_unlock; |
1d4dfdef | 3219 | } |
5312a3ed | 3220 | |
298a25ca | 3221 | if (session_streams_have_index(session)) { |
c35f9726 JG |
3222 | ret = stream_update_index(stream, state->header.net_seq_num, |
3223 | state->rotate_index, &index_flushed, | |
3224 | state->header.data_size + state->header.padding_size); | |
5312a3ed | 3225 | if (ret < 0) { |
c35f9726 | 3226 | ERR("Failed to update index: stream %" PRIu64 " net_seq_num %" PRIu64 " ret %d", |
5312a3ed JG |
3227 | stream->stream_handle, |
3228 | state->header.net_seq_num, ret); | |
5569b118 | 3229 | status = RELAY_CONNECTION_STATUS_ERROR; |
5312a3ed JG |
3230 | goto end_stream_unlock; |
3231 | } | |
3232 | } | |
3233 | ||
a8f9f353 | 3234 | if (stream->prev_data_seq == -1ULL) { |
c0bae11d MD |
3235 | new_stream = true; |
3236 | } | |
3237 | ||
c35f9726 JG |
3238 | ret = stream_complete_packet(stream, state->header.data_size + |
3239 | state->header.padding_size, state->header.net_seq_num, | |
3240 | index_flushed); | |
3241 | if (ret) { | |
3242 | status = RELAY_CONNECTION_STATUS_ERROR; | |
3243 | goto end_stream_unlock; | |
3244 | } | |
5312a3ed JG |
3245 | |
3246 | /* | |
3247 | * Resetting the protocol state (to RECEIVE_HEADER) will trash the | |
3248 | * contents of *state which are aliased (union) to the same location as | |
3249 | * the new state. Don't use it beyond this point. | |
3250 | */ | |
3251 | connection_reset_protocol_state(conn); | |
3252 | state = NULL; | |
173af62f | 3253 | |
7591bab1 | 3254 | end_stream_unlock: |
bda7c7b9 | 3255 | close_requested = stream->close_requested; |
7591bab1 | 3256 | pthread_mutex_unlock(&stream->lock); |
5312a3ed | 3257 | if (close_requested && left_to_receive == 0) { |
bda7c7b9 JG |
3258 | try_stream_close(stream); |
3259 | } | |
3260 | ||
c0bae11d MD |
3261 | if (new_stream) { |
3262 | pthread_mutex_lock(&session->lock); | |
3263 | uatomic_set(&session->new_streams, 1); | |
3264 | pthread_mutex_unlock(&session->lock); | |
3265 | } | |
5312a3ed | 3266 | |
7591bab1 | 3267 | stream_put(stream); |
b8aa1682 | 3268 | end: |
5569b118 | 3269 | return status; |
b8aa1682 JD |
3270 | } |
3271 | ||
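The reception loop above bounds each read by the bytes still expected, the bytes immediately available on the socket, and an on-stack staging buffer, so a single busy data connection cannot monopolize the worker thread. A condensed sketch of that loop shape, with a hypothetical sink callback standing in for stream_write():

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    #define CHUNK_SIZE 4096

    static size_t min_sz(uint64_t a, size_t b)
    {
            return a < (uint64_t) b ? (size_t) a : b;
    }

    /*
     * Copy up to 'left' bytes from a non-blocking socket into 'sink'.
     * Returns the number of bytes consumed; *socket_drained tells the
     * caller the socket ran dry (or failed) before 'left' arrived.
     */
    static uint64_t drain_payload(int fd, uint64_t left,
                    int (*sink)(const char *buf, size_t len, void *arg), void *arg,
                    bool *socket_drained)
    {
            char chunk[CHUNK_SIZE];
            uint64_t consumed = 0;

            *socket_drained = false;
            while (left > 0) {
                    size_t want = min_sz(left, sizeof(chunk));
                    ssize_t ret = recv(fd, chunk, want, MSG_DONTWAIT);

                    if (ret <= 0) {
                            /* EAGAIN/EWOULDBLOCK, EOF or error: stop for now. */
                            *socket_drained = true;
                            break;
                    }
                    if (sink(chunk, (size_t) ret, arg)) {
                            break;          /* sink error */
                    }
                    consumed += (uint64_t) ret;
                    left -= (uint64_t) ret;
                    if ((size_t) ret < want) {
                            *socket_drained = true;  /* partial read: nothing left */
                            break;
                    }
            }
            return consumed;
    }

When the socket drains before the packet is complete, the caller simply records the progress and returns to the poll loop, exactly as the state bookkeeping above does.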
5312a3ed JG |
3272 | /* |
3273 | * relay_process_data: Process the data received on the data socket | |
3274 | */ | |
5569b118 JG |
3275 | static enum relay_connection_status relay_process_data( |
3276 | struct relay_connection *conn) | |
5312a3ed | 3277 | { |
5569b118 | 3278 | enum relay_connection_status status; |
5312a3ed JG |
3279 | |
3280 | switch (conn->protocol.data.state_id) { | |
3281 | case DATA_CONNECTION_STATE_RECEIVE_HEADER: | |
5569b118 | 3282 | status = relay_process_data_receive_header(conn); |
5312a3ed JG |
3283 | break; |
3284 | case DATA_CONNECTION_STATE_RECEIVE_PAYLOAD: | |
5569b118 | 3285 | status = relay_process_data_receive_payload(conn); |
5312a3ed JG |
3286 | break; |
3287 | default: | |
3288 | ERR("Unexpected data connection communication state."); | |
3289 | abort(); | |
3290 | } | |
3291 | ||
5569b118 | 3292 | return status; |
5312a3ed JG |
3293 | } |
3294 | ||
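The warning near the end of relay_process_data_receive_payload about *state being "aliased (union)" refers to the connection holding only one protocol state at a time: the header and payload states share storage, so resetting the protocol re-initializes the very bytes an old state pointer refers to. An illustrative sketch with hypothetical field names:

    #include <stdint.h>
    #include <string.h>

    struct recv_header_state  { uint64_t received, left_to_receive; };
    struct recv_payload_state { uint64_t received, left_to_receive; uint32_t padding; };

    struct data_protocol {
            int state_id;
            union {                         /* both states share this storage */
                    struct recv_header_state receive_header;
                    struct recv_payload_state receive_payload;
            } state;
    };

    static void reset_protocol(struct data_protocol *p)
    {
            p->state_id = 0;                /* back to "receive header" */
            memset(&p->state, 0, sizeof(p->state));
    }

    /*
     * After reset_protocol(), a pointer previously taken to
     * p->state.receive_payload still points at valid memory, but its
     * contents now describe the header state: do not dereference it.
     */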
7591bab1 | 3295 | static void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd) |
b8aa1682 JD |
3296 | { |
3297 | int ret; | |
3298 | ||
58eb9381 | 3299 | (void) lttng_poll_del(events, pollfd); |
b8aa1682 JD |
3300 | |
3301 | ret = close(pollfd); | |
3302 | if (ret < 0) { | |
3303 | ERR("Closing pollfd %d", pollfd); | |
3304 | } | |
3305 | } | |
3306 | ||
7591bab1 MD |
3307 | static void relay_thread_close_connection(struct lttng_poll_event *events, |
3308 | int pollfd, struct relay_connection *conn) | |
9d1bbf21 | 3309 | { |
7591bab1 | 3310 | const char *type_str; |
2a174661 | 3311 | |
7591bab1 MD |
3312 | switch (conn->type) { |
3313 | case RELAY_DATA: | |
3314 | type_str = "Data"; | |
3315 | break; | |
3316 | case RELAY_CONTROL: | |
3317 | type_str = "Control"; | |
3318 | break; | |
3319 | case RELAY_VIEWER_COMMAND: | |
3320 | type_str = "Viewer Command"; | |
3321 | break; | |
3322 | case RELAY_VIEWER_NOTIFICATION: | |
3323 | type_str = "Viewer Notification"; | |
3324 | break; | |
3325 | default: | |
3326 | type_str = "Unknown"; | |
9d1bbf21 | 3327 | } |
7591bab1 MD |
3328 | cleanup_connection_pollfd(events, pollfd); |
3329 | connection_put(conn); | |
3330 | DBG("%s connection closed with %d", type_str, pollfd); | |
b8aa1682 JD |
3331 | } |
3332 | ||
3333 | /* | |
3334 | * This thread does the actual work: it services the relay control and data connections. | |
3335 | */ | |
7591bab1 | 3336 | static void *relay_thread_worker(void *data) |
b8aa1682 | 3337 | { |
beaad64c DG |
3338 | int ret, err = -1, last_seen_data_fd = -1; |
3339 | uint32_t nb_fd; | |
b8aa1682 JD |
3340 | struct lttng_poll_event events; |
3341 | struct lttng_ht *relay_connections_ht; | |
b8aa1682 | 3342 | struct lttng_ht_iter iter; |
90e7d72f | 3343 | struct relay_connection *destroy_conn = NULL; |
b8aa1682 JD |
3344 | |
3345 | DBG("[thread] Relay worker started"); | |
3346 | ||
9d1bbf21 MD |
3347 | rcu_register_thread(); |
3348 | ||
55706a7d MD |
3349 | health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER); |
3350 | ||
9b5e0863 MD |
3351 | if (testpoint(relayd_thread_worker)) { |
3352 | goto error_testpoint; | |
3353 | } | |
3354 | ||
f385ae0a MD |
3355 | health_code_update(); |
3356 | ||
b8aa1682 JD |
3357 | /* table of connections indexed on socket */ |
3358 | relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); | |
095a4ae5 MD |
3359 | if (!relay_connections_ht) { |
3360 | goto relay_connections_ht_error; | |
3361 | } | |
b8aa1682 | 3362 | |
b8aa1682 JD |
3363 | ret = create_thread_poll_set(&events, 2); |
3364 | if (ret < 0) { | |
3365 | goto error_poll_create; | |
3366 | } | |
3367 | ||
58eb9381 | 3368 | ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP); |
b8aa1682 JD |
3369 | if (ret < 0) { |
3370 | goto error; | |
3371 | } | |
3372 | ||
beaad64c | 3373 | restart: |
b8aa1682 | 3374 | while (1) { |
beaad64c DG |
3375 | int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1; |
3376 | ||
f385ae0a MD |
3377 | health_code_update(); |
3378 | ||
b8aa1682 | 3379 | /* Infinite blocking call, waiting for transmission */ |
87c1611d | 3380 | DBG3("Relayd worker thread polling..."); |
f385ae0a | 3381 | health_poll_entry(); |
b8aa1682 | 3382 | ret = lttng_poll_wait(&events, -1); |
f385ae0a | 3383 | health_poll_exit(); |
b8aa1682 JD |
3384 | if (ret < 0) { |
3385 | /* | |
3386 | * Restart interrupted system call. | |
3387 | */ | |
3388 | if (errno == EINTR) { | |
3389 | goto restart; | |
3390 | } | |
3391 | goto error; | |
3392 | } | |
3393 | ||
0d9c5d77 DG |
3394 | nb_fd = ret; |
3395 | ||
beaad64c | 3396 | /* |
7591bab1 MD |
3397 | * Process control. The control connection is |
3398 | * prioritized so we don't starve it with high | |
3399 | * throughput tracing data on the data connection. | |
beaad64c | 3400 | */ |
b8aa1682 JD |
3401 | for (i = 0; i < nb_fd; i++) { |
3402 | /* Fetch once the poll data */ | |
beaad64c DG |
3403 | uint32_t revents = LTTNG_POLL_GETEV(&events, i); |
3404 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
b8aa1682 | 3405 | |
f385ae0a MD |
3406 | health_code_update(); |
3407 | ||
b8aa1682 JD |
3408 | /* Thread quit pipe has been closed. Killing thread. */ |
3409 | ret = check_thread_quit_pipe(pollfd, revents); | |
3410 | if (ret) { | |
095a4ae5 MD |
3411 | err = 0; |
3412 | goto exit; | |
b8aa1682 JD |
3413 | } |
3414 | ||
58eb9381 DG |
3415 | /* Inspect the relay conn pipe for new connection */ |
3416 | if (pollfd == relay_conn_pipe[0]) { | |
03e43155 | 3417 | if (revents & LPOLLIN) { |
90e7d72f JG |
3418 | struct relay_connection *conn; |
3419 | ||
58eb9381 | 3420 | ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn)); |
b8aa1682 JD |
3421 | if (ret < 0) { |
3422 | goto error; | |
3423 | } | |
73039936 FD |
3424 | ret = lttng_poll_add(&events, |
3425 | conn->sock->fd, | |
58eb9381 | 3426 | LPOLLIN | LPOLLRDHUP); |
73039936 FD |
3427 | if (ret) { |
3428 | ERR("Failed to add new connection file descriptor to poll set"); | |
3429 | goto error; | |
3430 | } | |
7591bab1 | 3431 | connection_ht_add(relay_connections_ht, conn); |
58eb9381 | 3432 | DBG("Connection socket %d added", conn->sock->fd); |
03e43155 MD |
3433 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
3434 | ERR("Relay connection pipe error"); | |
3435 | goto error; | |
3436 | } else { | |
3437 | ERR("Unexpected poll events %u for sock %d", revents, pollfd); | |
3438 | goto error; | |
b8aa1682 | 3439 | } |
58eb9381 | 3440 | } else { |
90e7d72f JG |
3441 | struct relay_connection *ctrl_conn; |
3442 | ||
7591bab1 | 3443 | ctrl_conn = connection_get_by_sock(relay_connections_ht, pollfd); |
58eb9381 | 3444 | /* If not found, there is a synchronization issue. */ |
90e7d72f | 3445 | assert(ctrl_conn); |
58eb9381 | 3446 | |
03e43155 MD |
3447 | if (ctrl_conn->type == RELAY_DATA) { |
3448 | if (revents & LPOLLIN) { | |
beaad64c DG |
3449 | /* |
3450 | * Remember the last data fd seen that was not deleted. | |
3451 | * It will be used as the last seen fd if any fd gets | |
3452 | * deleted in this first loop. | |
3453 | */ | |
3454 | last_notdel_data_fd = pollfd; | |
3455 | } | |
03e43155 MD |
3456 | goto put_ctrl_connection; |
3457 | } | |
3458 | assert(ctrl_conn->type == RELAY_CONTROL); | |
3459 | ||
3460 | if (revents & LPOLLIN) { | |
5569b118 JG |
3461 | enum relay_connection_status status; |
3462 | ||
3463 | status = relay_process_control(ctrl_conn); | |
3464 | if (status != RELAY_CONNECTION_STATUS_OK) { | |
fd0f1e3e JR |
3465 | /* |
3466 | * On socket error flag the session as aborted to force | |
3467 | * the cleanup of its streams; otherwise they can leak |
3468 | * during the lifetime of the relayd. |
3469 | * |
3470 | * This prevents situations in which streams can be |
3471 | * left open because an index was received, the |
3472 | * control connection is closed, and the data |
3473 | * connection is closed (uncleanly) before the packet's |
3474 | * data is provided. |
3475 | * | |
3476 | * Since the control connection encountered an error, | |
3477 | * it is okay to be conservative and close the | |
3478 | * session right now as we can't rely on the protocol | |
3479 | * being respected anymore. | |
3480 | */ | |
3481 | if (status == RELAY_CONNECTION_STATUS_ERROR) { | |
3482 | session_abort(ctrl_conn->session); | |
3483 | } | |
3484 | ||
5569b118 | 3485 | /* Clear the connection on error or close. */ |
5312a3ed JG |
3486 | relay_thread_close_connection(&events, |
3487 | pollfd, | |
03e43155 | 3488 | ctrl_conn); |
03e43155 | 3489 | } |
5312a3ed | 3490 | seen_control = 1; |
03e43155 MD |
3491 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
3492 | relay_thread_close_connection(&events, | |
3493 | pollfd, ctrl_conn); | |
3494 | if (last_seen_data_fd == pollfd) { | |
3495 | last_seen_data_fd = last_notdel_data_fd; | |
3496 | } | |
58eb9381 | 3497 | } else { |
03e43155 MD |
3498 | ERR("Unexpected poll events %u for control sock %d", |
3499 | revents, pollfd); | |
3500 | connection_put(ctrl_conn); | |
3501 | goto error; | |
beaad64c | 3502 | } |
03e43155 | 3503 | put_ctrl_connection: |
7591bab1 | 3504 | connection_put(ctrl_conn); |
beaad64c DG |
3505 | } |
3506 | } | |
3507 | ||
3508 | /* | |
3509 | * The last loop handled a control request, go back to poll to make | |
3510 | * sure we prioritise the control socket. | |
3511 | */ | |
3512 | if (seen_control) { | |
3513 | continue; | |
3514 | } | |
3515 | ||
3516 | if (last_seen_data_fd >= 0) { | |
3517 | for (i = 0; i < nb_fd; i++) { | |
3518 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
f385ae0a MD |
3519 | |
3520 | health_code_update(); | |
3521 | ||
beaad64c DG |
3522 | if (last_seen_data_fd == pollfd) { |
3523 | idx = i; | |
3524 | break; | |
3525 | } | |
3526 | } | |
3527 | } | |
3528 | ||
3529 | /* Process data connection. */ | |
3530 | for (i = idx + 1; i < nb_fd; i++) { | |
3531 | /* Fetch the poll data. */ | |
3532 | uint32_t revents = LTTNG_POLL_GETEV(&events, i); | |
3533 | int pollfd = LTTNG_POLL_GETFD(&events, i); | |
90e7d72f | 3534 | struct relay_connection *data_conn; |
beaad64c | 3535 | |
f385ae0a MD |
3536 | health_code_update(); |
3537 | ||
fd20dac9 MD |
3538 | if (!revents) { |
3539 | /* No activity for this FD (poll implementation). */ | |
3540 | continue; | |
3541 | } | |
3542 | ||
beaad64c | 3543 | /* Skip the command pipe. It's handled in the first loop. */ |
58eb9381 | 3544 | if (pollfd == relay_conn_pipe[0]) { |
beaad64c DG |
3545 | continue; |
3546 | } | |
3547 | ||
7591bab1 | 3548 | data_conn = connection_get_by_sock(relay_connections_ht, pollfd); |
90e7d72f | 3549 | if (!data_conn) { |
fd20dac9 | 3550 | /* Skip it; it might have been removed already. */ |
fd20dac9 MD |
3551 | continue; |
3552 | } | |
03e43155 MD |
3553 | if (data_conn->type == RELAY_CONTROL) { |
3554 | goto put_data_connection; | |
3555 | } | |
3556 | assert(data_conn->type == RELAY_DATA); | |
fd20dac9 MD |
3557 | |
3558 | if (revents & LPOLLIN) { | |
5569b118 JG |
3559 | enum relay_connection_status status; |
3560 | ||
3561 | status = relay_process_data(data_conn); | |
3562 | /* Connection closed or error. */ | |
3563 | if (status != RELAY_CONNECTION_STATUS_OK) { | |
fd0f1e3e JR |
3564 | /* |
3565 | * On socket error flag the session as aborted to force | |
3566 | * the cleanup of its streams; otherwise they can leak |
3567 | * during the lifetime of the relayd. |
3568 | * |
3569 | * This prevents situations in which streams can be |
3570 | * left open because an index was received, the |
3571 | * control connection is closed, and the data |
3572 | * connection is closed (uncleanly) before the packet's |
3573 | * data is provided. |
3574 | * | |
3575 | * Since the data connection encountered an error, | |
3576 | * it is okay to be conservative and close the | |
3577 | * session right now as we can't rely on the protocol | |
3578 | * being respected anymore. | |
3579 | */ | |
3580 | if (status == RELAY_CONNECTION_STATUS_ERROR) { | |
3581 | session_abort(data_conn->session); | |
3582 | } | |
7591bab1 | 3583 | relay_thread_close_connection(&events, pollfd, |
03e43155 | 3584 | data_conn); |
fd20dac9 MD |
3585 | /* |
3586 | * Every 'goto restart' sets the last seen fd, but here | |
3587 | * we don't really care since we gracefully continue | |
3588 | * the loop after the connection is deleted. | |
3589 | */ | |
3590 | } else { | |
3591 | /* Keep last seen fd. */ | |
3592 | last_seen_data_fd = pollfd; | |
7591bab1 | 3593 | connection_put(data_conn); |
fd20dac9 | 3594 | goto restart; |
b8aa1682 | 3595 | } |
03e43155 MD |
3596 | } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) { |
3597 | relay_thread_close_connection(&events, pollfd, | |
3598 | data_conn); | |
3599 | } else { | |
3600 | ERR("Unknown poll events %u for data sock %d", | |
3601 | revents, pollfd); | |
b8aa1682 | 3602 | } |
03e43155 | 3603 | put_data_connection: |
7591bab1 | 3604 | connection_put(data_conn); |
b8aa1682 | 3605 | } |
beaad64c | 3606 | last_seen_data_fd = -1; |
b8aa1682 JD |
3607 | } |
3608 | ||
f385ae0a MD |
3609 | /* Normal exit, no error */ |
3610 | ret = 0; | |
3611 | ||
095a4ae5 | 3612 | exit: |
b8aa1682 | 3613 | error: |
71efa8ef | 3614 | /* Cleanup remaining connection object. */ |
9d1bbf21 | 3615 | rcu_read_lock(); |
90e7d72f JG |
3616 | cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter, |
3617 | destroy_conn, | |
58eb9381 | 3618 | sock_n.node) { |
f385ae0a | 3619 | health_code_update(); |
98ba050e | 3620 | |
fd0f1e3e | 3621 | session_abort(destroy_conn->session); |
98ba050e | 3622 | |
7591bab1 MD |
3623 | /* |
3624 | * No need to grab another ref, because we own | |
3625 | * destroy_conn. | |
3626 | */ | |
3627 | relay_thread_close_connection(&events, destroy_conn->sock->fd, | |
3628 | destroy_conn); | |
b8aa1682 | 3629 | } |
94d49140 | 3630 | rcu_read_unlock(); |
7591bab1 MD |
3631 | |
3632 | lttng_poll_clean(&events); | |
7d2f7452 | 3633 | error_poll_create: |
b8aa1682 | 3634 | lttng_ht_destroy(relay_connections_ht); |
095a4ae5 | 3635 | relay_connections_ht_error: |
58eb9381 DG |
3636 | /* Close relay conn pipes */ |
3637 | utils_close_pipe(relay_conn_pipe); | |
095a4ae5 MD |
3638 | if (err) { |
3639 | DBG("Thread exited with error"); | |
3640 | } | |
b8aa1682 | 3641 | DBG("Worker thread cleanup complete"); |
9b5e0863 | 3642 | error_testpoint: |
f385ae0a MD |
3643 | if (err) { |
3644 | health_error(); | |
3645 | ERR("Health error occurred in %s", __func__); | |
3646 | } | |
3647 | health_unregister(health_relayd); | |
9d1bbf21 | 3648 | rcu_unregister_thread(); |
b4aacfdc | 3649 | lttng_relay_stop_threads(); |
b8aa1682 JD |
3650 | return NULL; |
3651 | } | |
3652 | ||
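The worker thread above walks each poll result set twice: control sockets are serviced first and, if any control activity was seen, the loop re-polls before touching data sockets, so command traffic is never starved by bulk trace data; last_seen_data_fd lets the data pass resume where it left off. A condensed sketch of that two-pass shape using plain poll(2), with stubbed-out classification and handlers standing in for the relayd's connection lookups:

    #include <poll.h>
    #include <stdbool.h>

    /* Stubs standing in for the relayd's connection table and handlers. */
    static bool fd_is_control(int fd) { return (fd % 2) == 0; }  /* placeholder */
    static void handle_control(int fd) { (void) fd; }
    static void handle_data(int fd) { (void) fd; }

    static void worker_loop(struct pollfd *fds, nfds_t nfds)
    {
            for (;;) {
                    nfds_t i;
                    bool seen_control = false;

                    if (poll(fds, nfds, -1) < 0) {
                            continue;               /* EINTR handling elided */
                    }

                    /* Pass 1: control connections only. */
                    for (i = 0; i < nfds; i++) {
                            if ((fds[i].revents & POLLIN) && fd_is_control(fds[i].fd)) {
                                    handle_control(fds[i].fd);
                                    seen_control = true;
                            }
                    }
                    if (seen_control) {
                            continue;               /* re-poll before serving data */
                    }

                    /* Pass 2: data connections. */
                    for (i = 0; i < nfds; i++) {
                            if ((fds[i].revents & POLLIN) && !fd_is_control(fds[i].fd)) {
                                    handle_data(fds[i].fd);
                            }
                    }
            }
    }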
3653 | /* | |
3654 | * Create the relay connection pipe used to wake the worker thread. | |
3655 | * Closed in cleanup(). | |
3656 | */ | |
58eb9381 | 3657 | static int create_relay_conn_pipe(void) |
b8aa1682 | 3658 | { |
a02de639 | 3659 | int ret; |
b8aa1682 | 3660 | |
58eb9381 | 3661 | ret = utils_create_pipe_cloexec(relay_conn_pipe); |
b8aa1682 | 3662 | |
b8aa1682 JD |
3663 | return ret; |
3664 | } | |
3665 | ||
3666 | /* | |
3667 | * main | |
3668 | */ | |
3669 | int main(int argc, char **argv) | |
3670 | { | |
178a0557 | 3671 | int ret = 0, retval = 0; |
b8aa1682 JD |
3672 | void *status; |
3673 | ||
b8aa1682 JD |
3674 | /* Parse arguments */ |
3675 | progname = argv[0]; | |
178a0557 MD |
3676 | if (set_options(argc, argv)) { |
3677 | retval = -1; | |
3678 | goto exit_options; | |
b8aa1682 JD |
3679 | } |
3680 | ||
178a0557 MD |
3681 | if (set_signal_handler()) { |
3682 | retval = -1; | |
3683 | goto exit_options; | |
b8aa1682 JD |
3684 | } |
3685 | ||
a3bc3918 JR |
3686 | relayd_config_log(); |
3687 | ||
3688 | if (opt_print_version) { | |
3689 | print_version(); | |
3690 | retval = 0; | |
3691 | goto exit_options; | |
3692 | } | |
3693 | ||
4d513a50 DG |
3694 | /* Try to create directory if -o, --output is specified. */ |
3695 | if (opt_output_path) { | |
994fa64f DG |
3696 | if (*opt_output_path != '/') { |
3697 | ERR("Please specify an absolute path for -o, --output PATH"); | |
178a0557 MD |
3698 | retval = -1; |
3699 | goto exit_options; | |
994fa64f DG |
3700 | } |
3701 | ||
d77dded2 JG |
3702 | ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG, |
3703 | -1, -1); | |
4d513a50 DG |
3704 | if (ret < 0) { |
3705 | ERR("Unable to create %s", opt_output_path); | |
178a0557 MD |
3706 | retval = -1; |
3707 | goto exit_options; | |
4d513a50 DG |
3708 | } |
3709 | } | |
3710 | ||
b8aa1682 | 3711 | /* Daemonize */ |
b5218ffb | 3712 | if (opt_daemon || opt_background) { |
3fd27398 MD |
3713 | int i; |
3714 | ||
3715 | ret = lttng_daemonize(&child_ppid, &recv_child_signal, | |
3716 | !opt_background); | |
b8aa1682 | 3717 | if (ret < 0) { |
178a0557 MD |
3718 | retval = -1; |
3719 | goto exit_options; | |
b8aa1682 | 3720 | } |
3fd27398 MD |
3721 | |
3722 | /* | |
3723 | * We are in the child. Make sure all other file | |
3724 | * descriptors are closed, in case we are called with | |
3725 | * more opened file descriptors than the standard ones. | |
3726 | */ | |
3727 | for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) { | |
3728 | (void) close(i); | |
3729 | } | |
3730 | } | |
3731 | ||
23c8ff50 JG |
3732 | sessiond_trace_chunk_registry = sessiond_trace_chunk_registry_create(); |
3733 | if (!sessiond_trace_chunk_registry) { | |
3734 | ERR("Failed to initialize session daemon trace chunk registry"); | |
3735 | retval = -1; | |
3736 | goto exit_sessiond_trace_chunk_registry; | |
3737 | } | |
3738 | ||
178a0557 MD |
3739 | /* Initialize thread health monitoring */ |
3740 | health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES); | |
3741 | if (!health_relayd) { | |
3742 | PERROR("health_app_create error"); | |
3743 | retval = -1; | |
3744 | goto exit_health_app_create; | |
3745 | } | |
3746 | ||
3fd27398 | 3747 | /* Create thread quit pipe */ |
178a0557 MD |
3748 | if (init_thread_quit_pipe()) { |
3749 | retval = -1; | |
3750 | goto exit_init_data; | |
b8aa1682 JD |
3751 | } |
3752 | ||
b8aa1682 | 3753 | /* Setup the relay connection pipe. */ |
178a0557 MD |
3754 | if (create_relay_conn_pipe()) { |
3755 | retval = -1; | |
3756 | goto exit_init_data; | |
b8aa1682 JD |
3757 | } |
3758 | ||
3759 | /* Init relay connection queue. */ | |
8bdee6e2 | 3760 | cds_wfcq_init(&relay_conn_queue.head, &relay_conn_queue.tail); |
b8aa1682 | 3761 | |
554831e7 MD |
3762 | /* Initialize communication library */ |
3763 | lttcomm_init(); | |
87e45c13 | 3764 | lttcomm_inet_init(); |
554831e7 | 3765 | |
d3e2ba59 | 3766 | /* tables of sessions indexed by session ID */ |
7591bab1 MD |
3767 | sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
3768 | if (!sessions_ht) { | |
178a0557 MD |
3769 | retval = -1; |
3770 | goto exit_init_data; | |
d3e2ba59 JD |
3771 | } |
3772 | ||
3773 | /* tables of streams indexed by stream ID */ | |
2a174661 | 3774 | relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
d3e2ba59 | 3775 | if (!relay_streams_ht) { |
178a0557 MD |
3776 | retval = -1; |
3777 | goto exit_init_data; | |
d3e2ba59 JD |
3778 | } |
3779 | ||
3780 | /* tables of viewer streams indexed by stream ID */ | |
92c6ca54 DG |
3781 | viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); |
3782 | if (!viewer_streams_ht) { | |
178a0557 MD |
3783 | retval = -1; |
3784 | goto exit_init_data; | |
55706a7d MD |
3785 | } |
3786 | ||
65931c8b | 3787 | ret = utils_create_pipe(health_quit_pipe); |
178a0557 MD |
3788 | if (ret) { |
3789 | retval = -1; | |
3790 | goto exit_health_quit_pipe; | |
65931c8b MD |
3791 | } |
3792 | ||
3793 | /* Create thread to manage the client socket */ | |
1a1a34b4 | 3794 | ret = pthread_create(&health_thread, default_pthread_attr(), |
65931c8b | 3795 | thread_manage_health, (void *) NULL); |
178a0557 MD |
3796 | if (ret) { |
3797 | errno = ret; | |
65931c8b | 3798 | PERROR("pthread_create health"); |
178a0557 MD |
3799 | retval = -1; |
3800 | goto exit_health_thread; | |
65931c8b MD |
3801 | } |
3802 | ||
b8aa1682 | 3803 | /* Setup the dispatcher thread */ |
1a1a34b4 | 3804 | ret = pthread_create(&dispatcher_thread, default_pthread_attr(), |
b8aa1682 | 3805 | relay_thread_dispatcher, (void *) NULL); |
178a0557 MD |
3806 | if (ret) { |
3807 | errno = ret; | |
b8aa1682 | 3808 | PERROR("pthread_create dispatcher"); |
178a0557 MD |
3809 | retval = -1; |
3810 | goto exit_dispatcher_thread; | |
b8aa1682 JD |
3811 | } |
3812 | ||
3813 | /* Setup the worker thread */ | |
1a1a34b4 | 3814 | ret = pthread_create(&worker_thread, default_pthread_attr(), |
7591bab1 | 3815 | relay_thread_worker, NULL); |
178a0557 MD |
3816 | if (ret) { |
3817 | errno = ret; | |
b8aa1682 | 3818 | PERROR("pthread_create worker"); |
178a0557 MD |
3819 | retval = -1; |
3820 | goto exit_worker_thread; | |
b8aa1682 JD |
3821 | } |
3822 | ||
3823 | /* Setup the listener thread */ | |
1a1a34b4 | 3824 | ret = pthread_create(&listener_thread, default_pthread_attr(), |
b8aa1682 | 3825 | relay_thread_listener, (void *) NULL); |
178a0557 MD |
3826 | if (ret) { |
3827 | errno = ret; | |
b8aa1682 | 3828 | PERROR("pthread_create listener"); |
178a0557 MD |
3829 | retval = -1; |
3830 | goto exit_listener_thread; | |
b8aa1682 JD |
3831 | } |
3832 | ||
7591bab1 | 3833 | ret = relayd_live_create(live_uri); |
178a0557 | 3834 | if (ret) { |
d3e2ba59 | 3835 | ERR("Starting live viewer threads"); |
178a0557 | 3836 | retval = -1; |
50138f51 | 3837 | goto exit_live; |
d3e2ba59 JD |
3838 | } |
3839 | ||
178a0557 MD |
3840 | /* |
3841 | * This is where we start awaiting program completion (e.g. through | |
3842 | * a signal that asks threads to tear down). | |
3843 | */ | |
3844 | ||
3845 | ret = relayd_live_join(); | |
3846 | if (ret) { | |
3847 | retval = -1; | |
3848 | } | |
50138f51 | 3849 | exit_live: |
178a0557 | 3850 | |
b8aa1682 | 3851 | ret = pthread_join(listener_thread, &status); |
178a0557 MD |
3852 | if (ret) { |
3853 | errno = ret; | |
3854 | PERROR("pthread_join listener_thread"); | |
3855 | retval = -1; | |
b8aa1682 JD |
3856 | } |
3857 | ||
178a0557 | 3858 | exit_listener_thread: |
b8aa1682 | 3859 | ret = pthread_join(worker_thread, &status); |
178a0557 MD |
3860 | if (ret) { |
3861 | errno = ret; | |
3862 | PERROR("pthread_join worker_thread"); | |
3863 | retval = -1; | |
b8aa1682 JD |
3864 | } |
3865 | ||
178a0557 | 3866 | exit_worker_thread: |
b8aa1682 | 3867 | ret = pthread_join(dispatcher_thread, &status); |
178a0557 MD |
3868 | if (ret) { |
3869 | errno = ret; | |
3870 | PERROR("pthread_join dispatcher_thread"); | |
3871 | retval = -1; | |
b8aa1682 | 3872 | } |
178a0557 | 3873 | exit_dispatcher_thread: |
42415026 | 3874 | |
65931c8b | 3875 | ret = pthread_join(health_thread, &status); |
178a0557 MD |
3876 | if (ret) { |
3877 | errno = ret; | |
3878 | PERROR("pthread_join health_thread"); | |
3879 | retval = -1; | |
65931c8b | 3880 | } |
178a0557 | 3881 | exit_health_thread: |
65931c8b | 3882 | |
65931c8b | 3883 | utils_close_pipe(health_quit_pipe); |
178a0557 | 3884 | exit_health_quit_pipe: |
65931c8b | 3885 | |
178a0557 | 3886 | exit_init_data: |
55706a7d | 3887 | health_app_destroy(health_relayd); |
23c8ff50 | 3888 | sessiond_trace_chunk_registry_destroy(sessiond_trace_chunk_registry); |
55706a7d | 3889 | exit_health_app_create: |
23c8ff50 | 3890 | exit_sessiond_trace_chunk_registry: |
178a0557 | 3891 | exit_options: |
4d62fbf8 MD |
3892 | /* |
3893 | * Wait for all pending call_rcu work to complete before tearing | |
3894 | * down data structures. call_rcu worker may be trying to | |
3895 | * perform lookups in those structures. | |
3896 | */ | |
3897 | rcu_barrier(); | |
7591bab1 MD |
3898 | relayd_cleanup(); |
3899 | ||
3900 | /* Ensure all prior call_rcu are done. */ | |
3901 | rcu_barrier(); | |
d3e2ba59 | 3902 | |
178a0557 | 3903 | if (!retval) { |
b8aa1682 | 3904 | exit(EXIT_SUCCESS); |
178a0557 MD |
3905 | } else { |
3906 | exit(EXIT_FAILURE); | |
b8aa1682 | 3907 | } |
b8aa1682 | 3908 | } |
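The rcu_barrier() calls bracketing relayd_cleanup() ensure that every callback queued with call_rcu() has executed before, and again after, the structures those callbacks may still look up are torn down. A minimal sketch of that ordering with liburcu (assuming the default urcu flavour; exact headers can vary between versions):

    #include <stdlib.h>
    #include <urcu.h>               /* rcu_*, call_rcu(), rcu_barrier() */
    #include <urcu/compiler.h>      /* caa_container_of() */

    struct node {
            int value;
            struct rcu_head rcu_head;
    };

    static void free_node(struct rcu_head *head)
    {
            free(caa_container_of(head, struct node, rcu_head));
    }

    int main(void)
    {
            struct node *n = calloc(1, sizeof(*n));

            if (!n) {
                    return 1;
            }
            rcu_register_thread();

            /* Defer reclamation until all pre-existing RCU readers are done. */
            call_rcu(&n->rcu_head, free_node);

            /*
             * Before tearing down anything the callbacks might still access,
             * wait for every queued call_rcu callback to complete.
             */
            rcu_barrier();

            rcu_unregister_thread();
            return 0;
    }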