Tests: Add test to check shared-memory FD leaks after relayd dies
[lttng-tools.git] / src / bin / lttng-sessiond / agent-thread.cpp
1 /*
2 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 *
6 */
7
8 #define _LGPL_SOURCE
9
10 #include <common/common.hpp>
11 #include <common/sessiond-comm/sessiond-comm.hpp>
12 #include <common/uri.hpp>
13 #include <common/utils.hpp>
14
15 #include <common/compat/endian.hpp>
16
17 #include "fd-limit.hpp"
18 #include "agent-thread.hpp"
19 #include "agent.hpp"
20 #include "lttng-sessiond.hpp"
21 #include "session.hpp"
22 #include "utils.hpp"
23 #include "thread.hpp"
24
namespace {
/* Synchronization state shared between the launcher and the agent thread. */
struct thread_notifiers {
	/* Written to in order to ask the agent thread to quit. */
	struct lttng_pipe *quit_pipe;
	/* Posted by the agent thread once it has finished initializing. */
	sem_t ready;
};

/* Identity reported by an agent application during registration. */
struct agent_app_id {
	pid_t pid;
	enum lttng_domain_type domain;
};

/* Agent communication protocol version, as received on the wire. */
struct agent_protocol_version {
	unsigned int major, minor;
};

/*
 * Tri-state: -1 until the agent thread has attempted to initialize,
 * then 1 (agent tracing available) or 0 (unavailable).
 */
int agent_tracing_enabled = -1;

/*
 * Note that there is no port here. It's set after this URI is parsed so we
 * can let the user define a custom one. However, localhost is ALWAYS the
 * default listening address.
 */
const char *default_reg_uri = "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
} /* namespace */
49
/*
 * Update agent application using the given socket. This is done just after
 * registration was successful.
 *
 * This will acquire the various sessions' lock (one session at a time); none
 * of those must be held by the caller.
 * The caller must hold the session list lock.
 */
static void update_agent_app(const struct agent_app *app)
{
	struct ltt_session *session, *stmp;
	struct ltt_session_list *list;
	struct agent *trigger_agent;
	struct lttng_ht_iter iter;

	list = session_get_list();
	LTTNG_ASSERT(list);

	/* Push the app's configuration to every session using its domain. */
	cds_list_for_each_entry_safe(session, stmp, &list->head, list) {
		if (!session_get(session)) {
			/* Session is being destroyed; skip it. */
			continue;
		}

		session_lock(session);
		if (session->ust_session) {
			const struct agent *agt;

			rcu_read_lock();
			agt = trace_ust_find_agent(session->ust_session, app->domain);
			if (agt) {
				agent_update(agt, app);
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}

	rcu_read_lock();
	/*
	 * We are protected against the addition of new events by the session
	 * list lock being held.
	 */
	cds_lfht_for_each_entry(the_trigger_agents_ht_by_domain->ht,
			&iter.iter, trigger_agent, node.node) {
		agent_update(trigger_agent, app);
	}
	rcu_read_unlock();
}
99
100 /*
101 * Create and init socket from uri.
102 */
103 static struct lttcomm_sock *init_tcp_socket(void)
104 {
105 int ret;
106 struct lttng_uri *uri = NULL;
107 struct lttcomm_sock *sock = NULL;
108 unsigned int port;
109 bool bind_succeeded = false;
110
111 /*
112 * This should never fail since the URI is hardcoded and the port is set
113 * before this thread is launched.
114 */
115 ret = uri_parse(default_reg_uri, &uri);
116 LTTNG_ASSERT(ret);
117 LTTNG_ASSERT(the_config.agent_tcp_port.begin > 0);
118 uri->port = the_config.agent_tcp_port.begin;
119
120 sock = lttcomm_alloc_sock_from_uri(uri);
121 uri_free(uri);
122 if (sock == NULL) {
123 ERR("agent allocating TCP socket");
124 goto error;
125 }
126
127 ret = lttcomm_create_sock(sock);
128 if (ret < 0) {
129 goto error;
130 }
131
132 for (port = the_config.agent_tcp_port.begin;
133 port <= the_config.agent_tcp_port.end; port++) {
134 ret = lttcomm_sock_set_port(sock, (uint16_t) port);
135 if (ret) {
136 ERR("Failed to set port %u on socket",
137 port);
138 goto error;
139 }
140 DBG3("Trying to bind on port %u", port);
141 ret = sock->ops->bind(sock);
142 if (!ret) {
143 bind_succeeded = true;
144 break;
145 }
146
147 if (errno == EADDRINUSE) {
148 DBG("Failed to bind to port %u since it is already in use",
149 port);
150 } else {
151 PERROR("Failed to bind to port %u", port);
152 goto error;
153 }
154 }
155
156 if (!bind_succeeded) {
157 if (the_config.agent_tcp_port.begin ==
158 the_config.agent_tcp_port.end) {
159 WARN("Another process is already using the agent port %i. "
160 "Agent support will be deactivated.",
161 the_config.agent_tcp_port.begin);
162 goto error;
163 } else {
164 WARN("All ports in the range [%i, %i] are already in use. "
165 "Agent support will be deactivated.",
166 the_config.agent_tcp_port.begin,
167 the_config.agent_tcp_port.end);
168 goto error;
169 }
170 }
171
172 ret = sock->ops->listen(sock, -1);
173 if (ret < 0) {
174 goto error;
175 }
176
177 DBG("Listening on TCP port %u and socket %d",
178 port, sock->fd);
179
180 return sock;
181
182 error:
183 if (sock) {
184 lttcomm_destroy_sock(sock);
185 }
186 return NULL;
187 }
188
189 /*
190 * Close and destroy the given TCP socket.
191 */
192 static void destroy_tcp_socket(struct lttcomm_sock *sock)
193 {
194 int ret;
195 uint16_t port;
196
197 LTTNG_ASSERT(sock);
198
199 ret = lttcomm_sock_get_port(sock, &port);
200 if (ret) {
201 ERR("Failed to get port of agent TCP socket");
202 port = 0;
203 }
204
205 DBG3("Destroy TCP socket on port %" PRIu16,
206 port);
207
208 /* This will return gracefully if fd is invalid. */
209 sock->ops->close(sock);
210 lttcomm_destroy_sock(sock);
211 }
212
213 static const char *domain_type_str(enum lttng_domain_type domain_type)
214 {
215 switch (domain_type) {
216 case LTTNG_DOMAIN_NONE:
217 return "none";
218 case LTTNG_DOMAIN_KERNEL:
219 return "kernel";
220 case LTTNG_DOMAIN_UST:
221 return "ust";
222 case LTTNG_DOMAIN_JUL:
223 return "jul";
224 case LTTNG_DOMAIN_LOG4J:
225 return "log4j";
226 case LTTNG_DOMAIN_PYTHON:
227 return "python";
228 default:
229 return "unknown";
230 }
231 }
232
233 static bool is_agent_protocol_version_supported(
234 const struct agent_protocol_version *version)
235 {
236 const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
237 version->minor == AGENT_MINOR_VERSION;
238
239 if (!is_supported) {
240 WARN("Refusing agent connection: unsupported protocol version %ui.%ui, expected %i.%i",
241 version->major, version->minor,
242 AGENT_MAJOR_VERSION, AGENT_MINOR_VERSION);
243 }
244
245 return is_supported;
246 }
247
248 /*
249 * Handle a new agent connection on the registration socket.
250 *
251 * Returns 0 on success, or else a negative errno value.
252 * On success, the resulting socket is returned through `agent_app_socket`
253 * and the application's reported id is updated through `agent_app_id`.
254 */
255 static int accept_agent_connection(
256 struct lttcomm_sock *reg_sock,
257 struct agent_app_id *agent_app_id,
258 struct lttcomm_sock **agent_app_socket)
259 {
260 int ret;
261 struct agent_protocol_version agent_version;
262 ssize_t size;
263 struct agent_register_msg msg;
264 struct lttcomm_sock *new_sock;
265
266 LTTNG_ASSERT(reg_sock);
267
268 new_sock = reg_sock->ops->accept(reg_sock);
269 if (!new_sock) {
270 ret = -ENOTCONN;
271 goto end;
272 }
273
274 size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
275 if (size < sizeof(msg)) {
276 if (size < 0) {
277 PERROR("Failed to register new agent application");
278 } else if (size != 0) {
279 ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
280 sizeof(msg), size);
281 } else {
282 DBG("Failed to register new agent application: connection closed");
283 }
284 ret = -EINVAL;
285 goto error_close_socket;
286 }
287
288 agent_version = (struct agent_protocol_version) {
289 be32toh(msg.major_version),
290 be32toh(msg.minor_version),
291 };
292
293 /* Test communication protocol version of the registering agent. */
294 if (!is_agent_protocol_version_supported(&agent_version)) {
295 ret = -EINVAL;
296 goto error_close_socket;
297 }
298
299 *agent_app_id = (struct agent_app_id) {
300 .pid = (pid_t) be32toh(msg.pid),
301 .domain = (lttng_domain_type) be32toh(msg.domain),
302 };
303
304 DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
305 (long) agent_app_id->pid,
306 domain_type_str(agent_app_id->domain), new_sock->fd);
307
308 *agent_app_socket = new_sock;
309 new_sock = NULL;
310 ret = 0;
311 goto end;
312
313 error_close_socket:
314 new_sock->ops->close(new_sock);
315 lttcomm_destroy_sock(new_sock);
316 end:
317 return ret;
318 }
319
320 bool agent_tracing_is_enabled(void)
321 {
322 int enabled;
323
324 enabled = uatomic_read(&agent_tracing_enabled);
325 LTTNG_ASSERT(enabled != -1);
326 return enabled == 1;
327 }
328
329 /*
330 * Write agent TCP port using the rundir.
331 */
332 static int write_agent_port(uint16_t port)
333 {
334 return utils_create_pid_file(
335 (pid_t) port, the_config.agent_port_file_path.value);
336 }
337
338 static
339 void mark_thread_as_ready(struct thread_notifiers *notifiers)
340 {
341 DBG("Marking agent management thread as ready");
342 sem_post(&notifiers->ready);
343 }
344
345 static
346 void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
347 {
348 DBG("Waiting for agent management thread to be ready");
349 sem_wait(&notifiers->ready);
350 DBG("Agent management thread is ready");
351 }
352
/*
 * Agent management thread: accepts agent application registrations on the
 * TCP registration socket, pushes the current tracing configuration to each
 * new application, and reaps application sockets on hang-up.
 */
static void *thread_agent_management(void *data)
{
	int i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *reg_sock;
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(
			notifiers->quit_pipe);

	DBG("Manage agent application registration.");

	rcu_register_thread();
	rcu_thread_online();

	/* Agent initialization call MUST be called before starting the thread. */
	LTTNG_ASSERT(the_agent_apps_ht_by_sock);

	/* Create pollset with size 2, quit pipe and registration socket. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, quit_pipe_read_fd,
			LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error_tcp_socket;
	}

	reg_sock = init_tcp_socket();
	if (reg_sock) {
		uint16_t port;

		ret = lttcomm_sock_get_port(reg_sock, &port);
		LTTNG_ASSERT(ret == 0);

		/* Advertise the chosen port so agents can find it. */
		ret = write_agent_port(port);
		if (ret) {
			ERR("Failed to create agent port file: agent tracing will be unavailable");
			/* Don't prevent the launch of the sessiond on error. */
			mark_thread_as_ready(notifiers);
			goto error;
		}
	} else {
		/* Don't prevent the launch of the sessiond on error. */
		mark_thread_as_ready(notifiers);
		goto error_tcp_socket;
	}

	/*
	 * Signal that the agent thread is ready. The command thread
	 * may start to query whether or not agent tracing is enabled.
	 */
	uatomic_set(&agent_tracing_enabled, 1);
	mark_thread_as_ready(notifiers);

	/* Add TCP socket to the poll set. */
	ret = lttng_poll_add(&events, reg_sock->fd,
			LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG3("Manage agent polling");

		/* Inifinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		DBG3("Manage agent return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;
		DBG3("%d fd ready", nb_fd);

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			if (pollfd == quit_pipe_read_fd) {
				goto exit;
			}

			/* Activity on the registration socket. */
			if (revents & LPOLLIN) {
				struct agent_app_id new_app_id;
				struct agent_app *new_app = NULL;
				struct lttcomm_sock *new_app_socket;
				int new_app_socket_fd;

				/* Only the registration socket is polled for LPOLLIN. */
				LTTNG_ASSERT(pollfd == reg_sock->fd);

				ret = accept_agent_connection(
						reg_sock, &new_app_id, &new_app_socket);
				if (ret < 0) {
					/* Errors are already logged. */
					continue;
				}

				/*
				 * new_app_socket's ownership has been
				 * transferred to the new agent app.
				 */
				new_app = agent_create_app(new_app_id.pid,
						new_app_id.domain,
						new_app_socket);
				if (!new_app) {
					new_app_socket->ops->close(
							new_app_socket);
					continue;
				}
				/*
				 * Cache the fd: new_app now owns the socket and may
				 * destroy it, so the pointer must not be reused.
				 */
				new_app_socket_fd = new_app_socket->fd;
				new_app_socket = NULL;

				/*
				 * Since this is a command socket (write then
				 * read), only add poll error event to only
				 * detect shutdown.
				 */
				ret = lttng_poll_add(&events, new_app_socket_fd,
						LPOLLERR | LPOLLHUP | LPOLLRDHUP);
				if (ret < 0) {
					agent_destroy_app(new_app);
					continue;
				}

				/*
				 * Prevent sessions from being modified while
				 * the agent application's configuration is
				 * updated.
				 */
				session_lock_list();

				/*
				 * Update the newly registered applications's
				 * configuration.
				 */
				update_agent_app(new_app);

				ret = agent_send_registration_done(new_app);
				if (ret < 0) {
					agent_destroy_app(new_app);
					/* Removing from the poll set. */
					ret = lttng_poll_del(&events,
							new_app_socket_fd);
					if (ret < 0) {
						session_unlock_list();
						goto error;
					}
					continue;
				}

				/* Publish the new agent app. */
				agent_add_app(new_app);

				session_unlock_list();
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				/* Removing from the poll set */
				ret = lttng_poll_del(&events, pollfd);
				if (ret < 0) {
					goto error;
				}
				/* Agent application went away; tear its state down. */
				agent_destroy_app_by_sock(pollfd);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
	/* Whatever happens, try to delete it and exit. */
	(void) lttng_poll_del(&events, reg_sock->fd);
error:
	destroy_tcp_socket(reg_sock);
error_tcp_socket:
	lttng_poll_clean(&events);
error_poll_create:
	/* Queries now report agent tracing as unavailable. */
	uatomic_set(&agent_tracing_enabled, 0);
	DBG("Cleaning up and stopping.");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
551
552 static bool shutdown_agent_management_thread(void *data)
553 {
554 struct thread_notifiers *notifiers = (thread_notifiers *) data;
555 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
556
557 return notify_thread_pipe(write_fd) == 1;
558 }
559
560 static void cleanup_agent_management_thread(void *data)
561 {
562 struct thread_notifiers *notifiers = (thread_notifiers *) data;
563
564 lttng_pipe_destroy(notifiers->quit_pipe);
565 sem_destroy(&notifiers->ready);
566 free(notifiers);
567 }
568
569 bool launch_agent_management_thread(void)
570 {
571 struct thread_notifiers *notifiers;
572 struct lttng_thread *thread;
573
574 notifiers = zmalloc<thread_notifiers>();
575 if (!notifiers) {
576 goto error_alloc;
577 }
578
579 sem_init(&notifiers->ready, 0, 0);
580 notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
581 if (!notifiers->quit_pipe) {
582 goto error;
583 }
584 thread = lttng_thread_create("Agent management",
585 thread_agent_management,
586 shutdown_agent_management_thread,
587 cleanup_agent_management_thread,
588 notifiers);
589 if (!thread) {
590 goto error;
591 }
592 wait_until_thread_is_ready(notifiers);
593 lttng_thread_put(thread);
594 return true;
595 error:
596 cleanup_agent_management_thread(notifiers);
597 error_alloc:
598 return false;
599 }
This page took 0.039905 seconds and 4 git commands to generate.