src/bin/lttng-sessiond/agent-thread.cpp
/*
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE

#include "agent-thread.hpp"
#include "agent.hpp"
#include "fd-limit.hpp"
#include "lttng-sessiond.hpp"
#include "session.hpp"
#include "thread.hpp"
#include "utils.hpp"

#include <common/common.hpp>
#include <common/compat/endian.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/uri.hpp>
#include <common/utils.hpp>
namespace {
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	sem_t ready;
};

struct agent_app_id {
	pid_t pid;
	enum lttng_domain_type domain;
};

struct agent_protocol_version {
	unsigned int major, minor;
};

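/*
 * Agent tracing availability flag: -1 until the agent management thread has
 * tried to set up its registration socket, then 1 on success or 0 on failure
 * or teardown. Only accessed through uatomic_read()/uatomic_set().
 */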
int agent_tracing_enabled = -1;

/*
 * Note that there is no port here. It is set after this URI is parsed so
 * that the user can define a custom one. However, localhost is ALWAYS the
 * default listening address.
 */
const char *default_reg_uri = "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
} /* namespace */

/*
 * Update an agent application using the given socket. This is done just
 * after its registration was successful.
 *
 * This will acquire the individual session locks; none must be held by the
 * caller. The caller must, however, hold the session list lock.
 */
static void update_agent_app(const struct agent_app *app)
{
	struct ltt_session *session, *stmp;
	struct ltt_session_list *list;
	struct agent *trigger_agent;
	struct lttng_ht_iter iter;

	list = session_get_list();
	LTTNG_ASSERT(list);

	cds_list_for_each_entry_safe (session, stmp, &list->head, list) {
		if (!session_get(session)) {
			continue;
		}

		session_lock(session);
		if (session->ust_session) {
			const struct agent *agt;

			rcu_read_lock();
			agt = trace_ust_find_agent(session->ust_session, app->domain);
			if (agt) {
				agent_update(agt, app);
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}

	rcu_read_lock();
	/*
	 * We are protected against the addition of new events by the session
	 * list lock being held.
	 */
	cds_lfht_for_each_entry (
		the_trigger_agents_ht_by_domain->ht, &iter.iter, trigger_agent, node.node) {
		agent_update(trigger_agent, app);
	}
	rcu_read_unlock();
}

/*
 * Create and init socket from uri.
 */
static struct lttcomm_sock *init_tcp_socket(void)
{
	int ret;
	struct lttng_uri *uri = NULL;
	struct lttcomm_sock *sock = NULL;
	unsigned int port;
	bool bind_succeeded = false;

	/*
	 * This should never fail since the URI is hardcoded and the port is set
	 * before this thread is launched.
	 */
	ret = uri_parse(default_reg_uri, &uri);
	LTTNG_ASSERT(ret);
	LTTNG_ASSERT(the_config.agent_tcp_port.begin > 0);
	uri->port = the_config.agent_tcp_port.begin;

	sock = lttcomm_alloc_sock_from_uri(uri);
	uri_free(uri);
	if (sock == NULL) {
		ERR("agent allocating TCP socket");
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto error;
	}

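	/*
	 * Try every port of the configured agent port range and bind to the
	 * first free one; only EADDRINUSE is treated as a soft error that
	 * allows moving on to the next port.
	 */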
	for (port = the_config.agent_tcp_port.begin; port <= the_config.agent_tcp_port.end;
	     port++) {
		ret = lttcomm_sock_set_port(sock, (uint16_t) port);
		if (ret) {
			ERR("Failed to set port %u on socket", port);
			goto error;
		}
		DBG3("Trying to bind on port %u", port);
		ret = sock->ops->bind(sock);
		if (!ret) {
			bind_succeeded = true;
			break;
		}

		if (errno == EADDRINUSE) {
			DBG("Failed to bind to port %u since it is already in use", port);
		} else {
			PERROR("Failed to bind to port %u", port);
			goto error;
		}
	}

	if (!bind_succeeded) {
		if (the_config.agent_tcp_port.begin == the_config.agent_tcp_port.end) {
			WARN("Another process is already using the agent port %i. "
			     "Agent support will be deactivated.",
			     the_config.agent_tcp_port.begin);
			goto error;
		} else {
			WARN("All ports in the range [%i, %i] are already in use. "
			     "Agent support will be deactivated.",
			     the_config.agent_tcp_port.begin,
			     the_config.agent_tcp_port.end);
			goto error;
		}
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	DBG("Listening on TCP port %u and socket %d", port, sock->fd);

	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	return NULL;
}

/*
 * Close and destroy the given TCP socket.
 */
static void destroy_tcp_socket(struct lttcomm_sock *sock)
{
	int ret;
	uint16_t port;

	LTTNG_ASSERT(sock);

	ret = lttcomm_sock_get_port(sock, &port);
	if (ret) {
		ERR("Failed to get port of agent TCP socket");
		port = 0;
	}

	DBG3("Destroy TCP socket on port %" PRIu16, port);

	/* This will return gracefully if fd is invalid. */
	sock->ops->close(sock);
	lttcomm_destroy_sock(sock);
}

static const char *domain_type_str(enum lttng_domain_type domain_type)
{
	switch (domain_type) {
	case LTTNG_DOMAIN_NONE:
		return "none";
	case LTTNG_DOMAIN_KERNEL:
		return "kernel";
	case LTTNG_DOMAIN_UST:
		return "ust";
	case LTTNG_DOMAIN_JUL:
		return "jul";
	case LTTNG_DOMAIN_LOG4J:
		return "log4j";
	case LTTNG_DOMAIN_PYTHON:
		return "python";
	default:
		return "unknown";
	}
}

static bool is_agent_protocol_version_supported(const struct agent_protocol_version *version)
{
	const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
		version->minor == AGENT_MINOR_VERSION;

	if (!is_supported) {
		WARN("Refusing agent connection: unsupported protocol version %u.%u, expected %i.%i",
		     version->major,
		     version->minor,
		     AGENT_MAJOR_VERSION,
		     AGENT_MINOR_VERSION);
	}

	return is_supported;
}

/*
 * Handle a new agent connection on the registration socket.
 *
 * Returns 0 on success, or else a negative errno value.
 * On success, the resulting socket is returned through `agent_app_socket`
 * and the application's reported id is updated through `agent_app_id`.
 */
static int accept_agent_connection(struct lttcomm_sock *reg_sock,
				   struct agent_app_id *agent_app_id,
				   struct lttcomm_sock **agent_app_socket)
{
	int ret;
	struct agent_protocol_version agent_version;
	ssize_t size;
	struct agent_register_msg msg;
	struct lttcomm_sock *new_sock;

	LTTNG_ASSERT(reg_sock);

	new_sock = reg_sock->ops->accept(reg_sock);
	if (!new_sock) {
		ret = -ENOTCONN;
		goto end;
	}

	size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
	if (size < sizeof(msg)) {
		if (size < 0) {
			PERROR("Failed to register new agent application");
		} else if (size != 0) {
			ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
			    sizeof(msg),
			    size);
		} else {
			DBG("Failed to register new agent application: connection closed");
		}
		ret = -EINVAL;
		goto error_close_socket;
	}

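	/*
	 * The agent sends every field of the registration message in network
	 * byte order; convert to host byte order before use.
	 */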
	agent_version = (struct agent_protocol_version){
		be32toh(msg.major_version),
		be32toh(msg.minor_version),
	};

	/* Test communication protocol version of the registering agent. */
	if (!is_agent_protocol_version_supported(&agent_version)) {
		ret = -EINVAL;
		goto error_close_socket;
	}

	*agent_app_id = (struct agent_app_id){
		.pid = (pid_t) be32toh(msg.pid),
		.domain = (lttng_domain_type) be32toh(msg.domain),
	};

	DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
	     (long) agent_app_id->pid,
	     domain_type_str(agent_app_id->domain),
	     new_sock->fd);

	*agent_app_socket = new_sock;
	new_sock = NULL;
	ret = 0;
	goto end;

error_close_socket:
	new_sock->ops->close(new_sock);
	lttcomm_destroy_sock(new_sock);
end:
	return ret;
}

bool agent_tracing_is_enabled(void)
{
	int enabled;

	enabled = uatomic_read(&agent_tracing_enabled);
	LTTNG_ASSERT(enabled != -1);
	return enabled == 1;
}

/*
 * Write the agent TCP port to the agent port file in the rundir. The PID
 * file helper is reused since the file only needs to hold a single integer.
 */
static int write_agent_port(uint16_t port)
{
	return utils_create_pid_file((pid_t) port, the_config.agent_port_file_path.value);
}

static void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking agent management thread as ready");
	sem_post(&notifiers->ready);
}

static void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
{
	DBG("Waiting for agent management thread to be ready");
	sem_wait(&notifiers->ready);
	DBG("Agent management thread is ready");
}

/*
 * This thread manages agent application registration.
 */
static void *thread_agent_management(void *data)
{
	int i, ret;
	uint32_t nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *reg_sock;
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("Manage agent application registration.");

	rcu_register_thread();
	rcu_thread_online();

	/* Agent initialization MUST be done before starting this thread. */
	LTTNG_ASSERT(the_agent_apps_ht_by_sock);

	/* Create pollset with size 2, quit pipe and registration socket. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
	if (ret < 0) {
		goto error_tcp_socket;
	}

	reg_sock = init_tcp_socket();
	if (reg_sock) {
		uint16_t port;

		ret = lttcomm_sock_get_port(reg_sock, &port);
		LTTNG_ASSERT(ret == 0);

		ret = write_agent_port(port);
		if (ret) {
			ERR("Failed to create agent port file: agent tracing will be unavailable");
			/* Don't prevent the launch of the sessiond on error. */
			mark_thread_as_ready(notifiers);
			goto error;
		}
	} else {
		/* Don't prevent the launch of the sessiond on error. */
		mark_thread_as_ready(notifiers);
		goto error_tcp_socket;
	}

	/*
	 * Signal that the agent thread is ready. The command thread
	 * may start to query whether or not agent tracing is enabled.
	 */
	uatomic_set(&agent_tracing_enabled, 1);
	mark_thread_as_ready(notifiers);

	/* Add TCP socket to the poll set. */
	ret = lttng_poll_add(&events, reg_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

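	/*
	 * Main event loop: wait for activity on the quit pipe, on the
	 * registration socket, and on the sockets of registered agent
	 * applications.
	 */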
	while (1) {
		DBG3("Manage agent polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		DBG3("Manage agent return from poll on %d fds", LTTNG_POLL_GETNB(&events));
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;
		DBG3("%d fd ready", nb_fd);

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			const auto revents = LTTNG_POLL_GETEV(&events, i);
			const auto pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Activity on thread quit pipe, exiting. */
			if (pollfd == thread_quit_pipe_fd) {
				DBG("Activity on thread quit pipe");
				goto exit;
			}

			/*
			 * Activity on the registration socket (a new agent is
			 * connecting) or on an agent application socket
			 * (hang-up/error events only, see the poll flags used
			 * below).
			 */
			if (revents & LPOLLIN) {
				struct agent_app_id new_app_id;
				struct agent_app *new_app = NULL;
				struct lttcomm_sock *new_app_socket;
				int new_app_socket_fd;

				LTTNG_ASSERT(pollfd == reg_sock->fd);

				ret = accept_agent_connection(
					reg_sock, &new_app_id, &new_app_socket);
				if (ret < 0) {
					/* Errors are already logged. */
					continue;
				}

				/*
				 * new_app_socket's ownership has been
				 * transferred to the new agent app.
				 */
				new_app = agent_create_app(
					new_app_id.pid, new_app_id.domain, new_app_socket);
				if (!new_app) {
					new_app_socket->ops->close(new_app_socket);
					continue;
				}
				new_app_socket_fd = new_app_socket->fd;
				new_app_socket = NULL;

				/*
				 * Since this is a command socket (write then
				 * read), only poll for hang-up/error events
				 * in order to detect a shutdown.
				 */
				ret = lttng_poll_add(&events, new_app_socket_fd, LPOLLRDHUP);
				if (ret < 0) {
					agent_destroy_app(new_app);
					continue;
				}

				/*
				 * Prevent sessions from being modified while
				 * the agent application's configuration is
				 * updated.
				 */
				session_lock_list();

				/*
				 * Update the newly registered application's
				 * configuration.
				 */
				update_agent_app(new_app);

				ret = agent_send_registration_done(new_app);
				if (ret < 0) {
					agent_destroy_app(new_app);
					/* Removing from the poll set. */
					ret = lttng_poll_del(&events, new_app_socket_fd);
					if (ret < 0) {
						session_unlock_list();
						goto error;
					}
					continue;
				}

				/* Publish the new agent app. */
				agent_add_app(new_app);

				session_unlock_list();
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				/* Removing from the poll set */
				ret = lttng_poll_del(&events, pollfd);
				if (ret < 0) {
					goto error;
				}
				agent_destroy_app_by_sock(pollfd);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
	/*
	 * Whatever happens, try to remove the registration socket from the
	 * poll set and exit.
	 */
	(void) lttng_poll_del(&events, reg_sock->fd);
error:
	destroy_tcp_socket(reg_sock);
error_tcp_socket:
	lttng_poll_clean(&events);
error_poll_create:
	uatomic_set(&agent_tracing_enabled, 0);
	DBG("Cleaning up and stopping.");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

static bool shutdown_agent_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}

static void cleanup_agent_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	sem_destroy(&notifiers->ready);
	free(notifiers);
}

bool launch_agent_management_thread(void)
{
	struct thread_notifiers *notifiers;
	struct lttng_thread *thread;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error_alloc;
	}

	sem_init(&notifiers->ready, 0, 0);
	notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!notifiers->quit_pipe) {
		goto error;
	}
	thread = lttng_thread_create("Agent management",
				     thread_agent_management,
				     shutdown_agent_management_thread,
				     cleanup_agent_management_thread,
				     notifiers);
	if (!thread) {
		goto error;
	}
	wait_until_thread_is_ready(notifiers);
	lttng_thread_put(thread);
	return true;
error:
	cleanup_agent_management_thread(notifiers);
error_alloc:
	return false;
}