vscode: Add configurations to run the executables under the debugger
[lttng-tools.git] / src / bin / lttng-sessiond / agent-thread.cpp
1 /*
2 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 *
6 */
7
8 #define _LGPL_SOURCE
9
10 #include "agent-thread.hpp"
11 #include "agent.hpp"
12 #include "fd-limit.hpp"
13 #include "lttng-sessiond.hpp"
14 #include "session.hpp"
15 #include "thread.hpp"
16 #include "utils.hpp"
17
18 #include <common/common.hpp>
19 #include <common/compat/endian.hpp>
20 #include <common/sessiond-comm/sessiond-comm.hpp>
21 #include <common/urcu.hpp>
22 #include <common/uri.hpp>
23 #include <common/utils.hpp>
24
namespace {
/* Synchronization state shared between the launcher and the agent thread. */
struct thread_notifiers {
	struct lttng_pipe *quit_pipe; /* Written to ask the thread to exit. */
	sem_t ready; /* Posted once the thread's initialization is complete. */
};

/* Identity reported by an agent application when it registers. */
struct agent_app_id {
	pid_t pid;
	enum lttng_domain_type domain;
};

/* Agent communication protocol version, as received during registration. */
struct agent_protocol_version {
	unsigned int major, minor;
};

/* -1 = unknown (thread not yet started), 0 = disabled, 1 = enabled. */
int agent_tracing_enabled = -1;

/*
 * Note that there is no port here. It's set after this URI is parsed so we
 * can let the user define a custom one. However, localhost is ALWAYS the
 * default listening address.
 */
const char *default_reg_uri = "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
} /* namespace */
49
/*
 * Update agent application using the given socket. This is done just after
 * registration was successful.
 *
 * This will acquire the various sessions' lock; none must be held by the
 * caller.
 * The caller must hold the session list lock.
 */
static void update_agent_app(const struct agent_app *app)
{
	struct ltt_session *session, *stmp;
	struct ltt_session_list *list;
	struct agent *trigger_agent;
	struct lttng_ht_iter iter;

	list = session_get_list();
	LTTNG_ASSERT(list);

	/*
	 * Push the current configuration to the new app for every session
	 * that has an agent matching the app's domain.
	 */
	cds_list_for_each_entry_safe (session, stmp, &list->head, list) {
		if (!session_get(session)) {
			/* Session reference could not be acquired; skip it. */
			continue;
		}

		session_lock(session);
		if (session->ust_session) {
			const struct agent *agt;

			lttng::urcu::read_lock_guard read_lock;
			agt = trace_ust_find_agent(session->ust_session, app->domain);
			if (agt) {
				agent_update(agt, app);
			}
		}
		session_unlock(session);
		session_put(session);
	}

	{
		/*
		 * We are protected against the addition of new events by the session
		 * list lock being held.
		 */
		lttng::urcu::read_lock_guard read_lock;

		/* Also push trigger-originated agent configurations. */
		cds_lfht_for_each_entry (
			the_trigger_agents_ht_by_domain->ht, &iter.iter, trigger_agent, node.node) {
			agent_update(trigger_agent, app);
		}
	}
}
100
/*
 * Create and init socket from uri.
 *
 * Binds to the first free port within the configured agent TCP port range
 * and puts the socket in listening mode.
 *
 * Returns the listening socket on success, or nullptr if no port could be
 * bound (agent support is then deactivated by the caller).
 */
static struct lttcomm_sock *init_tcp_socket()
{
	int ret;
	struct lttng_uri *uri = nullptr;
	struct lttcomm_sock *sock = nullptr;
	unsigned int port;
	bool bind_succeeded = false;

	/*
	 * This should never fail since the URI is hardcoded and the port is set
	 * before this thread is launched.
	 */
	ret = uri_parse(default_reg_uri, &uri);
	/*
	 * NOTE(review): a negative (error) return from uri_parse also
	 * satisfies this non-zero assert — confirm that asserting `ret > 0`
	 * was not the intent.
	 */
	LTTNG_ASSERT(ret);
	LTTNG_ASSERT(the_config.agent_tcp_port.begin > 0);
	uri->port = the_config.agent_tcp_port.begin;

	sock = lttcomm_alloc_sock_from_uri(uri);
	uri_free(uri);
	if (sock == nullptr) {
		ERR("agent allocating TCP socket");
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/* Walk the configured port range until a bind succeeds. */
	for (port = the_config.agent_tcp_port.begin; port <= the_config.agent_tcp_port.end;
	     port++) {
		ret = lttcomm_sock_set_port(sock, (uint16_t) port);
		if (ret) {
			ERR("Failed to set port %u on socket", port);
			goto error;
		}
		DBG3("Trying to bind on port %u", port);
		ret = sock->ops->bind(sock);
		if (!ret) {
			bind_succeeded = true;
			break;
		}

		/* EADDRINUSE is expected while scanning the range; try the next port. */
		if (errno == EADDRINUSE) {
			DBG("Failed to bind to port %u since it is already in use", port);
		} else {
			PERROR("Failed to bind to port %u", port);
			goto error;
		}
	}

	if (!bind_succeeded) {
		if (the_config.agent_tcp_port.begin == the_config.agent_tcp_port.end) {
			WARN("Another process is already using the agent port %i. "
			     "Agent support will be deactivated.",
			     the_config.agent_tcp_port.begin);
			goto error;
		} else {
			WARN("All ports in the range [%i, %i] are already in use. "
			     "Agent support will be deactivated.",
			     the_config.agent_tcp_port.begin,
			     the_config.agent_tcp_port.end);
			goto error;
		}
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	DBG("Listening on TCP port %u and socket %d", port, sock->fd);

	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	return nullptr;
}
185
186 /*
187 * Close and destroy the given TCP socket.
188 */
189 static void destroy_tcp_socket(struct lttcomm_sock *sock)
190 {
191 int ret;
192 uint16_t port;
193
194 LTTNG_ASSERT(sock);
195
196 ret = lttcomm_sock_get_port(sock, &port);
197 if (ret) {
198 ERR("Failed to get port of agent TCP socket");
199 port = 0;
200 }
201
202 DBG3("Destroy TCP socket on port %" PRIu16, port);
203
204 /* This will return gracefully if fd is invalid. */
205 sock->ops->close(sock);
206 lttcomm_destroy_sock(sock);
207 }
208
209 static const char *domain_type_str(enum lttng_domain_type domain_type)
210 {
211 switch (domain_type) {
212 case LTTNG_DOMAIN_NONE:
213 return "none";
214 case LTTNG_DOMAIN_KERNEL:
215 return "kernel";
216 case LTTNG_DOMAIN_UST:
217 return "ust";
218 case LTTNG_DOMAIN_JUL:
219 return "jul";
220 case LTTNG_DOMAIN_LOG4J:
221 return "log4j";
222 case LTTNG_DOMAIN_PYTHON:
223 return "python";
224 default:
225 return "unknown";
226 }
227 }
228
229 static bool is_agent_protocol_version_supported(const struct agent_protocol_version *version)
230 {
231 const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
232 version->minor == AGENT_MINOR_VERSION;
233
234 if (!is_supported) {
235 WARN("Refusing agent connection: unsupported protocol version %ui.%ui, expected %i.%i",
236 version->major,
237 version->minor,
238 AGENT_MAJOR_VERSION,
239 AGENT_MINOR_VERSION);
240 }
241
242 return is_supported;
243 }
244
245 /*
246 * Handle a new agent connection on the registration socket.
247 *
248 * Returns 0 on success, or else a negative errno value.
249 * On success, the resulting socket is returned through `agent_app_socket`
250 * and the application's reported id is updated through `agent_app_id`.
251 */
252 static int accept_agent_connection(struct lttcomm_sock *reg_sock,
253 struct agent_app_id *agent_app_id,
254 struct lttcomm_sock **agent_app_socket)
255 {
256 int ret;
257 struct agent_protocol_version agent_version;
258 ssize_t size;
259 struct agent_register_msg msg;
260 struct lttcomm_sock *new_sock;
261
262 LTTNG_ASSERT(reg_sock);
263
264 new_sock = reg_sock->ops->accept(reg_sock);
265 if (!new_sock) {
266 ret = -ENOTCONN;
267 goto end;
268 }
269
270 size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
271 if (size < sizeof(msg)) {
272 if (size < 0) {
273 PERROR("Failed to register new agent application");
274 } else if (size != 0) {
275 ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
276 sizeof(msg),
277 size);
278 } else {
279 DBG("Failed to register new agent application: connection closed");
280 }
281 ret = -EINVAL;
282 goto error_close_socket;
283 }
284
285 agent_version = (struct agent_protocol_version){
286 be32toh(msg.major_version),
287 be32toh(msg.minor_version),
288 };
289
290 /* Test communication protocol version of the registering agent. */
291 if (!is_agent_protocol_version_supported(&agent_version)) {
292 ret = -EINVAL;
293 goto error_close_socket;
294 }
295
296 *agent_app_id = (struct agent_app_id){
297 .pid = (pid_t) be32toh(msg.pid),
298 .domain = (lttng_domain_type) be32toh(msg.domain),
299 };
300
301 DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
302 (long) agent_app_id->pid,
303 domain_type_str(agent_app_id->domain),
304 new_sock->fd);
305
306 *agent_app_socket = new_sock;
307 new_sock = nullptr;
308 ret = 0;
309 goto end;
310
311 error_close_socket:
312 new_sock->ops->close(new_sock);
313 lttcomm_destroy_sock(new_sock);
314 end:
315 return ret;
316 }
317
318 bool agent_tracing_is_enabled()
319 {
320 int enabled;
321
322 enabled = uatomic_read(&agent_tracing_enabled);
323 LTTNG_ASSERT(enabled != -1);
324 return enabled == 1;
325 }
326
327 /*
328 * Write agent TCP port using the rundir.
329 */
330 static int write_agent_port(uint16_t port)
331 {
332 return utils_create_pid_file((pid_t) port, the_config.agent_port_file_path.value);
333 }
334
/* Signal readiness: wakes any thread blocked in wait_until_thread_is_ready(). */
static void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking agent management thread as ready");
	sem_post(&notifiers->ready);
}
340
/* Block until the agent thread posts the `ready` semaphore. */
static void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
{
	DBG("Waiting for agent management thread to be ready");
	sem_wait(&notifiers->ready);
	DBG("Agent management thread is ready");
}
347
/*
 * Agent management thread: listens on the registration TCP socket, accepts
 * agent application registrations, pushes the current configuration to each
 * new application and reaps applications whose socket hung up.
 */
static void *thread_agent_management(void *data)
{
	int i, ret;
	uint32_t nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *reg_sock;
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("Manage agent application registration.");

	rcu_register_thread();
	rcu_thread_online();

	/* Agent initialization call MUST be called before starting the thread. */
	LTTNG_ASSERT(the_agent_apps_ht_by_sock);

	/* Create pollset with size 2, quit pipe and registration socket. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
	if (ret < 0) {
		goto error_tcp_socket;
	}

	reg_sock = init_tcp_socket();
	if (reg_sock) {
		uint16_t port;

		ret = lttcomm_sock_get_port(reg_sock, &port);
		LTTNG_ASSERT(ret == 0);

		/* Expose the bound port so agent applications can connect. */
		ret = write_agent_port(port);
		if (ret) {
			ERR("Failed to create agent port file: agent tracing will be unavailable");
			/* Don't prevent the launch of the sessiond on error. */
			mark_thread_as_ready(notifiers);
			goto error;
		}
	} else {
		/* Don't prevent the launch of the sessiond on error. */
		mark_thread_as_ready(notifiers);
		goto error_tcp_socket;
	}

	/*
	 * Signal that the agent thread is ready. The command thread
	 * may start to query whether or not agent tracing is enabled.
	 */
	uatomic_set(&agent_tracing_enabled, 1);
	mark_thread_as_ready(notifiers);

	/* Add TCP socket to the poll set. */
	ret = lttng_poll_add(&events, reg_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	while (true) {
		DBG3("Manage agent polling");

		/* Infinite blocking call, waiting for transmission. */
	restart:
		ret = lttng_poll_wait(&events, -1);
		DBG3("Manage agent return from poll on %d fds", LTTNG_POLL_GETNB(&events));
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;
		DBG3("%d fd ready", nb_fd);

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			const auto revents = LTTNG_POLL_GETEV(&events, i);
			const auto pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Activity on thread quit pipe, exiting. */
			if (pollfd == thread_quit_pipe_fd) {
				DBG("Activity on thread quit pipe");
				goto exit;
			}

			/* Activity on the registration socket. */
			if (revents & LPOLLIN) {
				struct agent_app_id new_app_id;
				struct agent_app *new_app = nullptr;
				struct lttcomm_sock *new_app_socket;
				int new_app_socket_fd;

				LTTNG_ASSERT(pollfd == reg_sock->fd);

				ret = accept_agent_connection(
					reg_sock, &new_app_id, &new_app_socket);
				if (ret < 0) {
					/* Errors are already logged. */
					continue;
				}

				/*
				 * new_app_socket's ownership has been
				 * transferred to the new agent app.
				 */
				new_app = agent_create_app(
					new_app_id.pid, new_app_id.domain, new_app_socket);
				if (!new_app) {
					new_app_socket->ops->close(new_app_socket);
					continue;
				}
				/* Keep the fd: new_app now owns the socket object. */
				new_app_socket_fd = new_app_socket->fd;
				new_app_socket = nullptr;

				/*
				 * Since this is a command socket (write then
				 * read), only add poll error event to only
				 * detect shutdown.
				 */
				ret = lttng_poll_add(&events, new_app_socket_fd, LPOLLRDHUP);
				if (ret < 0) {
					agent_destroy_app(new_app);
					continue;
				}

				/*
				 * Prevent sessions from being modified while
				 * the agent application's configuration is
				 * updated.
				 */
				session_lock_list();

				/*
				 * Update the newly registered application's
				 * configuration.
				 */
				update_agent_app(new_app);

				ret = agent_send_registration_done(new_app);
				if (ret < 0) {
					agent_destroy_app(new_app);
					/* Removing from the poll set. */
					ret = lttng_poll_del(&events, new_app_socket_fd);
					if (ret < 0) {
						session_unlock_list();
						goto error;
					}
					continue;
				}

				/* Publish the new agent app. */
				agent_add_app(new_app);

				session_unlock_list();
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				/* Removing from the poll set */
				ret = lttng_poll_del(&events, pollfd);
				if (ret < 0) {
					goto error;
				}
				/* The agent application hung up: tear it down. */
				agent_destroy_app_by_sock(pollfd);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
	/* Whatever happens, try to delete it and exit. */
	(void) lttng_poll_del(&events, reg_sock->fd);
error:
	destroy_tcp_socket(reg_sock);
error_tcp_socket:
	lttng_poll_clean(&events);
error_poll_create:
	uatomic_set(&agent_tracing_enabled, 0);
	DBG("Cleaning up and stopping.");
	rcu_thread_offline();
	rcu_unregister_thread();
	return nullptr;
}
539
540 static bool shutdown_agent_management_thread(void *data)
541 {
542 struct thread_notifiers *notifiers = (thread_notifiers *) data;
543 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
544
545 return notify_thread_pipe(write_fd) == 1;
546 }
547
548 static void cleanup_agent_management_thread(void *data)
549 {
550 struct thread_notifiers *notifiers = (thread_notifiers *) data;
551
552 lttng_pipe_destroy(notifiers->quit_pipe);
553 sem_destroy(&notifiers->ready);
554 free(notifiers);
555 }
556
557 bool launch_agent_management_thread()
558 {
559 struct thread_notifiers *notifiers;
560 struct lttng_thread *thread;
561
562 notifiers = zmalloc<thread_notifiers>();
563 if (!notifiers) {
564 goto error_alloc;
565 }
566
567 sem_init(&notifiers->ready, 0, 0);
568 notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
569 if (!notifiers->quit_pipe) {
570 goto error;
571 }
572 thread = lttng_thread_create("Agent management",
573 thread_agent_management,
574 shutdown_agent_management_thread,
575 cleanup_agent_management_thread,
576 notifiers);
577 if (!thread) {
578 goto error;
579 }
580 wait_until_thread_is_ready(notifiers);
581 lttng_thread_put(thread);
582 return true;
583 error:
584 cleanup_agent_management_thread(notifiers);
585 error_alloc:
586 return false;
587 }
This page took 0.040519 seconds and 4 git commands to generate.