Remove fcntl wrapper
[lttng-tools.git] / src / bin / lttng-sessiond / agent-thread.cpp
1 /*
2 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 *
6 */
7
8 #define _LGPL_SOURCE
9
10 #include "agent-thread.hpp"
11 #include "agent.hpp"
12 #include "fd-limit.hpp"
13 #include "lttng-sessiond.hpp"
14 #include "session.hpp"
15 #include "thread.hpp"
16 #include "utils.hpp"
17
18 #include <common/common.hpp>
19 #include <common/compat/endian.hpp>
20 #include <common/sessiond-comm/sessiond-comm.hpp>
21 #include <common/urcu.hpp>
22 #include <common/uri.hpp>
23 #include <common/utils.hpp>
24
25 #include <fcntl.h>
26
namespace {
/* State shared between the launcher and the agent management thread. */
struct thread_notifiers {
	/* Written to by the shutdown callback to wake the thread's poll loop. */
	struct lttng_pipe *quit_pipe;
	/* Posted by the agent thread once its startup has completed. */
	sem_t ready;
};

/* Identity reported by an agent application when it registers. */
struct agent_app_id {
	pid_t pid;
	enum lttng_domain_type domain;
};

/* Protocol version advertised by a registering agent. */
struct agent_protocol_version {
	unsigned int major, minor;
};

/*
 * Tri-state flag: -1 until the agent thread has attempted to initialize,
 * then 1 if the registration socket was set up, 0 otherwise. Read and
 * written with uatomic primitives; asserted to be != -1 by
 * agent_tracing_is_enabled().
 */
int agent_tracing_enabled = -1;

/*
 * Note that there is no port here. It's set after this URI is parsed so we
 * can let the user define a custom one. However, localhost is ALWAYS the
 * default listening address.
 */
const char *default_reg_uri = "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
} /* namespace */
51
/*
 * Update agent application using the given socket. This is done just after
 * registration was successful.
 *
 * Locking contract: the caller must hold the session list lock and must NOT
 * hold any individual session's lock; this function acquires each session
 * lock in turn while iterating.
 */
static void update_agent_app(const struct agent_app *app)
{
	struct ltt_session *session, *stmp;
	struct ltt_session_list *list;
	struct agent *trigger_agent;
	struct lttng_ht_iter iter;

	list = session_get_list();
	LTTNG_ASSERT(list);

	/*
	 * Push the per-session agent configuration (if any matches the app's
	 * domain) to the newly registered application.
	 */
	cds_list_for_each_entry_safe (session, stmp, &list->head, list) {
		/* Skip sessions that are being torn down. */
		if (!session_get(session)) {
			continue;
		}

		session_lock(session);
		if (session->ust_session) {
			const struct agent *agt;

			lttng::urcu::read_lock_guard read_lock;
			agt = trace_ust_find_agent(session->ust_session, app->domain);
			if (agt) {
				agent_update(agt, app);
			}
		}
		session_unlock(session);
		session_put(session);
	}

	{
		/*
		 * We are protected against the addition of new events by the session
		 * list lock being held.
		 */
		lttng::urcu::read_lock_guard read_lock;

		/* Also push trigger-originated agent configurations. */
		cds_lfht_for_each_entry (
			the_trigger_agents_ht_by_domain->ht, &iter.iter, trigger_agent, node.node) {
			agent_update(trigger_agent, app);
		}
	}
}
102
103 /*
104 * Create and init socket from uri.
105 */
106 static struct lttcomm_sock *init_tcp_socket()
107 {
108 int ret;
109 struct lttng_uri *uri = nullptr;
110 struct lttcomm_sock *sock = nullptr;
111 unsigned int port;
112 bool bind_succeeded = false;
113
114 /*
115 * This should never fail since the URI is hardcoded and the port is set
116 * before this thread is launched.
117 */
118 ret = uri_parse(default_reg_uri, &uri);
119 LTTNG_ASSERT(ret);
120 LTTNG_ASSERT(the_config.agent_tcp_port.begin > 0);
121 uri->port = the_config.agent_tcp_port.begin;
122
123 sock = lttcomm_alloc_sock_from_uri(uri);
124 uri_free(uri);
125 if (sock == nullptr) {
126 ERR("agent allocating TCP socket");
127 goto error;
128 }
129
130 ret = lttcomm_create_sock(sock);
131 if (ret < 0) {
132 goto error;
133 }
134
135 for (port = the_config.agent_tcp_port.begin; port <= the_config.agent_tcp_port.end;
136 port++) {
137 ret = lttcomm_sock_set_port(sock, (uint16_t) port);
138 if (ret) {
139 ERR("Failed to set port %u on socket", port);
140 goto error;
141 }
142 DBG3("Trying to bind on port %u", port);
143 ret = sock->ops->bind(sock);
144 if (!ret) {
145 bind_succeeded = true;
146 break;
147 }
148
149 if (errno == EADDRINUSE) {
150 DBG("Failed to bind to port %u since it is already in use", port);
151 } else {
152 PERROR("Failed to bind to port %u", port);
153 goto error;
154 }
155 }
156
157 if (!bind_succeeded) {
158 if (the_config.agent_tcp_port.begin == the_config.agent_tcp_port.end) {
159 WARN("Another process is already using the agent port %i. "
160 "Agent support will be deactivated.",
161 the_config.agent_tcp_port.begin);
162 goto error;
163 } else {
164 WARN("All ports in the range [%i, %i] are already in use. "
165 "Agent support will be deactivated.",
166 the_config.agent_tcp_port.begin,
167 the_config.agent_tcp_port.end);
168 goto error;
169 }
170 }
171
172 ret = sock->ops->listen(sock, -1);
173 if (ret < 0) {
174 goto error;
175 }
176
177 DBG("Listening on TCP port %u and socket %d", port, sock->fd);
178
179 return sock;
180
181 error:
182 if (sock) {
183 lttcomm_destroy_sock(sock);
184 }
185 return nullptr;
186 }
187
188 /*
189 * Close and destroy the given TCP socket.
190 */
191 static void destroy_tcp_socket(struct lttcomm_sock *sock)
192 {
193 int ret;
194 uint16_t port;
195
196 LTTNG_ASSERT(sock);
197
198 ret = lttcomm_sock_get_port(sock, &port);
199 if (ret) {
200 ERR("Failed to get port of agent TCP socket");
201 port = 0;
202 }
203
204 DBG3("Destroy TCP socket on port %" PRIu16, port);
205
206 /* This will return gracefully if fd is invalid. */
207 sock->ops->close(sock);
208 lttcomm_destroy_sock(sock);
209 }
210
211 static const char *domain_type_str(enum lttng_domain_type domain_type)
212 {
213 switch (domain_type) {
214 case LTTNG_DOMAIN_NONE:
215 return "none";
216 case LTTNG_DOMAIN_KERNEL:
217 return "kernel";
218 case LTTNG_DOMAIN_UST:
219 return "ust";
220 case LTTNG_DOMAIN_JUL:
221 return "jul";
222 case LTTNG_DOMAIN_LOG4J:
223 return "log4j";
224 case LTTNG_DOMAIN_PYTHON:
225 return "python";
226 default:
227 return "unknown";
228 }
229 }
230
231 static bool is_agent_protocol_version_supported(const struct agent_protocol_version *version)
232 {
233 const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
234 version->minor == AGENT_MINOR_VERSION;
235
236 if (!is_supported) {
237 WARN("Refusing agent connection: unsupported protocol version %ui.%ui, expected %i.%i",
238 version->major,
239 version->minor,
240 AGENT_MAJOR_VERSION,
241 AGENT_MINOR_VERSION);
242 }
243
244 return is_supported;
245 }
246
247 /*
248 * Handle a new agent connection on the registration socket.
249 *
250 * Returns 0 on success, or else a negative errno value.
251 * On success, the resulting socket is returned through `agent_app_socket`
252 * and the application's reported id is updated through `agent_app_id`.
253 */
254 static int accept_agent_connection(struct lttcomm_sock *reg_sock,
255 struct agent_app_id *agent_app_id,
256 struct lttcomm_sock **agent_app_socket)
257 {
258 int ret;
259 struct agent_protocol_version agent_version;
260 ssize_t size;
261 struct agent_register_msg msg;
262 struct lttcomm_sock *new_sock;
263
264 LTTNG_ASSERT(reg_sock);
265
266 new_sock = reg_sock->ops->accept(reg_sock);
267 if (!new_sock) {
268 ret = -ENOTCONN;
269 goto end;
270 }
271
272 size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
273 if (size < sizeof(msg)) {
274 if (size < 0) {
275 PERROR("Failed to register new agent application");
276 } else if (size != 0) {
277 ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
278 sizeof(msg),
279 size);
280 } else {
281 DBG("Failed to register new agent application: connection closed");
282 }
283 ret = -EINVAL;
284 goto error_close_socket;
285 }
286
287 agent_version = (struct agent_protocol_version){
288 be32toh(msg.major_version),
289 be32toh(msg.minor_version),
290 };
291
292 /* Test communication protocol version of the registering agent. */
293 if (!is_agent_protocol_version_supported(&agent_version)) {
294 ret = -EINVAL;
295 goto error_close_socket;
296 }
297
298 *agent_app_id = (struct agent_app_id){
299 .pid = (pid_t) be32toh(msg.pid),
300 .domain = (lttng_domain_type) be32toh(msg.domain),
301 };
302
303 DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
304 (long) agent_app_id->pid,
305 domain_type_str(agent_app_id->domain),
306 new_sock->fd);
307
308 *agent_app_socket = new_sock;
309 new_sock = nullptr;
310 ret = 0;
311 goto end;
312
313 error_close_socket:
314 new_sock->ops->close(new_sock);
315 lttcomm_destroy_sock(new_sock);
316 end:
317 return ret;
318 }
319
320 bool agent_tracing_is_enabled()
321 {
322 int enabled;
323
324 enabled = uatomic_read(&agent_tracing_enabled);
325 LTTNG_ASSERT(enabled != -1);
326 return enabled == 1;
327 }
328
/*
 * Write the agent TCP port to its file in the rundir so that agent
 * applications can find where to register.
 *
 * Reuses the pid-file helper by casting the port to a pid_t. Returns the
 * helper's return code (0 on success).
 */
static int write_agent_port(uint16_t port)
{
	return utils_create_pid_file((pid_t) port, the_config.agent_port_file_path.value);
}
336
/* Signal the launcher that the agent thread completed its startup. */
static void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking agent management thread as ready");
	sem_post(&notifiers->ready);
}
342
/* Block the launcher until the agent thread posts its readiness. */
static void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
{
	DBG("Waiting for agent management thread to be ready");
	sem_wait(&notifiers->ready);
	DBG("Agent management thread is ready");
}
349
/*
 * Agent management thread entry point.
 *
 * Accepts registrations from agent applications on the TCP registration
 * socket, pushes the current configuration to each newly registered
 * application and reaps application sockets on hang-up. Exits when the quit
 * pipe becomes readable.
 *
 * `data` is the thread_notifiers instance owned by the launcher.
 */
static void *thread_agent_management(void *data)
{
	int i, ret;
	uint32_t nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *reg_sock;
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("Manage agent application registration.");

	rcu_register_thread();
	rcu_thread_online();

	/* Agent initialization call MUST be called before starting the thread. */
	LTTNG_ASSERT(the_agent_apps_ht_by_sock);

	/* Create pollset with size 2, quit pipe and registration socket. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
	if (ret < 0) {
		goto error_tcp_socket;
	}

	reg_sock = init_tcp_socket();
	if (reg_sock) {
		uint16_t port;

		ret = lttcomm_sock_get_port(reg_sock, &port);
		LTTNG_ASSERT(ret == 0);

		/* Publish the bound port so agent applications can find it. */
		ret = write_agent_port(port);
		if (ret) {
			ERR("Failed to create agent port file: agent tracing will be unavailable");
			/* Don't prevent the launch of the sessiond on error. */
			mark_thread_as_ready(notifiers);
			goto error;
		}
	} else {
		/* Don't prevent the launch of the sessiond on error. */
		mark_thread_as_ready(notifiers);
		goto error_tcp_socket;
	}

	/*
	 * Signal that the agent thread is ready. The command thread
	 * may start to query whether or not agent tracing is enabled.
	 */
	uatomic_set(&agent_tracing_enabled, 1);
	mark_thread_as_ready(notifiers);

	/* Add TCP socket to the poll set. */
	ret = lttng_poll_add(&events, reg_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	while (true) {
		DBG3("Manage agent polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		DBG3("Manage agent return from poll on %d fds", LTTNG_POLL_GETNB(&events));
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;
		DBG3("%d fd ready", nb_fd);

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			const auto revents = LTTNG_POLL_GETEV(&events, i);
			const auto pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Activity on thread quit pipe, exiting. */
			if (pollfd == thread_quit_pipe_fd) {
				DBG("Activity on thread quit pipe");
				goto exit;
			}

			/* Activity on the registration socket. */
			if (revents & LPOLLIN) {
				struct agent_app_id new_app_id;
				struct agent_app *new_app = nullptr;
				struct lttcomm_sock *new_app_socket;
				int new_app_socket_fd;

				/* Only the registration socket polls LPOLLIN. */
				LTTNG_ASSERT(pollfd == reg_sock->fd);

				ret = accept_agent_connection(
					reg_sock, &new_app_id, &new_app_socket);
				if (ret < 0) {
					/* Errors are already logged. */
					continue;
				}

				/*
				 * new_app_socket's ownership has been
				 * transferred to the new agent app.
				 */
				new_app = agent_create_app(
					new_app_id.pid, new_app_id.domain, new_app_socket);
				if (!new_app) {
					new_app_socket->ops->close(new_app_socket);
					continue;
				}
				new_app_socket_fd = new_app_socket->fd;
				new_app_socket = nullptr;

				/*
				 * Since this is a command socket (write then
				 * read), only add poll error event to only
				 * detect shutdown.
				 */
				ret = lttng_poll_add(&events, new_app_socket_fd, LPOLLRDHUP);
				if (ret < 0) {
					agent_destroy_app(new_app);
					continue;
				}

				/*
				 * Prevent sessions from being modified while
				 * the agent application's configuration is
				 * updated.
				 */
				session_lock_list();

				/*
				 * Update the newly registered application's
				 * configuration.
				 */
				update_agent_app(new_app);

				ret = agent_send_registration_done(new_app);
				if (ret < 0) {
					agent_destroy_app(new_app);
					/* Removing from the poll set. */
					ret = lttng_poll_del(&events, new_app_socket_fd);
					if (ret < 0) {
						session_unlock_list();
						goto error;
					}
					continue;
				}

				/* Publish the new agent app. */
				agent_add_app(new_app);

				session_unlock_list();
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				/* An agent application socket hung up. */
				/* Removing from the poll set */
				ret = lttng_poll_del(&events, pollfd);
				if (ret < 0) {
					goto error;
				}
				agent_destroy_app_by_sock(pollfd);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
	/* Whatever happens, try to delete it and exit. */
	(void) lttng_poll_del(&events, reg_sock->fd);
error:
	destroy_tcp_socket(reg_sock);
error_tcp_socket:
	lttng_poll_clean(&events);
error_poll_create:
	uatomic_set(&agent_tracing_enabled, 0);
	DBG("Cleaning up and stopping.");
	rcu_thread_offline();
	rcu_unregister_thread();
	return nullptr;
}
541
542 static bool shutdown_agent_management_thread(void *data)
543 {
544 struct thread_notifiers *notifiers = (thread_notifiers *) data;
545 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
546
547 return notify_thread_pipe(write_fd) == 1;
548 }
549
550 static void cleanup_agent_management_thread(void *data)
551 {
552 struct thread_notifiers *notifiers = (thread_notifiers *) data;
553
554 lttng_pipe_destroy(notifiers->quit_pipe);
555 sem_destroy(&notifiers->ready);
556 free(notifiers);
557 }
558
559 bool launch_agent_management_thread()
560 {
561 struct thread_notifiers *notifiers;
562 struct lttng_thread *thread;
563
564 notifiers = zmalloc<thread_notifiers>();
565 if (!notifiers) {
566 goto error_alloc;
567 }
568
569 sem_init(&notifiers->ready, 0, 0);
570 notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
571 if (!notifiers->quit_pipe) {
572 goto error;
573 }
574 thread = lttng_thread_create("Agent management",
575 thread_agent_management,
576 shutdown_agent_management_thread,
577 cleanup_agent_management_thread,
578 notifiers);
579 if (!thread) {
580 goto error;
581 }
582 wait_until_thread_is_ready(notifiers);
583 lttng_thread_put(thread);
584 return true;
585 error:
586 cleanup_agent_management_thread(notifiers);
587 error_alloc:
588 return false;
589 }
This page took 0.041305 seconds and 5 git commands to generate.