Fix: lttng-sessiond: output stream metadata before events
[lttng-tools.git] / src / bin / lttng-sessiond / agent-thread.cpp
1 /*
2 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 *
6 */
7
8 #define _LGPL_SOURCE
9
10 #include <common/common.hpp>
11 #include <common/sessiond-comm/sessiond-comm.hpp>
12 #include <common/uri.hpp>
13 #include <common/utils.hpp>
14
15 #include <common/compat/endian.hpp>
16
17 #include "fd-limit.hpp"
18 #include "agent-thread.hpp"
19 #include "agent.hpp"
20 #include "lttng-sessiond.hpp"
21 #include "session.hpp"
22 #include "utils.hpp"
23 #include "thread.hpp"
24
/* Synchronization objects shared between the launcher and the agent thread. */
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;	/* Written to in order to ask the thread to exit. */
	sem_t ready;			/* Posted once the thread has completed its initialization. */
};
29
/* Identity reported by an agent application when it registers. */
struct agent_app_id {
	pid_t pid;			/* Process id of the registering application. */
	enum lttng_domain_type domain;	/* Tracing domain (e.g. jul, log4j, python). */
};
34
/* Agent communication protocol version advertised in the registration message. */
struct agent_protocol_version {
	unsigned int major, minor;
};
38
/*
 * Agent tracing state: -1 until the agent management thread has attempted to
 * initialize, 1 once its registration socket is operational, 0 after teardown
 * or failure. Read/written with uatomic_* operations.
 */
static int agent_tracing_enabled = -1;

/*
 * Note that there is no port here. It's set after this URI is parsed so we
 * can let the user define a custom one. However, localhost is ALWAYS the
 * default listening address.
 */
static const char *default_reg_uri =
	"tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;
48
/*
 * Update agent application using the given socket. This is done just after
 * registration was successful.
 *
 * Locking contract: the caller must hold the session list lock. The
 * individual session locks must NOT be held; they are acquired and released
 * here, one session at a time.
 */
static void update_agent_app(const struct agent_app *app)
{
	struct ltt_session *session, *stmp;
	struct ltt_session_list *list;
	struct agent *trigger_agent;
	struct lttng_ht_iter iter;

	list = session_get_list();
	LTTNG_ASSERT(list);

	/* Push the current configuration of every matching session agent. */
	cds_list_for_each_entry_safe(session, stmp, &list->head, list) {
		if (!session_get(session)) {
			/* Session reference could not be acquired; skip it. */
			continue;
		}

		session_lock(session);
		if (session->ust_session) {
			const struct agent *agt;

			rcu_read_lock();
			agt = trace_ust_find_agent(session->ust_session, app->domain);
			if (agt) {
				agent_update(agt, app);
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}

	rcu_read_lock();
	/*
	 * We are protected against the addition of new events by the session
	 * list lock being held.
	 */
	cds_lfht_for_each_entry(the_trigger_agents_ht_by_domain->ht,
			&iter.iter, trigger_agent, node.node) {
		agent_update(trigger_agent, app);
	}
	rcu_read_unlock();
}
98
/*
 * Create the agent registration TCP socket, bind it to the first free port
 * in the configured agent port range and start listening on it.
 *
 * Returns the listening socket, or NULL if no port could be bound or any
 * other step failed (errors are logged).
 */
static struct lttcomm_sock *init_tcp_socket(void)
{
	int ret;
	struct lttng_uri *uri = NULL;
	struct lttcomm_sock *sock = NULL;
	unsigned int port;
	bool bind_succeeded = false;

	/*
	 * This should never fail since the URI is hardcoded and the port is set
	 * before this thread is launched.
	 */
	ret = uri_parse(default_reg_uri, &uri);
	/* NOTE(review): presumably uri_parse returns a non-zero count on success — confirm. */
	LTTNG_ASSERT(ret);
	LTTNG_ASSERT(the_config.agent_tcp_port.begin > 0);
	uri->port = the_config.agent_tcp_port.begin;

	sock = lttcomm_alloc_sock_from_uri(uri);
	uri_free(uri);
	if (sock == NULL) {
		ERR("agent allocating TCP socket");
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Walk the configured port range until a bind succeeds. A port already
	 * in use (EADDRINUSE) is skipped; any other bind error is fatal.
	 */
	for (port = the_config.agent_tcp_port.begin;
			port <= the_config.agent_tcp_port.end; port++) {
		ret = lttcomm_sock_set_port(sock, (uint16_t) port);
		if (ret) {
			ERR("Failed to set port %u on socket",
					port);
			goto error;
		}
		DBG3("Trying to bind on port %u", port);
		ret = sock->ops->bind(sock);
		if (!ret) {
			bind_succeeded = true;
			break;
		}

		if (errno == EADDRINUSE) {
			DBG("Failed to bind to port %u since it is already in use",
					port);
		} else {
			PERROR("Failed to bind to port %u", port);
			goto error;
		}
	}

	if (!bind_succeeded) {
		/* Tailor the warning to single-port vs range configurations. */
		if (the_config.agent_tcp_port.begin ==
				the_config.agent_tcp_port.end) {
			WARN("Another process is already using the agent port %i. "
					"Agent support will be deactivated.",
					the_config.agent_tcp_port.begin);
			goto error;
		} else {
			WARN("All ports in the range [%i, %i] are already in use. "
					"Agent support will be deactivated.",
					the_config.agent_tcp_port.begin,
					the_config.agent_tcp_port.end);
			goto error;
		}
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	DBG("Listening on TCP port %u and socket %d",
			port, sock->fd);

	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	return NULL;
}
187
188 /*
189 * Close and destroy the given TCP socket.
190 */
191 static void destroy_tcp_socket(struct lttcomm_sock *sock)
192 {
193 int ret;
194 uint16_t port;
195
196 LTTNG_ASSERT(sock);
197
198 ret = lttcomm_sock_get_port(sock, &port);
199 if (ret) {
200 ERR("Failed to get port of agent TCP socket");
201 port = 0;
202 }
203
204 DBG3("Destroy TCP socket on port %" PRIu16,
205 port);
206
207 /* This will return gracefully if fd is invalid. */
208 sock->ops->close(sock);
209 lttcomm_destroy_sock(sock);
210 }
211
212 static const char *domain_type_str(enum lttng_domain_type domain_type)
213 {
214 switch (domain_type) {
215 case LTTNG_DOMAIN_NONE:
216 return "none";
217 case LTTNG_DOMAIN_KERNEL:
218 return "kernel";
219 case LTTNG_DOMAIN_UST:
220 return "ust";
221 case LTTNG_DOMAIN_JUL:
222 return "jul";
223 case LTTNG_DOMAIN_LOG4J:
224 return "log4j";
225 case LTTNG_DOMAIN_PYTHON:
226 return "python";
227 default:
228 return "unknown";
229 }
230 }
231
232 static bool is_agent_protocol_version_supported(
233 const struct agent_protocol_version *version)
234 {
235 const bool is_supported = version->major == AGENT_MAJOR_VERSION &&
236 version->minor == AGENT_MINOR_VERSION;
237
238 if (!is_supported) {
239 WARN("Refusing agent connection: unsupported protocol version %ui.%ui, expected %i.%i",
240 version->major, version->minor,
241 AGENT_MAJOR_VERSION, AGENT_MINOR_VERSION);
242 }
243
244 return is_supported;
245 }
246
247 /*
248 * Handle a new agent connection on the registration socket.
249 *
250 * Returns 0 on success, or else a negative errno value.
251 * On success, the resulting socket is returned through `agent_app_socket`
252 * and the application's reported id is updated through `agent_app_id`.
253 */
254 static int accept_agent_connection(
255 struct lttcomm_sock *reg_sock,
256 struct agent_app_id *agent_app_id,
257 struct lttcomm_sock **agent_app_socket)
258 {
259 int ret;
260 struct agent_protocol_version agent_version;
261 ssize_t size;
262 struct agent_register_msg msg;
263 struct lttcomm_sock *new_sock;
264
265 LTTNG_ASSERT(reg_sock);
266
267 new_sock = reg_sock->ops->accept(reg_sock);
268 if (!new_sock) {
269 ret = -ENOTCONN;
270 goto end;
271 }
272
273 size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
274 if (size < sizeof(msg)) {
275 if (size < 0) {
276 PERROR("Failed to register new agent application");
277 } else if (size != 0) {
278 ERR("Failed to register new agent application: invalid registration message length: expected length = %zu, message length = %zd",
279 sizeof(msg), size);
280 } else {
281 DBG("Failed to register new agent application: connection closed");
282 }
283 ret = -EINVAL;
284 goto error_close_socket;
285 }
286
287 agent_version = (struct agent_protocol_version) {
288 be32toh(msg.major_version),
289 be32toh(msg.minor_version),
290 };
291
292 /* Test communication protocol version of the registering agent. */
293 if (!is_agent_protocol_version_supported(&agent_version)) {
294 ret = -EINVAL;
295 goto error_close_socket;
296 }
297
298 *agent_app_id = (struct agent_app_id) {
299 .pid = (pid_t) be32toh(msg.pid),
300 .domain = (lttng_domain_type) be32toh(msg.domain),
301 };
302
303 DBG2("New registration for agent application: pid = %ld, domain = %s, socket fd = %d",
304 (long) agent_app_id->pid,
305 domain_type_str(agent_app_id->domain), new_sock->fd);
306
307 *agent_app_socket = new_sock;
308 new_sock = NULL;
309 ret = 0;
310 goto end;
311
312 error_close_socket:
313 new_sock->ops->close(new_sock);
314 lttcomm_destroy_sock(new_sock);
315 end:
316 return ret;
317 }
318
319 bool agent_tracing_is_enabled(void)
320 {
321 int enabled;
322
323 enabled = uatomic_read(&agent_tracing_enabled);
324 LTTNG_ASSERT(enabled != -1);
325 return enabled == 1;
326 }
327
328 /*
329 * Write agent TCP port using the rundir.
330 */
331 static int write_agent_port(uint16_t port)
332 {
333 return utils_create_pid_file(
334 (pid_t) port, the_config.agent_port_file_path.value);
335 }
336
/* Post the 'ready' semaphore the launcher thread is blocked on. */
static
void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking agent management thread as ready");
	sem_post(&notifiers->ready);
}
343
344 static
345 void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
346 {
347 DBG("Waiting for agent management thread to be ready");
348 sem_wait(&notifiers->ready);
349 DBG("Agent management thread is ready");
350 }
351
352 /*
353 * This thread manage application notify communication.
354 */
355 static void *thread_agent_management(void *data)
356 {
357 int i, ret, pollfd;
358 uint32_t revents, nb_fd;
359 struct lttng_poll_event events;
360 struct lttcomm_sock *reg_sock;
361 struct thread_notifiers *notifiers = (thread_notifiers *) data;
362 const int quit_pipe_read_fd = lttng_pipe_get_readfd(
363 notifiers->quit_pipe);
364
365 DBG("Manage agent application registration.");
366
367 rcu_register_thread();
368 rcu_thread_online();
369
370 /* Agent initialization call MUST be called before starting the thread. */
371 LTTNG_ASSERT(the_agent_apps_ht_by_sock);
372
373 /* Create pollset with size 2, quit pipe and registration socket. */
374 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
375 if (ret < 0) {
376 goto error_poll_create;
377 }
378
379 ret = lttng_poll_add(&events, quit_pipe_read_fd,
380 LPOLLIN | LPOLLERR);
381 if (ret < 0) {
382 goto error_tcp_socket;
383 }
384
385 reg_sock = init_tcp_socket();
386 if (reg_sock) {
387 uint16_t port;
388
389 ret = lttcomm_sock_get_port(reg_sock, &port);
390 LTTNG_ASSERT(ret == 0);
391
392 ret = write_agent_port(port);
393 if (ret) {
394 ERR("Failed to create agent port file: agent tracing will be unavailable");
395 /* Don't prevent the launch of the sessiond on error. */
396 mark_thread_as_ready(notifiers);
397 goto error;
398 }
399 } else {
400 /* Don't prevent the launch of the sessiond on error. */
401 mark_thread_as_ready(notifiers);
402 goto error_tcp_socket;
403 }
404
405 /*
406 * Signal that the agent thread is ready. The command thread
407 * may start to query whether or not agent tracing is enabled.
408 */
409 uatomic_set(&agent_tracing_enabled, 1);
410 mark_thread_as_ready(notifiers);
411
412 /* Add TCP socket to the poll set. */
413 ret = lttng_poll_add(&events, reg_sock->fd,
414 LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
415 if (ret < 0) {
416 goto error;
417 }
418
419 while (1) {
420 DBG3("Manage agent polling");
421
422 /* Inifinite blocking call, waiting for transmission */
423 restart:
424 ret = lttng_poll_wait(&events, -1);
425 DBG3("Manage agent return from poll on %d fds",
426 LTTNG_POLL_GETNB(&events));
427 if (ret < 0) {
428 /*
429 * Restart interrupted system call.
430 */
431 if (errno == EINTR) {
432 goto restart;
433 }
434 goto error;
435 }
436 nb_fd = ret;
437 DBG3("%d fd ready", nb_fd);
438
439 for (i = 0; i < nb_fd; i++) {
440 /* Fetch once the poll data */
441 revents = LTTNG_POLL_GETEV(&events, i);
442 pollfd = LTTNG_POLL_GETFD(&events, i);
443
444 /* Thread quit pipe has been closed. Killing thread. */
445 if (pollfd == quit_pipe_read_fd) {
446 goto exit;
447 }
448
449 /* Activity on the registration socket. */
450 if (revents & LPOLLIN) {
451 struct agent_app_id new_app_id;
452 struct agent_app *new_app = NULL;
453 struct lttcomm_sock *new_app_socket;
454 int new_app_socket_fd;
455
456 LTTNG_ASSERT(pollfd == reg_sock->fd);
457
458 ret = accept_agent_connection(
459 reg_sock, &new_app_id, &new_app_socket);
460 if (ret < 0) {
461 /* Errors are already logged. */
462 continue;
463 }
464
465 /*
466 * new_app_socket's ownership has been
467 * transferred to the new agent app.
468 */
469 new_app = agent_create_app(new_app_id.pid,
470 new_app_id.domain,
471 new_app_socket);
472 if (!new_app) {
473 new_app_socket->ops->close(
474 new_app_socket);
475 continue;
476 }
477 new_app_socket_fd = new_app_socket->fd;
478 new_app_socket = NULL;
479
480 /*
481 * Since this is a command socket (write then
482 * read), only add poll error event to only
483 * detect shutdown.
484 */
485 ret = lttng_poll_add(&events, new_app_socket_fd,
486 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
487 if (ret < 0) {
488 agent_destroy_app(new_app);
489 continue;
490 }
491
492 /*
493 * Prevent sessions from being modified while
494 * the agent application's configuration is
495 * updated.
496 */
497 session_lock_list();
498
499 /*
500 * Update the newly registered applications's
501 * configuration.
502 */
503 update_agent_app(new_app);
504
505 ret = agent_send_registration_done(new_app);
506 if (ret < 0) {
507 agent_destroy_app(new_app);
508 /* Removing from the poll set. */
509 ret = lttng_poll_del(&events,
510 new_app_socket_fd);
511 if (ret < 0) {
512 session_unlock_list();
513 goto error;
514 }
515 continue;
516 }
517
518 /* Publish the new agent app. */
519 agent_add_app(new_app);
520
521 session_unlock_list();
522 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
523 /* Removing from the poll set */
524 ret = lttng_poll_del(&events, pollfd);
525 if (ret < 0) {
526 goto error;
527 }
528 agent_destroy_app_by_sock(pollfd);
529 } else {
530 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
531 goto error;
532 }
533 }
534 }
535
536 exit:
537 /* Whatever happens, try to delete it and exit. */
538 (void) lttng_poll_del(&events, reg_sock->fd);
539 error:
540 destroy_tcp_socket(reg_sock);
541 error_tcp_socket:
542 lttng_poll_clean(&events);
543 error_poll_create:
544 uatomic_set(&agent_tracing_enabled, 0);
545 DBG("Cleaning up and stopping.");
546 rcu_thread_offline();
547 rcu_unregister_thread();
548 return NULL;
549 }
550
551 static bool shutdown_agent_management_thread(void *data)
552 {
553 struct thread_notifiers *notifiers = (thread_notifiers *) data;
554 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
555
556 return notify_thread_pipe(write_fd) == 1;
557 }
558
559 static void cleanup_agent_management_thread(void *data)
560 {
561 struct thread_notifiers *notifiers = (thread_notifiers *) data;
562
563 lttng_pipe_destroy(notifiers->quit_pipe);
564 sem_destroy(&notifiers->ready);
565 free(notifiers);
566 }
567
/*
 * Launch the agent management thread and block until it signals readiness
 * (or has failed in a way that still lets the sessiond run).
 *
 * Returns true on success, false if a resource could not be allocated or the
 * thread could not be created. On success, ownership of `notifiers` is held
 * by the thread (released through its cleanup callback).
 */
bool launch_agent_management_thread(void)
{
	struct thread_notifiers *notifiers;
	struct lttng_thread *thread;

	notifiers = (thread_notifiers *) zmalloc(sizeof(*notifiers));
	if (!notifiers) {
		goto error_alloc;
	}

	sem_init(&notifiers->ready, 0, 0);
	notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!notifiers->quit_pipe) {
		goto error;
	}
	/* cleanup_agent_management_thread() will free notifiers on teardown. */
	thread = lttng_thread_create("Agent management",
			thread_agent_management,
			shutdown_agent_management_thread,
			cleanup_agent_management_thread,
			notifiers);
	if (!thread) {
		goto error;
	}
	/* Block until the thread has posted its 'ready' semaphore. */
	wait_until_thread_is_ready(notifiers);
	lttng_thread_put(thread);
	return true;
error:
	cleanup_agent_management_thread(notifiers);
error_alloc:
	return false;
}
This page took 0.047431 seconds and 4 git commands to generate.