lttng-tools.git: src/bin/lttng-sessiond/agent-thread.c
/*
 * Copyright (C) 2013 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <assert.h>

#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/uri.h>
#include <common/utils.h>

#include <common/compat/endian.h>

#include "fd-limit.h"
#include "agent-thread.h"
#include "agent.h"
#include "lttng-sessiond.h"
#include "session.h"
#include "utils.h"
#include "thread.h"

struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	sem_t ready;
};

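/*
 * Tri-state flag: -1 until the agent thread has tried to set up its
 * registration socket, then 1 if agent tracing is available or 0 if it had
 * to be deactivated.
 */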
static int agent_tracing_enabled = -1;

/*
 * Note that there is no port here. It is set after this URI is parsed so
 * that the user can define a custom one. However, localhost is ALWAYS the
 * default listening address.
 */
static const char *default_reg_uri =
	"tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS;

/*
 * Update agent application using the given socket. This is done just after
 * registration was successful.
 *
 * This is quite a heavy call in terms of locking since both the session list
 * lock AND the session lock are acquired.
 */
static void update_agent_app(struct agent_app *app)
{
	struct ltt_session *session, *stmp;
	struct ltt_session_list *list;

	list = session_get_list();
	assert(list);

	session_lock_list();
	cds_list_for_each_entry_safe(session, stmp, &list->head, list) {
		if (!session_get(session)) {
			continue;
		}

		session_lock(session);
		if (session->ust_session) {
			struct agent *agt;

			rcu_read_lock();
			agt = trace_ust_find_agent(session->ust_session, app->domain);
			if (agt) {
				agent_update(agt, app->sock->fd);
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();
}

/*
 * Create and init socket from uri.
 */
static struct lttcomm_sock *init_tcp_socket(void)
{
	int ret;
	struct lttng_uri *uri = NULL;
	struct lttcomm_sock *sock = NULL;
	unsigned int port;
	bool bind_succeeded = false;

	/*
	 * This should never fail since the URI is hardcoded and the port is set
	 * before this thread is launched.
	 */
	ret = uri_parse(default_reg_uri, &uri);
	assert(ret);
	assert(config.agent_tcp_port.begin > 0);
	uri->port = config.agent_tcp_port.begin;

	sock = lttcomm_alloc_sock_from_uri(uri);
	uri_free(uri);
	if (sock == NULL) {
		ERR("[agent-thread] agent allocating TCP socket");
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto error;
	}

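	/*
	 * Walk the configured agent port range and bind to the first free
	 * port; any bind error other than EADDRINUSE aborts the setup.
	 */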
	for (port = config.agent_tcp_port.begin;
			port <= config.agent_tcp_port.end; port++) {
		ret = lttcomm_sock_set_port(sock, (uint16_t) port);
		if (ret) {
			ERR("[agent-thread] Failed to set port %u on socket",
					port);
			goto error;
		}
		DBG3("[agent-thread] Trying to bind on port %u", port);
		ret = sock->ops->bind(sock);
		if (!ret) {
			bind_succeeded = true;
			break;
		}

		if (errno == EADDRINUSE) {
			DBG("Failed to bind to port %u since it is already in use",
					port);
		} else {
			PERROR("Failed to bind to port %u", port);
			goto error;
		}
	}

	if (!bind_succeeded) {
		if (config.agent_tcp_port.begin == config.agent_tcp_port.end) {
			WARN("Another process is already using the agent port %i. "
					"Agent support will be deactivated.",
					config.agent_tcp_port.begin);
			goto error;
		} else {
			WARN("All ports in the range [%i, %i] are already in use. "
					"Agent support will be deactivated.",
					config.agent_tcp_port.begin,
					config.agent_tcp_port.end);
			goto error;
		}
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	DBG("[agent-thread] Listening on TCP port %u and socket %d",
			port, sock->fd);

	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	return NULL;
}

/*
 * Close and destroy the given TCP socket.
 */
static void destroy_tcp_socket(struct lttcomm_sock *sock)
{
	int ret;
	uint16_t port;

	assert(sock);

	ret = lttcomm_sock_get_port(sock, &port);
	if (ret) {
		ERR("[agent-thread] Failed to get port of agent TCP socket");
		port = 0;
	}

	DBG3("[agent-thread] Destroy TCP socket on port %" PRIu16,
			port);

	/* This will return gracefully if fd is invalid. */
	sock->ops->close(sock);
	lttcomm_destroy_sock(sock);
}

/*
 * Handle a new agent registration using the registration socket. On success,
 * the new agent application is added to the global hash table. If r_app is
 * not NULL, it is set to the newly created app.
 *
 * Return the new FD created upon accept() on success or else a negative errno
 * value.
 */
static int handle_registration(struct lttcomm_sock *reg_sock,
		struct agent_app **r_app)
{
	int ret;
	pid_t pid;
	uint32_t major_version, minor_version;
	ssize_t size;
	enum lttng_domain_type domain;
	struct agent_app *app;
	struct agent_register_msg msg;
	struct lttcomm_sock *new_sock;

	assert(reg_sock);

	new_sock = reg_sock->ops->accept(reg_sock);
	if (!new_sock) {
		ret = -ENOTCONN;
		goto error;
	}

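	/*
	 * Receive the registration message; its fields are expected in
	 * network byte order and are converted below with be32toh().
	 */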
	size = new_sock->ops->recvmsg(new_sock, &msg, sizeof(msg), 0);
	if (size < (ssize_t) sizeof(msg)) {
		ret = -EINVAL;
		goto error_socket;
	}
	domain = be32toh(msg.domain);
	pid = be32toh(msg.pid);
	major_version = be32toh(msg.major_version);
	minor_version = be32toh(msg.minor_version);

	/* Check the communication protocol version of the registering agent. */
	if (major_version != AGENT_MAJOR_VERSION) {
		ret = -EINVAL;
		goto error_socket;
	}
	if (minor_version != AGENT_MINOR_VERSION) {
		ret = -EINVAL;
		goto error_socket;
	}

	DBG2("[agent-thread] New registration for pid %d domain %d on socket %d",
			pid, domain, new_sock->fd);

	app = agent_create_app(pid, domain, new_sock);
	if (!app) {
		ret = -ENOMEM;
		goto error_socket;
	}

	/*
	 * Add before assigning the socket value to the UST app so it can be found
	 * concurrently.
	 */
	agent_add_app(app);

	/*
	 * We don't need to attach the agent app to the app. If we ever do so, we
	 * should consider both registration order of agent before app and app
	 * before agent.
	 */

	if (r_app) {
		*r_app = app;
	}

	return new_sock->fd;

error_socket:
	new_sock->ops->close(new_sock);
	lttcomm_destroy_sock(new_sock);
error:
	return ret;
}

bool agent_tracing_is_enabled(void)
{
	int enabled;

	enabled = uatomic_read(&agent_tracing_enabled);
	assert(enabled != -1);
	return enabled == 1;
}

/*
 * Write agent TCP port using the rundir.
 */
static int write_agent_port(uint16_t port)
{
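	/* Reuse the PID file helper to write the port number to the run directory. */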
	return utils_create_pid_file((pid_t) port,
			config.agent_port_file_path.value);
}

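/*
 * The 'ready' semaphore lets launch_agent_management_thread() block until
 * the agent thread has either bound its registration socket or given up on
 * agent support.
 */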
static
void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking agent management thread as ready");
	sem_post(&notifiers->ready);
}

static
void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
{
	DBG("Waiting for agent management thread to be ready");
	sem_wait(&notifiers->ready);
	DBG("Agent management thread is ready");
}

/*
 * This thread manages agent application registration.
 */
static void *thread_agent_management(void *data)
{
	int i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *reg_sock;
	struct thread_notifiers *notifiers = data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(
			notifiers->quit_pipe);

	DBG("[agent-thread] Manage agent application registration.");

	rcu_register_thread();
	rcu_thread_online();

	/* Agent initialization call MUST be called before starting the thread. */
	assert(agent_apps_ht_by_sock);

	/* Create pollset with size 2, quit pipe and registration socket. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, quit_pipe_read_fd,
			LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error_tcp_socket;
	}

	reg_sock = init_tcp_socket();
	if (reg_sock) {
		uint16_t port;

		ret = lttcomm_sock_get_port(reg_sock, &port);
		assert(ret == 0);

		ret = write_agent_port(port);
		if (ret) {
			ERR("[agent-thread] Failed to create agent port file: agent tracing will be unavailable");
			/* Don't prevent the launch of the sessiond on error. */
			mark_thread_as_ready(notifiers);
			goto error;
		}
	} else {
		/* Don't prevent the launch of the sessiond on error. */
		mark_thread_as_ready(notifiers);
		goto error_tcp_socket;
	}

	/*
	 * Signal that the agent thread is ready. The command thread
	 * may start to query whether or not agent tracing is enabled.
	 */
	uatomic_set(&agent_tracing_enabled, 1);
	mark_thread_as_ready(notifiers);

	/* Add TCP socket to poll set. */
	ret = lttng_poll_add(&events, reg_sock->fd,
			LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG3("[agent-thread] Manage agent polling");

		/* Infinite blocking call, waiting for transmission. */
restart:
		ret = lttng_poll_wait(&events, -1);
		DBG3("[agent-thread] Manage agent return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;
		DBG3("[agent-thread] %d fd ready", nb_fd);

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data. */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Activity on the quit pipe; terminate the thread. */
			if (pollfd == quit_pipe_read_fd) {
				goto exit;
			}

			if (revents & LPOLLIN) {
				int new_fd;
				struct agent_app *app = NULL;

				assert(pollfd == reg_sock->fd);
				new_fd = handle_registration(reg_sock, &app);
				if (new_fd < 0) {
					continue;
				}
				/* Should not have a NULL app on success. */
				assert(app);

				/*
				 * Since this is a command socket (write then read),
				 * only add poll error events to detect a shutdown.
				 */
				ret = lttng_poll_add(&events, new_fd,
						LPOLLERR | LPOLLHUP | LPOLLRDHUP);
				if (ret < 0) {
					agent_destroy_app_by_sock(new_fd);
					continue;
				}

				/* Update newly registered app. */
				update_agent_app(app);

				/* On failure, the poll will detect it and clean it up. */
				ret = agent_send_registration_done(app);
				if (ret < 0) {
					/* Remove from the poll set. */
					ret = lttng_poll_del(&events, new_fd);
					if (ret < 0) {
						goto error;
					}
					agent_destroy_app_by_sock(new_fd);
					continue;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				/* Remove from the poll set. */
				ret = lttng_poll_del(&events, pollfd);
				if (ret < 0) {
					goto error;
				}
				agent_destroy_app_by_sock(pollfd);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
	/* Whatever happens, try to delete it and exit. */
	(void) lttng_poll_del(&events, reg_sock->fd);
error:
	destroy_tcp_socket(reg_sock);
error_tcp_socket:
	lttng_poll_clean(&events);
error_poll_create:
	uatomic_set(&agent_tracing_enabled, 0);
	DBG("[agent-thread] Cleaning up and stopping.");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

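/*
 * Wake up the agent thread by writing to its quit pipe; the poll loop exits
 * as soon as the read end becomes readable.
 */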
static bool shutdown_agent_management_thread(void *data)
{
	struct thread_notifiers *notifiers = data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}

static void cleanup_agent_management_thread(void *data)
{
	struct thread_notifiers *notifiers = data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	sem_destroy(&notifiers->ready);
	free(notifiers);
}

bool launch_agent_management_thread(void)
{
	struct thread_notifiers *notifiers;
	struct lttng_thread *thread;

	notifiers = zmalloc(sizeof(*notifiers));
	if (!notifiers) {
		goto error_alloc;
	}

	sem_init(&notifiers->ready, 0, 0);
	notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!notifiers->quit_pipe) {
		goto error;
	}
	thread = lttng_thread_create("Agent management",
			thread_agent_management,
			shutdown_agent_management_thread,
			cleanup_agent_management_thread,
			notifiers);
	if (!thread) {
		goto error;
	}
	wait_until_thread_is_ready(notifiers);
	lttng_thread_put(thread);
	return true;
error:
	cleanup_agent_management_thread(notifiers);
error_alloc:
	return false;
}