Fix: sessiond: leak of application context in channel
[lttng-tools.git] src/bin/lttng-sessiond/dispatch.cpp
/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#include "dispatch.hpp"
#include "fd-limit.hpp"
#include "health-sessiond.hpp"
#include "lttng-sessiond.hpp"
#include "testpoint.hpp"
#include "thread.hpp"
#include "ust-app.hpp"

#include <common/futex.hpp>
#include <common/macros.hpp>
#include <common/urcu.hpp>

#include <stddef.h>
#include <stdlib.h>
#include <urcu.h>
namespace {
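/*
 * Context handed to the dispatch thread: the shared UST command queue it
 * consumes from, the write ends of the pipes used to hand sockets off to the
 * apps and notify threads, and the flag used to request its shutdown.
 */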
struct thread_notifiers {
	struct ust_cmd_queue *ust_cmd_queue;
	int apps_cmd_pipe_write_fd;
	int apps_cmd_notify_pipe_write_fd;
	int dispatch_thread_exit;
};
} /* namespace */

/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;
	const struct ltt_session_list *session_list = session_get_list();
	struct ust_app *app;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&the_ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	lttng::urcu::read_lock_guard read_lock;
	LTTNG_ASSERT(app_sock >= 0);
	app = ust_app_find_by_sock(app_sock);
	if (app == nullptr) {
		/*
		 * The application may have already unregistered, in which
		 * case we simply stop the update.
		 */
		DBG3("UST app update failed to find app sock %d", app_sock);
		return;
	}

	/* Update all event notifiers for the app. */
	ust_app_global_update_event_notifier_rules(app);

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe (sess, stmp, &session_list->head, list) {
		if (!session_get(sess)) {
			continue;
		}
		session_lock(sess);
		if (!sess->active || !sess->ust_session || !sess->ust_session->active) {
			goto unlock_session;
		}

		ust_app_global_update(sess->ust_session, app);
	unlock_session:
		session_unlock(sess);
		session_put(sess);
	}
}

/*
 * Sanitize the wait queue of the dispatch registration thread by removing
 * invalid nodes from it. This avoids memory leaks when the UST notify socket
 * for a node is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = nullptr, *tmp_wait_node;

	LTTNG_ASSERT(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

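	/*
	 * Register every pending application's command socket for LPOLLIN.
	 * poll() reports hang-up conditions (LPOLLHUP/LPOLLERR) regardless of
	 * the requested events; those are the ones acted upon below.
	 */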
	cds_list_for_each_entry_safe (wait_node, tmp_wait_node, &wait_queue->head, head) {
		LTTNG_ASSERT(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock, LPOLLIN);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		cds_list_for_each_entry_safe (wait_node, tmp_wait_node, &wait_queue->head, head) {
			if (pollfd == wait_node->app->sock && (revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				/*
				 * Silence warning of use-after-free in
				 * cds_list_for_each_entry_safe which uses
				 * __typeof__(*wait_node).
				 */
				wait_node = nullptr;
				break;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}

/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid; we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0; otherwise return a negative value corresponding to
 * the errno of the failed write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * The FD can concurrently be set to -1 just before this function is
	 * called, when the thread is being shut down.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	/*
	 * Cast to ssize_t so that a negative ret is not silently converted to
	 * an unsigned value, which would hide a failed write.
	 */
	if (ret < static_cast<ssize_t>(sizeof(sock))) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the positive return value of the write. */
	ret = 0;
error:
	return (int) ret;
}

static void cleanup_ust_dispatch_thread(void *data)
{
	free(data);
}

/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = nullptr;
	struct ust_reg_wait_node *wait_node = nullptr, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
		.head = {},
	};
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	rcu_register_thread();

	health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

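	/*
	 * Outer loop: sleep on the command queue's futex, then drain every
	 * queued registration in the inner do/while before sleeping again.
	 */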
	for (;;) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&notifiers->ust_cmd_queue->futex);

		if (CMM_LOAD_SHARED(notifiers->dispatch_thread_exit)) {
			break;
		}

		do {
			struct ust_app *app = nullptr;
			ust_cmd = nullptr;

			/*
			 * Make sure we don't have nodes that have hung up
			 * before receiving the notify socket. This cleans the
			 * list to avoid memory leaks from notify sockets that
			 * are never received.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(&notifiers->ust_cmd_queue->head,
							 &notifiers->ust_cmd_queue->tail);
			if (node == nullptr) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = lttng::utils::container_of(node, &ust_command::node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
			    " gid:%d sock:%d name:%s (version %d.%d)",
			    ust_cmd->reg_msg.pid,
			    ust_cmd->reg_msg.ppid,
			    ust_cmd->reg_msg.uid,
			    ust_cmd->reg_msg.gid,
			    ust_cmd->sock,
			    ust_cmd->reg_msg.name,
			    ust_cmd->reg_msg.major,
			    ust_cmd->reg_msg.minor);

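			/*
			 * Each application registers twice: once over a
			 * command socket (LTTNG_UST_CTL_SOCKET_CMD) and once
			 * over a notify socket. The first registration parks
			 * the application on the wait queue; the second one,
			 * matched by PID, completes it.
			 */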
			if (ust_cmd->reg_msg.type == LTTNG_UST_CTL_SOCKET_CMD) {
				wait_node = zmalloc<ust_reg_wait_node>();
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					ust_cmd = nullptr;
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg, ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					wait_node = nullptr;
					free(ust_cmd);
					ust_cmd = nullptr;
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				ust_cmd = nullptr;
				/*
				 * We have to continue here since we don't have the notify
				 * socket yet; the application MUST be added to the hash
				 * table only once the notify socket is received.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe (
					wait_node, tmp_wait_node, &wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						wait_node = nullptr;
						DBG3("UST app notify socket %d is set",
						     ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage, the received socket is
				 * basically useless, so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
				ust_cmd = nullptr;
			}

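			/*
			 * At this point the application, if any, holds both
			 * its command and notify sockets; it can now be
			 * published and its registration completed.
			 */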
			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so that, from the registration
				 * up to the registration-done message, no thread can see the
				 * application and change its state.
				 */
				session_lock_list();
				lttng::urcu::read_lock_guard read_lock;

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				(void) ust_app_setup_event_notifier_group(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(
					notifiers->apps_cmd_notify_pipe_write_fd, app->notify_sock);
				if (ret < 0) {
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this
					 * is not an internal error of this thread, so set the
					 * health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update the newly registered application with the tracing
				 * registry information that is already enabled.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app);

				/*
				 * Even if the application socket has been closed, send the
				 * app to the thread; unregistration will take place there.
				 */
				ret = send_socket_to_thread(notifiers->apps_cmd_pipe_write_fd,
							    app->sock);
				if (ret < 0) {
					session_unlock_list();
					/*
					 * No apps thread, stop the UST tracing. However, this
					 * is not an internal error of this thread, so set the
					 * health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				session_unlock_list();
			}
		} while (node != nullptr);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&notifiers->ust_cmd_queue->futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe (wait_node, tmp_wait_node, &wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	/* Empty command queue. */
	for (;;) {
		/* Dequeue command for registration */
		node = cds_wfcq_dequeue_blocking(&notifiers->ust_cmd_queue->head,
						 &notifiers->ust_cmd_queue->tail);
		if (node == nullptr) {
			break;
		}
		ust_cmd = lttng::utils::container_of(node, &ust_command::node);
		ret = close(ust_cmd->sock);
		if (ret < 0) {
			PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ust_cmd);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(the_health_sessiond);
	rcu_unregister_thread();
	return nullptr;
}

static bool shutdown_ust_dispatch_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	CMM_STORE_SHARED(notifiers->dispatch_thread_exit, 1);
	futex_nto1_wake(&notifiers->ust_cmd_queue->futex);
	return true;
}

bool launch_ust_dispatch_thread(struct ust_cmd_queue *cmd_queue,
				int apps_cmd_pipe_write_fd,
				int apps_cmd_notify_pipe_write_fd)
{
	struct lttng_thread *thread;
	struct thread_notifiers *notifiers;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error;
	}
	notifiers->ust_cmd_queue = cmd_queue;
	notifiers->apps_cmd_pipe_write_fd = apps_cmd_pipe_write_fd;
	notifiers->apps_cmd_notify_pipe_write_fd = apps_cmd_notify_pipe_write_fd;

	thread = lttng_thread_create("UST registration dispatch",
				     thread_dispatch_ust_registration,
				     shutdown_ust_dispatch_thread,
				     cleanup_ust_dispatch_thread,
				     notifiers);
	if (!thread) {
		goto error;
	}
	lttng_thread_put(thread);
	return true;
error:
	free(notifiers);
	return false;
}