Fix: notification: kernel: consumption of event notification stalls
[lttng-tools.git] src/bin/lttng-sessiond/dispatch.cpp
/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#include <stddef.h>
#include <stdlib.h>
#include <urcu.h>
#include <common/futex.hpp>
#include <common/macros.hpp>

#include "dispatch.hpp"
#include "ust-app.hpp"
#include "testpoint.hpp"
#include "fd-limit.hpp"
#include "health-sessiond.hpp"
#include "lttng-sessiond.hpp"
#include "thread.hpp"

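/*
 * State shared between the thread launcher, the dispatch thread itself and
 * its shutdown callback. 'dispatch_thread_exit' is set by the shutdown
 * callback and read by the thread through CMM_LOAD_SHARED().
 */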
struct thread_notifiers {
	struct ust_cmd_queue *ust_cmd_queue;
	int apps_cmd_pipe_write_fd;
	int apps_cmd_notify_pipe_write_fd;
	int dispatch_thread_exit;
};

/*
 * For each tracing session, update the newly registered apps. The session
 * list lock MUST be held by the caller.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;
	const struct ltt_session_list *session_list = session_get_list();
	struct ust_app *app;

	/*
	 * The consumer is in an ERROR state, i.e. dead. Stop any application
	 * update.
	 */
	if (uatomic_read(&the_ust_consumerd_state) == CONSUMER_ERROR) {
		return;
	}

	rcu_read_lock();
	LTTNG_ASSERT(app_sock >= 0);
	app = ust_app_find_by_sock(app_sock);
	if (app == NULL) {
		/*
		 * The application may have unregistered already, in which
		 * case there is nothing to do; simply stop the update.
		 */
		DBG3("UST app update failed to find app sock %d", app_sock);
		goto unlock_rcu;
	}

	/* Update all event notifiers for the app. */
	ust_app_global_update_event_notifier_rules(app);

	/* For each tracing session. */
	cds_list_for_each_entry_safe(sess, stmp, &session_list->head, list) {
		if (!session_get(sess)) {
			continue;
		}
		session_lock(sess);
		if (!sess->active || !sess->ust_session ||
				!sess->ust_session->active) {
			goto unlock_session;
		}

		ust_app_global_update(sess->ust_session, app);
	unlock_session:
		session_unlock(sess);
		session_put(sess);
	}

unlock_rcu:
	rcu_read_unlock();
}

/*
 * Sanitize the wait queue of the dispatch registration thread, that is,
 * remove its invalid nodes. This avoids leaking memory when an application's
 * UST notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	LTTNG_ASSERT(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		LTTNG_ASSERT(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll without blocking so we can quickly identify the faulty events
	 * and then clean them up from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				/*
				 * Silence warning of use-after-free in
				 * cds_list_for_each_entry_safe which uses
				 * __typeof__(*wait_node).
				 */
				wait_node = NULL;
				break;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d node(s) were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}

/*
 * Send a socket to a thread. This is called from the dispatch UST
 * registration thread once all sockets are set for the application.
 *
 * The sock value can be invalid; we don't really care since the receiving
 * thread will handle it and perform any necessary cleanup.
 *
 * On success, return 0; otherwise, return a negative value corresponding to
 * the errno of the failed write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * The fd may have been concurrently set to -1 just before this call
	 * if the target thread is in its shutdown state.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	/*
	 * Cast sizeof() to ssize_t so a negative return value from
	 * lttng_write() is not promoted to unsigned and missed.
	 */
	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < (ssize_t) sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the positive write() return value. */
	ret = 0;
error:
	return (int) ret;
}

static void cleanup_ust_dispatch_thread(void *data)
{
	free(data);
}

/*
 * Dispatch requests from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
		.head = {},
	};
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	rcu_register_thread();

	health_register(the_health_sessiond,
			HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

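	/*
	 * Note on the flow implemented below: registration happens in two
	 * steps. A command socket first results in a ust_app object that is
	 * parked in the local wait queue; the application is only published
	 * to the global hash table once its notify socket has also been
	 * received.
	 */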
	for (;;) {
		health_code_update();

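		/*
		 * liburcu futex_nto1_prepare()/futex_nto1_wait() idiom:
		 * prepare the futex before draining the queue so that a
		 * wake-up posted between the drain and the wait at the
		 * bottom of the loop is not lost.
		 */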
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&notifiers->ust_cmd_queue->futex);

		if (CMM_LOAD_SHARED(notifiers->dispatch_thread_exit)) {
			break;
		}

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have nodes that have hung up
			 * before receiving the notify socket. This cleans up
			 * the list and avoids leaking memory for notify
			 * sockets that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(
					&notifiers->ust_cmd_queue->head,
					&notifiers->ust_cmd_queue->tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == LTTNG_UST_CTL_SOCKET_CMD) {
				wait_node = zmalloc<ust_reg_wait_node>();
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					ust_cmd = NULL;
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create the application object if the socket is a CMD socket. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					wait_node = NULL;
					free(ust_cmd);
					ust_cmd = NULL;
					continue;
				}
				/*
				 * Add the application to the wait queue so the notify
				 * socket can be set before the object is put in the
				 * global hash table.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				ust_cmd = NULL;
				/*
				 * Continue here: the notify socket has not been
				 * received yet and the application MUST only be
				 * added to the hash table at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and
				 * set the notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						wait_node = NULL;
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no matching application at this stage, the
				 * received socket is useless, so close it before
				 * freeing the command data structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
				ust_cmd = NULL;
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so that, from the
				 * registration up to the registration-done message,
				 * no other thread can see the application and change
				 * its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add the application to the global hash table. This
				 * needs to be done before the update so the UST
				 * registry can locate the application.
				 */
				ust_app_add(app);

				/* Set the app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				(void) ust_app_setup_event_notifier_group(app);

				/* Send the notify socket through the notify pipe. */
				ret = send_socket_to_thread(
						notifiers->apps_cmd_notify_pipe_write_fd,
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread; stop the UST tracing.
					 * However, this is not an internal error of this
					 * thread, so set the health error code to a
					 * normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update the newly registered application with the
				 * information of tracing sessions that are already
				 * active.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about the return value. Let the manage
				 * apps thread handle app unregistration upon socket
				 * close.
				 */
				(void) ust_app_register_done(app);

				/*
				 * Even if the application socket has been closed, send
				 * the app to the thread; unregistration will take place
				 * there.
				 */
				ret = send_socket_to_thread(
						notifiers->apps_cmd_pipe_write_fd,
						app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps thread; stop the UST tracing. However,
					 * this is not an internal error of this thread,
					 * so set the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex(). */
		futex_nto1_wait(&notifiers->ust_cmd_queue->futex);
		health_poll_exit();
	}
	/* Normal exit, no error. */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	/* Empty command queue. */
	for (;;) {
		/* Dequeue command for registration */
		node = cds_wfcq_dequeue_blocking(
				&notifiers->ust_cmd_queue->head,
				&notifiers->ust_cmd_queue->tail);
		if (node == NULL) {
			break;
		}
		ust_cmd = caa_container_of(node, struct ust_command, node);
		ret = close(ust_cmd->sock);
		if (ret < 0) {
			PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ust_cmd);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(the_health_sessiond);
	rcu_unregister_thread();
	return NULL;
}

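/*
 * Shutdown handshake: store the exit flag, then wake the command queue
 * futex so the dispatch thread observes the flag at the top of its loop
 * and breaks out.
 */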
static bool shutdown_ust_dispatch_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	CMM_STORE_SHARED(notifiers->dispatch_thread_exit, 1);
	futex_nto1_wake(&notifiers->ust_cmd_queue->futex);
	return true;
}

bool launch_ust_dispatch_thread(struct ust_cmd_queue *cmd_queue,
		int apps_cmd_pipe_write_fd,
		int apps_cmd_notify_pipe_write_fd)
{
	struct lttng_thread *thread;
	struct thread_notifiers *notifiers;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error;
	}
	notifiers->ust_cmd_queue = cmd_queue;
	notifiers->apps_cmd_pipe_write_fd = apps_cmd_pipe_write_fd;
	notifiers->apps_cmd_notify_pipe_write_fd = apps_cmd_notify_pipe_write_fd;

	thread = lttng_thread_create("UST registration dispatch",
			thread_dispatch_ust_registration,
			shutdown_ust_dispatch_thread,
			cleanup_ust_dispatch_thread,
			notifiers);
	if (!thread) {
		goto error;
	}
	lttng_thread_put(thread);
	return true;
error:
	free(notifiers);
	return false;
}