Fix: liblttng-ust-fork deadlock
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
/*
 * lttng-ust-comm.c
 *
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <assert.h>
#include <signal.h>
#include <urcu/uatomic.h>
#include <urcu/futex.h>
#include <urcu/compiler.h>

#include <lttng/ust-events.h>
#include <lttng/ust-abi.h>
#include <lttng/ust.h>
#include <ust-comm.h>
#include <usterr-signal-safe.h>
#include "tracepoint-internal.h"
#include "ltt-tracer-core.h"
#include "compat.h"
#include "../libringbuffer/tlsfixup.h"

/*
 * Has the lttng-ust comm constructor been called?
 */
static int initialized;

/*
 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
 * Held while handling a command, and also held across fork() (to deal
 * with removal of the listener threads) and by the exit path.
 */

/* Should the ust comm thread quit ? */
static int lttng_ust_comm_should_quit;

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from sessiond daemon
 *   (will let the sessiond daemon enable sessions before main
 *   starts.)
 * - sessiond daemon is not reachable.
 * - timeout (ensuring applications are resilient to session
 *   daemon problems).
 */
static sem_t constructor_wait;
/*
 * Counted down once for the global and once for the local (per-user)
 * sessiond; the semaphore is posted when the count reaches zero.
 */
static int sem_count = { 2 };

/*
 * Counting nesting within lttng-ust. Used to ensure that calling fork()
 * from liblttng-ust does not execute the pre/post fork handlers.
 */
static int __thread lttng_ust_nest_count;

/*
 * Info about socket and associated listener thread.
 */
struct sock_info {
	const char *name;
	pthread_t ust_listener;	/* listener thread */
	int root_handle;
	int constructor_sem_posted;
	int allowed;
	int global;

	char sock_path[PATH_MAX];
	int socket;

	char wait_shm_path[PATH_MAX];
	char *wait_shm_mmap;
};

/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.global = 1,

	.root_handle = -1,
	.allowed = 1,

	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
	.socket = -1,

	.wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
};

/* TODO: allow global_apps_sock_path override */

struct sock_info local_apps = {
	.name = "local",
	.global = 0,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */

	.socket = -1,
};

static int wait_poll_fallback;

extern void ltt_ring_buffer_client_overwrite_init(void);
extern void ltt_ring_buffer_client_discard_init(void);
extern void ltt_ring_buffer_metadata_client_init(void);
extern void ltt_ring_buffer_client_overwrite_exit(void);
extern void ltt_ring_buffer_client_discard_exit(void);
extern void ltt_ring_buffer_metadata_client_exit(void);

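/*
 * Resolve the per-user ("local") socket and wait shm paths from HOME
 * and the current uid. Per-user tracing is disallowed for setuid
 * binaries (uid != euid).
 */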
static
int setup_local_apps(void)
{
	const char *home_dir;
	uid_t uid;

	uid = getuid();
	/*
	 * Disallow per-user tracing for setuid binaries.
	 */
	if (uid != geteuid()) {
		local_apps.allowed = 0;
		return 0;
	} else {
		local_apps.allowed = 1;
	}
	home_dir = (const char *) getenv("HOME");
	if (!home_dir)
		return -ENOENT;
	snprintf(local_apps.sock_path, PATH_MAX,
		 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
	snprintf(local_apps.wait_shm_path, PATH_MAX,
		 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
	return 0;
}

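/*
 * Send the registration message (ABI version, pid, ppid, uid, gid,
 * bitness and process name) to the session daemon on an already
 * connected socket.
 */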
static
int register_app_to_sessiond(int socket)
{
	ssize_t ret;
	struct {
		uint32_t major;
		uint32_t minor;
		pid_t pid;
		pid_t ppid;
		uid_t uid;
		gid_t gid;
		uint32_t bits_per_long;
		char name[16];	/* process name */
	} reg_msg;

	reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
	reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
	reg_msg.pid = getpid();
	reg_msg.ppid = getppid();
	reg_msg.uid = getuid();
	reg_msg.gid = getgid();
	reg_msg.bits_per_long = CAA_BITS_PER_LONG;
	lttng_ust_getprocname(reg_msg.name);

	ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
	if (ret >= 0 && ret != sizeof(reg_msg))
		return -EIO;
	return ret;
}

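/* Send a command reply back to the session daemon. */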
static
int send_reply(int sock, struct ustcomm_ust_reply *lur)
{
	ssize_t len;

	len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
	switch (len) {
	case sizeof(*lur):
		DBG("message successfully sent");
		return 0;
	case -1:
		if (errno == ECONNRESET) {
			printf("remote end closed connection\n");
			return 0;
		}
		return -1;
	default:
		printf("incorrect message size: %zd\n", len);
		return -1;
	}
}

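/*
 * Mark this sock_info as registered and decrement the constructor
 * semaphore count; the semaphore is posted once both the global and
 * local (per-user) registrations are done, unblocking the application
 * constructor.
 */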
static
int handle_register_done(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->constructor_sem_posted)
		return 0;
	sock_info->constructor_sem_posted = 1;
	if (uatomic_read(&sem_count) <= 0) {
		return 0;
	}
	ret = uatomic_add_return(&sem_count, -1);
	if (ret == 0) {
		ret = sem_post(&constructor_wait);
		assert(!ret);
	}
	return 0;
}

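/*
 * Handle one command received from the session daemon, under ust_lock
 * protection, then send the reply (plus the shm and wait file
 * descriptors for stream/channel/metadata commands).
 */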
static
int handle_message(struct sock_info *sock_info,
		int sock, struct ustcomm_ust_msg *lum)
{
	int ret = 0;
	const struct lttng_ust_objd_ops *ops;
	struct ustcomm_ust_reply lur;
	int shm_fd, wait_fd;
	union ust_args args;
	ssize_t len;

	ust_lock();

	memset(&lur, 0, sizeof(lur));

	if (lttng_ust_comm_should_quit) {
		ret = -EPERM;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = lttng_ust_objd_unref(lum->handle);
		break;
	default:
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args);
		else
			ret = -ENOSYS;
		break;
	}

end:
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = USTCOMM_OK;
	} else {
		//lur.ret_code = USTCOMM_SESSION_FAIL;
		lur.ret_code = ret;
	}
	if (ret >= 0) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			/*
			 * Special-case reply to send stream info.
			 * Use lum.u output.
			 */
			lur.u.stream.memory_map_size = *args.stream.memory_map_size;
			shm_fd = *args.stream.shm_fd;
			wait_fd = *args.stream.wait_fd;
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			lur.u.channel.memory_map_size = *args.channel.memory_map_size;
			shm_fd = *args.channel.shm_fd;
			wait_fd = *args.channel.wait_fd;
			break;
		case LTTNG_UST_TRACER_VERSION:
			lur.u.version = lum->u.version;
			break;
		case LTTNG_UST_TRACEPOINT_LIST_GET:
			memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
			break;
		}
	}
	ret = send_reply(sock, &lur);
	if (ret < 0) {
		perror("error sending reply");
		goto error;
	}

	if ((lum->cmd == LTTNG_UST_STREAM
			|| lum->cmd == LTTNG_UST_CHANNEL
			|| lum->cmd == LTTNG_UST_METADATA)
			&& lur.ret_code == USTCOMM_OK) {
		int sendret = 0;

		/* we also need to send the file descriptors. */
		ret = ustcomm_send_fds_unix_sock(sock,
			&shm_fd, &shm_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send shm_fd");
			sendret = ret;
		}
		/*
		 * The sessiond expects 2 file descriptors, even upon
		 * error.
		 */
		ret = ustcomm_send_fds_unix_sock(sock,
			&wait_fd, &wait_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send wait_fd");
			goto error;
		}
		if (sendret) {
			ret = sendret;
			goto error;
		}
	}
	/*
	 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
	 * after the reply.
	 */
	if (lur.ret_code == USTCOMM_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
			len = ustcomm_send_unix_sock(sock,
				&args.field_list.entry,
				sizeof(args.field_list.entry));
			if (len != sizeof(args.field_list.entry)) {
				ret = -1;
				goto error;
			}
		}
	}
	/*
	 * We still have the memory map reference, and the fds have been
	 * sent to the sessiond. We can therefore close those fds. Note
	 * that we keep the write side of the wait_fd open, but close
	 * the read side.
	 */
	if (lur.ret_code == USTCOMM_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing stream shm_fd");
				}
				*args.stream.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing stream wait_fd");
				}
				*args.stream.wait_fd = -1;
			}
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing channel shm_fd");
				}
				*args.channel.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing channel wait_fd");
				}
				*args.channel.wait_fd = -1;
			}
			break;
		}
	}

error:
	ust_unlock();
	return ret;
}

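/*
 * Close the command socket and release the root handle of a sock_info.
 * The wait shm mapping is only unmapped when not exiting (fork child
 * case): on process exit the listener threads may still use it, so its
 * teardown is left to the OS.
 */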
static
void cleanup_sock_info(struct sock_info *sock_info, int exiting)
{
	int ret;

	if (sock_info->socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->socket);
		if (ret) {
			ERR("Error closing apps socket");
		}
		sock_info->socket = -1;
	}
	if (sock_info->root_handle != -1) {
		ret = lttng_ust_objd_unref(sock_info->root_handle);
		if (ret) {
			ERR("Error unref root handle");
		}
		sock_info->root_handle = -1;
	}
	sock_info->constructor_sem_posted = 0;
	/*
	 * wait_shm_mmap is used by listener threads outside of the
	 * ust lock, so we cannot tear it down ourselves, because we
	 * cannot join on these threads. Leave this task to the OS
	 * process exit.
	 */
	if (!exiting && sock_info->wait_shm_mmap) {
		ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
		if (ret) {
			ERR("Error unmapping wait shm");
		}
		sock_info->wait_shm_mmap = NULL;
	}
}

/*
 * Using fork to set umask in the child process (not multi-thread safe).
 * We deal with the shm_open vs ftruncate race (happening when the
 * sessiond owns the shm and does not let everybody modify it, to ensure
 * safety against shm_unlink) by simply letting the mmap fail and
 * retrying after a few seconds.
 * For global shm, everybody has rw access to it until the sessiond
 * starts.
 */
static
int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
{
	int wait_shm_fd, ret;
	pid_t pid;

	/*
	 * Try to open read-only.
	 */
	wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
	if (wait_shm_fd >= 0) {
		goto end;
	} else if (wait_shm_fd < 0 && errno != ENOENT) {
		/*
		 * Read-only open did not work, and it's not because the
		 * entry was not present. It's a failure that prohibits
		 * using shm.
		 */
		ERR("Error opening shm %s", sock_info->wait_shm_path);
		goto end;
	}
	/*
	 * If the open failed because the file did not exist, try
	 * creating it ourself.
	 */
	lttng_ust_nest_count++;
	pid = fork();
	lttng_ust_nest_count--;
	if (pid > 0) {
		int status;

		/*
		 * Parent: wait for child to return, in which case the
		 * shared memory map will have been created.
		 */
		pid = wait(&status);
		if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			wait_shm_fd = -1;
			goto end;
		}
		/*
		 * Try to open read-only again after creation.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
		if (wait_shm_fd < 0) {
			/*
			 * Read-only open did not work. It's a failure
			 * that prohibits using shm.
			 */
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			goto end;
		}
		goto end;
	} else if (pid == 0) {
		int create_mode;

		/* Child */
		create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
		if (sock_info->global)
			create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
		/*
		 * We're alone in a child process, so we can modify the
		 * process-wide umask.
		 */
		umask(~create_mode);
		/*
		 * Try creating shm (or get rw access).
		 * We don't do an exclusive open, because we allow other
		 * processes to create+ftruncate it concurrently.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path,
				O_RDWR | O_CREAT, create_mode);
		if (wait_shm_fd >= 0) {
			ret = ftruncate(wait_shm_fd, mmap_size);
			if (ret) {
				PERROR("ftruncate");
				exit(EXIT_FAILURE);
			}
			exit(EXIT_SUCCESS);
		}
		/*
		 * For local shm, we need to have rw access to accept
		 * opening it: this means the local sessiond will be
		 * able to wake us up. For global shm, we open it even
		 * if rw access is not granted, because the root.root
		 * sessiond will be able to override all rights and wake
		 * us up.
		 */
		if (!sock_info->global && errno != EACCES) {
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			exit(EXIT_FAILURE);
		}
		/*
		 * The shm exists, but we cannot open it RW. Report
		 * success.
		 */
		exit(EXIT_SUCCESS);
	} else {
		return -1;
	}
end:
	if (wait_shm_fd >= 0 && !sock_info->global) {
		struct stat statbuf;

		/*
		 * Ensure that our user is the owner of the shm file for
		 * local shm. If we do not own the file, it means our
		 * sessiond will not have access to wake us up (there is
		 * probably a rogue process trying to fake our
		 * sessiond). Fallback to polling method in this case.
		 */
		ret = fstat(wait_shm_fd, &statbuf);
		if (ret) {
			PERROR("fstat");
			goto error_close;
		}
		if (statbuf.st_uid != getuid())
			goto error_close;
	}
	return wait_shm_fd;

error_close:
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	return -1;
}

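/*
 * mmap the wait shm page read-only. The file descriptor is closed
 * right after mmap, since only the mapping is needed for the futex
 * wait.
 */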
static
char *get_map_shm(struct sock_info *sock_info)
{
	size_t mmap_size = sysconf(_SC_PAGE_SIZE);
	int wait_shm_fd, ret;
	char *wait_shm_mmap;

	wait_shm_fd = get_wait_shm(sock_info, mmap_size);
	if (wait_shm_fd < 0) {
		goto error;
	}
	wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
		  MAP_SHARED, wait_shm_fd, 0);
	/* close shm fd immediately after taking the mmap reference */
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	if (wait_shm_mmap == MAP_FAILED) {
		DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
		goto error;
	}
	return wait_shm_mmap;

error:
	return NULL;
}

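/*
 * Block on the wait shm futex until a session daemon signals its
 * presence. On kernels where FUTEX_WAIT fails with EFAULT on read-only
 * mappings, this simply returns and the listener retry loop does the
 * polling.
 */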
static
void wait_for_sessiond(struct sock_info *sock_info)
{
	int ret;

	ust_lock();
	if (lttng_ust_comm_should_quit) {
		goto quit;
	}
	if (wait_poll_fallback) {
		goto error;
	}
	if (!sock_info->wait_shm_mmap) {
		sock_info->wait_shm_mmap = get_map_shm(sock_info);
		if (!sock_info->wait_shm_mmap)
			goto error;
	}
	ust_unlock();

	DBG("Waiting for %s apps sessiond", sock_info->name);
	/* Wait for futex wakeup */
	if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
		ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
			FUTEX_WAIT, 0, NULL, NULL, 0);
		if (ret < 0) {
			if (errno == EFAULT) {
				wait_poll_fallback = 1;
				DBG(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
				if (ust_debug())
					PERROR("futex");
			}
		}
	}
	return;

quit:
	ust_unlock();
	return;

error:
	ust_unlock();
	return;
}

/*
 * This thread does not allocate any resource, except within
 * handle_message, within mutex protection. This mutex protects against
 * fork and exit.
 * The other moment it allocates resources is at socket connection, which
 * is also protected by the mutex.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret, prev_connect_failed = 0, has_waited = 0;

	/* Restart trying to connect to the session daemon */
restart:
	if (prev_connect_failed) {
		/* Wait for sessiond availability with pipe */
		wait_for_sessiond(sock_info);
		if (has_waited) {
			has_waited = 0;
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure. This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		}
		has_waited = 1;
		prev_connect_failed = 0;
	}
	ust_lock();

	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}

	if (sock_info->socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}

	/* Register */
	ret = ustcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}

	sock_info->socket = sock = ret;

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret < 0) {
			ERR("Error creating root handle");
			ust_unlock();
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_app_to_sessiond(sock);
	if (ret < 0) {
		ERR("Error registering to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	ust_unlock();

	for (;;) {
		ssize_t len;
		struct ustcomm_ust_msg lum;

		len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
			ust_lock();
			/*
			 * Either sessiond has shutdown or refused us by closing the socket.
			 * In either case, we don't want to delay constructor execution,
			 * and we need to wait before retry.
			 */
			prev_connect_failed = 1;
			/*
			 * If we cannot register to the sessiond daemon, don't
			 * delay constructor execution.
			 */
			ret = handle_register_done(sock_info);
			assert(!ret);
			ust_unlock();
			goto end;
		case sizeof(lum):
			DBG("message received\n");
			ret = handle_message(sock_info, sock, &lum);
			if (ret < 0) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		case -1:
			DBG("Receive failed from lttng-sessiond with errno %d", errno);
			if (errno == ECONNRESET) {
				ERR("%s remote end closed connection\n", sock_info->name);
				goto end;
			}
			goto end;
		default:
			ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
			continue;
		}

	}
end:
	goto restart;	/* try to reconnect */
quit:
	return NULL;
}

809}
810
cf12a773
MD
811/*
812 * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
813 */
11ff9c7d
MD
814static
815int get_timeout(struct timespec *constructor_timeout)
816{
cf12a773
MD
817 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
818 char *str_delay;
11ff9c7d
MD
819 int ret;
820
69400ac4 821 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
cf12a773
MD
822 if (str_delay) {
823 constructor_delay_ms = strtol(str_delay, NULL, 10);
824 }
825
826 switch (constructor_delay_ms) {
827 case -1:/* fall-through */
828 case 0:
829 return constructor_delay_ms;
830 default:
831 break;
832 }
833
834 /*
835 * If we are unable to find the current time, don't wait.
836 */
837 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
838 if (ret) {
839 return -1;
840 }
95259bd0
MD
841 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
842 constructor_timeout->tv_nsec +=
843 (constructor_delay_ms % 1000UL) * 1000000UL;
11ff9c7d
MD
844 if (constructor_timeout->tv_nsec >= 1000000000UL) {
845 constructor_timeout->tv_sec++;
846 constructor_timeout->tv_nsec -= 1000000000UL;
847 }
cf12a773 848 return 1;
11ff9c7d 849}
d9e99d10 850
2691221a
MD
851/*
852 * sessiond monitoring thread: monitor presence of global and per-user
853 * sessiond by polling the application common named pipe.
854 */
855/* TODO */
856
edaa1431 857void __attribute__((constructor)) lttng_ust_init(void)
2691221a 858{
11ff9c7d 859 struct timespec constructor_timeout;
ae6a58bf 860 sigset_t sig_all_blocked, orig_parent_mask;
cf12a773 861 int timeout_mode;
2691221a
MD
862 int ret;
863
edaa1431
MD
864 if (uatomic_xchg(&initialized, 1) == 1)
865 return;
866
eddd8d5d
MD
867 /*
868 * Fixup interdependency between TLS fixup mutex (which happens
869 * to be the dynamic linker mutex) and ust_lock, taken within
870 * the ust lock.
871 */
872 lttng_fixup_event_tls();
f645cfa7 873 lttng_fixup_ringbuffer_tls();
4158a15a 874 lttng_fixup_vtid_tls();
eddd8d5d 875
edaa1431
MD
876 /*
877 * We want precise control over the order in which we construct
878 * our sub-libraries vs starting to receive commands from
879 * sessiond (otherwise leading to errors when trying to create
880 * sessiond before the init functions are completed).
881 */
2691221a 882 init_usterr();
edaa1431
MD
883 init_tracepoint();
884 ltt_ring_buffer_metadata_client_init();
885 ltt_ring_buffer_client_overwrite_init();
886 ltt_ring_buffer_client_discard_init();
2691221a 887
cf12a773 888 timeout_mode = get_timeout(&constructor_timeout);
11ff9c7d 889
95259bd0 890 ret = sem_init(&constructor_wait, 0, 0);
11ff9c7d
MD
891 assert(!ret);
892
8d20bf54 893 ret = setup_local_apps();
2691221a 894 if (ret) {
8d20bf54 895 ERR("Error setting up to local apps");
2691221a 896 }
ae6a58bf
WP
897
898 /* A new thread created by pthread_create inherits the signal mask
899 * from the parent. To avoid any signal being received by the
900 * listener thread, we block all signals temporarily in the parent,
901 * while we create the listener thread.
902 */
903 sigfillset(&sig_all_blocked);
904 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
905 if (ret) {
d94d802c 906 ERR("pthread_sigmask: %s", strerror(ret));
ae6a58bf
WP
907 }
908
dde70ea0
MD
909 ret = pthread_create(&global_apps.ust_listener, NULL,
910 ust_listener_thread, &global_apps);
d94d802c
MD
911 if (ret) {
912 ERR("pthread_create global: %s", strerror(ret));
913 }
8d20bf54 914 if (local_apps.allowed) {
dde70ea0
MD
915 ret = pthread_create(&local_apps.ust_listener, NULL,
916 ust_listener_thread, &local_apps);
d94d802c
MD
917 if (ret) {
918 ERR("pthread_create local: %s", strerror(ret));
919 }
8d20bf54
MD
920 } else {
921 handle_register_done(&local_apps);
922 }
923
ae6a58bf
WP
924 /* Restore original signal mask in parent */
925 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
926 if (ret) {
d94d802c 927 ERR("pthread_sigmask: %s", strerror(ret));
ae6a58bf
WP
928 }
929
cf12a773
MD
930 switch (timeout_mode) {
931 case 1: /* timeout wait */
95259bd0
MD
932 do {
933 ret = sem_timedwait(&constructor_wait,
934 &constructor_timeout);
935 } while (ret < 0 && errno == EINTR);
cf12a773
MD
936 if (ret < 0 && errno == ETIMEDOUT) {
937 ERR("Timed out waiting for ltt-sessiond");
938 } else {
939 assert(!ret);
940 }
941 break;
7b766b16 942 case -1:/* wait forever */
95259bd0
MD
943 do {
944 ret = sem_wait(&constructor_wait);
945 } while (ret < 0 && errno == EINTR);
11ff9c7d 946 assert(!ret);
cf12a773 947 break;
7b766b16 948 case 0: /* no timeout */
cf12a773 949 break;
11ff9c7d 950 }
2691221a
MD
951}
952
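/*
 * Tear down sockets, handles and tracer components. When not exiting
 * (fork child case), also reset the global state so that
 * lttng_ust_init() can run again for the new PID.
 */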
static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps, exiting);
	if (local_apps.allowed) {
		cleanup_sock_info(&local_apps, exiting);
	}
	/*
	 * The teardown calls in this function all affect data structures
	 * accessed under the UST lock by the listener threads. This
	 * lock, along with the lttng_ust_comm_should_quit flag, ensures
	 * that none of these threads are accessing this data at this
	 * point.
	 */
	lttng_ust_abi_exit();
	lttng_ust_events_exit();
	ltt_ring_buffer_client_discard_exit();
	ltt_ring_buffer_client_overwrite_exit();
	ltt_ring_buffer_metadata_client_exit();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}

void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the thread is not allocating any resource.
	 */

	/*
	 * Require the communication thread to quit. Synchronize with
	 * mutexes to ensure it is not in a mutex critical section when
	 * pthread_cancel is later called.
	 */
	ust_lock();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	/* cancel threads */
	ret = pthread_cancel(global_apps.ust_listener);
	if (ret) {
		ERR("Error cancelling global ust listener thread: %s",
			strerror(ret));
	}
	if (local_apps.allowed) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread: %s",
				strerror(ret));
		}
	}
	/*
	 * Do NOT join threads: use of sys_futex makes it impossible to
	 * join the threads without using async-cancel, but async-cancel
	 * is delivered by a signal, which could hit the target thread
	 * anywhere in its code path, including while the ust_lock() is
	 * held, causing a deadlock for the other thread. Let the OS
	 * cleanup the threads if they are stalled in a syscall.
	 */
	lttng_ust_cleanup(1);
}

/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of a tracepoint or ust tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(sigset_t *save_sigset)
{
	/*
	 * Disable signals. This is to avoid that the child intervenes
	 * before it is properly set up for tracing. It is safer to
	 * disable all signals, because then we know we are not breaking
	 * anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int ret;

	if (lttng_ust_nest_count)
		return;
	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
	ust_lock();
	rcu_bp_before_fork();
}

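/* Common post-fork path: release ust_lock and restore the signal mask. */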
static void ust_after_fork_common(sigset_t *restore_sigset)
{
	int ret;

	DBG("process %d", getpid());
	ust_unlock();
	/* Restore signals */
	ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
}

void ust_after_fork_parent(sigset_t *restore_sigset)
{
	if (lttng_ust_nest_count)
		return;
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
}

/*
 * After fork, in the child, we need to cleanup all the leftover state,
 * except the worker thread which already magically disappeared thanks
 * to the weird Linux fork semantics. After tidying up, we call
 * lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for fork() calls that have tracing in the child between
 * the fork and the following exec call (if there is any).
 */
void ust_after_fork_child(sigset_t *restore_sigset)
{
	if (lttng_ust_nest_count)
		return;
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	lttng_ust_cleanup(0);
	lttng_context_vtid_reset();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
	lttng_ust_init();
}