Fix: fixup vtid TLS
liblttng-ust/lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/prctl.h>
26 #include <sys/mman.h>
27 #include <sys/stat.h>
28 #include <sys/types.h>
29 #include <sys/wait.h>
30 #include <fcntl.h>
31 #include <unistd.h>
32 #include <errno.h>
33 #include <pthread.h>
34 #include <semaphore.h>
35 #include <time.h>
36 #include <assert.h>
37 #include <signal.h>
38 #include <urcu/uatomic.h>
39 #include <urcu/futex.h>
40 #include <urcu/compiler.h>
41
42 #include <lttng/ust-events.h>
43 #include <lttng/ust-abi.h>
44 #include <lttng/ust.h>
45 #include <ust-comm.h>
46 #include <usterr-signal-safe.h>
47 #include "tracepoint-internal.h"
48 #include "ltt-tracer-core.h"
49 #include "../libringbuffer/tlsfixup.h"
50
51 /*
52 * Has the lttng-ust comm constructor been called?
53 */
54 static int initialized;
55
56 /*
57 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
58 * Held when handling a command, also held by fork() to deal with
59 * removal of threads, and by exit path.
60 */
61
62 /* Should the ust comm thread quit? */
63 static int lttng_ust_comm_should_quit;
64
65 /*
66 * Wait for either of these before continuing to the main
67 * program:
68 * - the register_done message from sessiond daemon
69 * (will let the sessiond daemon enable sessions before main
70 * starts.)
71 * - sessiond daemon is not reachable.
72 * - timeout (ensuring applications are resilient to session
73 * daemon problems).
74 */
75 static sem_t constructor_wait;
76 /*
77 * Doing this for both the global and local sessiond.
78 */
79 static int sem_count = { 2 };
80
81 /*
82 * Info about socket and associated listener thread.
83 */
84 struct sock_info {
85 const char *name;
86 pthread_t ust_listener; /* listener thread */
87 int root_handle;
88 int constructor_sem_posted;
89 int allowed;
90 int global;
91
92 char sock_path[PATH_MAX];
93 int socket;
94
95 char wait_shm_path[PATH_MAX];
96 char *wait_shm_mmap;
97 };
98
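/*
 * Two such structures exist for the whole process lifetime: one for the
 * global (root) session daemon and one for the per-user session daemon,
 * each served by its own listener thread.
 */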
99 /* Socket from app (connect) to session daemon (listen) for communication */
100 struct sock_info global_apps = {
101 .name = "global",
102 .global = 1,
103
104 .root_handle = -1,
105 .allowed = 1,
106
107 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
108 .socket = -1,
109
110 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
111 };
112
113 /* TODO: allow global_apps_sock_path override */
114
115 struct sock_info local_apps = {
116 .name = "local",
117 .global = 0,
118 .root_handle = -1,
119 .allowed = 0, /* Check setuid bit first */
120
121 .socket = -1,
122 };
123
124 static int wait_poll_fallback;
125
126 extern void ltt_ring_buffer_client_overwrite_init(void);
127 extern void ltt_ring_buffer_client_discard_init(void);
128 extern void ltt_ring_buffer_metadata_client_init(void);
129 extern void ltt_ring_buffer_client_overwrite_exit(void);
130 extern void ltt_ring_buffer_client_discard_exit(void);
131 extern void ltt_ring_buffer_metadata_client_exit(void);
132
133 static
134 int setup_local_apps(void)
135 {
136 const char *home_dir;
137 uid_t uid;
138
139 uid = getuid();
140 /*
141 * Disallow per-user tracing for setuid binaries.
142 */
143 if (uid != geteuid()) {
144 local_apps.allowed = 0;
145 return 0;
146 } else {
147 local_apps.allowed = 1;
148 }
149 home_dir = (const char *) getenv("HOME");
150 if (!home_dir)
151 return -ENOENT;
152 snprintf(local_apps.sock_path, PATH_MAX,
153 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
154 snprintf(local_apps.wait_shm_path, PATH_MAX,
155 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
156 return 0;
157 }
158
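/*
 * Register this application to the session daemon: the fixed-size
 * structure below is sent as-is over the UNIX socket, so its layout
 * must match what the session daemon expects for this communication
 * protocol version.
 */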
159 static
160 int register_app_to_sessiond(int socket)
161 {
162 ssize_t ret;
163 int prctl_ret;
164 struct {
165 uint32_t major;
166 uint32_t minor;
167 pid_t pid;
168 pid_t ppid;
169 uid_t uid;
170 gid_t gid;
171 uint32_t bits_per_long;
172 char name[16]; /* process name */
173 } reg_msg;
174
175 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
176 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
177 reg_msg.pid = getpid();
178 reg_msg.ppid = getppid();
179 reg_msg.uid = getuid();
180 reg_msg.gid = getgid();
181 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
182 prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0);
183 if (prctl_ret) {
184 ERR("Error executing prctl");
185 return -errno;
186 }
187
188 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
189 if (ret >= 0 && ret != sizeof(reg_msg))
190 return -EIO;
191 return ret;
192 }
193
194 static
195 int send_reply(int sock, struct ustcomm_ust_reply *lur)
196 {
197 ssize_t len;
198
199 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
200 switch (len) {
201 case sizeof(*lur):
202 DBG("message successfully sent");
203 return 0;
204 case -1:
205 if (errno == ECONNRESET) {
206 printf("remote end closed connection\n");
207 return 0;
208 }
209 return -1;
210 default:
211 printf("incorrect message size: %zd\n", len);
212 return -1;
213 }
214 }
215
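/*
 * handle_register_done() is invoked once per sock_info (global and
 * local): the constructor semaphore is only posted when sem_count,
 * initialized to 2, reaches zero, i.e. when both registrations have
 * either completed or been given up on.
 */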
216 static
217 int handle_register_done(struct sock_info *sock_info)
218 {
219 int ret;
220
221 if (sock_info->constructor_sem_posted)
222 return 0;
223 sock_info->constructor_sem_posted = 1;
224 if (uatomic_read(&sem_count) <= 0) {
225 return 0;
226 }
227 ret = uatomic_add_return(&sem_count, -1);
228 if (ret == 0) {
229 ret = sem_post(&constructor_wait);
230 assert(!ret);
231 }
232 return 0;
233 }
234
235 static
236 int handle_message(struct sock_info *sock_info,
237 int sock, struct ustcomm_ust_msg *lum)
238 {
239 int ret = 0;
240 const struct lttng_ust_objd_ops *ops;
241 struct ustcomm_ust_reply lur;
242 int shm_fd, wait_fd;
243 union ust_args args;
244
245 ust_lock();
246
247 memset(&lur, 0, sizeof(lur));
248
249 if (lttng_ust_comm_should_quit) {
250 ret = -EPERM;
251 goto end;
252 }
253
254 ops = objd_ops(lum->handle);
255 if (!ops) {
256 ret = -ENOENT;
257 goto end;
258 }
259
260 switch (lum->cmd) {
261 case LTTNG_UST_REGISTER_DONE:
262 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
263 ret = handle_register_done(sock_info);
264 else
265 ret = -EINVAL;
266 break;
267 case LTTNG_UST_RELEASE:
268 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
269 ret = -EPERM;
270 else
271 ret = lttng_ust_objd_unref(lum->handle);
272 break;
273 default:
274 if (ops->cmd)
275 ret = ops->cmd(lum->handle, lum->cmd,
276 (unsigned long) &lum->u,
277 &args);
278 else
279 ret = -ENOSYS;
280 break;
281 }
282
283 end:
284 lur.handle = lum->handle;
285 lur.cmd = lum->cmd;
286 lur.ret_val = ret;
287 if (ret >= 0) {
288 lur.ret_code = USTCOMM_OK;
289 } else {
290 //lur.ret_code = USTCOMM_SESSION_FAIL;
291 lur.ret_code = ret;
292 }
293 if (ret >= 0) {
294 switch (lum->cmd) {
295 case LTTNG_UST_STREAM:
296 /*
297 * Special-case reply to send stream info.
298 * Use lum.u output.
299 */
300 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
301 shm_fd = *args.stream.shm_fd;
302 wait_fd = *args.stream.wait_fd;
303 break;
304 case LTTNG_UST_METADATA:
305 case LTTNG_UST_CHANNEL:
306 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
307 shm_fd = *args.channel.shm_fd;
308 wait_fd = *args.channel.wait_fd;
309 break;
310 case LTTNG_UST_TRACER_VERSION:
311 lur.u.version = lum->u.version;
312 break;
313 case LTTNG_UST_TRACEPOINT_LIST_GET:
314 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
315 break;
316 }
317 }
318 ret = send_reply(sock, &lur);
319 if (ret < 0) {
320 perror("error sending reply");
321 goto error;
322 }
323
324 if ((lum->cmd == LTTNG_UST_STREAM
325 || lum->cmd == LTTNG_UST_CHANNEL
326 || lum->cmd == LTTNG_UST_METADATA)
327 && lur.ret_code == USTCOMM_OK) {
328 /* we also need to send the file descriptors. */
329 ret = ustcomm_send_fds_unix_sock(sock,
330 &shm_fd, &shm_fd,
331 1, sizeof(int));
332 if (ret < 0) {
333 perror("send shm_fd");
334 goto error;
335 }
336 ret = ustcomm_send_fds_unix_sock(sock,
337 &wait_fd, &wait_fd,
338 1, sizeof(int));
339 if (ret < 0) {
340 perror("send wait_fd");
341 goto error;
342 }
343 }
344 /*
345 * We still have the memory map reference, and the fds have been
346 * sent to the sessiond. We can therefore close those fds. Note
347 * that we keep the write side of the wait_fd open, but close
348 * the read side.
349 */
350 if (lur.ret_code == USTCOMM_OK) {
351 switch (lum->cmd) {
352 case LTTNG_UST_STREAM:
353 if (shm_fd >= 0) {
354 ret = close(shm_fd);
355 if (ret) {
356 PERROR("Error closing stream shm_fd");
357 }
358 *args.stream.shm_fd = -1;
359 }
360 if (wait_fd >= 0) {
361 ret = close(wait_fd);
362 if (ret) {
363 PERROR("Error closing stream wait_fd");
364 }
365 *args.stream.wait_fd = -1;
366 }
367 break;
368 case LTTNG_UST_METADATA:
369 case LTTNG_UST_CHANNEL:
370 if (shm_fd >= 0) {
371 ret = close(shm_fd);
372 if (ret) {
373 PERROR("Error closing channel shm_fd");
374 }
375 *args.channel.shm_fd = -1;
376 }
377 if (wait_fd >= 0) {
378 ret = close(wait_fd);
379 if (ret) {
380 PERROR("Error closing channel wait_fd");
381 }
382 *args.channel.wait_fd = -1;
383 }
384 break;
385 }
386 }
387
388 error:
389 ust_unlock();
390 return ret;
391 }
392
393 static
394 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
395 {
396 int ret;
397
398 if (sock_info->socket != -1) {
399 ret = close(sock_info->socket);
400 if (ret) {
401 ERR("Error closing apps socket");
402 }
403 sock_info->socket = -1;
404 }
405 if (sock_info->root_handle != -1) {
406 ret = lttng_ust_objd_unref(sock_info->root_handle);
407 if (ret) {
408 ERR("Error unref root handle");
409 }
410 sock_info->root_handle = -1;
411 }
412 sock_info->constructor_sem_posted = 0;
413 /*
414 * wait_shm_mmap is used by listener threads outside of the
415 * ust lock, so we cannot tear it down ourselves, because we
416 * cannot join on these threads. Leave this task to the OS
417 * process exit.
418 */
419 if (!exiting && sock_info->wait_shm_mmap) {
420 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
421 if (ret) {
422 ERR("Error unmapping wait shm");
423 }
424 sock_info->wait_shm_mmap = NULL;
425 }
426 }
427
428 /*
429 * Using fork to set umask in the child process (not multi-thread safe).
430 * We deal with the shm_open vs ftruncate race (happening when the
431 * sessiond owns the shm and does not let everybody modify it, to ensure
432 * safety against shm_unlink) by simply letting the mmap fail and
433 * retrying after a few seconds.
434 * For global shm, everybody has rw access to it until the sessiond
435 * starts.
436 */
437 static
438 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
439 {
440 int wait_shm_fd, ret;
441 pid_t pid;
442
443 /*
444 * Try to open read-only.
445 */
446 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
447 if (wait_shm_fd >= 0) {
448 goto end;
449 } else if (wait_shm_fd < 0 && errno != ENOENT) {
450 /*
451 * Read-only open did not work, and it's not because the
452 * entry was not present. It's a failure that prohibits
453 * using shm.
454 */
455 ERR("Error opening shm %s", sock_info->wait_shm_path);
456 goto end;
457 }
458 /*
459 * If the open failed because the file did not exist, try
460 * creating it ourselves.
461 */
462 pid = fork();
463 if (pid > 0) {
464 int status;
465
466 /*
467 * Parent: wait for child to return, in which case the
468 * shared memory map will have been created.
469 */
470 pid = wait(&status);
471 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
472 wait_shm_fd = -1;
473 goto end;
474 }
475 /*
476 * Try to open read-only again after creation.
477 */
478 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
479 if (wait_shm_fd < 0) {
480 /*
481 * Read-only open did not work. It's a failure
482 * that prohibits using shm.
483 */
484 ERR("Error opening shm %s", sock_info->wait_shm_path);
485 goto end;
486 }
487 goto end;
488 } else if (pid == 0) {
489 int create_mode;
490
491 /* Child */
492 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
493 if (sock_info->global)
494 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
495 /*
496 * We're alone in a child process, so we can modify the
497 * process-wide umask.
498 */
499 umask(~create_mode);
500 /*
501 * Try creating shm (or get rw access).
502 * We don't do an exclusive open, because we allow other
503 * processes to create+ftruncate it concurrently.
504 */
505 wait_shm_fd = shm_open(sock_info->wait_shm_path,
506 O_RDWR | O_CREAT, create_mode);
507 if (wait_shm_fd >= 0) {
508 ret = ftruncate(wait_shm_fd, mmap_size);
509 if (ret) {
510 PERROR("ftruncate");
511 exit(EXIT_FAILURE);
512 }
513 exit(EXIT_SUCCESS);
514 }
515 /*
516 * For local shm, we need to have rw access to accept
517 * opening it: this means the local sessiond will be
518 * able to wake us up. For global shm, we open it even
519 * if rw access is not granted, because the root.root
520 * sessiond will be able to override all rights and wake
521 * us up.
522 */
523 if (!sock_info->global && errno != EACCES) {
524 ERR("Error opening shm %s", sock_info->wait_shm_path);
525 exit(EXIT_FAILURE);
526 }
527 /*
528 * The shm exists, but we cannot open it RW. Report
529 * success.
530 */
531 exit(EXIT_SUCCESS);
532 } else {
533 return -1;
534 }
535 end:
536 if (wait_shm_fd >= 0 && !sock_info->global) {
537 struct stat statbuf;
538
539 /*
540 * Ensure that our user is the owner of the shm file for
541 * local shm. If we do not own the file, it means our
542 * sessiond will not have access to wake us up (there is
543 * probably a rogue process trying to fake our
544 * sessiond). Fallback to polling method in this case.
545 */
546 ret = fstat(wait_shm_fd, &statbuf);
547 if (ret) {
548 PERROR("fstat");
549 goto error_close;
550 }
551 if (statbuf.st_uid != getuid())
552 goto error_close;
553 }
554 return wait_shm_fd;
555
556 error_close:
557 ret = close(wait_shm_fd);
558 if (ret) {
559 PERROR("Error closing fd");
560 }
561 return -1;
562 }
563
564 static
565 char *get_map_shm(struct sock_info *sock_info)
566 {
567 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
568 int wait_shm_fd, ret;
569 char *wait_shm_mmap;
570
571 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
572 if (wait_shm_fd < 0) {
573 goto error;
574 }
575 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
576 MAP_SHARED, wait_shm_fd, 0);
577 /* close shm fd immediately after taking the mmap reference */
578 ret = close(wait_shm_fd);
579 if (ret) {
580 PERROR("Error closing fd");
581 }
582 if (wait_shm_mmap == MAP_FAILED) {
583 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
584 goto error;
585 }
586 return wait_shm_mmap;
587
588 error:
589 return NULL;
590 }
591
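/*
 * Rough sketch of the wake-up scheme used below (the session daemon
 * side is an assumption inferred from this wait path, not code from
 * this file): the application futex-waits on the first 32-bit word of
 * the shared page while it reads 0, and the session daemon is expected
 * to set that word to 1 and issue a FUTEX_WAKE on it once it is ready
 * to accept connections.
 */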
592 static
593 void wait_for_sessiond(struct sock_info *sock_info)
594 {
595 int ret;
596
597 ust_lock();
598 if (lttng_ust_comm_should_quit) {
599 goto quit;
600 }
601 if (wait_poll_fallback) {
602 goto error;
603 }
604 if (!sock_info->wait_shm_mmap) {
605 sock_info->wait_shm_mmap = get_map_shm(sock_info);
606 if (!sock_info->wait_shm_mmap)
607 goto error;
608 }
609 ust_unlock();
610
611 DBG("Waiting for %s apps sessiond", sock_info->name);
612 /* Wait for futex wakeup */
613 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
614 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
615 FUTEX_WAIT, 0, NULL, NULL, 0);
616 if (ret < 0) {
617 if (errno == EFAULT) {
618 wait_poll_fallback = 1;
619 DBG(
620 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
621 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
622 "Please upgrade your kernel "
623 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
624 "mainline). LTTng-UST will use polling mode fallback.");
625 if (ust_debug())
626 PERROR("futex");
627 }
628 }
629 }
630 return;
631
632 quit:
633 ust_unlock();
634 return;
635
636 error:
637 ust_unlock();
638 return;
639 }
640
641 /*
642 * This thread does not allocate any resource, except within
643 * handle_message, within mutex protection. This mutex protects against
644 * fork and exit.
645 * The other moment it allocates resources is at socket connection, which
646 * is also protected by the mutex.
647 */
648 static
649 void *ust_listener_thread(void *arg)
650 {
651 struct sock_info *sock_info = arg;
652 int sock, ret, prev_connect_failed = 0, has_waited = 0;
653
654 /* Restart trying to connect to the session daemon */
655 restart:
656 if (prev_connect_failed) {
657 /* Wait for sessiond availability using the wait shm futex */
658 wait_for_sessiond(sock_info);
659 if (has_waited) {
660 has_waited = 0;
661 /*
662 * Sleep for 5 seconds before retrying after a
663 * sequence of failure / wait / failure. This
664 * deals with a killed or broken session daemon.
665 */
666 sleep(5);
667 }
668 has_waited = 1;
669 prev_connect_failed = 0;
670 }
671 ust_lock();
672
673 if (lttng_ust_comm_should_quit) {
674 ust_unlock();
675 goto quit;
676 }
677
678 if (sock_info->socket != -1) {
679 ret = close(sock_info->socket);
680 if (ret) {
681 ERR("Error closing %s apps socket", sock_info->name);
682 }
683 sock_info->socket = -1;
684 }
685
686 /* Register */
687 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
688 if (ret < 0) {
689 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
690 prev_connect_failed = 1;
691 /*
692 * If we cannot find the sessiond daemon, don't delay
693 * constructor execution.
694 */
695 ret = handle_register_done(sock_info);
696 assert(!ret);
697 ust_unlock();
698 goto restart;
699 }
700
701 sock_info->socket = sock = ret;
702
703 /*
704 * Create only one root handle per listener thread for the whole
705 * process lifetime.
706 */
707 if (sock_info->root_handle == -1) {
708 ret = lttng_abi_create_root_handle();
709 if (ret < 0) {
710 ERR("Error creating root handle");
711 ust_unlock();
712 goto quit;
713 }
714 sock_info->root_handle = ret;
715 }
716
717 ret = register_app_to_sessiond(sock);
718 if (ret < 0) {
719 ERR("Error registering to %s apps socket", sock_info->name);
720 prev_connect_failed = 1;
721 /*
722 * If we cannot register to the sessiond daemon, don't
723 * delay constructor execution.
724 */
725 ret = handle_register_done(sock_info);
726 assert(!ret);
727 ust_unlock();
728 goto restart;
729 }
730 ust_unlock();
731
732 for (;;) {
733 ssize_t len;
734 struct ustcomm_ust_msg lum;
735
736 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
737 switch (len) {
738 case 0: /* orderly shutdown */
739 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
740 ust_lock();
741 /*
742 * Either sessiond has shutdown or refused us by closing the socket.
743 * In either case, we don't want to delay constructor execution,
744 * and we need to wait before retrying.
745 */
746 prev_connect_failed = 1;
747 /*
748 * If we cannot register to the sessiond daemon, don't
749 * delay constructor execution.
750 */
751 ret = handle_register_done(sock_info);
752 assert(!ret);
753 ust_unlock();
754 goto end;
755 case sizeof(lum):
756 DBG("message received\n");
757 ret = handle_message(sock_info, sock, &lum);
758 if (ret < 0) {
759 ERR("Error handling message for %s socket", sock_info->name);
760 }
761 continue;
762 case -1:
763 DBG("Receive failed from lttng-sessiond with errno %d", errno);
764 if (errno == ECONNRESET) {
765 ERR("%s remote end closed connection\n", sock_info->name);
766 goto end;
767 }
768 goto end;
769 default:
770 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
771 continue;
772 }
773
774 }
775 end:
776 goto restart; /* try to reconnect */
777 quit:
778 return NULL;
779 }
780
781 /*
782 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
783 */
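/*
 * LTTNG_UST_REGISTER_TIMEOUT is expressed in milliseconds; for
 * example, running an instrumented application as
 * LTTNG_UST_REGISTER_TIMEOUT=0 ./app skips the wait entirely, while
 * LTTNG_UST_REGISTER_TIMEOUT=-1 ./app waits until registration is done.
 */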
784 static
785 int get_timeout(struct timespec *constructor_timeout)
786 {
787 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
788 char *str_delay;
789 int ret;
790
791 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
792 if (str_delay) {
793 constructor_delay_ms = strtol(str_delay, NULL, 10);
794 }
795
796 switch (constructor_delay_ms) {
797 case -1:/* fall-through */
798 case 0:
799 return constructor_delay_ms;
800 default:
801 break;
802 }
803
804 /*
805 * If we are unable to find the current time, don't wait.
806 */
807 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
808 if (ret) {
809 return 0;	/* Don't wait. */
810 }
811 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
812 constructor_timeout->tv_nsec +=
813 (constructor_delay_ms % 1000UL) * 1000000UL;
814 if (constructor_timeout->tv_nsec >= 1000000000UL) {
815 constructor_timeout->tv_sec++;
816 constructor_timeout->tv_nsec -= 1000000000UL;
817 }
818 return 1;
819 }
820
821 /*
822 * sessiond monitoring thread: monitor presence of global and per-user
823 * sessiond by polling the application common named pipe.
824 */
825 /* TODO */
826
827 void __attribute__((constructor)) lttng_ust_init(void)
828 {
829 struct timespec constructor_timeout;
830 int timeout_mode;
831 int ret;
832
833 if (uatomic_xchg(&initialized, 1) == 1)
834 return;
835
836 /*
837 * Fixup interdependency between TLS fixup mutex (which happens
838 * to be the dynamic linker mutex) and ust_lock, taken within
839 * the ust lock.
840 */
841 lttng_fixup_event_tls();
842 lttng_fixup_ringbuffer_tls();
843 lttng_fixup_vtid_tls();
844
845 /*
846 * We want precise control over the order in which we construct
847 * our sub-libraries vs starting to receive commands from
848 * sessiond (otherwise leading to errors when trying to create
849 * sessiond before the init functions are completed).
850 */
851 init_usterr();
852 init_tracepoint();
853 ltt_ring_buffer_metadata_client_init();
854 ltt_ring_buffer_client_overwrite_init();
855 ltt_ring_buffer_client_discard_init();
856
857 timeout_mode = get_timeout(&constructor_timeout);
858
859 ret = sem_init(&constructor_wait, 0, 0);
860 assert(!ret);
861
862 ret = setup_local_apps();
863 if (ret) {
864 ERR("Error setting up local apps");
865 }
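/*
 * Always spawn the global listener thread; the per-user listener is
 * only spawned when per-user tracing is allowed (see setup_local_apps()),
 * otherwise its registration is marked done immediately.
 */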
866 ret = pthread_create(&global_apps.ust_listener, NULL,
867 ust_listener_thread, &global_apps);
868
869 if (local_apps.allowed) {
870 ret = pthread_create(&local_apps.ust_listener, NULL,
871 ust_listener_thread, &local_apps);
872 } else {
873 handle_register_done(&local_apps);
874 }
875
876 switch (timeout_mode) {
877 case 1: /* timeout wait */
878 do {
879 ret = sem_timedwait(&constructor_wait,
880 &constructor_timeout);
881 } while (ret < 0 && errno == EINTR);
882 if (ret < 0 && errno == ETIMEDOUT) {
883 ERR("Timed out waiting for ltt-sessiond");
884 } else {
885 assert(!ret);
886 }
887 break;
888 case -1:/* wait forever */
889 do {
890 ret = sem_wait(&constructor_wait);
891 } while (ret < 0 && errno == EINTR);
892 assert(!ret);
893 break;
894 case 0: /* no timeout */
895 break;
896 }
897 }
898
899 static
900 void lttng_ust_cleanup(int exiting)
901 {
902 cleanup_sock_info(&global_apps, exiting);
903 if (local_apps.allowed) {
904 cleanup_sock_info(&local_apps, exiting);
905 }
906 /*
907 * The teardown performed in this function affects data structures
908 * accessed under the UST lock by the listener threads. This
909 * lock, along with the lttng_ust_comm_should_quit flag, ensures
910 * that none of these threads are accessing this data at this
911 * point.
912 */
913 lttng_ust_abi_exit();
914 lttng_ust_events_exit();
915 ltt_ring_buffer_client_discard_exit();
916 ltt_ring_buffer_client_overwrite_exit();
917 ltt_ring_buffer_metadata_client_exit();
918 exit_tracepoint();
919 if (!exiting) {
920 /* Reinitialize values for fork */
921 sem_count = 2;
922 lttng_ust_comm_should_quit = 0;
923 initialized = 0;
924 }
925 }
926
927 void __attribute__((destructor)) lttng_ust_exit(void)
928 {
929 int ret;
930
931 /*
932 * Using pthread_cancel here because:
933 * A) we don't want to hang application teardown.
934 * B) the thread is not allocating any resource.
935 */
936
937 /*
938 * Require the communication thread to quit. Synchronize with
939 * mutexes to ensure it is not in a mutex critical section when
940 * pthread_cancel is later called.
941 */
942 ust_lock();
943 lttng_ust_comm_should_quit = 1;
944 ust_unlock();
945
946 /* cancel threads */
947 ret = pthread_cancel(global_apps.ust_listener);
948 if (ret) {
949 ERR("Error cancelling global ust listener thread");
950 }
951 if (local_apps.allowed) {
952 ret = pthread_cancel(local_apps.ust_listener);
953 if (ret) {
954 ERR("Error cancelling local ust listener thread");
955 }
956 }
957 /*
958 * Do NOT join threads: use of sys_futex makes it impossible to
959 * join the threads without using async-cancel, but async-cancel
960 * is delivered by a signal, which could hit the target thread
961 * anywhere in its code path, including while the ust_lock() is
962 * held, causing a deadlock for the other thread. Let the OS
963 * clean up the threads if they are stalled in a syscall.
964 */
965 lttng_ust_cleanup(1);
966 }
967
968 /*
969 * We exclude the worker threads across fork and clone (except
970 * CLONE_VM), because these system calls only keep the forking thread
971 * running in the child. Therefore, we don't want to call fork or clone
972 * in the middle of a tracepoint or UST tracing state modification.
973 * Holding this mutex protects these structures across fork and clone.
974 */
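/*
 * Sketch of the expected call sequence around fork() (a wrapper such as
 * the one provided by liblttng-ust-fork is assumed; this is not code
 * from this file):
 *
 *	sigset_t sigset;
 *	ust_before_fork(&sigset);
 *	pid = fork();
 *	if (pid == 0)
 *		ust_after_fork_child(&sigset);
 *	else
 *		ust_after_fork_parent(&sigset);
 */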
975 void ust_before_fork(sigset_t *save_sigset)
976 {
977 /*
978 * Disable signals. This is to avoid that the child intervenes
979 * before it is properly setup for tracing. It is safer to
980 * disable all signals, because then we know we are not breaking
981 * anything by restoring the original mask.
982 */
983 sigset_t all_sigs;
984 int ret;
985
986 /* Disable signals */
987 sigfillset(&all_sigs);
988 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
989 if (ret == -1) {
990 PERROR("sigprocmask");
991 }
992 ust_lock();
993 rcu_bp_before_fork();
994 }
995
996 static void ust_after_fork_common(sigset_t *restore_sigset)
997 {
998 int ret;
999
1000 DBG("process %d", getpid());
1001 ust_unlock();
1002 /* Restore signals */
1003 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1004 if (ret == -1) {
1005 PERROR("sigprocmask");
1006 }
1007 }
1008
1009 void ust_after_fork_parent(sigset_t *restore_sigset)
1010 {
1011 DBG("process %d", getpid());
1012 rcu_bp_after_fork_parent();
1013 /* Release mutexes and reenable signals */
1014 ust_after_fork_common(restore_sigset);
1015 }
1016
1017 /*
1018 * After fork, in the child, we need to clean up all the leftover state,
1019 * except the worker threads, which have already disappeared thanks to
1020 * the Linux fork semantics. After tidying up, we call lttng_ust_init()
1021 * again to start over as a new PID.
1022 *
1023 * This is meant for fork() calls that trace in the child between the
1024 * fork and the following exec call (if there is any).
1025 */
1026 void ust_after_fork_child(sigset_t *restore_sigset)
1027 {
1028 DBG("process %d", getpid());
1029 /* Release urcu mutexes */
1030 rcu_bp_after_fork_child();
1031 lttng_ust_cleanup(0);
1032 lttng_context_vtid_reset();
1033 /* Release mutexes and reenable signals */
1034 ust_after_fork_common(restore_sigset);
1035 lttng_ust_init();
1036 }