Reset vtid after fork in child
[lttng-ust.git] / libust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/prctl.h>
26 #include <sys/mman.h>
27 #include <sys/stat.h>
28 #include <sys/types.h>
29 #include <sys/wait.h>
30 #include <fcntl.h>
31 #include <unistd.h>
32 #include <errno.h>
33 #include <pthread.h>
34 #include <semaphore.h>
35 #include <time.h>
36 #include <assert.h>
37 #include <signal.h>
38 #include <urcu/uatomic.h>
39 #include <urcu/futex.h>
40
41 #include <lttng-ust-comm.h>
42 #include <ust/lttng-events.h>
43 #include <ust/usterr-signal-safe.h>
44 #include <ust/lttng-ust-abi.h>
45 #include <ust/tracepoint.h>
46 #include <ust/tracepoint-internal.h>
47 #include <ust/ust.h>
48 #include "ltt-tracer-core.h"
49
50 /*
51 * Has lttng ust comm constructor been called ?
52 */
53 static int initialized;
54
55 /*
56 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
57 * Held when handling a command, also held by fork() to deal with
58 * removal of threads, and by exit path.
59 */
60
61 /* Should the ust comm thread quit ? */
62 static int lttng_ust_comm_should_quit;
63
64 /*
65 * Wait for either of these before continuing to the main
66 * program:
67 * - the register_done message from sessiond daemon
68 * (will let the sessiond daemon enable sessions before main
69 * starts.)
70 * - sessiond daemon is not reachable.
71 * - timeout (ensuring applications are resilient to session
72 * daemon problems).
73 */
74 static sem_t constructor_wait;
75 /*
76 * Doing this for both the global and local sessiond.
77 */
78 static int sem_count = { 2 };
79
80 /*
81 * Info about socket and associated listener thread.
82 */
83 struct sock_info {
84 const char *name;
85 pthread_t ust_listener; /* listener thread */
86 int root_handle;
87 int constructor_sem_posted;
88 int allowed;
89 int global;
90
91 char sock_path[PATH_MAX];
92 int socket;
93
94 char wait_shm_path[PATH_MAX];
95 char *wait_shm_mmap;
96 };
97
98 /* Socket from app (connect) to session daemon (listen) for communication */
99 struct sock_info global_apps = {
100 .name = "global",
101 .global = 1,
102
103 .root_handle = -1,
104 .allowed = 1,
105
106 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
107 .socket = -1,
108
109 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
110 };
111
112 /* TODO: allow global_apps_sock_path override */
113
114 struct sock_info local_apps = {
115 .name = "local",
116 .global = 0,
117 .root_handle = -1,
118 .allowed = 0, /* Check setuid bit first */
119
120 .socket = -1,
121 };
122
123 static int wait_poll_fallback;
124
125 extern void ltt_ring_buffer_client_overwrite_init(void);
126 extern void ltt_ring_buffer_client_discard_init(void);
127 extern void ltt_ring_buffer_metadata_client_init(void);
128 extern void ltt_ring_buffer_client_overwrite_exit(void);
129 extern void ltt_ring_buffer_client_discard_exit(void);
130 extern void ltt_ring_buffer_metadata_client_exit(void);
131
132 static
133 int setup_local_apps(void)
134 {
135 const char *home_dir;
136 uid_t uid;
137
138 uid = getuid();
139 /*
140 * Disallow per-user tracing for setuid binaries.
141 */
142 if (uid != geteuid()) {
143 local_apps.allowed = 0;
144 return 0;
145 } else {
146 local_apps.allowed = 1;
147 }
148 home_dir = (const char *) getenv("HOME");
149 if (!home_dir)
150 return -ENOENT;
151 snprintf(local_apps.sock_path, PATH_MAX,
152 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
153 snprintf(local_apps.wait_shm_path, PATH_MAX,
154 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
155 return 0;
156 }
157
158 static
159 int register_app_to_sessiond(int socket)
160 {
161 ssize_t ret;
162 int prctl_ret;
163 struct {
164 uint32_t major;
165 uint32_t minor;
166 pid_t pid;
167 pid_t ppid;
168 uid_t uid;
169 gid_t gid;
170 char name[16]; /* process name */
171 } reg_msg;
172
173 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
174 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
175 reg_msg.pid = getpid();
176 reg_msg.ppid = getppid();
177 reg_msg.uid = getuid();
178 reg_msg.gid = getgid();
179 prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0);
180 if (prctl_ret) {
181 ERR("Error executing prctl");
182 return -errno;
183 }
184
185 ret = lttcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
186 if (ret >= 0 && ret != sizeof(reg_msg))
187 return -EIO;
188 return ret;
189 }
190
191 static
192 int send_reply(int sock, struct lttcomm_ust_reply *lur)
193 {
194 ssize_t len;
195
196 len = lttcomm_send_unix_sock(sock, lur, sizeof(*lur));
197 switch (len) {
198 case sizeof(*lur):
199 DBG("message successfully sent");
200 return 0;
201 case -1:
202 if (errno == ECONNRESET) {
203 printf("remote end closed connection\n");
204 return 0;
205 }
206 return -1;
207 default:
208 printf("incorrect message size: %zd\n", len);
209 return -1;
210 }
211 }
212
213 static
214 int handle_register_done(struct sock_info *sock_info)
215 {
216 int ret;
217
218 if (sock_info->constructor_sem_posted)
219 return 0;
220 sock_info->constructor_sem_posted = 1;
221 if (uatomic_read(&sem_count) <= 0) {
222 return 0;
223 }
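	/*
	 * The global and the local sessiond each account for one decrement
	 * of sem_count (initialized to 2); the constructor semaphore is
	 * posted only once both have reported (or failed) registration.
	 */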
224 ret = uatomic_add_return(&sem_count, -1);
225 if (ret == 0) {
226 ret = sem_post(&constructor_wait);
227 assert(!ret);
228 }
229 return 0;
230 }
231
232 static
233 int handle_message(struct sock_info *sock_info,
234 int sock, struct lttcomm_ust_msg *lum)
235 {
236 int ret = 0;
237 const struct objd_ops *ops;
238 struct lttcomm_ust_reply lur;
239 int shm_fd, wait_fd;
240
241 ust_lock();
242
243 memset(&lur, 0, sizeof(lur));
244
245 if (lttng_ust_comm_should_quit) {
246 ret = -EPERM;
247 goto end;
248 }
249
250 ops = objd_ops(lum->handle);
251 if (!ops) {
252 ret = -ENOENT;
253 goto end;
254 }
255
256 switch (lum->cmd) {
257 case LTTNG_UST_REGISTER_DONE:
258 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
259 ret = handle_register_done(sock_info);
260 else
261 ret = -EINVAL;
262 break;
263 case LTTNG_UST_RELEASE:
264 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
265 ret = -EPERM;
266 else
267 ret = objd_unref(lum->handle);
268 break;
269 default:
270 if (ops->cmd)
271 ret = ops->cmd(lum->handle, lum->cmd,
272 (unsigned long) &lum->u);
273 else
274 ret = -ENOSYS;
275 break;
276 }
277
278 end:
279 lur.handle = lum->handle;
280 lur.cmd = lum->cmd;
281 lur.ret_val = ret;
282 if (ret >= 0) {
283 lur.ret_code = LTTCOMM_OK;
284 } else {
285 //lur.ret_code = LTTCOMM_SESSION_FAIL;
286 lur.ret_code = ret;
287 }
288 switch (lum->cmd) {
289 case LTTNG_UST_STREAM:
290 /*
291 * Special-case reply to send stream info.
292 * Use lum.u output.
293 */
294 lur.u.stream.memory_map_size = lum->u.stream.memory_map_size;
295 shm_fd = lum->u.stream.shm_fd;
296 wait_fd = lum->u.stream.wait_fd;
297 break;
298 case LTTNG_UST_METADATA:
299 case LTTNG_UST_CHANNEL:
300 lur.u.channel.memory_map_size = lum->u.channel.memory_map_size;
301 shm_fd = lum->u.channel.shm_fd;
302 wait_fd = lum->u.channel.wait_fd;
303 break;
304 }
305 ret = send_reply(sock, &lur);
306 if (ret < 0) {
307 perror("error sending reply");
308 goto error;
309 }
310
311 if ((lum->cmd == LTTNG_UST_STREAM
312 || lum->cmd == LTTNG_UST_CHANNEL
313 || lum->cmd == LTTNG_UST_METADATA)
314 && lur.ret_code == LTTCOMM_OK) {
315 /* we also need to send the file descriptors. */
316 ret = lttcomm_send_fds_unix_sock(sock,
317 &shm_fd, &shm_fd,
318 1, sizeof(int));
319 if (ret < 0) {
320 perror("send shm_fd");
321 goto error;
322 }
323 ret = lttcomm_send_fds_unix_sock(sock,
324 &wait_fd, &wait_fd,
325 1, sizeof(int));
326 if (ret < 0) {
327 perror("send wait_fd");
328 goto error;
329 }
330 }
331 error:
332 ust_unlock();
333 return ret;
334 }
335
336 static
337 void cleanup_sock_info(struct sock_info *sock_info)
338 {
339 int ret;
340
341 if (sock_info->socket != -1) {
342 ret = close(sock_info->socket);
343 if (ret) {
344 ERR("Error closing apps socket");
345 }
346 sock_info->socket = -1;
347 }
348 if (sock_info->root_handle != -1) {
349 ret = objd_unref(sock_info->root_handle);
350 if (ret) {
351 ERR("Error unref root handle");
352 }
353 sock_info->root_handle = -1;
354 }
355 sock_info->constructor_sem_posted = 0;
356 if (sock_info->wait_shm_mmap) {
357 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
358 if (ret) {
359 ERR("Error unmapping wait shm");
360 }
361 sock_info->wait_shm_mmap = NULL;
362 }
363 }
364
365 /*
366 * Using fork to set umask in the child process (not multi-thread safe).
367 * We deal with the shm_open vs ftruncate race (happening when the
368 * sessiond owns the shm and does not let everybody modify it, to ensure
369 * safety against shm_unlink) by simply letting the mmap fail and
370 * retrying after a few seconds.
371 * For global shm, everybody has rw access to it until the sessiond
372 * starts.
373 */
374 static
375 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
376 {
377 int wait_shm_fd, ret;
378 pid_t pid;
379
380 /*
381 * Try to open read-only.
382 */
383 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
384 if (wait_shm_fd >= 0) {
385 goto end;
386 } else if (wait_shm_fd < 0 && errno != ENOENT) {
387 /*
388 * Read-only open did not work, and it's not because the
389 * entry was not present. It's a failure that prohibits
390 * using shm.
391 */
392 ERR("Error opening shm %s", sock_info->wait_shm_path);
393 goto end;
394 }
395 /*
396 * If the open failed because the file did not exist, try
397 * creating it ourselves.
398 */
399 pid = fork();
400 if (pid > 0) {
401 int status;
402
403 /*
404 * Parent: wait for child to return, in which case the
405 * shared memory map will have been created.
406 */
407 pid = wait(&status);
408 if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
409 wait_shm_fd = -1;
410 goto end;
411 }
412 /*
413 * Try to open read-only again after creation.
414 */
415 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
416 if (wait_shm_fd < 0) {
417 /*
418 * Read-only open did not work. It's a failure
419 * that prohibits using shm.
420 */
421 ERR("Error opening shm %s", sock_info->wait_shm_path);
422 goto end;
423 }
424 goto end;
425 } else if (pid == 0) {
426 int create_mode;
427
428 /* Child */
429 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
430 if (sock_info->global)
431 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
432 /*
433 * We're alone in a child process, so we can modify the
434 * process-wide umask.
435 */
436 umask(~create_mode);
437 /*
438 * Try creating shm (or get rw access).
439 * We don't do an exclusive open, because we allow other
440 * processes to create+ftruncate it concurrently.
441 */
442 wait_shm_fd = shm_open(sock_info->wait_shm_path,
443 O_RDWR | O_CREAT, create_mode);
444 if (wait_shm_fd >= 0) {
445 ret = ftruncate(wait_shm_fd, mmap_size);
446 if (ret) {
447 PERROR("ftruncate");
448 exit(EXIT_FAILURE);
449 }
450 exit(EXIT_SUCCESS);
451 }
452 /*
453 * For local shm, we need to have rw access to accept
454 * opening it: this means the local sessiond will be
455 * able to wake us up. For global shm, we open it even
456 * if rw access is not granted, because the root.root
457 * sessiond will be able to override all rights and wake
458 * us up.
459 */
460 if (!sock_info->global && errno != EACCES) {
461 ERR("Error opening shm %s", sock_info->wait_shm_path);
462 exit(EXIT_FAILURE);
463 }
464 /*
465 * The shm exists, but we cannot open it RW. Report
466 * success.
467 */
468 exit(EXIT_SUCCESS);
469 } else {
470 return -1;
471 }
472 end:
473 if (wait_shm_fd >= 0 && !sock_info->global) {
474 struct stat statbuf;
475
476 /*
477 * Ensure that our user is the owner of the shm file for
478 * local shm. If we do not own the file, it means our
479 * sessiond will not have access to wake us up (there is
480 * probably a rogue process trying to fake our
481 * sessiond). Fallback to polling method in this case.
482 */
483 ret = fstat(wait_shm_fd, &statbuf);
484 if (ret) {
485 PERROR("fstat");
486 goto error_close;
487 }
488 if (statbuf.st_uid != getuid())
489 goto error_close;
490 }
491 return wait_shm_fd;
492
493 error_close:
494 ret = close(wait_shm_fd);
495 if (ret) {
496 PERROR("Error closing fd");
497 }
498 return -1;
499 }
500
501 static
502 char *get_map_shm(struct sock_info *sock_info)
503 {
504 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
505 int wait_shm_fd, ret;
506 char *wait_shm_mmap;
507
508 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
509 if (wait_shm_fd < 0) {
510 goto error;
511 }
512 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
513 MAP_SHARED, wait_shm_fd, 0);
514 /* close shm fd immediately after taking the mmap reference */
515 ret = close(wait_shm_fd);
516 if (ret) {
517 PERROR("Error closing fd");
518 }
519 if (wait_shm_mmap == MAP_FAILED) {
520 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
521 goto error;
522 }
523 return wait_shm_mmap;
524
525 error:
526 return NULL;
527 }
528
529 static
530 void wait_for_sessiond(struct sock_info *sock_info)
531 {
532 int ret;
533
534 ust_lock();
535 if (lttng_ust_comm_should_quit) {
536 goto quit;
537 }
538 if (wait_poll_fallback) {
539 goto error;
540 }
541 if (!sock_info->wait_shm_mmap) {
542 sock_info->wait_shm_mmap = get_map_shm(sock_info);
543 if (!sock_info->wait_shm_mmap)
544 goto error;
545 }
546 ust_unlock();
547
548 DBG("Waiting for %s apps sessiond", sock_info->name);
549 /* Wait for futex wakeup */
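	/*
	 * The sessiond is expected to set this word to non-zero and issue
	 * FUTEX_WAKE once it is ready to accept registrations.
	 */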
550 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
551 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
552 FUTEX_WAIT, 0, NULL, NULL, 0);
553 if (ret < 0) {
554 if (errno == EFAULT) {
555 wait_poll_fallback = 1;
556 ERR(
557 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
558 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
559 "Please upgrade your kernel "
560 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
561 "mainline). LTTng-UST will use polling mode fallback.");
562 }
563 PERROR("futex");
564 }
565 }
566 return;
567
568 quit:
569 ust_unlock();
570 return;
571
572 error:
573 ust_unlock();
574 return;
575 }
576
577 /*
578 * This thread does not allocate any resource, except within
579 * handle_message, within mutex protection. This mutex protects against
580 * fork and exit.
581 * The other moment it allocates resources is at socket connection, which
582 * is also protected by the mutex.
583 */
584 static
585 void *ust_listener_thread(void *arg)
586 {
587 struct sock_info *sock_info = arg;
588 int sock, ret, prev_connect_failed = 0, has_waited = 0;
589
590 /* Restart trying to connect to the session daemon */
591 restart:
592 if (prev_connect_failed) {
593 /* Wait for sessiond availability on the wait shm */
594 wait_for_sessiond(sock_info);
595 if (has_waited) {
596 has_waited = 0;
597 /*
598 * Sleep for 5 seconds before retrying after a
599 * sequence of failure / wait / failure. This
600 * deals with a killed or broken session daemon.
601 */
602 sleep(5);
603 }
604 has_waited = 1;
605 prev_connect_failed = 0;
606 }
607 ust_lock();
608
609 if (lttng_ust_comm_should_quit) {
610 ust_unlock();
611 goto quit;
612 }
613
614 if (sock_info->socket != -1) {
615 ret = close(sock_info->socket);
616 if (ret) {
617 ERR("Error closing %s apps socket", sock_info->name);
618 }
619 sock_info->socket = -1;
620 }
621
622 /* Register */
623 ret = lttcomm_connect_unix_sock(sock_info->sock_path);
624 if (ret < 0) {
625 ERR("Error connecting to %s apps socket", sock_info->name);
626 prev_connect_failed = 1;
627 /*
628 * If we cannot find the sessiond daemon, don't delay
629 * constructor execution.
630 */
631 ret = handle_register_done(sock_info);
632 assert(!ret);
633 ust_unlock();
634 goto restart;
635 }
636
637 sock_info->socket = sock = ret;
638
639 /*
640 * Create only one root handle per listener thread for the whole
641 * process lifetime.
642 */
643 if (sock_info->root_handle == -1) {
644 ret = lttng_abi_create_root_handle();
645 if (ret < 0) {
646 ERR("Error creating root handle");
647 ust_unlock();
648 goto quit;
649 }
650 sock_info->root_handle = ret;
651 }
652
653 ret = register_app_to_sessiond(sock);
654 if (ret < 0) {
655 ERR("Error registering to %s apps socket", sock_info->name);
656 prev_connect_failed = 1;
657 /*
658 * If we cannot register to the sessiond daemon, don't
659 * delay constructor execution.
660 */
661 ret = handle_register_done(sock_info);
662 assert(!ret);
663 ust_unlock();
664 goto restart;
665 }
666 ust_unlock();
667
668 for (;;) {
669 ssize_t len;
670 struct lttcomm_ust_msg lum;
671
672 len = lttcomm_recv_unix_sock(sock, &lum, sizeof(lum));
673 switch (len) {
674 case 0: /* orderly shutdown */
675 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
676 goto end;
677 case sizeof(lum):
678 DBG("message received\n");
679 ret = handle_message(sock_info, sock, &lum);
680 if (ret < 0) {
681 ERR("Error handling message for %s socket", sock_info->name);
682 }
683 continue;
684 case -1:
685 if (errno == ECONNRESET) {
686 ERR("%s remote end closed connection\n", sock_info->name);
687 goto end;
688 }
689 goto end;
690 default:
691 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
692 continue;
693 }
694
695 }
696 end:
697 goto restart; /* try to reconnect */
698 quit:
699 return NULL;
700 }
701
702 /*
703 * Return values: -1: wait forever. 0: don't wait. 1: timed wait.
704 */
705 static
706 int get_timeout(struct timespec *constructor_timeout)
707 {
708 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
709 char *str_delay;
710 int ret;
711
712 str_delay = getenv("UST_REGISTER_TIMEOUT");
713 if (str_delay) {
714 constructor_delay_ms = strtol(str_delay, NULL, 10);
715 }
716
717 switch (constructor_delay_ms) {
718 case -1:/* fall-through */
719 case 0:
720 return constructor_delay_ms;
721 default:
722 break;
723 }
724
725 /*
726 * If we are unable to find the current time, don't wait.
727 */
728 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
729 if (ret) {
730 return 0;
731 }
732 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
733 constructor_timeout->tv_nsec +=
734 (constructor_delay_ms % 1000UL) * 1000000UL;
735 if (constructor_timeout->tv_nsec >= 1000000000UL) {
736 constructor_timeout->tv_sec++;
737 constructor_timeout->tv_nsec -= 1000000000UL;
738 }
739 return 1;
740 }
741
742 /*
743 * sessiond monitoring thread: monitor presence of global and per-user
744 * sessiond by polling the application common named pipe.
745 */
746 /* TODO */
747
748 void __attribute__((constructor)) lttng_ust_init(void)
749 {
750 struct timespec constructor_timeout;
751 int timeout_mode;
752 int ret;
753
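	/*
	 * Run only once, whether invoked as a library constructor or
	 * explicitly from ust_after_fork_child().
	 */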
754 if (uatomic_xchg(&initialized, 1) == 1)
755 return;
756
757 /*
758 * We want precise control over the order in which we construct
759 * our sub-libraries vs starting to receive commands from
760 * sessiond (otherwise leading to errors when trying to create
761 * sessions before the init functions are completed).
762 */
763 init_usterr();
764 init_tracepoint();
765 ltt_ring_buffer_metadata_client_init();
766 ltt_ring_buffer_client_overwrite_init();
767 ltt_ring_buffer_client_discard_init();
768
769 timeout_mode = get_timeout(&constructor_timeout);
770
771 ret = sem_init(&constructor_wait, 0, 0);
772 assert(!ret);
773
774 ret = setup_local_apps();
775 if (ret) {
776 ERR("Error setting up to local apps");
777 }
778 ret = pthread_create(&local_apps.ust_listener, NULL,
779 ust_listener_thread, &local_apps);
780
781 if (local_apps.allowed) {
782 ret = pthread_create(&global_apps.ust_listener, NULL,
783 ust_listener_thread, &global_apps);
784 } else {
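		/*
		 * Per-user tracing is not allowed (setuid binary or no HOME):
		 * there is no local sessiond to register to, so flag the local
		 * registration as done rather than delaying the constructor.
		 */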
785 handle_register_done(&local_apps);
786 }
787
788 switch (timeout_mode) {
789 case 1: /* timeout wait */
790 do {
791 ret = sem_timedwait(&constructor_wait,
792 &constructor_timeout);
793 } while (ret < 0 && errno == EINTR);
794 if (ret < 0 && errno == ETIMEDOUT) {
795 ERR("Timed out waiting for ltt-sessiond");
796 } else {
797 assert(!ret);
798 }
799 break;
800 case -1:/* wait forever */
801 do {
802 ret = sem_wait(&constructor_wait);
803 } while (ret < 0 && errno == EINTR);
804 assert(!ret);
805 break;
806 case 0: /* no timeout */
807 break;
808 }
809 }
810
811 static
812 void lttng_ust_cleanup(int exiting)
813 {
814 cleanup_sock_info(&global_apps);
815 if (local_apps.allowed) {
816 cleanup_sock_info(&local_apps);
817 }
818 lttng_ust_abi_exit();
819 ltt_events_exit();
820 ltt_ring_buffer_client_discard_exit();
821 ltt_ring_buffer_client_overwrite_exit();
822 ltt_ring_buffer_metadata_client_exit();
823 exit_tracepoint();
824 if (!exiting) {
825 /* Reinitialize values for fork */
826 sem_count = 2;
827 lttng_ust_comm_should_quit = 0;
828 initialized = 0;
829 }
830 }
831
832 void __attribute__((destructor)) lttng_ust_exit(void)
833 {
834 int ret;
835
836 /*
837 * Using pthread_cancel here because:
838 * A) we don't want to hang application teardown.
839 * B) the thread is not allocating any resource.
840 */
841
842 /*
843 * Require the communication thread to quit. Synchronize with
844 * mutexes to ensure it is not in a mutex critical section when
845 * pthread_cancel is later called.
846 */
847 ust_lock();
848 lttng_ust_comm_should_quit = 1;
849 ust_unlock();
850
851 ret = pthread_cancel(global_apps.ust_listener);
852 if (ret) {
853 ERR("Error cancelling global ust listener thread");
854 }
855 if (local_apps.allowed) {
856 ret = pthread_cancel(local_apps.ust_listener);
857 if (ret) {
858 ERR("Error cancelling local ust listener thread");
859 }
860 }
861 lttng_ust_cleanup(1);
862 }
863
864 /*
865 * We exclude the worker threads across fork and clone (except
866 * CLONE_VM), because these system calls only keep the forking thread
867 * running in the child. Therefore, we don't want to call fork or clone
868 * in the middle of a tracepoint or ust tracing state modification.
869 * Holding this mutex protects these structures across fork and clone.
870 */
871 void ust_before_fork(ust_fork_info_t *fork_info)
872 {
873 /*
874 * Disable signals. This is to avoid that the child intervenes
875 * before it is properly setup for tracing. It is safer to
876 * disable all signals, because then we know we are not breaking
877 * anything by restoring the original mask.
878 */
879 sigset_t all_sigs;
880 int ret;
881
882 /* Disable signals */
883 sigfillset(&all_sigs);
884 ret = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
885 if (ret == -1) {
886 PERROR("sigprocmask");
887 }
888 ust_lock();
889 rcu_bp_before_fork();
890 }
891
892 static void ust_after_fork_common(ust_fork_info_t *fork_info)
893 {
894 int ret;
895
896 DBG("process %d", getpid());
897 ust_unlock();
898 /* Restore signals */
899 ret = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
900 if (ret == -1) {
901 PERROR("sigprocmask");
902 }
903 }
904
905 void ust_after_fork_parent(ust_fork_info_t *fork_info)
906 {
907 DBG("process %d", getpid());
908 rcu_bp_after_fork_parent();
909 /* Release mutexes and reenable signals */
910 ust_after_fork_common(fork_info);
911 }
912
913 /*
914 * After fork, in the child, we need to clean up all the leftover state,
915 * except the worker thread which already magically disappeared thanks
916 * to the weird Linux fork semantics. After tidying up, we call
917 * lttng_ust_init() again to start over as a new PID.
918 *
919 * This is meant for fork() calls where the child does tracing between the
920 * fork and the following exec call (if any).
921 */
922 void ust_after_fork_child(ust_fork_info_t *fork_info)
923 {
924 DBG("process %d", getpid());
925 /* Release urcu mutexes */
926 rcu_bp_after_fork_child();
927 lttng_ust_cleanup(0);
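	/*
	 * The vtid context may still hold the parent's cached thread id;
	 * reset it so events recorded in the child carry its own tid.
	 */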
928 lttng_context_vtid_reset();
929 /* Release mutexes and reenable signals */
930 ust_after_fork_common(fork_info);
931 lttng_ust_init();
932 }