fix: on exit, leave thread/mmap reclaim to OS
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <ust-comm.h>
45 #include <usterr-signal-safe.h>
46 #include "tracepoint-internal.h"
47 #include "ltt-tracer-core.h"
48 #include "compat.h"
49
50 /*
51 * Has lttng ust comm constructor been called ?
52 */
53 static int initialized;
54
55 /*
56 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
57 * Held when handling a command, also held by fork() to deal with
58 * removal of threads, and by exit path.
59 */
60
61 /* Should the ust comm thread quit ? */
62 static int lttng_ust_comm_should_quit;
63
64 /*
65 * Wait for any of these before continuing to the main
66 * program:
67 * - the register_done message from sessiond daemon
68 * (will let the sessiond daemon enable sessions before main
69 * starts.)
70 * - sessiond daemon is not reachable.
71 * - timeout (ensuring applications are resilient to session
72 * daemon problems).
73 */
74 static sem_t constructor_wait;
75 /*
76 * Doing this for both the global and local sessiond.
77 */
78 static int sem_count = { 2 };
79
80 /*
81 * Info about socket and associated listener thread.
82 */
83 struct sock_info {
84 const char *name;
85 pthread_t ust_listener; /* listener thread */
86 int root_handle;
87 int constructor_sem_posted;
88 int allowed;
89 int global;
90
91 char sock_path[PATH_MAX];
92 int socket;
93
94 char wait_shm_path[PATH_MAX];
95 char *wait_shm_mmap;
96 };
97
98 /* Socket from app (connect) to session daemon (listen) for communication */
99 struct sock_info global_apps = {
100 .name = "global",
101 .global = 1,
102
103 .root_handle = -1,
104 .allowed = 1,
105
106 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
107 .socket = -1,
108
109 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
110 };
111
112 /* TODO: allow global_apps_sock_path override */
113
114 struct sock_info local_apps = {
115 .name = "local",
116 .global = 0,
117 .root_handle = -1,
118 .allowed = 0, /* Check setuid bit first */
119
120 .socket = -1,
121 };
122
123 static int wait_poll_fallback;
124
125 extern void ltt_ring_buffer_client_overwrite_init(void);
126 extern void ltt_ring_buffer_client_discard_init(void);
127 extern void ltt_ring_buffer_metadata_client_init(void);
128 extern void ltt_ring_buffer_client_overwrite_exit(void);
129 extern void ltt_ring_buffer_client_discard_exit(void);
130 extern void ltt_ring_buffer_metadata_client_exit(void);
131
132 static
133 int setup_local_apps(void)
134 {
135 const char *home_dir;
136 uid_t uid;
137
138 uid = getuid();
139 /*
140 * Disallow per-user tracing for setuid binaries.
141 */
142 if (uid != geteuid()) {
143 local_apps.allowed = 0;
144 return 0;
145 } else {
146 local_apps.allowed = 1;
147 }
148 home_dir = (const char *) getenv("HOME");
149 if (!home_dir)
150 return -ENOENT;
151 snprintf(local_apps.sock_path, PATH_MAX,
152 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
153 snprintf(local_apps.wait_shm_path, PATH_MAX,
154 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
155 return 0;
156 }
157
158 static
159 int register_app_to_sessiond(int socket)
160 {
161 ssize_t ret;
162 struct {
163 uint32_t major;
164 uint32_t minor;
165 pid_t pid;
166 pid_t ppid;
167 uid_t uid;
168 gid_t gid;
169 uint32_t bits_per_long;
170 char name[16]; /* process name */
171 } reg_msg;
172
173 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
174 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
175 reg_msg.pid = getpid();
176 reg_msg.ppid = getppid();
177 reg_msg.uid = getuid();
178 reg_msg.gid = getgid();
179 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
180 lttng_ust_getprocname(reg_msg.name);
181
182 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
183 if (ret >= 0 && ret != sizeof(reg_msg))
184 return -EIO;
185 return ret;
186 }
187
188 static
189 int send_reply(int sock, struct ustcomm_ust_reply *lur)
190 {
191 ssize_t len;
192
193 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
194 switch (len) {
195 case sizeof(*lur):
196 DBG("message successfully sent");
197 return 0;
198 case -1:
199 if (errno == ECONNRESET) {
200 printf("remote end closed connection\n");
201 return 0;
202 }
203 return -1;
204 default:
205 printf("incorrect message size: %zd\n", len);
206 return -1;
207 }
208 }
209
210 static
211 int handle_register_done(struct sock_info *sock_info)
212 {
213 int ret;
214
215 if (sock_info->constructor_sem_posted)
216 return 0;
217 sock_info->constructor_sem_posted = 1;
218 if (uatomic_read(&sem_count) <= 0) {
219 return 0;
220 }
221 ret = uatomic_add_return(&sem_count, -1);
222 if (ret == 0) {
223 ret = sem_post(&constructor_wait);
224 assert(!ret);
225 }
226 return 0;
227 }
228
229 static
230 int handle_message(struct sock_info *sock_info,
231 int sock, struct ustcomm_ust_msg *lum)
232 {
233 int ret = 0;
234 const struct lttng_ust_objd_ops *ops;
235 struct ustcomm_ust_reply lur;
236 int shm_fd, wait_fd;
237 union ust_args args;
238
239 ust_lock();
240
241 memset(&lur, 0, sizeof(lur));
242
243 if (lttng_ust_comm_should_quit) {
244 ret = -EPERM;
245 goto end;
246 }
247
248 ops = objd_ops(lum->handle);
249 if (!ops) {
250 ret = -ENOENT;
251 goto end;
252 }
253
254 switch (lum->cmd) {
255 case LTTNG_UST_REGISTER_DONE:
256 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
257 ret = handle_register_done(sock_info);
258 else
259 ret = -EINVAL;
260 break;
261 case LTTNG_UST_RELEASE:
262 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
263 ret = -EPERM;
264 else
265 ret = lttng_ust_objd_unref(lum->handle);
266 break;
267 default:
268 if (ops->cmd)
269 ret = ops->cmd(lum->handle, lum->cmd,
270 (unsigned long) &lum->u,
271 &args);
272 else
273 ret = -ENOSYS;
274 break;
275 }
276
277 end:
278 lur.handle = lum->handle;
279 lur.cmd = lum->cmd;
280 lur.ret_val = ret;
281 if (ret >= 0) {
282 lur.ret_code = USTCOMM_OK;
283 } else {
284 //lur.ret_code = USTCOMM_SESSION_FAIL;
285 lur.ret_code = ret;
286 }
287 switch (lum->cmd) {
288 case LTTNG_UST_STREAM:
289 /*
290 * Special-case reply to send stream info.
291 * Use lum.u output.
292 */
293 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
294 shm_fd = *args.stream.shm_fd;
295 wait_fd = *args.stream.wait_fd;
296 break;
297 case LTTNG_UST_METADATA:
298 case LTTNG_UST_CHANNEL:
299 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
300 shm_fd = *args.channel.shm_fd;
301 wait_fd = *args.channel.wait_fd;
302 break;
303 case LTTNG_UST_TRACER_VERSION:
304 lur.u.version = lum->u.version;
305 break;
306 case LTTNG_UST_TRACEPOINT_LIST_GET:
307 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
308 break;
309 }
310 ret = send_reply(sock, &lur);
311 if (ret < 0) {
312 perror("error sending reply");
313 goto error;
314 }
315
316 if ((lum->cmd == LTTNG_UST_STREAM
317 || lum->cmd == LTTNG_UST_CHANNEL
318 || lum->cmd == LTTNG_UST_METADATA)
319 && lur.ret_code == USTCOMM_OK) {
320 /* we also need to send the file descriptors. */
321 ret = ustcomm_send_fds_unix_sock(sock,
322 &shm_fd, &shm_fd,
323 1, sizeof(int));
324 if (ret < 0) {
325 perror("send shm_fd");
326 goto error;
327 }
328 ret = ustcomm_send_fds_unix_sock(sock,
329 &wait_fd, &wait_fd,
330 1, sizeof(int));
331 if (ret < 0) {
332 perror("send wait_fd");
333 goto error;
334 }
335 }
336 /*
337 * We still have the memory map reference, and the fds have been
338 * sent to the sessiond. We can therefore close those fds. Note
339 * that we keep the write side of the wait_fd open, but close
340 * the read side.
341 */
342 if (lur.ret_code == USTCOMM_OK) {
343 switch (lum->cmd) {
344 case LTTNG_UST_STREAM:
345 if (shm_fd >= 0) {
346 ret = close(shm_fd);
347 if (ret) {
348 PERROR("Error closing stream shm_fd");
349 }
350 *args.stream.shm_fd = -1;
351 }
352 if (wait_fd >= 0) {
353 ret = close(wait_fd);
354 if (ret) {
355 PERROR("Error closing stream wait_fd");
356 }
357 *args.stream.wait_fd = -1;
358 }
359 break;
360 case LTTNG_UST_METADATA:
361 case LTTNG_UST_CHANNEL:
362 if (shm_fd >= 0) {
363 ret = close(shm_fd);
364 if (ret) {
365 PERROR("Error closing channel shm_fd");
366 }
367 *args.channel.shm_fd = -1;
368 }
369 if (wait_fd >= 0) {
370 ret = close(wait_fd);
371 if (ret) {
372 PERROR("Error closing channel wait_fd");
373 }
374 *args.channel.wait_fd = -1;
375 }
376 break;
377 }
378 }
379
380 error:
381 ust_unlock();
382 return ret;
383 }
384
385 static
386 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
387 {
388 int ret;
389
390 if (sock_info->socket != -1) {
391 ret = close(sock_info->socket);
392 if (ret) {
393 ERR("Error closing apps socket");
394 }
395 sock_info->socket = -1;
396 }
397 if (sock_info->root_handle != -1) {
398 ret = lttng_ust_objd_unref(sock_info->root_handle);
399 if (ret) {
400 ERR("Error unref root handle");
401 }
402 sock_info->root_handle = -1;
403 }
404 sock_info->constructor_sem_posted = 0;
405 /*
406 * wait_shm_mmap is used by listener threads outside of the
407 * ust lock, so we cannot tear it down ourselves, because we
408 * cannot join on these threads. Leave this task to the OS
409 * process exit.
410 */
411 if (!exiting && sock_info->wait_shm_mmap) {
412 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
413 if (ret) {
414 ERR("Error unmapping wait shm");
415 }
416 sock_info->wait_shm_mmap = NULL;
417 }
418 }
419
420 /*
421 * Using fork to set umask in the child process (not multi-thread safe).
422 * We deal with the shm_open vs ftruncate race (happening when the
423 * sessiond owns the shm and does not let everybody modify it, to ensure
424 * safety against shm_unlink) by simply letting the mmap fail and
425 * retrying after a few seconds.
426 * For global shm, everybody has rw access to it until the sessiond
427 * starts.
428 */
429 static
430 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
431 {
432 int wait_shm_fd, ret;
433 pid_t pid;
434
435 /*
436 * Try to open read-only.
437 */
438 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
439 if (wait_shm_fd >= 0) {
440 goto end;
441 } else if (wait_shm_fd < 0 && errno != ENOENT) {
442 /*
443 * Read-only open did not work, and it's not because the
444 * entry was not present. It's a failure that prohibits
445 * using shm.
446 */
447 ERR("Error opening shm %s", sock_info->wait_shm_path);
448 goto end;
449 }
450 /*
451 * If the open failed because the file did not exist, try
452 * creating it ourselves.
453 */
454 pid = fork();
455 if (pid > 0) {
456 int status;
457
458 /*
459 * Parent: wait for child to return, in which case the
460 * shared memory map will have been created.
461 */
462 pid = wait(&status);
463 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
464 wait_shm_fd = -1;
465 goto end;
466 }
467 /*
468 * Try to open read-only again after creation.
469 */
470 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
471 if (wait_shm_fd < 0) {
472 /*
473 * Read-only open did not work. It's a failure
474 * that prohibits using shm.
475 */
476 ERR("Error opening shm %s", sock_info->wait_shm_path);
477 goto end;
478 }
479 goto end;
480 } else if (pid == 0) {
481 int create_mode;
482
483 /* Child */
484 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
485 if (sock_info->global)
486 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
487 /*
488 * We're alone in a child process, so we can modify the
489 * process-wide umask.
490 */
491 umask(~create_mode);
492 /*
493 * Try creating shm (or get rw access).
494 * We don't do an exclusive open, because we allow other
495 * processes to create+ftruncate it concurrently.
496 */
497 wait_shm_fd = shm_open(sock_info->wait_shm_path,
498 O_RDWR | O_CREAT, create_mode);
499 if (wait_shm_fd >= 0) {
500 ret = ftruncate(wait_shm_fd, mmap_size);
501 if (ret) {
502 PERROR("ftruncate");
503 exit(EXIT_FAILURE);
504 }
505 exit(EXIT_SUCCESS);
506 }
507 /*
508 * For local shm, we need to have rw access to accept
509 * opening it: this means the local sessiond will be
510 * able to wake us up. For global shm, we open it even
511 * if rw access is not granted, because the root.root
512 * sessiond will be able to override all rights and wake
513 * us up.
514 */
515 if (!sock_info->global && errno != EACCES) {
516 ERR("Error opening shm %s", sock_info->wait_shm_path);
517 exit(EXIT_FAILURE);
518 }
519 /*
520 * The shm exists, but we cannot open it RW. Report
521 * success.
522 */
523 exit(EXIT_SUCCESS);
524 } else {
525 return -1;
526 }
527 end:
528 if (wait_shm_fd >= 0 && !sock_info->global) {
529 struct stat statbuf;
530
531 /*
532 * Ensure that our user is the owner of the shm file for
533 * local shm. If we do not own the file, it means our
534 * sessiond will not have access to wake us up (there is
535 * probably a rogue process trying to fake our
536 * sessiond). Fall back to the polling method in this case.
537 */
538 ret = fstat(wait_shm_fd, &statbuf);
539 if (ret) {
540 PERROR("fstat");
541 goto error_close;
542 }
543 if (statbuf.st_uid != getuid())
544 goto error_close;
545 }
546 return wait_shm_fd;
547
548 error_close:
549 ret = close(wait_shm_fd);
550 if (ret) {
551 PERROR("Error closing fd");
552 }
553 return -1;
554 }
555
556 static
557 char *get_map_shm(struct sock_info *sock_info)
558 {
559 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
560 int wait_shm_fd, ret;
561 char *wait_shm_mmap;
562
563 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
564 if (wait_shm_fd < 0) {
565 goto error;
566 }
567 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
568 MAP_SHARED, wait_shm_fd, 0);
569 /* close shm fd immediately after taking the mmap reference */
570 ret = close(wait_shm_fd);
571 if (ret) {
572 PERROR("Error closing fd");
573 }
574 if (wait_shm_mmap == MAP_FAILED) {
575 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
576 goto error;
577 }
578 return wait_shm_mmap;
579
580 error:
581 return NULL;
582 }
583
584 static
585 void wait_for_sessiond(struct sock_info *sock_info)
586 {
587 int ret;
588
589 ust_lock();
590 if (lttng_ust_comm_should_quit) {
591 goto quit;
592 }
593 if (wait_poll_fallback) {
594 goto error;
595 }
596 if (!sock_info->wait_shm_mmap) {
597 sock_info->wait_shm_mmap = get_map_shm(sock_info);
598 if (!sock_info->wait_shm_mmap)
599 goto error;
600 }
601 ust_unlock();
602
603 DBG("Waiting for %s apps sessiond", sock_info->name);
604 /* Wait for futex wakeup */
605 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
606 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
607 FUTEX_WAIT, 0, NULL, NULL, 0);
608 if (ret < 0) {
609 if (errno == EFAULT) {
610 wait_poll_fallback = 1;
611 DBG(
612 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
613 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
614 "Please upgrade your kernel "
615 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
616 "mainline). LTTng-UST will use polling mode fallback.");
617 if (ust_debug())
618 PERROR("futex");
619 }
620 }
621 }
622 return;
623
624 quit:
625 ust_unlock();
626 return;
627
628 error:
629 ust_unlock();
630 return;
631 }
632
633 /*
634 * This thread does not allocate any resource, except within
635 * handle_message, within mutex protection. This mutex protects against
636 * fork and exit.
637 * The other moment it allocates resources is at socket connection, which
638 * is also protected by the mutex.
639 */
640 static
641 void *ust_listener_thread(void *arg)
642 {
643 struct sock_info *sock_info = arg;
644 int sock, ret, prev_connect_failed = 0, has_waited = 0;
645
646 /* Restart trying to connect to the session daemon */
647 restart:
648 if (prev_connect_failed) {
649 /* Wait for sessiond availability with pipe */
650 wait_for_sessiond(sock_info);
651 if (has_waited) {
652 has_waited = 0;
653 /*
654 * Sleep for 5 seconds before retrying after a
655 * sequence of failure / wait / failure. This
656 * deals with a killed or broken session daemon.
657 */
658 sleep(5);
659 }
660 has_waited = 1;
661 prev_connect_failed = 0;
662 }
663 ust_lock();
664
665 if (lttng_ust_comm_should_quit) {
666 ust_unlock();
667 goto quit;
668 }
669
670 if (sock_info->socket != -1) {
671 ret = close(sock_info->socket);
672 if (ret) {
673 ERR("Error closing %s apps socket", sock_info->name);
674 }
675 sock_info->socket = -1;
676 }
677
678 /* Register */
679 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
680 if (ret < 0) {
681 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
682 prev_connect_failed = 1;
683 /*
684 * If we cannot find the sessiond daemon, don't delay
685 * constructor execution.
686 */
687 ret = handle_register_done(sock_info);
688 assert(!ret);
689 ust_unlock();
690 goto restart;
691 }
692
693 sock_info->socket = sock = ret;
694
695 /*
696 * Create only one root handle per listener thread for the whole
697 * process lifetime.
698 */
699 if (sock_info->root_handle == -1) {
700 ret = lttng_abi_create_root_handle();
701 if (ret < 0) {
702 ERR("Error creating root handle");
703 ust_unlock();
704 goto quit;
705 }
706 sock_info->root_handle = ret;
707 }
708
709 ret = register_app_to_sessiond(sock);
710 if (ret < 0) {
711 ERR("Error registering to %s apps socket", sock_info->name);
712 prev_connect_failed = 1;
713 /*
714 * If we cannot register to the sessiond daemon, don't
715 * delay constructor execution.
716 */
717 ret = handle_register_done(sock_info);
718 assert(!ret);
719 ust_unlock();
720 goto restart;
721 }
722 ust_unlock();
723
724 for (;;) {
725 ssize_t len;
726 struct ustcomm_ust_msg lum;
727
728 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
729 switch (len) {
730 case 0: /* orderly shutdown */
731 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
732 ust_lock();
733 /*
734 * Either sessiond has shut down or refused us by closing the socket.
735 * In either case, we don't want to delay constructor execution,
736 * and we need to wait before retrying.
737 */
738 prev_connect_failed = 1;
739 /*
740 * If we cannot register to the sessiond daemon, don't
741 * delay constructor execution.
742 */
743 ret = handle_register_done(sock_info);
744 assert(!ret);
745 ust_unlock();
746 goto end;
747 case sizeof(lum):
748 DBG("message received\n");
749 ret = handle_message(sock_info, sock, &lum);
750 if (ret < 0) {
751 ERR("Error handling message for %s socket", sock_info->name);
752 }
753 continue;
754 case -1:
755 DBG("Receive failed from lttng-sessiond with errno %d", errno);
756 if (errno == ECONNRESET) {
757 ERR("%s remote end closed connection\n", sock_info->name);
758 goto end;
759 }
760 goto end;
761 default:
762 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
763 continue;
764 }
765
766 }
767 end:
768 goto restart; /* try to reconnect */
769 quit:
770 return NULL;
771 }
772
773 /*
774 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
775 */
776 static
777 int get_timeout(struct timespec *constructor_timeout)
778 {
779 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
780 char *str_delay;
781 int ret;
782
783 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
784 if (str_delay) {
785 constructor_delay_ms = strtol(str_delay, NULL, 10);
786 }
787
788 switch (constructor_delay_ms) {
789 case -1:/* fall-through */
790 case 0:
791 return constructor_delay_ms;
792 default:
793 break;
794 }
795
796 /*
797 * If we are unable to find the current time, don't wait.
798 */
799 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
800 if (ret) {
801 return -1;
802 }
803 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
804 constructor_timeout->tv_nsec +=
805 (constructor_delay_ms % 1000UL) * 1000000UL;
806 if (constructor_timeout->tv_nsec >= 1000000000UL) {
807 constructor_timeout->tv_sec++;
808 constructor_timeout->tv_nsec -= 1000000000UL;
809 }
810 return 1;
811 }
812
813 /*
814 * sessiond monitoring thread: monitor presence of global and per-user
815 * sessiond by polling the application common named pipe.
816 */
817 /* TODO */
818
819 void __attribute__((constructor)) lttng_ust_init(void)
820 {
821 struct timespec constructor_timeout;
822 int timeout_mode;
823 int ret;
824
825 if (uatomic_xchg(&initialized, 1) == 1)
826 return;
827
828 /*
829 * We want precise control over the order in which we construct
830 * our sub-libraries vs starting to receive commands from
831 * sessiond (otherwise leading to errors when trying to create
832 * sessions before the init functions are completed).
833 */
834 init_usterr();
835 init_tracepoint();
836 ltt_ring_buffer_metadata_client_init();
837 ltt_ring_buffer_client_overwrite_init();
838 ltt_ring_buffer_client_discard_init();
839
840 timeout_mode = get_timeout(&constructor_timeout);
841
842 ret = sem_init(&constructor_wait, 0, 0);
843 assert(!ret);
844
845 ret = setup_local_apps();
846 if (ret) {
847 ERR("Error setting up to local apps");
848 }
849 ret = pthread_create(&local_apps.ust_listener, NULL,
850 ust_listener_thread, &local_apps);
851
852 if (local_apps.allowed) {
853 ret = pthread_create(&global_apps.ust_listener, NULL,
854 ust_listener_thread, &global_apps);
855 } else {
856 handle_register_done(&local_apps);
857 }
858
859 switch (timeout_mode) {
860 case 1: /* timeout wait */
861 do {
862 ret = sem_timedwait(&constructor_wait,
863 &constructor_timeout);
864 } while (ret < 0 && errno == EINTR);
865 if (ret < 0 && errno == ETIMEDOUT) {
866 ERR("Timed out waiting for ltt-sessiond");
867 } else {
868 assert(!ret);
869 }
870 break;
871 case -1:/* wait forever */
872 do {
873 ret = sem_wait(&constructor_wait);
874 } while (ret < 0 && errno == EINTR);
875 assert(!ret);
876 break;
877 case 0: /* no timeout */
878 break;
879 }
880 }
881
882 static
883 void lttng_ust_cleanup(int exiting)
884 {
885 cleanup_sock_info(&global_apps, exiting);
886 if (local_apps.allowed) {
887 cleanup_sock_info(&local_apps, exiting);
888 }
889 /*
890 * The teardown operations in this function all affect data structures
891 * accessed under the UST lock by the listener threads. This
892 * lock, along with the lttng_ust_comm_should_quit flag, ensures
893 * that none of these threads are accessing this data at this
894 * point.
895 */
896 lttng_ust_abi_exit();
897 lttng_ust_events_exit();
898 ltt_ring_buffer_client_discard_exit();
899 ltt_ring_buffer_client_overwrite_exit();
900 ltt_ring_buffer_metadata_client_exit();
901 exit_tracepoint();
902 if (!exiting) {
903 /* Reinitialize values for fork */
904 sem_count = 2;
905 lttng_ust_comm_should_quit = 0;
906 initialized = 0;
907 }
908 }
909
910 void __attribute__((destructor)) lttng_ust_exit(void)
911 {
912 int ret;
913
914 /*
915 * Using pthread_cancel here because:
916 * A) we don't want to hang application teardown.
917 * B) the thread is not allocating any resource.
918 */
919
920 /*
921 * Require the communication thread to quit. Synchronize with
922 * mutexes to ensure it is not in a mutex critical section when
923 * pthread_cancel is later called.
924 */
925 ust_lock();
926 lttng_ust_comm_should_quit = 1;
927 ust_unlock();
928
929 /* cancel threads */
930 ret = pthread_cancel(global_apps.ust_listener);
931 if (ret) {
932 ERR("Error cancelling global ust listener thread");
933 }
934 if (local_apps.allowed) {
935 ret = pthread_cancel(local_apps.ust_listener);
936 if (ret) {
937 ERR("Error cancelling local ust listener thread");
938 }
939 }
940 /*
941 * Do NOT join threads: use of sys_futex makes it impossible to
942 * join the threads without using async-cancel, but async-cancel
943 * is delivered by a signal, which could hit the target thread
944 * anywhere in its code path, including while the ust_lock() is
945 * held, causing a deadlock for the other thread. Let the OS
946 * clean up the threads if they are stalled in a syscall.
947 */
948 lttng_ust_cleanup(1);
949 }
950
951 /*
952 * We exclude the worker threads across fork and clone (except
953 * CLONE_VM), because these system calls only keep the forking thread
954 * running in the child. Therefore, we don't want to call fork or clone
955 * in the middle of a tracepoint or ust tracing state modification.
956 * Holding this mutex protects these structures across fork and clone.
957 */
958 void ust_before_fork(sigset_t *save_sigset)
959 {
960 /*
961 * Disable signals. This is to prevent the child from intervening
962 * before it is properly set up for tracing. It is safer to
963 * disable all signals, because then we know we are not breaking
964 * anything by restoring the original mask.
965 */
966 sigset_t all_sigs;
967 int ret;
968
969 /* Disable signals */
970 sigfillset(&all_sigs);
971 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
972 if (ret == -1) {
973 PERROR("sigprocmask");
974 }
975 ust_lock();
976 rcu_bp_before_fork();
977 }
978
979 static void ust_after_fork_common(sigset_t *restore_sigset)
980 {
981 int ret;
982
983 DBG("process %d", getpid());
984 ust_unlock();
985 /* Restore signals */
986 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
987 if (ret == -1) {
988 PERROR("sigprocmask");
989 }
990 }
991
992 void ust_after_fork_parent(sigset_t *restore_sigset)
993 {
994 DBG("process %d", getpid());
995 rcu_bp_after_fork_parent();
996 /* Release mutexes and reenable signals */
997 ust_after_fork_common(restore_sigset);
998 }
999
1000 /*
1001 * After fork, in the child, we need to cleanup all the leftover state,
1002 * except the worker thread which already magically disappeared thanks
1003 * to the weird Linux fork semantics. After tidying up, we call
1004 * lttng_ust_init() again to start over as a new PID.
1005 *
1006 * This is meant for forks() that have tracing in the child between the
1007 * fork and following exec call (if there is any).
1008 */
1009 void ust_after_fork_child(sigset_t *restore_sigset)
1010 {
1011 DBG("process %d", getpid());
1012 /* Release urcu mutexes */
1013 rcu_bp_after_fork_child();
1014 lttng_ust_cleanup(0);
1015 lttng_context_vtid_reset();
1016 /* Release mutexes and reenable signals */
1017 ust_after_fork_common(restore_sigset);
1018 lttng_ust_init();
1019 }