Close stream and channel file descriptors as soon as passed to sessiond
lttng-ust.git: liblttng-ust/lttng-ust-comm.c
/*
 * lttng-ust-comm.c
 *
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/prctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <assert.h>
#include <signal.h>
#include <urcu/uatomic.h>
#include <urcu/futex.h>
#include <urcu/compiler.h>

#include <lttng/ust-events.h>
#include <lttng/ust-abi.h>
#include <lttng/ust.h>
#include <ust-comm.h>
#include <usterr-signal-safe.h>
#include "tracepoint-internal.h"
#include "ltt-tracer-core.h"

/*
 * Has lttng ust comm constructor been called ?
 */
static int initialized;

/*
 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
 * Held when handling a command, also held by fork() to deal with
 * removal of threads, and by exit path.
 */

/* Should the ust comm thread quit ? */
static int lttng_ust_comm_should_quit;

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from sessiond daemon
 *   (will let the sessiond daemon enable sessions before main
 *   starts.)
 * - sessiond daemon is not reachable.
 * - timeout (ensuring applications are resilient to session
 *   daemon problems).
 */
static sem_t constructor_wait;
/*
 * Doing this for both the global and local sessiond.
 */
static int sem_count = { 2 };

/*
 * Info about socket and associated listener thread.
 */
struct sock_info {
	const char *name;
	pthread_t ust_listener;	/* listener thread */
	int root_handle;
	int constructor_sem_posted;
	int allowed;
	int global;

	char sock_path[PATH_MAX];
	int socket;

	char wait_shm_path[PATH_MAX];
	char *wait_shm_mmap;
};
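
/*
 * There is one sock_info per session daemon scope the application can
 * register to: the system-wide ("global") sessiond and the per-user
 * ("local") sessiond. Each instance is serviced by its own listener
 * thread, created from the library constructor below.
 */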

/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.global = 1,

	.root_handle = -1,
	.allowed = 1,

	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
	.socket = -1,

	.wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
};

/* TODO: allow global_apps_sock_path override */

struct sock_info local_apps = {
	.name = "local",
	.global = 0,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */

	.socket = -1,
};
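
/*
 * local_apps.sock_path and local_apps.wait_shm_path are deliberately left
 * empty here: they are filled in at constructor time by setup_local_apps(),
 * which also decides whether per-user tracing is allowed for this process.
 */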

static int wait_poll_fallback;

extern void ltt_ring_buffer_client_overwrite_init(void);
extern void ltt_ring_buffer_client_discard_init(void);
extern void ltt_ring_buffer_metadata_client_init(void);
extern void ltt_ring_buffer_client_overwrite_exit(void);
extern void ltt_ring_buffer_client_discard_exit(void);
extern void ltt_ring_buffer_metadata_client_exit(void);

static
int setup_local_apps(void)
{
	const char *home_dir;
	uid_t uid;

	uid = getuid();
	/*
	 * Disallow per-user tracing for setuid binaries.
	 */
	if (uid != geteuid()) {
		local_apps.allowed = 0;
		return 0;
	} else {
		local_apps.allowed = 1;
	}
	home_dir = (const char *) getenv("HOME");
	if (!home_dir)
		return -ENOENT;
	snprintf(local_apps.sock_path, PATH_MAX,
		DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
	snprintf(local_apps.wait_shm_path, PATH_MAX,
		DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
	return 0;
}
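
/*
 * DEFAULT_HOME_APPS_UNIX_SOCK and DEFAULT_HOME_APPS_WAIT_SHM_PATH are
 * printf-style format strings shared with the session daemon (defined with
 * the other DEFAULT_* communication macros): the first takes the home
 * directory, the second the uid, yielding a per-user socket path and wait
 * shm name.
 */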

static
int register_app_to_sessiond(int socket)
{
	ssize_t ret;
	int prctl_ret;
	struct {
		uint32_t major;
		uint32_t minor;
		pid_t pid;
		pid_t ppid;
		uid_t uid;
		gid_t gid;
		uint32_t bits_per_long;
		char name[16];	/* process name */
	} reg_msg;

	reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
	reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
	reg_msg.pid = getpid();
	reg_msg.ppid = getppid();
	reg_msg.uid = getuid();
	reg_msg.gid = getgid();
	reg_msg.bits_per_long = CAA_BITS_PER_LONG;
	prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0);
	if (prctl_ret) {
		ERR("Error executing prctl");
		return -errno;
	}

	ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
	if (ret >= 0 && ret != sizeof(reg_msg))
		return -EIO;
	return ret;
}

static
int send_reply(int sock, struct ustcomm_ust_reply *lur)
{
	ssize_t len;

	len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
	switch (len) {
	case sizeof(*lur):
		DBG("message successfully sent");
		return 0;
	case -1:
		if (errno == ECONNRESET) {
			printf("remote end closed connection\n");
			return 0;
		}
		return -1;
	default:
		printf("incorrect message size: %zd\n", len);
		return -1;
	}
}

static
int handle_register_done(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->constructor_sem_posted)
		return 0;
	sock_info->constructor_sem_posted = 1;
	if (uatomic_read(&sem_count) <= 0) {
		return 0;
	}
	ret = uatomic_add_return(&sem_count, -1);
	if (ret == 0) {
		ret = sem_post(&constructor_wait);
		assert(!ret);
	}
	return 0;
}
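
/*
 * sem_count starts at 2, one unit per sessiond (global and local).
 * handle_register_done() is invoked at most once per sock_info, either by
 * its listener thread (on LTTNG_UST_REGISTER_DONE or when the sessiond is
 * unreachable) or directly by the constructor when per-user tracing is
 * disallowed. Only when both units have been consumed is constructor_wait
 * posted, letting lttng_ust_init() return to the application, subject to
 * the timeout policy from get_timeout().
 */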

static
int handle_message(struct sock_info *sock_info,
		int sock, struct ustcomm_ust_msg *lum)
{
	int ret = 0;
	const struct lttng_ust_objd_ops *ops;
	struct ustcomm_ust_reply lur;
	int shm_fd, wait_fd;
	union ust_args args;

	ust_lock();

	memset(&lur, 0, sizeof(lur));

	if (lttng_ust_comm_should_quit) {
		ret = -EPERM;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = lttng_ust_objd_unref(lum->handle);
		break;
	default:
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args);
		else
			ret = -ENOSYS;
		break;
	}

end:
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = USTCOMM_OK;
	} else {
		//lur.ret_code = USTCOMM_SESSION_FAIL;
		lur.ret_code = ret;
	}
	switch (lum->cmd) {
	case LTTNG_UST_STREAM:
		/*
		 * Special-case reply to send stream info.
		 * Use lum.u output.
		 */
		lur.u.stream.memory_map_size = *args.stream.memory_map_size;
		shm_fd = *args.stream.shm_fd;
		wait_fd = *args.stream.wait_fd;
		break;
	case LTTNG_UST_METADATA:
	case LTTNG_UST_CHANNEL:
		lur.u.channel.memory_map_size = *args.channel.memory_map_size;
		shm_fd = *args.channel.shm_fd;
		wait_fd = *args.channel.wait_fd;
		break;
	case LTTNG_UST_TRACER_VERSION:
		lur.u.version = lum->u.version;
		break;
	case LTTNG_UST_TRACEPOINT_LIST_GET:
		memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
		break;
	}
	ret = send_reply(sock, &lur);
	if (ret < 0) {
		perror("error sending reply");
		goto error;
	}

	if ((lum->cmd == LTTNG_UST_STREAM
			|| lum->cmd == LTTNG_UST_CHANNEL
			|| lum->cmd == LTTNG_UST_METADATA)
			&& lur.ret_code == USTCOMM_OK) {
		/* we also need to send the file descriptors. */
		ret = ustcomm_send_fds_unix_sock(sock,
			&shm_fd, &shm_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send shm_fd");
			goto error;
		}
		ret = ustcomm_send_fds_unix_sock(sock,
			&wait_fd, &wait_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send wait_fd");
			goto error;
		}
	}
	/*
	 * We still have the memory map reference, and the fds have been
	 * sent to the sessiond. We can therefore close those fds.
	 */
	if (lur.ret_code == USTCOMM_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing stream shm_fd");
				}
				*args.stream.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing stream wait_fd");
				}
				*args.stream.wait_fd = -1;
			}
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing channel shm_fd");
				}
				*args.channel.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing channel wait_fd");
				}
				*args.channel.wait_fd = -1;
			}
			break;
		}
	}

error:
	ust_unlock();
	return ret;
}
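
/*
 * File descriptor life cycle for LTTNG_UST_STREAM / LTTNG_UST_CHANNEL /
 * LTTNG_UST_METADATA (the point of this commit): the command handler
 * creates the shm and wait fds, ustcomm_send_fds_unix_sock() passes them
 * to the sessiond over the Unix socket (fd passing via SCM_RIGHTS ancillary
 * data is assumed here), and the application-side copies are then closed
 * right away. The buffer memory itself stays valid because the mmap
 * reference is kept on the application side.
 */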

static
void cleanup_sock_info(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing apps socket");
		}
		sock_info->socket = -1;
	}
	if (sock_info->root_handle != -1) {
		ret = lttng_ust_objd_unref(sock_info->root_handle);
		if (ret) {
			ERR("Error unref root handle");
		}
		sock_info->root_handle = -1;
	}
	sock_info->constructor_sem_posted = 0;
	if (sock_info->wait_shm_mmap) {
		ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
		if (ret) {
			ERR("Error unmapping wait shm");
		}
		sock_info->wait_shm_mmap = NULL;
	}
}

/*
 * Using fork to set umask in the child process (not multi-thread safe).
 * We deal with the shm_open vs ftruncate race (happening when the
 * sessiond owns the shm and does not let everybody modify it, to ensure
 * safety against shm_unlink) by simply letting the mmap fail and
 * retrying after a few seconds.
 * For global shm, everybody has rw access to it until the sessiond
 * starts.
 */
static
int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
{
	int wait_shm_fd, ret;
	pid_t pid;

	/*
	 * Try to open read-only.
	 */
	wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
	if (wait_shm_fd >= 0) {
		goto end;
	} else if (wait_shm_fd < 0 && errno != ENOENT) {
		/*
		 * Read-only open did not work, and it's not because the
		 * entry was not present. It's a failure that prohibits
		 * using shm.
		 */
		ERR("Error opening shm %s", sock_info->wait_shm_path);
		goto end;
	}
	/*
	 * If the open failed because the file did not exist, try
	 * creating it ourselves.
	 */
	pid = fork();
	if (pid > 0) {
		int status;

		/*
		 * Parent: wait for child to return, in which case the
		 * shared memory map will have been created.
		 */
		pid = wait(&status);
		if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			wait_shm_fd = -1;
			goto end;
		}
		/*
		 * Try to open read-only again after creation.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
		if (wait_shm_fd < 0) {
			/*
			 * Read-only open did not work. It's a failure
			 * that prohibits using shm.
			 */
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			goto end;
		}
		goto end;
	} else if (pid == 0) {
		int create_mode;

		/* Child */
		create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
		if (sock_info->global)
			create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
		/*
		 * We're alone in a child process, so we can modify the
		 * process-wide umask.
		 */
		umask(~create_mode);
		/*
		 * Try creating shm (or get rw access).
		 * We don't do an exclusive open, because we allow other
		 * processes to create+ftruncate it concurrently.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path,
				O_RDWR | O_CREAT, create_mode);
		if (wait_shm_fd >= 0) {
			ret = ftruncate(wait_shm_fd, mmap_size);
			if (ret) {
				PERROR("ftruncate");
				exit(EXIT_FAILURE);
			}
			exit(EXIT_SUCCESS);
		}
		/*
		 * For local shm, we need to have rw access to accept
		 * opening it: this means the local sessiond will be
		 * able to wake us up. For global shm, we open it even
		 * if rw access is not granted, because the root.root
		 * sessiond will be able to override all rights and wake
		 * us up.
		 */
		if (!sock_info->global && errno != EACCES) {
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			exit(EXIT_FAILURE);
		}
		/*
		 * The shm exists, but we cannot open it RW. Report
		 * success.
		 */
		exit(EXIT_SUCCESS);
	} else {
		return -1;
	}
end:
	if (wait_shm_fd >= 0 && !sock_info->global) {
		struct stat statbuf;

		/*
		 * Ensure that our user is the owner of the shm file for
		 * local shm. If we do not own the file, it means our
		 * sessiond will not have access to wake us up (there is
		 * probably a rogue process trying to fake our
		 * sessiond). Fallback to polling method in this case.
		 */
		ret = fstat(wait_shm_fd, &statbuf);
		if (ret) {
			PERROR("fstat");
			goto error_close;
		}
		if (statbuf.st_uid != getuid())
			goto error_close;
	}
	return wait_shm_fd;

error_close:
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	return -1;
}
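
/*
 * get_wait_shm() returns a read-only file descriptor on the wait shm, or
 * -1 on failure. The caller only needs the fd long enough to map the page:
 * get_map_shm() below closes it immediately after taking the mmap
 * reference.
 */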

static
char *get_map_shm(struct sock_info *sock_info)
{
	size_t mmap_size = sysconf(_SC_PAGE_SIZE);
	int wait_shm_fd, ret;
	char *wait_shm_mmap;

	wait_shm_fd = get_wait_shm(sock_info, mmap_size);
	if (wait_shm_fd < 0) {
		goto error;
	}
	wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
			MAP_SHARED, wait_shm_fd, 0);
	/* close shm fd immediately after taking the mmap reference */
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	if (wait_shm_mmap == MAP_FAILED) {
		DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
		goto error;
	}
	return wait_shm_mmap;

error:
	return NULL;
}

static
void wait_for_sessiond(struct sock_info *sock_info)
{
	int ret;

	ust_lock();
	if (lttng_ust_comm_should_quit) {
		goto quit;
	}
	if (wait_poll_fallback) {
		goto error;
	}
	if (!sock_info->wait_shm_mmap) {
		sock_info->wait_shm_mmap = get_map_shm(sock_info);
		if (!sock_info->wait_shm_mmap)
			goto error;
	}
	ust_unlock();

	DBG("Waiting for %s apps sessiond", sock_info->name);
	/* Wait for futex wakeup */
	if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
		ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
			FUTEX_WAIT, 0, NULL, NULL, 0);
		if (ret < 0) {
			if (errno == EFAULT) {
				wait_poll_fallback = 1;
				DBG(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
				if (ust_debug())
					PERROR("futex");
			}
		}
	}
	return;

quit:
	ust_unlock();
	return;

error:
	ust_unlock();
	return;
}
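
/*
 * Wakeup protocol sketch (application side, as implemented above): the wait
 * shm word starts at 0, and the application FUTEX_WAITs on it through its
 * read-only mapping. A session daemon that becomes available is expected to
 * map the same shm read-write, set the word to a non-zero value and issue
 * FUTEX_WAKE, at which point the listener retries the connection. The
 * sessiond-side behaviour is an assumption based on this file alone; the
 * lttng-tools sources are the authoritative reference.
 */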

/*
 * This thread does not allocate any resource, except within
 * handle_message, within mutex protection. This mutex protects against
 * fork and exit.
 * The other moment it allocates resources is at socket connection, which
 * is also protected by the mutex.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret, prev_connect_failed = 0, has_waited = 0;

	/* Restart trying to connect to the session daemon */
restart:
	if (prev_connect_failed) {
		/* Wait for sessiond availability */
		wait_for_sessiond(sock_info);
		if (has_waited) {
			has_waited = 0;
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure. This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		}
		has_waited = 1;
		prev_connect_failed = 0;
	}
	ust_lock();

	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}

	/* Register */
	ret = ustcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}

	sock_info->socket = sock = ret;

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret < 0) {
			ERR("Error creating root handle");
			ust_unlock();
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_app_to_sessiond(sock);
	if (ret < 0) {
		ERR("Error registering to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	ust_unlock();

	for (;;) {
		ssize_t len;
		struct ustcomm_ust_msg lum;

		len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
			ust_lock();
			/*
			 * Either sessiond has shutdown or refused us by closing the socket.
			 * In either case, we don't want to delay constructor execution,
			 * and we need to wait before retry.
			 */
			prev_connect_failed = 1;
			/*
			 * If we cannot register to the sessiond daemon, don't
			 * delay constructor execution.
			 */
			ret = handle_register_done(sock_info);
			assert(!ret);
			ust_unlock();
			goto end;
		case sizeof(lum):
			DBG("message received\n");
			ret = handle_message(sock_info, sock, &lum);
			if (ret < 0) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		case -1:
			DBG("Receive failed from lttng-sessiond with errno %d", errno);
			if (errno == ECONNRESET) {
				ERR("%s remote end closed connection\n", sock_info->name);
				goto end;
			}
			goto end;
		default:
			ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
			continue;
		}

	}
end:
	goto restart;	/* try to reconnect */
quit:
	return NULL;
}
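
/*
 * Listener life cycle, in short: connect to the sessiond socket, create the
 * root object handle once, register the application, then block in
 * ustcomm_recv_unix_sock() handling one command at a time. Any connection,
 * registration or receive failure marks prev_connect_failed, posts the
 * constructor semaphore so the application is never held up, and loops back
 * to restart, where wait_for_sessiond() blocks until a sessiond signals its
 * presence.
 */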

/*
 * Return values: -1: wait forever. 0: don't wait. 1: timed wait.
 */
static
int get_timeout(struct timespec *constructor_timeout)
{
	long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
	char *str_delay;
	int ret;

	str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
	if (str_delay) {
		constructor_delay_ms = strtol(str_delay, NULL, 10);
	}

	switch (constructor_delay_ms) {
	case -1:/* fall-through */
	case 0:
		return constructor_delay_ms;
	default:
		break;
	}

	/*
	 * If we are unable to find the current time, don't wait.
	 */
	ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
	if (ret) {
		return 0;
	}
	constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
	constructor_timeout->tv_nsec +=
		(constructor_delay_ms % 1000UL) * 1000000UL;
	if (constructor_timeout->tv_nsec >= 1000000000UL) {
		constructor_timeout->tv_sec++;
		constructor_timeout->tv_nsec -= 1000000000UL;
	}
	return 1;
}
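
/*
 * Usage note: LTTNG_UST_REGISTER_TIMEOUT is expressed in milliseconds. A
 * positive value bounds how long the constructor blocks waiting for
 * sessiond registration, 0 means do not wait at all, and -1 means wait
 * forever. For example, running the application with
 * LTTNG_UST_REGISTER_TIMEOUT=100 caps the wait at 100 ms.
 */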

/*
 * sessiond monitoring thread: monitor presence of global and per-user
 * sessiond by polling the application common named pipe.
 */
/* TODO */

void __attribute__((constructor)) lttng_ust_init(void)
{
	struct timespec constructor_timeout;
	int timeout_mode;
	int ret;

	if (uatomic_xchg(&initialized, 1) == 1)
		return;

	/*
	 * We want precise control over the order in which we construct
	 * our sub-libraries vs starting to receive commands from
	 * sessiond (otherwise leading to errors when trying to create
	 * sessions before the init functions are completed).
	 */
	init_usterr();
	init_tracepoint();
	ltt_ring_buffer_metadata_client_init();
	ltt_ring_buffer_client_overwrite_init();
	ltt_ring_buffer_client_discard_init();

	timeout_mode = get_timeout(&constructor_timeout);

	ret = sem_init(&constructor_wait, 0, 0);
	assert(!ret);

	ret = setup_local_apps();
	if (ret) {
		ERR("Error setting up local apps");
	}
	ret = pthread_create(&global_apps.ust_listener, NULL,
			ust_listener_thread, &global_apps);

	if (local_apps.allowed) {
		ret = pthread_create(&local_apps.ust_listener, NULL,
				ust_listener_thread, &local_apps);
	} else {
		handle_register_done(&local_apps);
	}

	switch (timeout_mode) {
	case 1:	/* timed wait */
		do {
			ret = sem_timedwait(&constructor_wait,
					&constructor_timeout);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0 && errno == ETIMEDOUT) {
			ERR("Timed out waiting for ltt-sessiond");
		} else {
			assert(!ret);
		}
		break;
	case -1:/* wait forever */
		do {
			ret = sem_wait(&constructor_wait);
		} while (ret < 0 && errno == EINTR);
		assert(!ret);
		break;
	case 0:	/* don't wait */
		break;
	}
}

static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps);
	if (local_apps.allowed) {
		cleanup_sock_info(&local_apps);
	}
	lttng_ust_abi_exit();
	lttng_ust_events_exit();
	ltt_ring_buffer_client_discard_exit();
	ltt_ring_buffer_client_overwrite_exit();
	ltt_ring_buffer_metadata_client_exit();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}

void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the thread is not allocating any resource.
	 */

	/*
	 * Require the communication thread to quit. Synchronize with
	 * mutexes to ensure it is not in a mutex critical section when
	 * pthread_cancel is later called.
	 */
	ust_lock();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	ret = pthread_cancel(global_apps.ust_listener);
	if (ret) {
		ERR("Error cancelling global ust listener thread");
	}
	if (local_apps.allowed) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread");
		}
	}
	lttng_ust_cleanup(1);
}

/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of a tracepoint or ust tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(sigset_t *save_sigset)
{
	/*
	 * Disable signals. This is to avoid that the child intervenes
	 * before it is properly setup for tracing. It is safer to
	 * disable all signals, because then we know we are not breaking
	 * anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int ret;

	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
	ust_lock();
	rcu_bp_before_fork();
}

static void ust_after_fork_common(sigset_t *restore_sigset)
{
	int ret;

	DBG("process %d", getpid());
	ust_unlock();
	/* Restore signals */
	ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
}

void ust_after_fork_parent(sigset_t *restore_sigset)
{
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
}

/*
 * After fork, in the child, we need to cleanup all the leftover state,
 * except the worker thread which already magically disappeared thanks
 * to the weird Linux fork semantics. After tidying up, we call
 * lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for forks() that have tracing in the child between the
 * fork and following exec call (if there is any).
 */
void ust_after_fork_child(sigset_t *restore_sigset)
{
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	lttng_ust_cleanup(0);
	lttng_context_vtid_reset();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
	lttng_ust_init();
}
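
/*
 * These hooks are meant to be called in pairs around fork() by the fork
 * handling helper (typically the LD_PRELOADed liblttng-ust-fork wrapper),
 * roughly as follows -- a sketch, not the wrapper's actual code:
 *
 *	sigset_t sigset;
 *	pid_t pid;
 *
 *	ust_before_fork(&sigset);
 *	pid = fork();
 *	if (pid == 0)
 *		ust_after_fork_child(&sigset);
 *	else
 *		ust_after_fork_parent(&sigset);
 */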