Add get proc name wrapper for FreeBSD
lttng-ust.git: liblttng-ust/lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <ust-comm.h>
45 #include <usterr-signal-safe.h>
46 #include "tracepoint-internal.h"
47 #include "ltt-tracer-core.h"
48 #include "compat.h"
49
50 /*
51  * Has the lttng-ust comm constructor been called?
52 */
53 static int initialized;
54
55 /*
56 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
57 * Held when handling a command, also held by fork() to deal with
58  * removal of threads, and by the exit path.
59 */
60
61 /* Should the ust comm thread quit? */
62 static int lttng_ust_comm_should_quit;
63
64 /*
65  * Wait for any of these conditions before continuing to the main
66 * program:
67 * - the register_done message from sessiond daemon
68 * (will let the sessiond daemon enable sessions before main
69 * starts.)
70 * - sessiond daemon is not reachable.
71 * - timeout (ensuring applications are resilient to session
72 * daemon problems).
73 */
74 static sem_t constructor_wait;
75 /*
76  * We do this for both the global and the local (per-user) sessiond, hence a count of 2.
77 */
78 static int sem_count = { 2 };
79
80 /*
81 * Info about socket and associated listener thread.
82 */
83 struct sock_info {
84 const char *name;
85 pthread_t ust_listener; /* listener thread */
86 int root_handle;
87 int constructor_sem_posted;
88 int allowed;
89 int global;
90
91 char sock_path[PATH_MAX];
92 int socket;
93
94 char wait_shm_path[PATH_MAX];
95 char *wait_shm_mmap;
96 };
97
98 /* Socket from app (connect) to session daemon (listen) for communication */
99 struct sock_info global_apps = {
100 .name = "global",
101 .global = 1,
102
103 .root_handle = -1,
104 .allowed = 1,
105
106 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
107 .socket = -1,
108
109 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
110 };
111
112 /* TODO: allow global_apps_sock_path override */
113
114 struct sock_info local_apps = {
115 .name = "local",
116 .global = 0,
117 .root_handle = -1,
118 .allowed = 0, /* Check setuid bit first */
119
120 .socket = -1,
121 };
122
123 static int wait_poll_fallback;
124
125 extern void ltt_ring_buffer_client_overwrite_init(void);
126 extern void ltt_ring_buffer_client_discard_init(void);
127 extern void ltt_ring_buffer_metadata_client_init(void);
128 extern void ltt_ring_buffer_client_overwrite_exit(void);
129 extern void ltt_ring_buffer_client_discard_exit(void);
130 extern void ltt_ring_buffer_metadata_client_exit(void);
131
132 static
133 int setup_local_apps(void)
134 {
135 const char *home_dir;
136 uid_t uid;
137
138 uid = getuid();
139 /*
140 * Disallow per-user tracing for setuid binaries.
141 */
142 if (uid != geteuid()) {
143 local_apps.allowed = 0;
144 return 0;
145 } else {
146 local_apps.allowed = 1;
147 }
148 home_dir = (const char *) getenv("HOME");
149 if (!home_dir)
150 return -ENOENT;
151 snprintf(local_apps.sock_path, PATH_MAX,
152 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
153 snprintf(local_apps.wait_shm_path, PATH_MAX,
154 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
155 return 0;
156 }
157
158 static
159 int register_app_to_sessiond(int socket)
160 {
161 ssize_t ret;
162 struct {
163 uint32_t major;
164 uint32_t minor;
165 pid_t pid;
166 pid_t ppid;
167 uid_t uid;
168 gid_t gid;
169 uint32_t bits_per_long;
170 char name[16]; /* process name */
171 } reg_msg;
172
173 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
174 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
175 reg_msg.pid = getpid();
176 reg_msg.ppid = getppid();
177 reg_msg.uid = getuid();
178 reg_msg.gid = getgid();
179 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
180 lttng_ust_getprocname(reg_msg.name);
181
182 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
183 if (ret >= 0 && ret != sizeof(reg_msg))
184 return -EIO;
185 return ret;
186 }
187
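/*
 * register_app_to_sessiond() fills reg_msg.name through
 * lttng_ust_getprocname(), the wrapper added to compat.h by this
 * commit (compat.h is not shown on this page). The sketch below is
 * only an illustration of what such a wrapper could look like, using
 * prctl(PR_GET_NAME) on Linux and getprogname() on FreeBSD; the
 * actual compat.h may differ.
 */
#if 0	/* illustration only, not compiled */
#ifdef __linux__
#include <sys/prctl.h>

static inline
void lttng_ust_getprocname(char *name)
{
	/* PR_GET_NAME copies up to 16 bytes, including the final NUL. */
	(void) prctl(PR_GET_NAME, (unsigned long) name, 0, 0, 0);
}
#elif defined(__FreeBSD__)
#include <stdlib.h>
#include <string.h>

static inline
void lttng_ust_getprocname(char *name)
{
	const char *bsd_name;

	bsd_name = getprogname();	/* owned by libc, do not free */
	if (!bsd_name) {
		name[0] = '\0';
		return;
	}
	strncpy(name, bsd_name, 16);
	name[15] = '\0';	/* reg_msg.name is 16 bytes */
}
#endif
#endif	/* illustration only */
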
188 static
189 int send_reply(int sock, struct ustcomm_ust_reply *lur)
190 {
191 ssize_t len;
192
193 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
194 switch (len) {
195 case sizeof(*lur):
196 DBG("message successfully sent");
197 return 0;
198 case -1:
199 if (errno == ECONNRESET) {
200 printf("remote end closed connection\n");
201 return 0;
202 }
203 return -1;
204 default:
205 printf("incorrect message size: %zd\n", len);
206 return -1;
207 }
208 }
209
210 static
211 int handle_register_done(struct sock_info *sock_info)
212 {
213 int ret;
214
215 if (sock_info->constructor_sem_posted)
216 return 0;
217 sock_info->constructor_sem_posted = 1;
218 if (uatomic_read(&sem_count) <= 0) {
219 return 0;
220 }
221 ret = uatomic_add_return(&sem_count, -1);
222 if (ret == 0) {
223 ret = sem_post(&constructor_wait);
224 assert(!ret);
225 }
226 return 0;
227 }
228
229 static
230 int handle_message(struct sock_info *sock_info,
231 int sock, struct ustcomm_ust_msg *lum)
232 {
233 int ret = 0;
234 const struct lttng_ust_objd_ops *ops;
235 struct ustcomm_ust_reply lur;
236 int shm_fd, wait_fd;
237 union ust_args args;
238
239 ust_lock();
240
241 memset(&lur, 0, sizeof(lur));
242
243 if (lttng_ust_comm_should_quit) {
244 ret = -EPERM;
245 goto end;
246 }
247
248 ops = objd_ops(lum->handle);
249 if (!ops) {
250 ret = -ENOENT;
251 goto end;
252 }
253
254 switch (lum->cmd) {
255 case LTTNG_UST_REGISTER_DONE:
256 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
257 ret = handle_register_done(sock_info);
258 else
259 ret = -EINVAL;
260 break;
261 case LTTNG_UST_RELEASE:
262 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
263 ret = -EPERM;
264 else
265 ret = lttng_ust_objd_unref(lum->handle);
266 break;
267 default:
268 if (ops->cmd)
269 ret = ops->cmd(lum->handle, lum->cmd,
270 (unsigned long) &lum->u,
271 &args);
272 else
273 ret = -ENOSYS;
274 break;
275 }
276
277 end:
278 lur.handle = lum->handle;
279 lur.cmd = lum->cmd;
280 lur.ret_val = ret;
281 if (ret >= 0) {
282 lur.ret_code = USTCOMM_OK;
283 } else {
284 //lur.ret_code = USTCOMM_SESSION_FAIL;
285 lur.ret_code = ret;
286 }
287 switch (lum->cmd) {
288 case LTTNG_UST_STREAM:
289 /*
290 * Special-case reply to send stream info.
291 * Use lum.u output.
292 */
293 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
294 shm_fd = *args.stream.shm_fd;
295 wait_fd = *args.stream.wait_fd;
296 break;
297 case LTTNG_UST_METADATA:
298 case LTTNG_UST_CHANNEL:
299 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
300 shm_fd = *args.channel.shm_fd;
301 wait_fd = *args.channel.wait_fd;
302 break;
303 case LTTNG_UST_TRACER_VERSION:
304 lur.u.version = lum->u.version;
305 break;
306 case LTTNG_UST_TRACEPOINT_LIST_GET:
307 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
308 break;
309 }
310 ret = send_reply(sock, &lur);
311 if (ret < 0) {
312 perror("error sending reply");
313 goto error;
314 }
315
316 if ((lum->cmd == LTTNG_UST_STREAM
317 || lum->cmd == LTTNG_UST_CHANNEL
318 || lum->cmd == LTTNG_UST_METADATA)
319 && lur.ret_code == USTCOMM_OK) {
320 /* we also need to send the file descriptors. */
321 ret = ustcomm_send_fds_unix_sock(sock,
322 &shm_fd, &shm_fd,
323 1, sizeof(int));
324 if (ret < 0) {
325 perror("send shm_fd");
326 goto error;
327 }
328 ret = ustcomm_send_fds_unix_sock(sock,
329 &wait_fd, &wait_fd,
330 1, sizeof(int));
331 if (ret < 0) {
332 perror("send wait_fd");
333 goto error;
334 }
335 }
336 /*
337 * We still have the memory map reference, and the fds have been
338 * sent to the sessiond. We can therefore close those fds. Note
339 * that we keep the write side of the wait_fd open, but close
340 * the read side.
341 */
342 if (lur.ret_code == USTCOMM_OK) {
343 switch (lum->cmd) {
344 case LTTNG_UST_STREAM:
345 if (shm_fd >= 0) {
346 ret = close(shm_fd);
347 if (ret) {
348 PERROR("Error closing stream shm_fd");
349 }
350 *args.stream.shm_fd = -1;
351 }
352 if (wait_fd >= 0) {
353 ret = close(wait_fd);
354 if (ret) {
355 PERROR("Error closing stream wait_fd");
356 }
357 *args.stream.wait_fd = -1;
358 }
359 break;
360 case LTTNG_UST_METADATA:
361 case LTTNG_UST_CHANNEL:
362 if (shm_fd >= 0) {
363 ret = close(shm_fd);
364 if (ret) {
365 PERROR("Error closing channel shm_fd");
366 }
367 *args.channel.shm_fd = -1;
368 }
369 if (wait_fd >= 0) {
370 ret = close(wait_fd);
371 if (ret) {
372 PERROR("Error closing channel wait_fd");
373 }
374 *args.channel.wait_fd = -1;
375 }
376 break;
377 }
378 }
379
380 error:
381 ust_unlock();
382 return ret;
383 }
384
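/*
 * handle_message() hands the stream/channel shm_fd and wait_fd to the
 * session daemon through ustcomm_send_fds_unix_sock(). On an AF_UNIX
 * socket, file descriptors travel as SCM_RIGHTS ancillary data; the
 * sketch below shows that mechanism for a single descriptor.
 * send_one_fd() is a hypothetical helper for illustration, not the
 * actual ustcomm implementation.
 */
#if 0	/* illustration only, not compiled */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static
int send_one_fd(int sock, int fd)
{
	/* One byte of regular data carries the control message. */
	char dummy = 0;
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(&u, 0, sizeof(u));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}
#endif	/* illustration only */
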
385 static
386 void cleanup_sock_info(struct sock_info *sock_info)
387 {
388 int ret;
389
390 if (sock_info->socket != -1) {
391 ret = close(sock_info->socket);
392 if (ret) {
393 ERR("Error closing apps socket");
394 }
395 sock_info->socket = -1;
396 }
397 if (sock_info->root_handle != -1) {
398 ret = lttng_ust_objd_unref(sock_info->root_handle);
399 if (ret) {
400 ERR("Error unref root handle");
401 }
402 sock_info->root_handle = -1;
403 }
404 sock_info->constructor_sem_posted = 0;
405 if (sock_info->wait_shm_mmap) {
406 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
407 if (ret) {
408 ERR("Error unmapping wait shm");
409 }
410 sock_info->wait_shm_mmap = NULL;
411 }
412 }
413
414 /*
415  * Use fork to set the umask in a child process, because changing the process-wide umask is not multi-thread safe.
416 * We deal with the shm_open vs ftruncate race (happening when the
417 * sessiond owns the shm and does not let everybody modify it, to ensure
418 * safety against shm_unlink) by simply letting the mmap fail and
419 * retrying after a few seconds.
420 * For global shm, everybody has rw access to it until the sessiond
421 * starts.
422 */
423 static
424 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
425 {
426 int wait_shm_fd, ret;
427 pid_t pid;
428
429 /*
430 * Try to open read-only.
431 */
432 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
433 if (wait_shm_fd >= 0) {
434 goto end;
435 } else if (wait_shm_fd < 0 && errno != ENOENT) {
436 /*
437 	 * Read-only open did not work, and it's not because the
438 * entry was not present. It's a failure that prohibits
439 * using shm.
440 */
441 ERR("Error opening shm %s", sock_info->wait_shm_path);
442 goto end;
443 }
444 /*
445 * If the open failed because the file did not exist, try
446 	 * creating it ourselves.
447 */
448 pid = fork();
449 if (pid > 0) {
450 int status;
451
452 /*
453 * Parent: wait for child to return, in which case the
454 * shared memory map will have been created.
455 */
456 pid = wait(&status);
457 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
458 wait_shm_fd = -1;
459 goto end;
460 }
461 /*
462 * Try to open read-only again after creation.
463 */
464 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
465 if (wait_shm_fd < 0) {
466 /*
467 			 * Read-only open did not work. It's a failure
468 * that prohibits using shm.
469 */
470 ERR("Error opening shm %s", sock_info->wait_shm_path);
471 goto end;
472 }
473 goto end;
474 } else if (pid == 0) {
475 int create_mode;
476
477 /* Child */
478 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
479 if (sock_info->global)
480 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
481 /*
482 * We're alone in a child process, so we can modify the
483 * process-wide umask.
484 */
485 umask(~create_mode);
486 /*
487 * Try creating shm (or get rw access).
488 * We don't do an exclusive open, because we allow other
489 * processes to create+ftruncate it concurrently.
490 */
491 wait_shm_fd = shm_open(sock_info->wait_shm_path,
492 O_RDWR | O_CREAT, create_mode);
493 if (wait_shm_fd >= 0) {
494 ret = ftruncate(wait_shm_fd, mmap_size);
495 if (ret) {
496 PERROR("ftruncate");
497 exit(EXIT_FAILURE);
498 }
499 exit(EXIT_SUCCESS);
500 }
501 /*
502 * For local shm, we need to have rw access to accept
503 * opening it: this means the local sessiond will be
504 * able to wake us up. For global shm, we open it even
505 * if rw access is not granted, because the root.root
506 * sessiond will be able to override all rights and wake
507 * us up.
508 */
509 if (!sock_info->global && errno != EACCES) {
510 ERR("Error opening shm %s", sock_info->wait_shm_path);
511 exit(EXIT_FAILURE);
512 }
513 /*
514 * The shm exists, but we cannot open it RW. Report
515 * success.
516 */
517 exit(EXIT_SUCCESS);
518 } else {
519 return -1;
520 }
521 end:
522 if (wait_shm_fd >= 0 && !sock_info->global) {
523 struct stat statbuf;
524
525 /*
526 * Ensure that our user is the owner of the shm file for
527 * local shm. If we do not own the file, it means our
528 * sessiond will not have access to wake us up (there is
529 * probably a rogue process trying to fake our
530 * sessiond). Fallback to polling method in this case.
531 */
532 ret = fstat(wait_shm_fd, &statbuf);
533 if (ret) {
534 PERROR("fstat");
535 goto error_close;
536 }
537 if (statbuf.st_uid != getuid())
538 goto error_close;
539 }
540 return wait_shm_fd;
541
542 error_close:
543 ret = close(wait_shm_fd);
544 if (ret) {
545 PERROR("Error closing fd");
546 }
547 return -1;
548 }
549
550 static
551 char *get_map_shm(struct sock_info *sock_info)
552 {
553 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
554 int wait_shm_fd, ret;
555 char *wait_shm_mmap;
556
557 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
558 if (wait_shm_fd < 0) {
559 goto error;
560 }
561 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
562 MAP_SHARED, wait_shm_fd, 0);
563 /* close shm fd immediately after taking the mmap reference */
564 ret = close(wait_shm_fd);
565 if (ret) {
566 PERROR("Error closing fd");
567 }
568 if (wait_shm_mmap == MAP_FAILED) {
569 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
570 goto error;
571 }
572 return wait_shm_mmap;
573
574 error:
575 return NULL;
576 }
577
578 static
579 void wait_for_sessiond(struct sock_info *sock_info)
580 {
581 int ret;
582
583 ust_lock();
584 if (lttng_ust_comm_should_quit) {
585 goto quit;
586 }
587 if (wait_poll_fallback) {
588 goto error;
589 }
590 if (!sock_info->wait_shm_mmap) {
591 sock_info->wait_shm_mmap = get_map_shm(sock_info);
592 if (!sock_info->wait_shm_mmap)
593 goto error;
594 }
595 ust_unlock();
596
597 DBG("Waiting for %s apps sessiond", sock_info->name);
598 /* Wait for futex wakeup */
599 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
600 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
601 FUTEX_WAIT, 0, NULL, NULL, 0);
602 if (ret < 0) {
603 if (errno == EFAULT) {
604 wait_poll_fallback = 1;
605 DBG(
606 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
607 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
608 "Please upgrade your kernel "
609 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
610 "mainline). LTTng-UST will use polling mode fallback.");
611 if (ust_debug())
612 PERROR("futex");
613 }
614 }
615 }
616 return;
617
618 quit:
619 ust_unlock();
620 return;
621
622 error:
623 ust_unlock();
624 return;
625 }
626
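/*
 * wait_for_sessiond() above maps a one-page shm object and blocks in
 * FUTEX_WAIT as long as the 32-bit word at the start of that page is 0.
 * The waker side lives in lttng-sessiond, not in this file; the sketch
 * below only illustrates the conceptual counterpart, with
 * wake_waiting_apps() as a hypothetical name.
 */
#if 0	/* illustration only, not compiled */
#include <limits.h>	/* urcu/futex.h and urcu/uatomic.h are included above */

static
void wake_waiting_apps(int32_t *wait_shm_mmap)
{
	/* Flip the futex word to 1, then wake every blocked application. */
	if (uatomic_read(wait_shm_mmap) != 1) {
		uatomic_set(wait_shm_mmap, 1);
		(void) futex_async(wait_shm_mmap, FUTEX_WAKE, INT_MAX,
				NULL, NULL, 0);
	}
}
#endif	/* illustration only */
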
627 /*
628  * This thread does not allocate any resources, except within
629  * handle_message, within mutex protection. This mutex protects against
630  * fork and exit.
631  * The other moment it allocates resources is at socket connection, which
632 * is also protected by the mutex.
633 */
634 static
635 void *ust_listener_thread(void *arg)
636 {
637 struct sock_info *sock_info = arg;
638 int sock, ret, prev_connect_failed = 0, has_waited = 0;
639
640 /* Restart trying to connect to the session daemon */
641 restart:
642 if (prev_connect_failed) {
643 /* Wait for sessiond availability with pipe */
644 wait_for_sessiond(sock_info);
645 if (has_waited) {
646 has_waited = 0;
647 /*
648 * Sleep for 5 seconds before retrying after a
649 * sequence of failure / wait / failure. This
650 * deals with a killed or broken session daemon.
651 */
652 sleep(5);
653 }
654 has_waited = 1;
655 prev_connect_failed = 0;
656 }
657 ust_lock();
658
659 if (lttng_ust_comm_should_quit) {
660 ust_unlock();
661 goto quit;
662 }
663
664 if (sock_info->socket != -1) {
665 ret = close(sock_info->socket);
666 if (ret) {
667 ERR("Error closing %s apps socket", sock_info->name);
668 }
669 sock_info->socket = -1;
670 }
671
672 /* Register */
673 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
674 if (ret < 0) {
675 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
676 prev_connect_failed = 1;
677 /*
678 * If we cannot find the sessiond daemon, don't delay
679 * constructor execution.
680 */
681 ret = handle_register_done(sock_info);
682 assert(!ret);
683 ust_unlock();
684 goto restart;
685 }
686
687 sock_info->socket = sock = ret;
688
689 /*
690 * Create only one root handle per listener thread for the whole
691 * process lifetime.
692 */
693 if (sock_info->root_handle == -1) {
694 ret = lttng_abi_create_root_handle();
695 if (ret < 0) {
696 ERR("Error creating root handle");
697 ust_unlock();
698 goto quit;
699 }
700 sock_info->root_handle = ret;
701 }
702
703 ret = register_app_to_sessiond(sock);
704 if (ret < 0) {
705 ERR("Error registering to %s apps socket", sock_info->name);
706 prev_connect_failed = 1;
707 /*
708 * If we cannot register to the sessiond daemon, don't
709 * delay constructor execution.
710 */
711 ret = handle_register_done(sock_info);
712 assert(!ret);
713 ust_unlock();
714 goto restart;
715 }
716 ust_unlock();
717
718 for (;;) {
719 ssize_t len;
720 struct ustcomm_ust_msg lum;
721
722 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
723 switch (len) {
724 case 0: /* orderly shutdown */
725 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
726 ust_lock();
727 /*
728 		 * Either sessiond has shut down or refused us by closing the socket.
729 		 * In either case, we don't want to delay constructor execution,
730 		 * and we need to wait before retrying.
731 */
732 prev_connect_failed = 1;
733 /*
734 * If we cannot register to the sessiond daemon, don't
735 * delay constructor execution.
736 */
737 ret = handle_register_done(sock_info);
738 assert(!ret);
739 ust_unlock();
740 goto end;
741 case sizeof(lum):
742 DBG("message received\n");
743 ret = handle_message(sock_info, sock, &lum);
744 if (ret < 0) {
745 ERR("Error handling message for %s socket", sock_info->name);
746 }
747 continue;
748 case -1:
749 DBG("Receive failed from lttng-sessiond with errno %d", errno);
750 if (errno == ECONNRESET) {
751 ERR("%s remote end closed connection\n", sock_info->name);
752 goto end;
753 }
754 goto end;
755 default:
756 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
757 continue;
758 }
759
760 }
761 end:
762 goto restart; /* try to reconnect */
763 quit:
764 return NULL;
765 }
766
767 /*
768  * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
769 */
770 static
771 int get_timeout(struct timespec *constructor_timeout)
772 {
773 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
774 char *str_delay;
775 int ret;
776
777 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
778 if (str_delay) {
779 constructor_delay_ms = strtol(str_delay, NULL, 10);
780 }
781
782 switch (constructor_delay_ms) {
783 case -1:/* fall-through */
784 case 0:
785 return constructor_delay_ms;
786 default:
787 break;
788 }
789
790 /*
791 * If we are unable to find the current time, don't wait.
792 */
793 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
794 if (ret) {
795 return 0;	/* don't wait */
796 }
797 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
798 constructor_timeout->tv_nsec +=
799 (constructor_delay_ms % 1000UL) * 1000000UL;
800 if (constructor_timeout->tv_nsec >= 1000000000UL) {
801 constructor_timeout->tv_sec++;
802 constructor_timeout->tv_nsec -= 1000000000UL;
803 }
804 return 1;
805 }
806
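/*
 * Worked example of the conversion above (illustration only):
 * LTTNG_UST_REGISTER_TIMEOUT=1500 with a current time of
 * { tv_sec = 10, tv_nsec = 900000000 } first gives
 * { tv_sec = 11, tv_nsec = 1400000000 }; the carry then normalizes it
 * to { tv_sec = 12, tv_nsec = 400000000 }, which lttng_ust_init()
 * hands to sem_timedwait() as an absolute CLOCK_REALTIME deadline.
 * A value of 0 skips the wait entirely; -1 makes the constructor block
 * in sem_wait() until a sessiond posts register_done.
 */
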
807 /*
808 * sessiond monitoring thread: monitor presence of global and per-user
809 * sessiond by polling the application common named pipe.
810 */
811 /* TODO */
812
813 void __attribute__((constructor)) lttng_ust_init(void)
814 {
815 struct timespec constructor_timeout;
816 int timeout_mode;
817 int ret;
818
819 if (uatomic_xchg(&initialized, 1) == 1)
820 return;
821
822 /*
823 * We want precise control over the order in which we construct
824 * our sub-libraries vs starting to receive commands from
825 * sessiond (otherwise leading to errors when trying to create
826 	 * sessions before the init functions are completed).
827 */
828 init_usterr();
829 init_tracepoint();
830 ltt_ring_buffer_metadata_client_init();
831 ltt_ring_buffer_client_overwrite_init();
832 ltt_ring_buffer_client_discard_init();
833
834 timeout_mode = get_timeout(&constructor_timeout);
835
836 ret = sem_init(&constructor_wait, 0, 0);
837 assert(!ret);
838
839 ret = setup_local_apps();
840 if (ret) {
841 ERR("Error setting up to local apps");
842 }
843 ret = pthread_create(&global_apps.ust_listener, NULL,
844 ust_listener_thread, &global_apps);
845
846 if (local_apps.allowed) {
847 ret = pthread_create(&local_apps.ust_listener, NULL,
848 ust_listener_thread, &local_apps);
849 } else {
850 handle_register_done(&local_apps);
851 }
852
853 switch (timeout_mode) {
854 case 1: /* timeout wait */
855 do {
856 ret = sem_timedwait(&constructor_wait,
857 &constructor_timeout);
858 } while (ret < 0 && errno == EINTR);
859 if (ret < 0 && errno == ETIMEDOUT) {
860 ERR("Timed out waiting for ltt-sessiond");
861 } else {
862 assert(!ret);
863 }
864 break;
865 case -1:/* wait forever */
866 do {
867 ret = sem_wait(&constructor_wait);
868 } while (ret < 0 && errno == EINTR);
869 assert(!ret);
870 break;
871 case 0: /* no timeout */
872 break;
873 }
874 }
875
876 static
877 void lttng_ust_cleanup(int exiting)
878 {
879 cleanup_sock_info(&global_apps);
880 if (local_apps.allowed) {
881 cleanup_sock_info(&local_apps);
882 }
883 lttng_ust_abi_exit();
884 lttng_ust_events_exit();
885 ltt_ring_buffer_client_discard_exit();
886 ltt_ring_buffer_client_overwrite_exit();
887 ltt_ring_buffer_metadata_client_exit();
888 exit_tracepoint();
889 if (!exiting) {
890 /* Reinitialize values for fork */
891 sem_count = 2;
892 lttng_ust_comm_should_quit = 0;
893 initialized = 0;
894 }
895 }
896
897 void __attribute__((destructor)) lttng_ust_exit(void)
898 {
899 int ret;
900
901 /*
902 * Using pthread_cancel here because:
903 * A) we don't want to hang application teardown.
904 * B) the thread is not allocating any resource.
905 */
906
907 /*
908 * Require the communication thread to quit. Synchronize with
909 * mutexes to ensure it is not in a mutex critical section when
910 * pthread_cancel is later called.
911 */
912 ust_lock();
913 lttng_ust_comm_should_quit = 1;
914 ust_unlock();
915
916 ret = pthread_cancel(global_apps.ust_listener);
917 if (ret) {
918 ERR("Error cancelling global ust listener thread");
919 }
920 if (local_apps.allowed) {
921 ret = pthread_cancel(local_apps.ust_listener);
922 if (ret) {
923 ERR("Error cancelling local ust listener thread");
924 }
925 }
926 lttng_ust_cleanup(1);
927 }
928
929 /*
930 * We exclude the worker threads across fork and clone (except
931 * CLONE_VM), because these system calls only keep the forking thread
932 * running in the child. Therefore, we don't want to call fork or clone
933  * in the middle of a tracepoint or ust tracing state modification.
934 * Holding this mutex protects these structures across fork and clone.
935 */
936 void ust_before_fork(sigset_t *save_sigset)
937 {
938 /*
939 	 * Disable signals. This is to avoid the child intervening
940 	 * before it is properly set up for tracing. It is safer to
941 * disable all signals, because then we know we are not breaking
942 * anything by restoring the original mask.
943 */
944 sigset_t all_sigs;
945 int ret;
946
947 /* Disable signals */
948 sigfillset(&all_sigs);
949 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
950 if (ret == -1) {
951 PERROR("sigprocmask");
952 }
953 ust_lock();
954 rcu_bp_before_fork();
955 }
956
957 static void ust_after_fork_common(sigset_t *restore_sigset)
958 {
959 int ret;
960
961 DBG("process %d", getpid());
962 ust_unlock();
963 /* Restore signals */
964 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
965 if (ret == -1) {
966 PERROR("sigprocmask");
967 }
968 }
969
970 void ust_after_fork_parent(sigset_t *restore_sigset)
971 {
972 DBG("process %d", getpid());
973 rcu_bp_after_fork_parent();
974 /* Release mutexes and reenable signals */
975 ust_after_fork_common(restore_sigset);
976 }
977
978 /*
979  * After fork, in the child, we need to clean up all the leftover state,
980  * except the worker thread which already magically disappeared thanks
981  * to the weird Linux fork semantics. After tidying up, we call
982  * lttng_ust_init() again to start over as a new PID.
983  *
984  * This is meant for fork() calls that trace in the child between the
985  * fork and the following exec call (if there is any).
986 */
987 void ust_after_fork_child(sigset_t *restore_sigset)
988 {
989 DBG("process %d", getpid());
990 /* Release urcu mutexes */
991 rcu_bp_after_fork_child();
992 lttng_ust_cleanup(0);
993 lttng_context_vtid_reset();
994 /* Release mutexes and reenable signals */
995 ust_after_fork_common(restore_sigset);
996 lttng_ust_init();
997 }