Implement event fields listing
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <ust-comm.h>
45 #include <usterr-signal-safe.h>
46 #include "tracepoint-internal.h"
47 #include "ltt-tracer-core.h"
48 #include "compat.h"
49 #include "../libringbuffer/tlsfixup.h"
50
51 /*
52 * Has the lttng-ust comm constructor been called?
53 */
54 static int initialized;
55
56 /*
57 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
58 * Held when handling a command, also held by fork() to deal with
59 * removal of threads, and by exit path.
60 */
61
62 /* Should the ust comm thread quit? */
63 static int lttng_ust_comm_should_quit;
64
65 /*
66 * Wait for any of these before continuing to the main
67 * program:
68 * - the register_done message from the sessiond daemon
69 * (this lets the sessiond daemon enable sessions before main
70 * starts.)
71 * - sessiond daemon is not reachable.
72 * - timeout (ensuring applications are resilient to session
73 * daemon problems).
74 */
75 static sem_t constructor_wait;
76 /*
77 * One count each for the global and the local (per-user) sessiond.
78 */
79 static int sem_count = { 2 };
80
81 /*
82 * Info about socket and associated listener thread.
83 */
84 struct sock_info {
85 const char *name;
86 pthread_t ust_listener; /* listener thread */
87 int root_handle;
88 int constructor_sem_posted;
89 int allowed;
90 int global;
91
92 char sock_path[PATH_MAX];
93 int socket;
94
95 char wait_shm_path[PATH_MAX];
96 char *wait_shm_mmap;
97 };
98
99 /* Socket from app (connect) to session daemon (listen) for communication */
100 struct sock_info global_apps = {
101 .name = "global",
102 .global = 1,
103
104 .root_handle = -1,
105 .allowed = 1,
106
107 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
108 .socket = -1,
109
110 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
111 };
112
113 /* TODO: allow global_apps_sock_path override */
114
115 struct sock_info local_apps = {
116 .name = "local",
117 .global = 0,
118 .root_handle = -1,
119 .allowed = 0, /* Check setuid bit first */
120
121 .socket = -1,
122 };
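/*
 * Note: sock_path and wait_shm_path for the per-user (local) sessiond are
 * filled in later by setup_local_apps(), from $HOME and the uid, and only
 * when the process is not setuid.
 */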
123
124 static int wait_poll_fallback;
125
126 extern void ltt_ring_buffer_client_overwrite_init(void);
127 extern void ltt_ring_buffer_client_discard_init(void);
128 extern void ltt_ring_buffer_metadata_client_init(void);
129 extern void ltt_ring_buffer_client_overwrite_exit(void);
130 extern void ltt_ring_buffer_client_discard_exit(void);
131 extern void ltt_ring_buffer_metadata_client_exit(void);
132
133 static
134 int setup_local_apps(void)
135 {
136 const char *home_dir;
137 uid_t uid;
138
139 uid = getuid();
140 /*
141 * Disallow per-user tracing for setuid binaries.
142 */
143 if (uid != geteuid()) {
144 local_apps.allowed = 0;
145 return 0;
146 } else {
147 local_apps.allowed = 1;
148 }
149 home_dir = (const char *) getenv("HOME");
150 if (!home_dir)
151 return -ENOENT;
152 snprintf(local_apps.sock_path, PATH_MAX,
153 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
154 snprintf(local_apps.wait_shm_path, PATH_MAX,
155 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
156 return 0;
157 }
158
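/*
 * Note: reg_msg below is sent raw over the UNIX socket, so its layout and
 * the LTTNG_UST_COMM_VERSION_MAJOR/MINOR values have to match what the
 * session daemon expects for this protocol version.
 */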
159 static
160 int register_app_to_sessiond(int socket)
161 {
162 ssize_t ret;
163 struct {
164 uint32_t major;
165 uint32_t minor;
166 pid_t pid;
167 pid_t ppid;
168 uid_t uid;
169 gid_t gid;
170 uint32_t bits_per_long;
171 char name[16]; /* process name */
172 } reg_msg;
173
174 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
175 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
176 reg_msg.pid = getpid();
177 reg_msg.ppid = getppid();
178 reg_msg.uid = getuid();
179 reg_msg.gid = getgid();
180 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
181 lttng_ust_getprocname(reg_msg.name);
182
183 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
184 if (ret >= 0 && ret != sizeof(reg_msg))
185 return -EIO;
186 return ret;
187 }
188
189 static
190 int send_reply(int sock, struct ustcomm_ust_reply *lur)
191 {
192 ssize_t len;
193
194 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
195 switch (len) {
196 case sizeof(*lur):
197 DBG("message successfully sent");
198 return 0;
199 case -1:
200 if (errno == ECONNRESET) {
201 printf("remote end closed connection\n");
202 return 0;
203 }
204 return -1;
205 default:
206 printf("incorrect message size: %zd\n", len);
207 return -1;
208 }
209 }
210
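/*
 * Note: handle_register_done() runs at most once per sock_info (guarded by
 * constructor_sem_posted). Once both the global and the local sides have
 * reported in, sem_count reaches zero and constructor_wait is posted,
 * which releases lttng_ust_init().
 */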
211 static
212 int handle_register_done(struct sock_info *sock_info)
213 {
214 int ret;
215
216 if (sock_info->constructor_sem_posted)
217 return 0;
218 sock_info->constructor_sem_posted = 1;
219 if (uatomic_read(&sem_count) <= 0) {
220 return 0;
221 }
222 ret = uatomic_add_return(&sem_count, -1);
223 if (ret == 0) {
224 ret = sem_post(&constructor_wait);
225 assert(!ret);
226 }
227 return 0;
228 }
229
230 static
231 int handle_message(struct sock_info *sock_info,
232 int sock, struct ustcomm_ust_msg *lum)
233 {
234 int ret = 0;
235 const struct lttng_ust_objd_ops *ops;
236 struct ustcomm_ust_reply lur;
237 int shm_fd, wait_fd;
238 union ust_args args;
239 ssize_t len;
240
241 ust_lock();
242
243 memset(&lur, 0, sizeof(lur));
244
245 if (lttng_ust_comm_should_quit) {
246 ret = -EPERM;
247 goto end;
248 }
249
250 ops = objd_ops(lum->handle);
251 if (!ops) {
252 ret = -ENOENT;
253 goto end;
254 }
255
256 switch (lum->cmd) {
257 case LTTNG_UST_REGISTER_DONE:
258 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
259 ret = handle_register_done(sock_info);
260 else
261 ret = -EINVAL;
262 break;
263 case LTTNG_UST_RELEASE:
264 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
265 ret = -EPERM;
266 else
267 ret = lttng_ust_objd_unref(lum->handle);
268 break;
269 default:
270 if (ops->cmd)
271 ret = ops->cmd(lum->handle, lum->cmd,
272 (unsigned long) &lum->u,
273 &args);
274 else
275 ret = -ENOSYS;
276 break;
277 }
278
279 end:
280 lur.handle = lum->handle;
281 lur.cmd = lum->cmd;
282 lur.ret_val = ret;
283 if (ret >= 0) {
284 lur.ret_code = USTCOMM_OK;
285 } else {
286 //lur.ret_code = USTCOMM_SESSION_FAIL;
287 lur.ret_code = ret;
288 }
289 if (ret >= 0) {
290 switch (lum->cmd) {
291 case LTTNG_UST_STREAM:
292 /*
293 * Special-case reply to send stream info.
294 * Use lum.u output.
295 */
296 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
297 shm_fd = *args.stream.shm_fd;
298 wait_fd = *args.stream.wait_fd;
299 break;
300 case LTTNG_UST_METADATA:
301 case LTTNG_UST_CHANNEL:
302 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
303 shm_fd = *args.channel.shm_fd;
304 wait_fd = *args.channel.wait_fd;
305 break;
306 case LTTNG_UST_TRACER_VERSION:
307 lur.u.version = lum->u.version;
308 break;
309 case LTTNG_UST_TRACEPOINT_LIST_GET:
310 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
311 break;
312 }
313 }
314 ret = send_reply(sock, &lur);
315 if (ret < 0) {
316 perror("error sending reply");
317 goto error;
318 }
319
320 if ((lum->cmd == LTTNG_UST_STREAM
321 || lum->cmd == LTTNG_UST_CHANNEL
322 || lum->cmd == LTTNG_UST_METADATA)
323 && lur.ret_code == USTCOMM_OK) {
324 int sendret = 0;
325
326 /* we also need to send the file descriptors. */
327 ret = ustcomm_send_fds_unix_sock(sock,
328 &shm_fd, &shm_fd,
329 1, sizeof(int));
330 if (ret < 0) {
331 perror("send shm_fd");
332 sendret = ret;
333 }
334 /*
335 * The sessiond expects 2 file descriptors, even upon
336 * error.
337 */
338 ret = ustcomm_send_fds_unix_sock(sock,
339 &wait_fd, &wait_fd,
340 1, sizeof(int));
341 if (ret < 0) {
342 perror("send wait_fd");
343 goto error;
344 }
345 if (sendret) {
346 ret = sendret;
347 goto error;
348 }
349 }
350 /*
351 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
352 * list entry after the reply.
353 */
354 if (lur.ret_code == USTCOMM_OK) {
355 switch (lum->cmd) {
356 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
357 len = ustcomm_send_unix_sock(sock,
358 &args.field_list.entry,
359 sizeof(args.field_list.entry));
360 if (len != sizeof(args.field_list.entry)) {
361 ret = -1;
362 goto error;
363 }
364 }
365 }
366 /*
367 * We still have the memory map reference, and the fds have been
368 * sent to the sessiond. We can therefore close those fds. Note
369 * that we keep the write side of the wait_fd open, but close
370 * the read side.
371 */
372 if (lur.ret_code == USTCOMM_OK) {
373 switch (lum->cmd) {
374 case LTTNG_UST_STREAM:
375 if (shm_fd >= 0) {
376 ret = close(shm_fd);
377 if (ret) {
378 PERROR("Error closing stream shm_fd");
379 }
380 *args.stream.shm_fd = -1;
381 }
382 if (wait_fd >= 0) {
383 ret = close(wait_fd);
384 if (ret) {
385 PERROR("Error closing stream wait_fd");
386 }
387 *args.stream.wait_fd = -1;
388 }
389 break;
390 case LTTNG_UST_METADATA:
391 case LTTNG_UST_CHANNEL:
392 if (shm_fd >= 0) {
393 ret = close(shm_fd);
394 if (ret) {
395 PERROR("Error closing channel shm_fd");
396 }
397 *args.channel.shm_fd = -1;
398 }
399 if (wait_fd >= 0) {
400 ret = close(wait_fd);
401 if (ret) {
402 PERROR("Error closing channel wait_fd");
403 }
404 *args.channel.wait_fd = -1;
405 }
406 break;
407 }
408 }
409
410 error:
411 ust_unlock();
412 return ret;
413 }
414
415 static
416 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
417 {
418 int ret;
419
420 if (sock_info->socket != -1) {
421 ret = ustcomm_close_unix_sock(sock_info->socket);
422 if (ret) {
423 ERR("Error closing apps socket");
424 }
425 sock_info->socket = -1;
426 }
427 if (sock_info->root_handle != -1) {
428 ret = lttng_ust_objd_unref(sock_info->root_handle);
429 if (ret) {
430 ERR("Error unref root handle");
431 }
432 sock_info->root_handle = -1;
433 }
434 sock_info->constructor_sem_posted = 0;
435 /*
436 * wait_shm_mmap is used by listener threads outside of the
437 * ust lock, so we cannot tear it down ourselves, because we
438 * cannot join on these threads. Leave this task to the OS
439 * process exit.
440 */
441 if (!exiting && sock_info->wait_shm_mmap) {
442 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
443 if (ret) {
444 ERR("Error unmapping wait shm");
445 }
446 sock_info->wait_shm_mmap = NULL;
447 }
448 }
449
450 /*
451 * Using fork to set umask in the child process (not multi-thread safe).
452 * We deal with the shm_open vs ftruncate race (happening when the
453 * sessiond owns the shm and does not let everybody modify it, to ensure
454 * safety against shm_unlink) by simply letting the mmap fail and
455 * retrying after a few seconds.
456 * For global shm, everybody has rw access to it until the sessiond
457 * starts.
458 */
459 static
460 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
461 {
462 int wait_shm_fd, ret;
463 pid_t pid;
464
465 /*
466 * Try to open read-only.
467 */
468 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
469 if (wait_shm_fd >= 0) {
470 goto end;
471 } else if (wait_shm_fd < 0 && errno != ENOENT) {
472 /*
473 * Read-only open did not work, and it's not because the
474 * entry was not present. It's a failure that prohibits
475 * using shm.
476 */
477 ERR("Error opening shm %s", sock_info->wait_shm_path);
478 goto end;
479 }
480 /*
481 * If the open failed because the file did not exist, try
482 * creating it ourselves.
483 */
484 pid = fork();
485 if (pid > 0) {
486 int status;
487
488 /*
489 * Parent: wait for child to return, in which case the
490 * shared memory map will have been created.
491 */
492 pid = wait(&status);
493 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
494 wait_shm_fd = -1;
495 goto end;
496 }
497 /*
498 * Try to open read-only again after creation.
499 */
500 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
501 if (wait_shm_fd < 0) {
502 /*
503 * Read-only open did not work. It's a failure
504 * that prohibits using shm.
505 */
506 ERR("Error opening shm %s", sock_info->wait_shm_path);
507 goto end;
508 }
509 goto end;
510 } else if (pid == 0) {
511 int create_mode;
512
513 /* Child */
514 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
515 if (sock_info->global)
516 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
517 /*
518 * We're alone in a child process, so we can modify the
519 * process-wide umask.
520 */
521 umask(~create_mode);
522 /*
523 * Try creating shm (or get rw access).
524 * We don't do an exclusive open, because we allow other
525 * processes to create+ftruncate it concurrently.
526 */
527 wait_shm_fd = shm_open(sock_info->wait_shm_path,
528 O_RDWR | O_CREAT, create_mode);
529 if (wait_shm_fd >= 0) {
530 ret = ftruncate(wait_shm_fd, mmap_size);
531 if (ret) {
532 PERROR("ftruncate");
533 exit(EXIT_FAILURE);
534 }
535 exit(EXIT_SUCCESS);
536 }
537 /*
538 * For local shm, we need to have rw access to accept
539 * opening it: this means the local sessiond will be
540 * able to wake us up. For global shm, we open it even
541 * if rw access is not granted, because the root.root
542 * sessiond will be able to override all rights and wake
543 * us up.
544 */
545 if (!sock_info->global && errno != EACCES) {
546 ERR("Error opening shm %s", sock_info->wait_shm_path);
547 exit(EXIT_FAILURE);
548 }
549 /*
550 * The shm exists, but we cannot open it RW. Report
551 * success.
552 */
553 exit(EXIT_SUCCESS);
554 } else {
555 return -1;
556 }
557 end:
558 if (wait_shm_fd >= 0 && !sock_info->global) {
559 struct stat statbuf;
560
561 /*
562 * Ensure that our user is the owner of the shm file for
563 * local shm. If we do not own the file, it means our
564 * sessiond will not have access to wake us up (there is
565 * probably a rogue process trying to fake our
566 * sessiond). Fallback to polling method in this case.
567 */
568 ret = fstat(wait_shm_fd, &statbuf);
569 if (ret) {
570 PERROR("fstat");
571 goto error_close;
572 }
573 if (statbuf.st_uid != getuid())
574 goto error_close;
575 }
576 return wait_shm_fd;
577
578 error_close:
579 ret = close(wait_shm_fd);
580 if (ret) {
581 PERROR("Error closing fd");
582 }
583 return -1;
584 }
585
586 static
587 char *get_map_shm(struct sock_info *sock_info)
588 {
589 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
590 int wait_shm_fd, ret;
591 char *wait_shm_mmap;
592
593 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
594 if (wait_shm_fd < 0) {
595 goto error;
596 }
597 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
598 MAP_SHARED, wait_shm_fd, 0);
599 /* close shm fd immediately after taking the mmap reference */
600 ret = close(wait_shm_fd);
601 if (ret) {
602 PERROR("Error closing fd");
603 }
604 if (wait_shm_mmap == MAP_FAILED) {
605 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
606 goto error;
607 }
608 return wait_shm_mmap;
609
610 error:
611 return NULL;
612 }
613
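/*
 * Note: the wait shm is a single page whose first 32-bit word is used as a
 * futex. The application waits (FUTEX_WAIT) while that word is 0; the
 * session daemon is expected to set it non-zero and issue a FUTEX_WAKE once
 * it is ready to accept registrations.
 */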
614 static
615 void wait_for_sessiond(struct sock_info *sock_info)
616 {
617 int ret;
618
619 ust_lock();
620 if (lttng_ust_comm_should_quit) {
621 goto quit;
622 }
623 if (wait_poll_fallback) {
624 goto error;
625 }
626 if (!sock_info->wait_shm_mmap) {
627 sock_info->wait_shm_mmap = get_map_shm(sock_info);
628 if (!sock_info->wait_shm_mmap)
629 goto error;
630 }
631 ust_unlock();
632
633 DBG("Waiting for %s apps sessiond", sock_info->name);
634 /* Wait for futex wakeup */
635 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
636 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
637 FUTEX_WAIT, 0, NULL, NULL, 0);
638 if (ret < 0) {
639 if (errno == EFAULT) {
640 wait_poll_fallback = 1;
641 DBG(
642 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
643 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
644 "Please upgrade your kernel "
645 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
646 "mainline). LTTng-UST will use polling mode fallback.");
647 if (ust_debug())
648 PERROR("futex");
649 }
650 }
651 }
652 return;
653
654 quit:
655 ust_unlock();
656 return;
657
658 error:
659 ust_unlock();
660 return;
661 }
662
663 /*
664 * This thread does not allocate any resources, except within
665 * handle_message(), under mutex protection. This mutex protects
666 * against fork and exit.
667 * The other time it allocates resources is at socket connection,
668 * which is also protected by the mutex.
669 */
670 static
671 void *ust_listener_thread(void *arg)
672 {
673 struct sock_info *sock_info = arg;
674 int sock, ret, prev_connect_failed = 0, has_waited = 0;
675
676 /* Restart trying to connect to the session daemon */
677 restart:
678 if (prev_connect_failed) {
679 /* Wait for sessiond availability (futex wait on the wait shm) */
680 wait_for_sessiond(sock_info);
681 if (has_waited) {
682 has_waited = 0;
683 /*
684 * Sleep for 5 seconds before retrying after a
685 * sequence of failure / wait / failure. This
686 * deals with a killed or broken session daemon.
687 */
688 sleep(5);
689 }
690 has_waited = 1;
691 prev_connect_failed = 0;
692 }
693 ust_lock();
694
695 if (lttng_ust_comm_should_quit) {
696 ust_unlock();
697 goto quit;
698 }
699
700 if (sock_info->socket != -1) {
701 ret = ustcomm_close_unix_sock(sock_info->socket);
702 if (ret) {
703 ERR("Error closing %s apps socket", sock_info->name);
704 }
705 sock_info->socket = -1;
706 }
707
708 /* Register */
709 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
710 if (ret < 0) {
711 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
712 prev_connect_failed = 1;
713 /*
714 * If we cannot find the sessiond daemon, don't delay
715 * constructor execution.
716 */
717 ret = handle_register_done(sock_info);
718 assert(!ret);
719 ust_unlock();
720 goto restart;
721 }
722
723 sock_info->socket = sock = ret;
724
725 /*
726 * Create only one root handle per listener thread for the whole
727 * process lifetime.
728 */
729 if (sock_info->root_handle == -1) {
730 ret = lttng_abi_create_root_handle();
731 if (ret < 0) {
732 ERR("Error creating root handle");
733 ust_unlock();
734 goto quit;
735 }
736 sock_info->root_handle = ret;
737 }
738
739 ret = register_app_to_sessiond(sock);
740 if (ret < 0) {
741 ERR("Error registering to %s apps socket", sock_info->name);
742 prev_connect_failed = 1;
743 /*
744 * If we cannot register to the sessiond daemon, don't
745 * delay constructor execution.
746 */
747 ret = handle_register_done(sock_info);
748 assert(!ret);
749 ust_unlock();
750 goto restart;
751 }
752 ust_unlock();
753
754 for (;;) {
755 ssize_t len;
756 struct ustcomm_ust_msg lum;
757
758 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
759 switch (len) {
760 case 0: /* orderly shutdown */
761 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
762 ust_lock();
763 /*
764 * Either sessiond has shut down or refused us by closing the socket.
765 * In either case, we don't want to delay constructor execution,
766 * and we need to wait before retrying.
767 */
768 prev_connect_failed = 1;
769 /*
770 * If we cannot register to the sessiond daemon, don't
771 * delay constructor execution.
772 */
773 ret = handle_register_done(sock_info);
774 assert(!ret);
775 ust_unlock();
776 goto end;
777 case sizeof(lum):
778 DBG("message received\n");
779 ret = handle_message(sock_info, sock, &lum);
780 if (ret < 0) {
781 ERR("Error handling message for %s socket", sock_info->name);
782 }
783 continue;
784 case -1:
785 DBG("Receive failed from lttng-sessiond with errno %d", errno);
786 if (errno == ECONNRESET) {
787 ERR("%s remote end closed connection\n", sock_info->name);
788 goto end;
789 }
790 goto end;
791 default:
792 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
793 continue;
794 }
795
796 }
797 end:
798 goto restart; /* try to reconnect */
799 quit:
800 return NULL;
801 }
802
803 /*
804 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
805 */
806 static
807 int get_timeout(struct timespec *constructor_timeout)
808 {
809 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
810 char *str_delay;
811 int ret;
812
813 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
814 if (str_delay) {
815 constructor_delay_ms = strtol(str_delay, NULL, 10);
816 }
817
818 switch (constructor_delay_ms) {
819 case -1:/* fall-through */
820 case 0:
821 return constructor_delay_ms;
822 default:
823 break;
824 }
825
826 /*
827 * If we are unable to find the current time, don't wait.
828 */
829 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
830 if (ret) {
831 return -1;
832 }
833 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
834 constructor_timeout->tv_nsec +=
835 (constructor_delay_ms % 1000UL) * 1000000UL;
836 if (constructor_timeout->tv_nsec >= 1000000000UL) {
837 constructor_timeout->tv_sec++;
838 constructor_timeout->tv_nsec -= 1000000000UL;
839 }
840 return 1;
841 }
842
843 /*
844 * sessiond monitoring thread: monitor presence of global and per-user
845 * sessiond by polling the application common named pipe.
846 */
847 /* TODO */
848
849 void __attribute__((constructor)) lttng_ust_init(void)
850 {
851 struct timespec constructor_timeout;
852 sigset_t sig_all_blocked, orig_parent_mask;
853 int timeout_mode;
854 int ret;
855
856 if (uatomic_xchg(&initialized, 1) == 1)
857 return;
858
859 /*
860 * Fixup interdependency between TLS fixup mutex (which happens
861 * to be the dynamic linker mutex) and ust_lock, taken within
862 * the ust lock.
863 */
864 lttng_fixup_event_tls();
865 lttng_fixup_ringbuffer_tls();
866 lttng_fixup_vtid_tls();
867
868 /*
869 * We want precise control over the order in which we construct
870 * our sub-libraries vs starting to receive commands from
871 * sessiond (otherwise leading to errors when sessiond tries to
872 * create sessions before the init functions are completed).
873 */
874 init_usterr();
875 init_tracepoint();
876 ltt_ring_buffer_metadata_client_init();
877 ltt_ring_buffer_client_overwrite_init();
878 ltt_ring_buffer_client_discard_init();
879
880 timeout_mode = get_timeout(&constructor_timeout);
881
882 ret = sem_init(&constructor_wait, 0, 0);
883 assert(!ret);
884
885 ret = setup_local_apps();
886 if (ret) {
887 ERR("Error setting up to local apps");
888 }
889
890 /* A new thread created by pthread_create inherits the signal mask
891 * from the parent. To avoid any signal being received by the
892 * listener thread, we block all signals temporarily in the parent,
893 * while we create the listener thread.
894 */
895 sigfillset(&sig_all_blocked);
896 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
897 if (ret) {
898 ERR("pthread_sigmask: %s", strerror(ret));
899 }
900
901 ret = pthread_create(&global_apps.ust_listener, NULL,
902 ust_listener_thread, &global_apps);
903
904 if (local_apps.allowed) {
905 ret = pthread_create(&local_apps.ust_listener, NULL,
906 ust_listener_thread, &local_apps);
907 } else {
908 handle_register_done(&local_apps);
909 }
910
911 /* Restore original signal mask in parent */
912 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
913 if (ret) {
914 ERR("pthread_sigmask: %s", strerror(ret));
915 }
916
917 switch (timeout_mode) {
918 case 1: /* timeout wait */
919 do {
920 ret = sem_timedwait(&constructor_wait,
921 &constructor_timeout);
922 } while (ret < 0 && errno == EINTR);
923 if (ret < 0 && errno == ETIMEDOUT) {
924 ERR("Timed out waiting for ltt-sessiond");
925 } else {
926 assert(!ret);
927 }
928 break;
929 case -1:/* wait forever */
930 do {
931 ret = sem_wait(&constructor_wait);
932 } while (ret < 0 && errno == EINTR);
933 assert(!ret);
934 break;
935 case 0: /* no timeout */
936 break;
937 }
938 }
939
940 static
941 void lttng_ust_cleanup(int exiting)
942 {
943 cleanup_sock_info(&global_apps, exiting);
944 if (local_apps.allowed) {
945 cleanup_sock_info(&local_apps, exiting);
946 }
947 /*
948 * The teardown steps in this function all affect data structures
949 * accessed under the UST lock by the listener threads. This
950 * lock, along with the lttng_ust_comm_should_quit flag, ensures
951 * that none of these threads are accessing this data at this
952 * point.
953 */
954 lttng_ust_abi_exit();
955 lttng_ust_events_exit();
956 ltt_ring_buffer_client_discard_exit();
957 ltt_ring_buffer_client_overwrite_exit();
958 ltt_ring_buffer_metadata_client_exit();
959 exit_tracepoint();
960 if (!exiting) {
961 /* Reinitialize values for fork */
962 sem_count = 2;
963 lttng_ust_comm_should_quit = 0;
964 initialized = 0;
965 }
966 }
967
968 void __attribute__((destructor)) lttng_ust_exit(void)
969 {
970 int ret;
971
972 /*
973 * Using pthread_cancel here because:
974 * A) we don't want to hang application teardown.
975 * B) the thread is not allocating any resource.
976 */
977
978 /*
979 * Require the communication thread to quit. Synchronize with
980 * mutexes to ensure it is not in a mutex critical section when
981 * pthread_cancel is later called.
982 */
983 ust_lock();
984 lttng_ust_comm_should_quit = 1;
985 ust_unlock();
986
987 /* cancel threads */
988 ret = pthread_cancel(global_apps.ust_listener);
989 if (ret) {
990 ERR("Error cancelling global ust listener thread");
991 }
992 if (local_apps.allowed) {
993 ret = pthread_cancel(local_apps.ust_listener);
994 if (ret) {
995 ERR("Error cancelling local ust listener thread");
996 }
997 }
998 /*
999 * Do NOT join threads: use of sys_futex makes it impossible to
1000 * join the threads without using async-cancel, but async-cancel
1001 * is delivered by a signal, which could hit the target thread
1002 * anywhere in its code path, including while the ust_lock() is
1003 * held, causing a deadlock for the other thread. Let the OS
1004 * cleanup the threads if there are stalled in a syscall.
1005 */
1006 lttng_ust_cleanup(1);
1007 }
1008
1009 /*
1010 * We exclude the worker threads across fork and clone (except
1011 * CLONE_VM), because these system calls only keep the forking thread
1012 * running in the child. Therefore, we don't want to call fork or clone
1013 * in the middle of a tracepoint or ust tracing state modification.
1014 * Holding this mutex protects these structures across fork and clone.
1015 */
1016 void ust_before_fork(sigset_t *save_sigset)
1017 {
1018 /*
1019 * Disable signals. This is to avoid that the child intervenes
1020 * before it is properly setup for tracing. It is safer to
1021 * disable all signals, because then we know we are not breaking
1022 * anything by restoring the original mask.
1023 */
1024 sigset_t all_sigs;
1025 int ret;
1026
1027 /* Disable signals */
1028 sigfillset(&all_sigs);
1029 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1030 if (ret == -1) {
1031 PERROR("sigprocmask");
1032 }
1033 ust_lock();
1034 rcu_bp_before_fork();
1035 }
1036
1037 static void ust_after_fork_common(sigset_t *restore_sigset)
1038 {
1039 int ret;
1040
1041 DBG("process %d", getpid());
1042 ust_unlock();
1043 /* Restore signals */
1044 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1045 if (ret == -1) {
1046 PERROR("sigprocmask");
1047 }
1048 }
1049
1050 void ust_after_fork_parent(sigset_t *restore_sigset)
1051 {
1052 DBG("process %d", getpid());
1053 rcu_bp_after_fork_parent();
1054 /* Release mutexes and reenable signals */
1055 ust_after_fork_common(restore_sigset);
1056 }
1057
1058 /*
1059 * After fork, in the child, we need to clean up all the leftover state,
1060 * except the worker thread, which already magically disappeared thanks
1061 * to the weird Linux fork semantics. After tidying up, we call
1062 * lttng_ust_init() again to start over as a new PID.
1063 *
1064 * This is meant for fork() calls that have tracing in the child
1065 * between the fork and the following exec call (if there is any).
1066 */
1067 void ust_after_fork_child(sigset_t *restore_sigset)
1068 {
1069 DBG("process %d", getpid());
1070 /* Release urcu mutexes */
1071 rcu_bp_after_fork_child();
1072 lttng_ust_cleanup(0);
1073 lttng_context_vtid_reset();
1074 /* Release mutexes and reenable signals */
1075 ust_after_fork_common(restore_sigset);
1076 lttng_ust_init();
1077 }