Cygwin: Pass file paths instead of file descriptors over UNIX sockets
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <ust-comm.h>
45 #include <usterr-signal-safe.h>
46 #include "tracepoint-internal.h"
47 #include "ltt-tracer-core.h"
48 #include "compat.h"
49 #include "../libringbuffer/tlsfixup.h"
50
/*
 * Has the lttng-ust comm constructor been called? Set via uatomic_xchg
 * in lttng_ust_init(), reset by lttng_ust_cleanup() after fork.
 */
static int initialized;

/*
 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
 * Held when handling a command, also held by fork() to deal with
 * removal of threads, and by exit path.
 */

/* Should the ust comm listener threads quit? Set under ust_lock at exit. */
static int lttng_ust_comm_should_quit;

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from sessiond daemon
 *   (will let the sessiond daemon enable sessions before main
 *   starts.)
 * - sessiond daemon is not reachable.
 * - timeout (ensuring applications are resilient to session
 *   daemon problems).
 */
static sem_t constructor_wait;
/*
 * Number of handle_register_done() events still expected before the
 * constructor semaphore is posted: one for the global and one for the
 * local (per-user) sessiond.
 */
static int sem_count = { 2 };

/*
 * Counting nesting within lttng-ust. Used to ensure that calling fork()
 * from liblttng-ust does not execute the pre/post fork handlers.
 */
static int __thread lttng_ust_nest_count;
86
/*
 * Info about one sessiond socket and its associated listener thread.
 * Two instances exist: global (system-wide sessiond) and local
 * (per-user sessiond).
 */
struct sock_info {
	const char *name;		/* "global" or "local", for log messages */
	pthread_t ust_listener;		/* listener thread */
	int root_handle;		/* root object descriptor, -1 when not created */
	int constructor_sem_posted;	/* non-zero once this side decremented sem_count */
	int allowed;			/* may this tracing domain be used? */
	int global;			/* 1: system-wide sessiond, 0: per-user */

	char sock_path[PATH_MAX];	/* UNIX socket path to reach the sessiond */
	int socket;			/* connected socket fd, -1 when closed */

	char wait_shm_path[PATH_MAX];	/* shm containing the sessiond-availability futex word */
	char *wait_shm_mmap;		/* read-only mapping of that shm, NULL until mapped */
};
104
/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.global = 1,

	.root_handle = -1,
	.allowed = 1,		/* global tracing always permitted */

	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
	.socket = -1,

	.wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
};

/* TODO: allow global_apps_sock_path override */

/*
 * Per-user sessiond connection. Paths are filled in by
 * setup_local_apps() from $HOME and the uid.
 */
struct sock_info local_apps = {
	.name = "local",
	.global = 0,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */

	.socket = -1,
};
129
/*
 * Set to 1 when FUTEX_WAIT on the read-only wait shm fails with EFAULT
 * (broken kernel support); wait_for_sessiond() then degrades to polling.
 */
static int wait_poll_fallback;

/* Ring buffer client init/exit hooks, defined in sibling objects. */
extern void ltt_ring_buffer_client_overwrite_init(void);
extern void ltt_ring_buffer_client_discard_init(void);
extern void ltt_ring_buffer_metadata_client_init(void);
extern void ltt_ring_buffer_client_overwrite_exit(void);
extern void ltt_ring_buffer_client_discard_exit(void);
extern void ltt_ring_buffer_metadata_client_exit(void);
138
/*
 * Force a read (imply TLS fixup for dlopen) of TLS variables, so the
 * dynamic linker resolves them now rather than inside a later
 * lock-held code path.
 */
static
void lttng_fixup_nest_count_tls(void)
{
	/* The "m" constraint forces a memory reference to the TLS variable. */
	asm volatile ("" : : "m" (lttng_ust_nest_count));
}
147
148 static
149 int setup_local_apps(void)
150 {
151 const char *home_dir;
152 uid_t uid;
153
154 uid = getuid();
155 /*
156 * Disallow per-user tracing for setuid binaries.
157 */
158 if (uid != geteuid()) {
159 assert(local_apps.allowed == 0);
160 return 0;
161 }
162 home_dir = (const char *) getenv("HOME");
163 if (!home_dir) {
164 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
165 assert(local_apps.allowed == 0);
166 return -ENOENT;
167 }
168 local_apps.allowed = 1;
169 snprintf(local_apps.sock_path, PATH_MAX,
170 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
171 snprintf(local_apps.wait_shm_path, PATH_MAX,
172 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
173 return 0;
174 }
175
176 static
177 int register_app_to_sessiond(int socket)
178 {
179 ssize_t ret;
180 struct {
181 uint32_t major;
182 uint32_t minor;
183 pid_t pid;
184 pid_t ppid;
185 uid_t uid;
186 gid_t gid;
187 uint32_t bits_per_long;
188 char name[16]; /* process name */
189 } reg_msg;
190
191 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
192 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
193 reg_msg.pid = getpid();
194 reg_msg.ppid = getppid();
195 reg_msg.uid = getuid();
196 reg_msg.gid = getgid();
197 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
198 lttng_ust_getprocname(reg_msg.name);
199
200 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
201 if (ret >= 0 && ret != sizeof(reg_msg))
202 return -EIO;
203 return ret;
204 }
205
206 static
207 int send_reply(int sock, struct ustcomm_ust_reply *lur)
208 {
209 ssize_t len;
210
211 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
212 switch (len) {
213 case sizeof(*lur):
214 DBG("message successfully sent");
215 return 0;
216 case -1:
217 if (errno == ECONNRESET) {
218 printf("remote end closed connection\n");
219 return 0;
220 }
221 return -1;
222 default:
223 printf("incorrect message size: %zd\n", len);
224 return -1;
225 }
226 }
227
228 static
229 int handle_register_done(struct sock_info *sock_info)
230 {
231 int ret;
232
233 if (sock_info->constructor_sem_posted)
234 return 0;
235 sock_info->constructor_sem_posted = 1;
236 if (uatomic_read(&sem_count) <= 0) {
237 return 0;
238 }
239 ret = uatomic_add_return(&sem_count, -1);
240 if (ret == 0) {
241 ret = sem_post(&constructor_wait);
242 assert(!ret);
243 }
244 return 0;
245 }
246
/*
 * Handle one command @lum received from the sessiond on @sock: dispatch
 * it to the matching object operations, send the reply, and for
 * stream/channel/metadata commands additionally send the shm and wait
 * pipe paths as out-of-band strings. The whole function runs under
 * ust_lock() to serialize against fork and exit.
 *
 * Returns 0 on success, a negative errno-style code on failure.
 */
static
int handle_message(struct sock_info *sock_info,
		int sock, struct ustcomm_ust_msg *lum)
{
	int ret = 0;
	const struct lttng_ust_objd_ops *ops;
	struct ustcomm_ust_reply lur;
	/* Only assigned for stream/channel/metadata commands that succeed. */
	int shm_fd, wait_fd;
	char *shm_path, *wait_pipe_path;
	union ust_args args;

	ust_lock();

	memset(&lur, 0, sizeof(lur));

	/* Refuse any work while the process is tearing down. */
	if (lttng_ust_comm_should_quit) {
		ret = -EPERM;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		/* Only meaningful on the root handle. */
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		/* The root handle lives for the whole process: never release it. */
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = lttng_ust_objd_unref(lum->handle);
		break;
	default:
		/* Generic command: dispatch through the object's cmd op. */
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args);
		else
			ret = -ENOSYS;
		break;
	}

end:
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = USTCOMM_OK;
	} else {
		//lur.ret_code = USTCOMM_SESSION_FAIL;
		lur.ret_code = ret;
	}
	if (ret >= 0) {
		/* Fill command-specific reply payload from the op's output. */
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			/*
			 * Special-case reply to send stream info.
			 * Use lum.u output.
			 */
			lur.u.stream.memory_map_size = *args.stream.memory_map_size;
			shm_fd = *args.stream.shm_fd;
			shm_path = args.stream.shm_path;
			wait_fd = *args.stream.wait_fd;
			wait_pipe_path = args.stream.wait_pipe_path;
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			lur.u.channel.memory_map_size = *args.channel.memory_map_size;
			shm_fd = *args.channel.shm_fd;
			shm_path = args.channel.shm_path;
			wait_fd = *args.channel.wait_fd;
			wait_pipe_path = args.channel.wait_pipe_path;
			break;
		case LTTNG_UST_TRACER_VERSION:
			lur.u.version = lum->u.version;
			break;
		case LTTNG_UST_TRACEPOINT_LIST_GET:
			memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
			break;
		}
	}
	ret = send_reply(sock, &lur);
	if (ret < 0) {
		perror("error sending reply");
		goto error;
	}

	/*
	 * For stream/channel/metadata success, also send the shm path and
	 * the wait pipe path as separate messages (paths are passed instead
	 * of file descriptors over the socket).
	 */
	if ((lum->cmd == LTTNG_UST_STREAM
			|| lum->cmd == LTTNG_UST_CHANNEL
			|| lum->cmd == LTTNG_UST_METADATA)
			&& lur.ret_code == USTCOMM_OK) {
		int sendret = 0;

		/* send the shm path */
		ret = ustcomm_send_string(sock, shm_path, strlen(shm_path));
		if (ret < 0) {
			/* Remember the error, but still send the second path. */
			perror("send shm_path");
			sendret = ret;
		}
		/*
		 * The sessiond expects 2 file paths, even upon
		 * error.
		 */
		ret = ustcomm_send_string(sock, wait_pipe_path, strlen(wait_pipe_path));
		if (ret < 0) {
			perror("send wait_pipe_path");
			goto error;
		}
		if (sendret) {
			ret = sendret;
			goto error;
		}
	}
	/*
	 * We still have the memory map reference, and the paths have been
	 * sent to the sessiond. We can therefore close those fds. Note
	 * that we keep the write side of the wait_fd open, but close
	 * the read side.
	 */
	if (lur.ret_code == USTCOMM_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing stream shm_fd");
				}
				/* Mark as closed for the owner of the fd. */
				*args.stream.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing stream wait_fd");
				}
				*args.stream.wait_fd = -1;
			}
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing channel shm_fd");
				}
				*args.channel.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing channel wait_fd");
				}
				*args.channel.wait_fd = -1;
			}
			break;
		}
	}

error:
	ust_unlock();
	return ret;
}
415
416 static
417 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
418 {
419 int ret;
420
421 if (sock_info->socket != -1) {
422 ret = ustcomm_close_unix_sock(sock_info->socket);
423 if (ret) {
424 ERR("Error closing apps socket");
425 }
426 sock_info->socket = -1;
427 }
428 if (sock_info->root_handle != -1) {
429 ret = lttng_ust_objd_unref(sock_info->root_handle);
430 if (ret) {
431 ERR("Error unref root handle");
432 }
433 sock_info->root_handle = -1;
434 }
435 sock_info->constructor_sem_posted = 0;
436 /*
437 * wait_shm_mmap is used by listener threads outside of the
438 * ust lock, so we cannot tear it down ourselves, because we
439 * cannot join on these threads. Leave this task to the OS
440 * process exit.
441 */
442 if (!exiting && sock_info->wait_shm_mmap) {
443 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
444 if (ret) {
445 ERR("Error unmapping wait shm");
446 }
447 sock_info->wait_shm_mmap = NULL;
448 }
449 }
450
/*
 * Open (or create) the wait shm for @sock_info and return its fd, or a
 * negative value on failure.
 *
 * Using fork to set umask in the child process (not multi-thread safe).
 * We deal with the shm_open vs ftruncate race (happening when the
 * sessiond owns the shm and does not let everybody modify it, to ensure
 * safety against shm_unlink) by simply letting the mmap fail and
 * retrying after a few seconds.
 * For global shm, everybody has rw access to it until the sessiond
 * starts.
 */
static
int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
{
	int wait_shm_fd, ret;
	pid_t pid;

	/*
	 * Try to open read-only.
	 */
	wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
	if (wait_shm_fd >= 0) {
		goto end;
	} else if (wait_shm_fd < 0 && errno != ENOENT) {
		/*
		 * Read-only open did not work, and it's not because the
		 * entry was not present. It's a failure that prohibits
		 * using shm.
		 */
		ERR("Error opening shm %s", sock_info->wait_shm_path);
		goto end;
	}
	/*
	 * If the open failed because the file did not exist, try
	 * creating it ourself.
	 */
	/* Bump nest count so fork handlers are skipped for this fork. */
	lttng_ust_nest_count++;
	pid = fork();
	lttng_ust_nest_count--;
	if (pid > 0) {
		int status;

		/*
		 * Parent: wait for child to return, in which case the
		 * shared memory map will have been created.
		 */
		pid = wait(&status);
		if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			wait_shm_fd = -1;
			goto end;
		}
		/*
		 * Try to open read-only again after creation.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
		if (wait_shm_fd < 0) {
			/*
			 * Read-only open did not work. It's a failure
			 * that prohibits using shm.
			 */
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			goto end;
		}
		goto end;
	} else if (pid == 0) {
		int create_mode;

		/* Child */
		create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
		if (sock_info->global)
			create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
		/*
		 * We're alone in a child process, so we can modify the
		 * process-wide umask.
		 */
		umask(~create_mode);
		/*
		 * Try creating shm (or get rw access).
		 * We don't do an exclusive open, because we allow other
		 * processes to create+ftruncate it concurrently.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path,
				O_RDWR | O_CREAT, create_mode);
		if (wait_shm_fd >= 0) {
			ret = ftruncate(wait_shm_fd, mmap_size);
			if (ret) {
				PERROR("ftruncate");
				_exit(EXIT_FAILURE);
			}
			_exit(EXIT_SUCCESS);
		}
		/*
		 * For local shm, we need to have rw access to accept
		 * opening it: this means the local sessiond will be
		 * able to wake us up. For global shm, we open it even
		 * if rw access is not granted, because the root.root
		 * sessiond will be able to override all rights and wake
		 * us up.
		 */
		if (!sock_info->global && errno != EACCES) {
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			_exit(EXIT_FAILURE);
		}
		/*
		 * The shm exists, but we cannot open it RW. Report
		 * success.
		 */
		_exit(EXIT_SUCCESS);
	} else {
		/* fork() failed. */
		return -1;
	}
end:
	if (wait_shm_fd >= 0 && !sock_info->global) {
		struct stat statbuf;

		/*
		 * Ensure that our user is the owner of the shm file for
		 * local shm. If we do not own the file, it means our
		 * sessiond will not have access to wake us up (there is
		 * probably a rogue process trying to fake our
		 * sessiond). Fallback to polling method in this case.
		 */
		ret = fstat(wait_shm_fd, &statbuf);
		if (ret) {
			PERROR("fstat");
			goto error_close;
		}
		if (statbuf.st_uid != getuid())
			goto error_close;
	}
	return wait_shm_fd;

error_close:
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	return -1;
}
588
/*
 * Map one page of the wait shm for @sock_info read-only and return the
 * mapping, or NULL on failure (including a sessiond race, in which case
 * the caller falls back to poll mode).
 */
static
char *get_map_shm(struct sock_info *sock_info)
{
	long page_size;
	int wait_shm_fd, ret;
	char *wait_shm_mmap;

	/*
	 * sysconf() can fail and return -1; do not let that become a
	 * huge unsigned mmap size.
	 */
	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size <= 0) {
		PERROR("Error getting page size");
		goto error;
	}
	wait_shm_fd = get_wait_shm(sock_info, (size_t) page_size);
	if (wait_shm_fd < 0) {
		goto error;
	}
	wait_shm_mmap = mmap(NULL, (size_t) page_size, PROT_READ,
			MAP_SHARED, wait_shm_fd, 0);
	/* close shm fd immediately after taking the mmap reference */
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	if (wait_shm_mmap == MAP_FAILED) {
		DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
		goto error;
	}
	return wait_shm_mmap;

error:
	return NULL;
}
616
/*
 * Block until the sessiond for @sock_info signals availability through
 * the wait shm futex word, or return immediately when quitting, when
 * futex wait is unusable (poll fallback), or when the shm cannot be
 * mapped. Takes and releases the ust lock around the shared-state
 * accesses; the futex wait itself runs unlocked.
 */
static
void wait_for_sessiond(struct sock_info *sock_info)
{
	int ret;

	ust_lock();
	if (lttng_ust_comm_should_quit) {
		goto quit;
	}
	/* FUTEX_WAIT previously failed with EFAULT: caller will poll. */
	if (wait_poll_fallback) {
		goto error;
	}
	/* Lazily map the wait shm on first use. */
	if (!sock_info->wait_shm_mmap) {
		sock_info->wait_shm_mmap = get_map_shm(sock_info);
		if (!sock_info->wait_shm_mmap)
			goto error;
	}
	ust_unlock();

	DBG("Waiting for %s apps sessiond", sock_info->name);
	/* Wait for futex wakeup; 0 means "no sessiond available yet". */
	if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
		ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
			FUTEX_WAIT, 0, NULL, NULL, 0);
		if (ret < 0) {
			if (errno == EFAULT) {
				/* Kernel cannot futex-wait on this RO mapping. */
				wait_poll_fallback = 1;
				DBG(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
				if (ust_debug())
					PERROR("futex");
			}
		}
	}
	return;

quit:
	ust_unlock();
	return;

error:
	ust_unlock();
	return;
}
665
/*
 * Listener thread: connect to the sessiond described by @arg (a struct
 * sock_info), register the application, then loop receiving and
 * handling commands. On any connection failure, post the constructor
 * semaphore (so the application does not stall), wait for the sessiond
 * to come back, and retry.
 *
 * This thread does not allocate any resource, except within
 * handle_message, within mutex protection. This mutex protects against
 * fork and exit.
 * The other moment it allocates resources is at socket connexion, which
 * is also protected by the mutex.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret, prev_connect_failed = 0, has_waited = 0;

	/* Restart trying to connect to the session daemon */
restart:
	if (prev_connect_failed) {
		/* Wait for sessiond availability with pipe */
		wait_for_sessiond(sock_info);
		if (has_waited) {
			has_waited = 0;
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure. This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		}
		has_waited = 1;
		prev_connect_failed = 0;
	}
	ust_lock();

	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}

	/* Close any stale socket from a previous iteration. */
	if (sock_info->socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}

	/* Register */
	ret = ustcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}

	sock_info->socket = sock = ret;

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret < 0) {
			ERR("Error creating root handle");
			ust_unlock();
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_app_to_sessiond(sock);
	if (ret < 0) {
		ERR("Error registering to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	ust_unlock();

	/* Command-processing loop: one ustcomm_ust_msg per iteration. */
	for (;;) {
		ssize_t len;
		struct ustcomm_ust_msg lum;

		len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
			ust_lock();
			/*
			 * Either sessiond has shutdown or refused us by closing the socket.
			 * In either case, we don't want to delay construction execution,
			 * and we need to wait before retry.
			 */
			prev_connect_failed = 1;
			/*
			 * If we cannot register to the sessiond daemon, don't
			 * delay constructor execution.
			 */
			ret = handle_register_done(sock_info);
			assert(!ret);
			ust_unlock();
			goto end;
		case sizeof(lum):
			DBG("message received\n");
			ret = handle_message(sock_info, sock, &lum);
			if (ret < 0) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		case -1:
			DBG("Receive failed from lttng-sessiond with errno %d", errno);
			if (errno == ECONNRESET) {
				ERR("%s remote end closed connection\n", sock_info->name);
				goto end;
			}
			goto end;
		default:
			ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
			continue;
		}

	}
end:
	goto restart;	/* try to reconnect */
quit:
	return NULL;
}
805
806 /*
807 * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
808 */
809 static
810 int get_timeout(struct timespec *constructor_timeout)
811 {
812 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
813 char *str_delay;
814 int ret;
815
816 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
817 if (str_delay) {
818 constructor_delay_ms = strtol(str_delay, NULL, 10);
819 }
820
821 switch (constructor_delay_ms) {
822 case -1:/* fall-through */
823 case 0:
824 return constructor_delay_ms;
825 default:
826 break;
827 }
828
829 /*
830 * If we are unable to find the current time, don't wait.
831 */
832 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
833 if (ret) {
834 return -1;
835 }
836 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
837 constructor_timeout->tv_nsec +=
838 (constructor_delay_ms % 1000UL) * 1000000UL;
839 if (constructor_timeout->tv_nsec >= 1000000000UL) {
840 constructor_timeout->tv_sec++;
841 constructor_timeout->tv_nsec -= 1000000000UL;
842 }
843 return 1;
844 }
845
846 /*
847 * sessiond monitoring thread: monitor presence of global and per-user
848 * sessiond by polling the application common named pipe.
849 */
850 /* TODO */
851
/*
 * Library constructor: initialize the tracer sub-libraries, spawn the
 * global and local listener threads (with all signals blocked so they
 * never receive application signals), then wait -- forever, with a
 * timeout, or not at all depending on get_timeout() -- for the sessiond
 * registration to complete before letting main() run. Idempotent via
 * the `initialized` flag.
 */
void __attribute__((constructor)) lttng_ust_init(void)
{
	struct timespec constructor_timeout;
	sigset_t sig_all_blocked, orig_parent_mask;
	pthread_attr_t thread_attr;
	int timeout_mode;
	int ret;

	/* Run at most once (also re-armed after fork by lttng_ust_cleanup). */
	if (uatomic_xchg(&initialized, 1) == 1)
		return;

	/*
	 * Fixup interdependency between TLS fixup mutex (which happens
	 * to be the dynamic linker mutex) and ust_lock, taken within
	 * the ust lock.
	 */
	lttng_fixup_event_tls();
	lttng_fixup_ringbuffer_tls();
	lttng_fixup_vtid_tls();
	lttng_fixup_nest_count_tls();

	/*
	 * We want precise control over the order in which we construct
	 * our sub-libraries vs starting to receive commands from
	 * sessiond (otherwise leading to errors when trying to create
	 * sessiond before the init functions are completed).
	 */
	init_usterr();
	init_tracepoint();
	ltt_ring_buffer_metadata_client_init();
	ltt_ring_buffer_client_overwrite_init();
	ltt_ring_buffer_client_discard_init();

	timeout_mode = get_timeout(&constructor_timeout);

	ret = sem_init(&constructor_wait, 0, 0);
	assert(!ret);

	ret = setup_local_apps();
	if (ret) {
		DBG("local apps setup returned %d", ret);
	}

	/* A new thread created by pthread_create inherits the signal mask
	 * from the parent. To avoid any signal being received by the
	 * listener thread, we block all signals temporarily in the parent,
	 * while we create the listener thread.
	 */
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}

	/* Listener threads are detached: they are never joined (see exit path). */
	ret = pthread_attr_init(&thread_attr);
	if (ret) {
		ERR("pthread_attr_init: %s", strerror(ret));
	}
	ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
	if (ret) {
		ERR("pthread_attr_setdetachstate: %s", strerror(ret));
	}

	ret = pthread_create(&global_apps.ust_listener, &thread_attr,
			ust_listener_thread, &global_apps);
	if (ret) {
		ERR("pthread_create global: %s", strerror(ret));
	}
	if (local_apps.allowed) {
		ret = pthread_create(&local_apps.ust_listener, &thread_attr,
				ust_listener_thread, &local_apps);
		if (ret) {
			ERR("pthread_create local: %s", strerror(ret));
		}
	} else {
		/* No local sessiond: account its share of sem_count now. */
		handle_register_done(&local_apps);
	}
	ret = pthread_attr_destroy(&thread_attr);
	if (ret) {
		ERR("pthread_attr_destroy: %s", strerror(ret));
	}

	/* Restore original signal mask in parent */
	ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}

	switch (timeout_mode) {
	case 1:	/* timeout wait */
		do {
			ret = sem_timedwait(&constructor_wait,
					&constructor_timeout);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0 && errno == ETIMEDOUT) {
			ERR("Timed out waiting for ltt-sessiond");
		} else {
			assert(!ret);
		}
		break;
	case -1:/* wait forever */
		do {
			ret = sem_wait(&constructor_wait);
		} while (ret < 0 && errno == EINTR);
		assert(!ret);
		break;
	case 0:	/* no timeout */
		break;
	}
}
962
/*
 * Tear down all tracer state. Called with @exiting=1 from the process
 * destructor, and with @exiting=0 from the child after fork (where the
 * global state is also re-armed so lttng_ust_init() can run again).
 */
static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps, exiting);
	if (local_apps.allowed) {
		cleanup_sock_info(&local_apps, exiting);
	}
	/*
	 * The teardown in this function all affect data structures
	 * accessed under the UST lock by the listener thread. This
	 * lock, along with the lttng_ust_comm_should_quit flag, ensure
	 * that none of these threads are accessing this data at this
	 * point.
	 */
	lttng_ust_abi_exit();
	lttng_ust_events_exit();
	/* Exit order mirrors the constructor's init order, reversed. */
	ltt_ring_buffer_client_discard_exit();
	ltt_ring_buffer_client_overwrite_exit();
	ltt_ring_buffer_metadata_client_exit();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}
990
/*
 * Library destructor: ask the listener threads to quit, cancel them,
 * and tear down tracer state. Threads are deliberately not joined (see
 * comment below); they are detached and left to the OS.
 */
void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the thread is not allocating any resource.
	 */

	/*
	 * Require the communication thread to quit. Synchronize with
	 * mutexes to ensure it is not in a mutex critical section when
	 * pthread_cancel is later called.
	 */
	ust_lock();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	/* cancel threads */
	ret = pthread_cancel(global_apps.ust_listener);
	if (ret) {
		ERR("Error cancelling global ust listener thread: %s",
			strerror(ret));
	}
	if (local_apps.allowed) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread: %s",
				strerror(ret));
		}
	}
	/*
	 * Do NOT join threads: use of sys_futex makes it impossible to
	 * join the threads without using async-cancel, but async-cancel
	 * is delivered by a signal, which could hit the target thread
	 * anywhere in its code path, including while the ust_lock() is
	 * held, causing a deadlock for the other thread. Let the OS
	 * cleanup the threads if there are stalled in a syscall.
	 */
	lttng_ust_cleanup(1);
}
1033
/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of an tracepoint or ust tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(sigset_t *save_sigset)
{
	/*
	 * Disable signals. This is to avoid that the child intervenes
	 * before it is properly setup for tracing. It is safer to
	 * disable all signals, because then we know we are not breaking
	 * anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int ret;

	/* fork() from inside lttng-ust itself: skip the handlers entirely. */
	if (lttng_ust_nest_count)
		return;
	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
	/* Hold the ust lock across fork so the child's state is consistent. */
	ust_lock();
	rcu_bp_before_fork();
}
1063
1064 static void ust_after_fork_common(sigset_t *restore_sigset)
1065 {
1066 int ret;
1067
1068 DBG("process %d", getpid());
1069 ust_unlock();
1070 /* Restore signals */
1071 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1072 if (ret == -1) {
1073 PERROR("sigprocmask");
1074 }
1075 }
1076
/*
 * Post-fork handler for the parent process: notify urcu-bp, then
 * release the ust lock and restore signals via the common tail.
 * No-op when the fork originated inside lttng-ust itself.
 */
void ust_after_fork_parent(sigset_t *restore_sigset)
{
	if (lttng_ust_nest_count)
		return;
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
}
1086
/*
 * After fork, in the child, we need to cleanup all the leftover state,
 * except the worker thread which already magically disappeared thanks
 * to the weird Linux fork semantics. After tyding up, we call
 * lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for forks() that have tracing in the child between the
 * fork and following exec call (if there is any).
 */
void ust_after_fork_child(sigset_t *restore_sigset)
{
	if (lttng_ust_nest_count)
		return;
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	/* exiting=0: re-arm global state so lttng_ust_init() can run again. */
	lttng_ust_cleanup(0);
	lttng_context_vtid_reset();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
	/* Restart listener threads and re-register under the new PID. */
	lttng_ust_init();
}
This page took 0.051039 seconds and 5 git commands to generate.