Fix: baddr_statedump tracepoint registration
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <dlfcn.h>
38 #include <urcu/uatomic.h>
39 #include <urcu/futex.h>
40 #include <urcu/compiler.h>
41
42 #include <lttng/ust-events.h>
43 #include <lttng/ust-abi.h>
44 #include <lttng/ust.h>
45 #include <lttng/ust-error.h>
46 #include <lttng/ust-ctl.h>
47 #include <urcu/tls-compat.h>
48 #include <ust-comm.h>
49 #include <usterr-signal-safe.h>
50 #include <helper.h>
51 #include "tracepoint-internal.h"
52 #include "lttng-tracer-core.h"
53 #include "compat.h"
54 #include "../libringbuffer/tlsfixup.h"
55 #include "lttng-ust-baddr.h"
56
57 /*
58 * Has the lttng-ust comm constructor been called?
59 */
60 static int initialized;
61
62 /*
63 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
64 * Held when handling a command, also held by fork() to deal with
65 * removal of threads, and by exit path.
66 *
67 * The UST lock is the centralized mutex across UST tracing control and
68 * probe registration.
69 *
70 * ust_exit_mutex must never nest in ust_mutex.
71 */
72 static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
73
74 /*
75 * ust_exit_mutex protects thread_active variable wrt thread exit. It
76 * cannot be done by ust_mutex because pthread_cancel(), which takes an
77 * internal libc lock, cannot nest within ust_mutex.
78 *
79 * It never nests within a ust_mutex.
80 */
81 static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
82
83 /* Should the ust comm thread quit? */
84 static int lttng_ust_comm_should_quit;
85
86 /*
87 * Return 0 on success, -1 if we should quit.
88 * The lock is taken in both cases.
89 */
90 int ust_lock(void)
91 {
92 pthread_mutex_lock(&ust_mutex);
93 if (lttng_ust_comm_should_quit) {
94 return -1;
95 } else {
96 return 0;
97 }
98 }
99
100 /*
101 * ust_lock_nocheck() can be used in constructors/destructors, because
102 * they are already nested within the dynamic loader lock, and therefore
103 * have exclusive access against execution of liblttng-ust destructor.
104 */
105 void ust_lock_nocheck(void)
106 {
107 pthread_mutex_lock(&ust_mutex);
108 }
109
110 void ust_unlock(void)
111 {
112 pthread_mutex_unlock(&ust_mutex);
113 }
114
115 /*
116 * Wait for either of these before continuing to the main
117 * program:
118 * - the register_done message from sessiond daemon
119 * (will let the sessiond daemon enable sessions before main
120 * starts.)
121 * - sessiond daemon is not reachable.
122 * - timeout (ensuring applications are resilient to session
123 * daemon problems).
124 */
125 static sem_t constructor_wait;
126 /*
127 * Doing this for both the global and local sessiond.
128 */
129 static int sem_count = { 2 };
130
131 /*
132 * Counting nesting within lttng-ust. Used to ensure that calling fork()
133 * from liblttng-ust does not execute the pre/post fork handlers.
134 */
135 static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
136
137 /*
138 * Info about socket and associated listener thread.
139 */
140 struct sock_info {
141 const char *name;
142 pthread_t ust_listener; /* listener thread */
143 int root_handle;
144 int constructor_sem_posted;
145 int allowed;
146 int global;
147 int thread_active;
148
149 char sock_path[PATH_MAX];
150 int socket;
151 int notify_socket;
152
153 char wait_shm_path[PATH_MAX];
154 char *wait_shm_mmap;
155 /* Keep track of lazy state dump not performed yet. */
156 int statedump_pending;
157 };
158
159 /* Socket from app (connect) to session daemon (listen) for communication */
160 struct sock_info global_apps = {
161 .name = "global",
162 .global = 1,
163
164 .root_handle = -1,
165 .allowed = 1,
166 .thread_active = 0,
167
168 .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
169 .socket = -1,
170 .notify_socket = -1,
171
172 .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
173
174 .statedump_pending = 0,
175 };
176
177 /* TODO: allow global_apps_sock_path override */
178
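/*
 * Socket from app (connect) to the per-user (local) session daemon
 * (listen). sock_path and wait_shm_path are filled in by
 * setup_local_apps().
 */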
179 struct sock_info local_apps = {
180 .name = "local",
181 .global = 0,
182 .root_handle = -1,
183 .allowed = 0, /* Check setuid bit first */
184 .thread_active = 0,
185
186 .socket = -1,
187 .notify_socket = -1,
188
189 .statedump_pending = 0,
190 };
191
192 static int wait_poll_fallback;
193
194 static const char *cmd_name_mapping[] = {
195 [ LTTNG_UST_RELEASE ] = "Release",
196 [ LTTNG_UST_SESSION ] = "Create Session",
197 [ LTTNG_UST_TRACER_VERSION ] = "Get Tracer Version",
198
199 [ LTTNG_UST_TRACEPOINT_LIST ] = "Create Tracepoint List",
200 [ LTTNG_UST_WAIT_QUIESCENT ] = "Wait for Quiescent State",
201 [ LTTNG_UST_REGISTER_DONE ] = "Registration Done",
202 [ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
203
204 /* Session FD commands */
205 [ LTTNG_UST_CHANNEL ] = "Create Channel",
206 [ LTTNG_UST_SESSION_START ] = "Start Session",
207 [ LTTNG_UST_SESSION_STOP ] = "Stop Session",
208
209 /* Channel FD commands */
210 [ LTTNG_UST_STREAM ] = "Create Stream",
211 [ LTTNG_UST_EVENT ] = "Create Event",
212
213 /* Event and Channel FD commands */
214 [ LTTNG_UST_CONTEXT ] = "Create Context",
215 [ LTTNG_UST_FLUSH_BUFFER ] = "Flush Buffer",
216
217 /* Event, Channel and Session commands */
218 [ LTTNG_UST_ENABLE ] = "Enable",
219 [ LTTNG_UST_DISABLE ] = "Disable",
220
221 /* Tracepoint list commands */
222 [ LTTNG_UST_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
223 [ LTTNG_UST_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
224
225 /* Event FD commands */
226 [ LTTNG_UST_FILTER ] = "Create Filter",
227 [ LTTNG_UST_EXCLUSION ] = "Add exclusions to event",
228 };
229
230 static const char *str_timeout;
231 static int got_timeout_env;
232
233 extern void lttng_ring_buffer_client_overwrite_init(void);
234 extern void lttng_ring_buffer_client_overwrite_rt_init(void);
235 extern void lttng_ring_buffer_client_discard_init(void);
236 extern void lttng_ring_buffer_client_discard_rt_init(void);
237 extern void lttng_ring_buffer_metadata_client_init(void);
238 extern void lttng_ring_buffer_client_overwrite_exit(void);
239 extern void lttng_ring_buffer_client_overwrite_rt_exit(void);
240 extern void lttng_ring_buffer_client_discard_exit(void);
241 extern void lttng_ring_buffer_client_discard_rt_exit(void);
242 extern void lttng_ring_buffer_metadata_client_exit(void);
243
244 /*
245 * Returns the HOME directory path. Caller MUST NOT free(3) the returned
246 * pointer.
247 */
248 static
249 const char *get_lttng_home_dir(void)
250 {
251 const char *val;
252
253 val = (const char *) getenv("LTTNG_HOME");
254 if (val != NULL) {
255 return val;
256 }
257 return (const char *) getenv("HOME");
258 }
259
260 /*
261 * Force a read (implying a TLS fixup for dlopen) of TLS variables.
262 */
263 static
264 void lttng_fixup_nest_count_tls(void)
265 {
266 asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
267 }
268
269 int lttng_get_notify_socket(void *owner)
270 {
271 struct sock_info *info = owner;
272
273 return info->notify_socket;
274 }
275
276 static
277 void print_cmd(int cmd, int handle)
278 {
279 const char *cmd_name = "Unknown";
280
281 if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
282 && cmd_name_mapping[cmd]) {
283 cmd_name = cmd_name_mapping[cmd];
284 }
285 DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
286 cmd_name, cmd,
287 lttng_ust_obj_get_name(handle), handle);
288 }
289
290 static
291 int setup_local_apps(void)
292 {
293 const char *home_dir;
294 uid_t uid;
295
296 uid = getuid();
297 /*
298 * Disallow per-user tracing for setuid binaries.
299 */
300 if (uid != geteuid()) {
301 assert(local_apps.allowed == 0);
302 return 0;
303 }
304 home_dir = get_lttng_home_dir();
305 if (!home_dir) {
306 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
307 assert(local_apps.allowed == 0);
308 return -ENOENT;
309 }
310 local_apps.allowed = 1;
311 snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
312 home_dir,
313 LTTNG_DEFAULT_HOME_RUNDIR,
314 LTTNG_UST_SOCK_FILENAME);
315 snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
316 LTTNG_UST_WAIT_FILENAME,
317 uid);
318 return 0;
319 }
320
321 /*
322 * Get notify_sock timeout, in ms.
323 * -1: don't wait. 0: wait forever. >0: timeout, in ms.
324 */
325 static
326 long get_timeout(void)
327 {
328 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
329
330 if (!got_timeout_env) {
331 str_timeout = getenv("LTTNG_UST_REGISTER_TIMEOUT");
332 got_timeout_env = 1;
333 }
334 if (str_timeout)
335 constructor_delay_ms = strtol(str_timeout, NULL, 10);
336 return constructor_delay_ms;
337 }
338
339 static
340 long get_notify_sock_timeout(void)
341 {
342 return get_timeout();
343 }
344
345 /*
346 * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
347 */
348 static
349 int get_constructor_timeout(struct timespec *constructor_timeout)
350 {
351 long constructor_delay_ms;
352 int ret;
353
354 constructor_delay_ms = get_timeout();
355
356 switch (constructor_delay_ms) {
357 case -1:/* fall-through */
358 case 0:
359 return constructor_delay_ms;
360 default:
361 break;
362 }
363
364 /*
365 * If we are unable to find the current time, don't wait.
366 */
367 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
368 if (ret) {
369 return -1;
370 }
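/*
 * Convert the relative delay (in ms) into an absolute
 * CLOCK_REALTIME deadline suitable for sem_timedwait().
 */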
371 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
372 constructor_timeout->tv_nsec +=
373 (constructor_delay_ms % 1000UL) * 1000000UL;
374 if (constructor_timeout->tv_nsec >= 1000000000UL) {
375 constructor_timeout->tv_sec++;
376 constructor_timeout->tv_nsec -= 1000000000UL;
377 }
378 return 1;
379 }
380
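/*
 * Advertise the application's bitness and basic type alignments to
 * the session daemon over the given socket.
 */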
381 static
382 int register_to_sessiond(int socket, enum ustctl_socket_type type)
383 {
384 return ustcomm_send_reg_msg(socket,
385 type,
386 CAA_BITS_PER_LONG,
387 lttng_alignof(uint8_t) * CHAR_BIT,
388 lttng_alignof(uint16_t) * CHAR_BIT,
389 lttng_alignof(uint32_t) * CHAR_BIT,
390 lttng_alignof(uint64_t) * CHAR_BIT,
391 lttng_alignof(unsigned long) * CHAR_BIT);
392 }
393
394 static
395 int send_reply(int sock, struct ustcomm_ust_reply *lur)
396 {
397 ssize_t len;
398
399 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
400 switch (len) {
401 case sizeof(*lur):
402 DBG("message successfully sent");
403 return 0;
404 default:
405 if (len == -ECONNRESET) {
406 DBG("remote end closed connection");
407 return 0;
408 }
409 if (len < 0)
410 return len;
411 DBG("incorrect message size: %zd", len);
412 return -EINVAL;
413 }
414 }
415
416 static
417 int handle_register_done(struct sock_info *sock_info)
418 {
419 int ret;
420
421 if (sock_info->constructor_sem_posted)
422 return 0;
423 sock_info->constructor_sem_posted = 1;
424 if (uatomic_read(&sem_count) <= 0) {
425 return 0;
426 }
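/*
 * Wake up the constructor waiter only once every expected
 * registration (global and local sessiond) has been accounted for:
 * sem_count starts at 2.
 */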
427 ret = uatomic_add_return(&sem_count, -1);
428 if (ret == 0) {
429 ret = sem_post(&constructor_wait);
430 assert(!ret);
431 }
432 return 0;
433 }
434
435 /*
436 * Only execute pending statedump after the constructor semaphore has
437 * been posted by each listener thread. This means statedump will only
438 * be performed after the "registration done" command is received from
439 * each session daemon the application is connected to.
440 *
441 * This ensures we don't run into deadlock issues with the dynamic
442 * loader mutex, which is held while the constructor is called and
443 * waiting on the constructor semaphore. All operations requiring this
444 * dynamic loader lock need to be postponed using this mechanism.
445 */
446 static
447 void handle_pending_statedump(struct sock_info *sock_info)
448 {
449 int ctor_passed = sock_info->constructor_sem_posted;
450
451 if (ctor_passed && sock_info->statedump_pending) {
452 sock_info->statedump_pending = 0;
453 lttng_handle_pending_statedump(sock_info);
454 }
455 }
456
457 static
458 int handle_message(struct sock_info *sock_info,
459 int sock, struct ustcomm_ust_msg *lum)
460 {
461 int ret = 0;
462 const struct lttng_ust_objd_ops *ops;
463 struct ustcomm_ust_reply lur;
464 union ust_args args;
465 ssize_t len;
466
467 memset(&lur, 0, sizeof(lur));
468
469 if (ust_lock()) {
470 ret = -LTTNG_UST_ERR_EXITING;
471 goto end;
472 }
473
474 ops = objd_ops(lum->handle);
475 if (!ops) {
476 ret = -ENOENT;
477 goto end;
478 }
479
480 switch (lum->cmd) {
481 case LTTNG_UST_REGISTER_DONE:
482 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
483 ret = handle_register_done(sock_info);
484 else
485 ret = -EINVAL;
486 break;
487 case LTTNG_UST_RELEASE:
488 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
489 ret = -EPERM;
490 else
491 ret = lttng_ust_objd_unref(lum->handle, 1);
492 break;
493 case LTTNG_UST_FILTER:
494 {
495 /* Receive filter data */
496 struct lttng_ust_filter_bytecode_node *bytecode;
497
498 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
499 ERR("Filter data size is too large: %u bytes",
500 lum->u.filter.data_size);
501 ret = -EINVAL;
502 goto error;
503 }
504
505 if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
506 ERR("Filter reloc offset %u is not within data",
507 lum->u.filter.reloc_offset);
508 ret = -EINVAL;
509 goto error;
510 }
511
512 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
513 if (!bytecode) {
514 ret = -ENOMEM;
515 goto error;
516 }
517 len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
518 lum->u.filter.data_size);
519 switch (len) {
520 case 0: /* orderly shutdown */
521 ret = 0;
522 free(bytecode);
523 goto error;
524 default:
525 if (len == lum->u.filter.data_size) {
526 DBG("filter data received");
527 break;
528 } else if (len < 0) {
529 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
530 if (len == -ECONNRESET) {
531 ERR("%s remote end closed connection", sock_info->name);
532 ret = len;
533 free(bytecode);
534 goto error;
535 }
536 ret = len;
537 free(bytecode);
538 goto end;
539 } else {
540 DBG("incorrect filter data message size: %zd", len);
541 ret = -EINVAL;
542 free(bytecode);
543 goto end;
544 }
545 }
546 bytecode->bc.len = lum->u.filter.data_size;
547 bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
548 bytecode->bc.seqnum = lum->u.filter.seqnum;
549 if (ops->cmd) {
550 ret = ops->cmd(lum->handle, lum->cmd,
551 (unsigned long) bytecode,
552 &args, sock_info);
553 if (ret) {
554 free(bytecode);
555 }
556 /* don't free bytecode if everything went fine. */
557 } else {
558 ret = -ENOSYS;
559 free(bytecode);
560 }
561 break;
562 }
563 case LTTNG_UST_EXCLUSION:
564 {
565 /* Receive exclusion names */
566 struct lttng_ust_excluder_node *node;
567 unsigned int count;
568
569 count = lum->u.exclusion.count;
570 if (count == 0) {
571 /* There are no names to read */
572 ret = 0;
573 goto error;
574 }
575 node = zmalloc(sizeof(*node) +
576 count * LTTNG_UST_SYM_NAME_LEN);
577 if (!node) {
578 ret = -ENOMEM;
579 goto error;
580 }
581 node->excluder.count = count;
582 len = ustcomm_recv_unix_sock(sock, node->excluder.names,
583 count * LTTNG_UST_SYM_NAME_LEN);
584 switch (len) {
585 case 0: /* orderly shutdown */
586 ret = 0;
587 free(node);
588 goto error;
589 default:
590 if (len == count * LTTNG_UST_SYM_NAME_LEN) {
591 DBG("Exclusion data received");
592 break;
593 } else if (len < 0) {
594 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
595 if (len == -ECONNRESET) {
596 ERR("%s remote end closed connection", sock_info->name);
597 ret = len;
598 free(node);
599 goto error;
600 }
601 ret = len;
602 free(node);
603 goto end;
604 } else {
605 DBG("Incorrect exclusion data message size: %zd", len);
606 ret = -EINVAL;
607 free(node);
608 goto end;
609 }
610 }
611 if (ops->cmd) {
612 ret = ops->cmd(lum->handle, lum->cmd,
613 (unsigned long) node,
614 &args, sock_info);
615 if (ret) {
616 free(node);
617 }
618 /* Don't free exclusion data if everything went fine. */
619 } else {
620 ret = -ENOSYS;
621 free(node);
622 }
623 break;
624 }
625 case LTTNG_UST_CHANNEL:
626 {
627 void *chan_data;
628 int wakeup_fd;
629
630 len = ustcomm_recv_channel_from_sessiond(sock,
631 &chan_data, lum->u.channel.len,
632 &wakeup_fd);
633 switch (len) {
634 case 0: /* orderly shutdown */
635 ret = 0;
636 goto error;
637 default:
638 if (len == lum->u.channel.len) {
639 DBG("channel data received");
640 break;
641 } else if (len < 0) {
642 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
643 if (len == -ECONNRESET) {
644 ERR("%s remote end closed connection", sock_info->name);
645 ret = len;
646 goto error;
647 }
648 ret = len;
649 goto end;
650 } else {
651 DBG("incorrect channel data message size: %zd", len);
652 ret = -EINVAL;
653 goto end;
654 }
655 }
656 args.channel.chan_data = chan_data;
657 args.channel.wakeup_fd = wakeup_fd;
658 if (ops->cmd)
659 ret = ops->cmd(lum->handle, lum->cmd,
660 (unsigned long) &lum->u,
661 &args, sock_info);
662 else
663 ret = -ENOSYS;
664 break;
665 }
666 case LTTNG_UST_STREAM:
667 {
668 /* Receive shm_fd, wakeup_fd */
669 ret = ustcomm_recv_stream_from_sessiond(sock,
670 &lum->u.stream.len,
671 &args.stream.shm_fd,
672 &args.stream.wakeup_fd);
673 if (ret) {
674 goto end;
675 }
676 if (ops->cmd)
677 ret = ops->cmd(lum->handle, lum->cmd,
678 (unsigned long) &lum->u,
679 &args, sock_info);
680 else
681 ret = -ENOSYS;
682 break;
683 }
684 default:
685 if (ops->cmd)
686 ret = ops->cmd(lum->handle, lum->cmd,
687 (unsigned long) &lum->u,
688 &args, sock_info);
689 else
690 ret = -ENOSYS;
691 break;
692 }
693
694 end:
695 lur.handle = lum->handle;
696 lur.cmd = lum->cmd;
697 lur.ret_val = ret;
698 if (ret >= 0) {
699 lur.ret_code = LTTNG_UST_OK;
700 } else {
701 /*
702 * Use -LTTNG_UST_ERR as wildcard for UST internal
703 * errors that are not caused by the transport, except if
704 * we already have a more precise error message to
705 * report.
706 */
707 if (ret > -LTTNG_UST_ERR) {
708 /* Translate code to UST error. */
709 switch (ret) {
710 case -EEXIST:
711 lur.ret_code = -LTTNG_UST_ERR_EXIST;
712 break;
713 case -EINVAL:
714 lur.ret_code = -LTTNG_UST_ERR_INVAL;
715 break;
716 case -ENOENT:
717 lur.ret_code = -LTTNG_UST_ERR_NOENT;
718 break;
719 case -EPERM:
720 lur.ret_code = -LTTNG_UST_ERR_PERM;
721 break;
722 case -ENOSYS:
723 lur.ret_code = -LTTNG_UST_ERR_NOSYS;
724 break;
725 default:
726 lur.ret_code = -LTTNG_UST_ERR;
727 break;
728 }
729 } else {
730 lur.ret_code = ret;
731 }
732 }
733 if (ret >= 0) {
734 switch (lum->cmd) {
735 case LTTNG_UST_TRACER_VERSION:
736 lur.u.version = lum->u.version;
737 break;
738 case LTTNG_UST_TRACEPOINT_LIST_GET:
739 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
740 break;
741 }
742 }
743 DBG("Return value: %d", lur.ret_val);
744 ret = send_reply(sock, &lur);
745 if (ret < 0) {
746 DBG("error sending reply");
747 goto error;
748 }
749
750 /*
751 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
752 * after the reply.
753 */
754 if (lur.ret_code == LTTNG_UST_OK) {
755 switch (lum->cmd) {
756 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
757 len = ustcomm_send_unix_sock(sock,
758 &args.field_list.entry,
759 sizeof(args.field_list.entry));
760 if (len < 0) {
761 ret = len;
762 goto error;
763 }
764 if (len != sizeof(args.field_list.entry)) {
765 ret = -EINVAL;
766 goto error;
767 }
768 }
769 }
770
771 error:
772 ust_unlock();
773
774 /*
775 * Perform delayed statedump operations outside of the UST
776 * lock. We need to take the dynamic loader lock before we take
777 * the UST lock internally within handle_pending_statedump().
778 */
779 handle_pending_statedump(sock_info);
780
781 return ret;
782 }
783
784 static
785 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
786 {
787 int ret;
788
789 if (sock_info->root_handle != -1) {
790 ret = lttng_ust_objd_unref(sock_info->root_handle, 1);
791 if (ret) {
792 ERR("Error unref root handle");
793 }
794 sock_info->root_handle = -1;
795 }
796 sock_info->constructor_sem_posted = 0;
797
798 /*
799 * wait_shm_mmap, socket and notify socket are used by listener
800 * threads outside of the ust lock, so we cannot tear them down
801 * ourselves, because we cannot join on these threads. Leave
802 * the responsibility of cleaning up these resources to the OS
803 * at process exit.
804 */
805 if (exiting)
806 return;
807
808 if (sock_info->socket != -1) {
809 ret = ustcomm_close_unix_sock(sock_info->socket);
810 if (ret) {
811 ERR("Error closing ust cmd socket");
812 }
813 sock_info->socket = -1;
814 }
815 if (sock_info->notify_socket != -1) {
816 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
817 if (ret) {
818 ERR("Error closing ust notify socket");
819 }
820 sock_info->notify_socket = -1;
821 }
822 if (sock_info->wait_shm_mmap) {
823 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
824 if (ret) {
825 ERR("Error unmapping wait shm");
826 }
827 sock_info->wait_shm_mmap = NULL;
828 }
829 }
830
831 /*
832 * Using fork to set umask in the child process (not multi-thread safe).
833 * We deal with the shm_open vs ftruncate race (happening when the
834 * sessiond owns the shm and does not let everybody modify it, to ensure
835 * safety against shm_unlink) by simply letting the mmap fail and
836 * retrying after a few seconds.
837 * For global shm, everybody has rw access to it until the sessiond
838 * starts.
839 */
840 static
841 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
842 {
843 int wait_shm_fd, ret;
844 pid_t pid;
845
846 /*
847 * Try to open read-only.
848 */
849 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
850 if (wait_shm_fd >= 0) {
851 int32_t tmp_read;
852 ssize_t len;
853 size_t bytes_read = 0;
854
855 /*
856 * Try to read the fd. If unable to do so, try opening
857 * it in write mode.
858 */
859 do {
860 len = read(wait_shm_fd,
861 &((char *) &tmp_read)[bytes_read],
862 sizeof(tmp_read) - bytes_read);
863 if (len > 0) {
864 bytes_read += len;
865 }
866 } while ((len < 0 && errno == EINTR)
867 || (len > 0 && bytes_read < sizeof(tmp_read)));
868 if (bytes_read != sizeof(tmp_read)) {
869 ret = close(wait_shm_fd);
870 if (ret) {
871 ERR("close wait_shm_fd");
872 }
873 goto open_write;
874 }
875 goto end;
876 } else if (wait_shm_fd < 0 && errno != ENOENT) {
877 /*
878 * Read-only open did not work, and it's not because the
879 * entry was not present. It's a failure that prohibits
880 * using shm.
881 */
882 ERR("Error opening shm %s", sock_info->wait_shm_path);
883 goto end;
884 }
885
886 open_write:
887 /*
888 * If the open failed because the file did not exist, or because
889 * the file was not truncated yet, try creating it ourselves.
890 */
891 URCU_TLS(lttng_ust_nest_count)++;
892 pid = fork();
893 URCU_TLS(lttng_ust_nest_count)--;
894 if (pid > 0) {
895 int status;
896
897 /*
898 * Parent: wait for child to return, in which case the
899 * shared memory map will have been created.
900 */
901 pid = wait(&status);
902 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
903 wait_shm_fd = -1;
904 goto end;
905 }
906 /*
907 * Try to open read-only again after creation.
908 */
909 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
910 if (wait_shm_fd < 0) {
911 /*
912 * Read-only open did not work. It's a failure
913 * that prohibits using shm.
914 */
915 ERR("Error opening shm %s", sock_info->wait_shm_path);
916 goto end;
917 }
918 goto end;
919 } else if (pid == 0) {
920 int create_mode;
921
922 /* Child */
923 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
924 if (sock_info->global)
925 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
926 /*
927 * We're alone in a child process, so we can modify the
928 * process-wide umask.
929 */
930 umask(~create_mode);
931 /*
932 * Try creating shm (or get rw access).
933 * We don't do an exclusive open, because we allow other
934 * processes to create+ftruncate it concurrently.
935 */
936 wait_shm_fd = shm_open(sock_info->wait_shm_path,
937 O_RDWR | O_CREAT, create_mode);
938 if (wait_shm_fd >= 0) {
939 ret = ftruncate(wait_shm_fd, mmap_size);
940 if (ret) {
941 PERROR("ftruncate");
942 _exit(EXIT_FAILURE);
943 }
944 _exit(EXIT_SUCCESS);
945 }
946 /*
947 * For local shm, we need to have rw access to accept
948 * opening it: this means the local sessiond will be
949 * able to wake us up. For global shm, we open it even
950 * if rw access is not granted, because the root.root
951 * sessiond will be able to override all rights and wake
952 * us up.
953 */
954 if (!sock_info->global && errno != EACCES) {
955 ERR("Error opening shm %s", sock_info->wait_shm_path);
956 _exit(EXIT_FAILURE);
957 }
958 /*
959 * The shm exists, but we cannot open it RW. Report
960 * success.
961 */
962 _exit(EXIT_SUCCESS);
963 } else {
964 return -1;
965 }
966 end:
967 if (wait_shm_fd >= 0 && !sock_info->global) {
968 struct stat statbuf;
969
970 /*
971 * Ensure that our user is the owner of the shm file for
972 * local shm. If we do not own the file, it means our
973 * sessiond will not have access to wake us up (there is
974 * probably a rogue process trying to fake our
975 * sessiond). Fallback to polling method in this case.
976 */
977 ret = fstat(wait_shm_fd, &statbuf);
978 if (ret) {
979 PERROR("fstat");
980 goto error_close;
981 }
982 if (statbuf.st_uid != getuid())
983 goto error_close;
984 }
985 return wait_shm_fd;
986
987 error_close:
988 ret = close(wait_shm_fd);
989 if (ret) {
990 PERROR("Error closing fd");
991 }
992 return -1;
993 }
994
995 static
996 char *get_map_shm(struct sock_info *sock_info)
997 {
998 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
999 int wait_shm_fd, ret;
1000 char *wait_shm_mmap;
1001
1002 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
1003 if (wait_shm_fd < 0) {
1004 goto error;
1005 }
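/* Map the wait page read-only: we only read and FUTEX_WAIT on it. */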
1006 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
1007 MAP_SHARED, wait_shm_fd, 0);
1008 /* close shm fd immediately after taking the mmap reference */
1009 ret = close(wait_shm_fd);
1010 if (ret) {
1011 PERROR("Error closing fd");
1012 }
1013 if (wait_shm_mmap == MAP_FAILED) {
1014 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
1015 goto error;
1016 }
1017 return wait_shm_mmap;
1018
1019 error:
1020 return NULL;
1021 }
1022
1023 static
1024 void wait_for_sessiond(struct sock_info *sock_info)
1025 {
1026 int ret;
1027
1028 if (ust_lock()) {
1029 goto quit;
1030 }
1031 if (wait_poll_fallback) {
1032 goto error;
1033 }
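/* Lazily map the wait shm page the first time we need to block. */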
1034 if (!sock_info->wait_shm_mmap) {
1035 sock_info->wait_shm_mmap = get_map_shm(sock_info);
1036 if (!sock_info->wait_shm_mmap)
1037 goto error;
1038 }
1039 ust_unlock();
1040
1041 DBG("Waiting for %s apps sessiond", sock_info->name);
1042 /* Wait for futex wakeup */
1043 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
1044 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
1045 FUTEX_WAIT, 0, NULL, NULL, 0);
1046 if (ret < 0) {
1047 if (errno == EFAULT) {
1048 wait_poll_fallback = 1;
1049 DBG(
1050 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
1051 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
1052 "Please upgrade your kernel "
1053 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
1054 "mainline). LTTng-UST will use polling mode fallback.");
1055 if (ust_debug())
1056 PERROR("futex");
1057 }
1058 }
1059 }
1060 return;
1061
1062 quit:
1063 ust_unlock();
1064 return;
1065
1066 error:
1067 ust_unlock();
1068 return;
1069 }
1070
1071 /*
1072 * This thread does not allocate any resources, except within
1073 * handle_message(), under mutex protection. This mutex protects
1074 * against fork and exit.
1075 * The only other time it allocates resources is at socket connection,
1076 * which is also protected by the mutex.
1077 */
1078 static
1079 void *ust_listener_thread(void *arg)
1080 {
1081 struct sock_info *sock_info = arg;
1082 int sock, ret, prev_connect_failed = 0, has_waited = 0;
1083 long timeout;
1084
1085 /* Restart trying to connect to the session daemon */
1086 restart:
1087 if (prev_connect_failed) {
1088 /* Wait for sessiond availability with pipe */
1089 wait_for_sessiond(sock_info);
1090 if (has_waited) {
1091 has_waited = 0;
1092 /*
1093 * Sleep for 5 seconds before retrying after a
1094 * sequence of failure / wait / failure. This
1095 * deals with a killed or broken session daemon.
1096 */
1097 sleep(5);
1098 }
1099 has_waited = 1;
1100 prev_connect_failed = 0;
1101 }
1102
1103 if (sock_info->socket != -1) {
1104 ret = ustcomm_close_unix_sock(sock_info->socket);
1105 if (ret) {
1106 ERR("Error closing %s ust cmd socket",
1107 sock_info->name);
1108 }
1109 sock_info->socket = -1;
1110 }
1111 if (sock_info->notify_socket != -1) {
1112 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
1113 if (ret) {
1114 ERR("Error closing %s ust notify socket",
1115 sock_info->name);
1116 }
1117 sock_info->notify_socket = -1;
1118 }
1119
1120 /*
1121 * Register. We need to perform both the connect and send the
1122 * registration message before doing the next connect, otherwise
1123 * we may reach the unix socket connect queue max limit and block
1124 * on the 2nd connect while the session daemon is awaiting the
1125 * first connect's registration message.
1126 */
1127 /* Connect cmd socket */
1128 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1129 if (ret < 0) {
1130 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1131 prev_connect_failed = 1;
1132
1133 if (ust_lock()) {
1134 goto quit;
1135 }
1136
1137 /*
1138 * If we cannot find the sessiond daemon, don't delay
1139 * constructor execution.
1140 */
1141 ret = handle_register_done(sock_info);
1142 assert(!ret);
1143 ust_unlock();
1144 goto restart;
1145 }
1146 sock_info->socket = ret;
1147
1148 if (ust_lock()) {
1149 goto quit;
1150 }
1151
1152 /*
1153 * Create only one root handle per listener thread for the whole
1154 * process lifetime, so we ensure we get the ID which is statically
1155 * assigned to the root handle.
1156 */
1157 if (sock_info->root_handle == -1) {
1158 ret = lttng_abi_create_root_handle();
1159 if (ret < 0) {
1160 ERR("Error creating root handle");
1161 goto quit;
1162 }
1163 sock_info->root_handle = ret;
1164 }
1165
1166 ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
1167 if (ret < 0) {
1168 ERR("Error registering to %s ust cmd socket",
1169 sock_info->name);
1170 prev_connect_failed = 1;
1171 /*
1172 * If we cannot register to the sessiond daemon, don't
1173 * delay constructor execution.
1174 */
1175 ret = handle_register_done(sock_info);
1176 assert(!ret);
1177 ust_unlock();
1178 goto restart;
1179 }
1180
1181 ust_unlock();
1182
1183 /* Connect notify socket */
1184 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1185 if (ret < 0) {
1186 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1187 prev_connect_failed = 1;
1188
1189 if (ust_lock()) {
1190 goto quit;
1191 }
1192
1193 /*
1194 * If we cannot find the sessiond daemon, don't delay
1195 * constructor execution.
1196 */
1197 ret = handle_register_done(sock_info);
1198 assert(!ret);
1199 ust_unlock();
1200 goto restart;
1201 }
1202 sock_info->notify_socket = ret;
1203
1204 timeout = get_notify_sock_timeout();
1205 if (timeout >= 0) {
1206 /*
1207 * Give at least 10ms to sessiond to reply to
1208 * notifications.
1209 */
1210 if (timeout < 10)
1211 timeout = 10;
1212 ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
1213 timeout);
1214 if (ret < 0) {
1215 WARN("Error setting socket receive timeout");
1216 }
1217 ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
1218 timeout);
1219 if (ret < 0) {
1220 WARN("Error setting socket send timeout");
1221 }
1222 } else if (timeout < -1) {
1223 WARN("Unsupported timeout value %ld", timeout);
1224 }
1225
1226 if (ust_lock()) {
1227 goto quit;
1228 }
1229
1230 ret = register_to_sessiond(sock_info->notify_socket,
1231 USTCTL_SOCKET_NOTIFY);
1232 if (ret < 0) {
1233 ERR("Error registering to %s ust notify socket",
1234 sock_info->name);
1235 prev_connect_failed = 1;
1236 /*
1237 * If we cannot register to the sessiond daemon, don't
1238 * delay constructor execution.
1239 */
1240 ret = handle_register_done(sock_info);
1241 assert(!ret);
1242 ust_unlock();
1243 goto restart;
1244 }
1245 sock = sock_info->socket;
1246
1247 ust_unlock();
1248
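/*
 * Command reception loop: each command is a fixed-size
 * struct ustcomm_ust_msg, dispatched by handle_message() under the
 * UST lock.
 */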
1249 for (;;) {
1250 ssize_t len;
1251 struct ustcomm_ust_msg lum;
1252
1253 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
1254 switch (len) {
1255 case 0: /* orderly shutdown */
1256 DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
1257 if (ust_lock()) {
1258 goto quit;
1259 }
1260 /*
1261 * Either sessiond has shut down or refused us by closing the socket.
1262 * In either case, we don't want to delay constructor execution,
1263 * and we need to wait before retrying.
1264 */
1265 prev_connect_failed = 1;
1266 /*
1267 * If we cannot register to the sessiond daemon, don't
1268 * delay constructor execution.
1269 */
1270 ret = handle_register_done(sock_info);
1271 assert(!ret);
1272 ust_unlock();
1273 goto end;
1274 case sizeof(lum):
1275 print_cmd(lum.cmd, lum.handle);
1276 ret = handle_message(sock_info, sock, &lum);
1277 if (ret) {
1278 ERR("Error handling message for %s socket", sock_info->name);
1279 }
1280 continue;
1281 default:
1282 if (len < 0) {
1283 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
1284 } else {
1285 DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
1286 }
1287 if (len == -ECONNRESET) {
1288 DBG("%s remote end closed connection", sock_info->name);
1289 goto end;
1290 }
1291 goto end;
1292 }
1293
1294 }
1295 end:
1296 if (ust_lock()) {
1297 goto quit;
1298 }
1299 /* Cleanup socket handles before trying to reconnect */
1300 lttng_ust_objd_table_owner_cleanup(sock_info);
1301 ust_unlock();
1302 goto restart; /* try to reconnect */
1303
1304 quit:
1305 ust_unlock();
1306
1307 pthread_mutex_lock(&ust_exit_mutex);
1308 sock_info->thread_active = 0;
1309 pthread_mutex_unlock(&ust_exit_mutex);
1310 return NULL;
1311 }
1312
1313 /*
1314 * sessiond monitoring thread: monitor presence of global and per-user
1315 * sessiond by polling the application common named pipe.
1316 */
1317 void __attribute__((constructor)) lttng_ust_init(void)
1318 {
1319 struct timespec constructor_timeout;
1320 sigset_t sig_all_blocked, orig_parent_mask;
1321 pthread_attr_t thread_attr;
1322 int timeout_mode;
1323 int ret;
1324
1325 if (uatomic_xchg(&initialized, 1) == 1)
1326 return;
1327
1328 /*
1329 * Fixup interdependency between TLS fixup mutex (which happens
1330 * to be the dynamic linker mutex) and ust_lock, taken within
1331 * the ust lock.
1332 */
1333 lttng_fixup_ringbuffer_tls();
1334 lttng_fixup_vtid_tls();
1335 lttng_fixup_nest_count_tls();
1336 lttng_fixup_procname_tls();
1337
1338 /*
1339 * We want precise control over the order in which we construct
1340 * our sub-libraries vs starting to receive commands from
1341 * sessiond (otherwise leading to errors when trying to create
1342 * sessions before the init functions are completed).
1343 */
1344 init_usterr();
1345 init_tracepoint();
1346 lttng_ust_baddr_statedump_init();
1347 lttng_ring_buffer_metadata_client_init();
1348 lttng_ring_buffer_client_overwrite_init();
1349 lttng_ring_buffer_client_overwrite_rt_init();
1350 lttng_ring_buffer_client_discard_init();
1351 lttng_ring_buffer_client_discard_rt_init();
1352 lttng_context_init();
1353
1354 timeout_mode = get_constructor_timeout(&constructor_timeout);
1355
1356 ret = sem_init(&constructor_wait, 0, 0);
1357 assert(!ret);
1358
1359 ret = setup_local_apps();
1360 if (ret) {
1361 DBG("local apps setup returned %d", ret);
1362 }
1363
1364 /* A new thread created by pthread_create inherits the signal mask
1365 * from the parent. To avoid any signal being received by the
1366 * listener thread, we block all signals temporarily in the parent,
1367 * while we create the listener thread.
1368 */
1369 sigfillset(&sig_all_blocked);
1370 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1371 if (ret) {
1372 ERR("pthread_sigmask: %s", strerror(ret));
1373 }
1374
1375 ret = pthread_attr_init(&thread_attr);
1376 if (ret) {
1377 ERR("pthread_attr_init: %s", strerror(ret));
1378 }
1379 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
1380 if (ret) {
1381 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
1382 }
1383
1384 pthread_mutex_lock(&ust_exit_mutex);
1385 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
1386 ust_listener_thread, &global_apps);
1387 if (ret) {
1388 ERR("pthread_create global: %s", strerror(ret));
1389 }
1390 global_apps.thread_active = 1;
1391 pthread_mutex_unlock(&ust_exit_mutex);
1392
1393 if (local_apps.allowed) {
1394 pthread_mutex_lock(&ust_exit_mutex);
1395 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
1396 ust_listener_thread, &local_apps);
1397 if (ret) {
1398 ERR("pthread_create local: %s", strerror(ret));
1399 }
1400 local_apps.thread_active = 1;
1401 pthread_mutex_unlock(&ust_exit_mutex);
1402 } else {
1403 handle_register_done(&local_apps);
1404 }
1405 ret = pthread_attr_destroy(&thread_attr);
1406 if (ret) {
1407 ERR("pthread_attr_destroy: %s", strerror(ret));
1408 }
1409
1410 /* Restore original signal mask in parent */
1411 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1412 if (ret) {
1413 ERR("pthread_sigmask: %s", strerror(ret));
1414 }
1415
1416 switch (timeout_mode) {
1417 case 1: /* timeout wait */
1418 do {
1419 ret = sem_timedwait(&constructor_wait,
1420 &constructor_timeout);
1421 } while (ret < 0 && errno == EINTR);
1422 if (ret < 0 && errno == ETIMEDOUT) {
1423 ERR("Timed out waiting for lttng-sessiond");
1424 } else {
1425 assert(!ret);
1426 }
1427 break;
1428 case -1:/* wait forever */
1429 do {
1430 ret = sem_wait(&constructor_wait);
1431 } while (ret < 0 && errno == EINTR);
1432 assert(!ret);
1433 break;
1434 case 0: /* no timeout */
1435 break;
1436 }
1437 }
1438
1439 static
1440 void lttng_ust_cleanup(int exiting)
1441 {
1442 cleanup_sock_info(&global_apps, exiting);
1443 if (local_apps.allowed) {
1444 cleanup_sock_info(&local_apps, exiting);
1445 }
1446 /*
1447 * The teardown steps in this function all affect data structures
1448 * accessed under the UST lock by the listener threads. This
1449 * lock, along with the lttng_ust_comm_should_quit flag, ensures
1450 * that none of these threads are accessing this data at this
1451 * point.
1452 */
1453 lttng_ust_abi_exit();
1454 lttng_ust_events_exit();
1455 lttng_context_exit();
1456 lttng_ring_buffer_client_discard_rt_exit();
1457 lttng_ring_buffer_client_discard_exit();
1458 lttng_ring_buffer_client_overwrite_rt_exit();
1459 lttng_ring_buffer_client_overwrite_exit();
1460 lttng_ring_buffer_metadata_client_exit();
1461 lttng_ust_baddr_statedump_destroy();
1462 exit_tracepoint();
1463 if (!exiting) {
1464 /* Reinitialize values for fork */
1465 sem_count = 2;
1466 lttng_ust_comm_should_quit = 0;
1467 initialized = 0;
1468 }
1469 }
1470
1471 void __attribute__((destructor)) lttng_ust_exit(void)
1472 {
1473 int ret;
1474
1475 /*
1476 * Using pthread_cancel here because:
1477 * A) we don't want to hang application teardown.
1478 * B) the thread is not allocating any resource.
1479 */
1480
1481 /*
1482 * Require the communication thread to quit. Synchronize with
1483 * mutexes to ensure it is not in a mutex critical section when
1484 * pthread_cancel is later called.
1485 */
1486 ust_lock_nocheck();
1487 lttng_ust_comm_should_quit = 1;
1488 ust_unlock();
1489
1490 pthread_mutex_lock(&ust_exit_mutex);
1491 /* cancel threads */
1492 if (global_apps.thread_active) {
1493 ret = pthread_cancel(global_apps.ust_listener);
1494 if (ret) {
1495 ERR("Error cancelling global ust listener thread: %s",
1496 strerror(ret));
1497 } else {
1498 global_apps.thread_active = 0;
1499 }
1500 }
1501 if (local_apps.thread_active) {
1502 ret = pthread_cancel(local_apps.ust_listener);
1503 if (ret) {
1504 ERR("Error cancelling local ust listener thread: %s",
1505 strerror(ret));
1506 } else {
1507 local_apps.thread_active = 0;
1508 }
1509 }
1510 pthread_mutex_unlock(&ust_exit_mutex);
1511
1512 /*
1513 * Do NOT join threads: use of sys_futex makes it impossible to
1514 * join the threads without using async-cancel, but async-cancel
1515 * is delivered by a signal, which could hit the target thread
1516 * anywhere in its code path, including while the ust_lock() is
1517 * held, causing a deadlock for the other thread. Let the OS
1518 * clean up the threads if they are stalled in a syscall.
1519 */
1520 lttng_ust_cleanup(1);
1521 }
1522
1523 /*
1524 * We exclude the worker threads across fork and clone (except
1525 * CLONE_VM), because these system calls only keep the forking thread
1526 * running in the child. Therefore, we don't want to call fork or clone
1527 * in the middle of a tracepoint or ust tracing state modification.
1528 * Holding this mutex protects these structures across fork and clone.
1529 */
1530 void ust_before_fork(sigset_t *save_sigset)
1531 {
1532 /*
1533 * Disable signals. This is to avoid that the child intervenes
1534 * before it is properly setup for tracing. It is safer to
1535 * disable all signals, because then we know we are not breaking
1536 * anything by restoring the original mask.
1537 */
1538 sigset_t all_sigs;
1539 int ret;
1540
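/*
 * Do nothing if fork() is being called from within liblttng-ust
 * itself (lttng_ust_nest_count is non-zero, e.g. around the fork()
 * in get_wait_shm()).
 */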
1541 if (URCU_TLS(lttng_ust_nest_count))
1542 return;
1543 /* Disable signals */
1544 sigfillset(&all_sigs);
1545 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1546 if (ret == -1) {
1547 PERROR("sigprocmask");
1548 }
1549 ust_lock_nocheck();
1550 rcu_bp_before_fork();
1551 }
1552
1553 static void ust_after_fork_common(sigset_t *restore_sigset)
1554 {
1555 int ret;
1556
1557 DBG("process %d", getpid());
1558 ust_unlock();
1559 /* Restore signals */
1560 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1561 if (ret == -1) {
1562 PERROR("sigprocmask");
1563 }
1564 }
1565
1566 void ust_after_fork_parent(sigset_t *restore_sigset)
1567 {
1568 if (URCU_TLS(lttng_ust_nest_count))
1569 return;
1570 DBG("process %d", getpid());
1571 rcu_bp_after_fork_parent();
1572 /* Release mutexes and reenable signals */
1573 ust_after_fork_common(restore_sigset);
1574 }
1575
1576 /*
1577 * After fork, in the child, we need to clean up all the leftover state,
1578 * except the worker thread which already magically disappeared thanks
1579 * to the weird Linux fork semantics. After tidying up, we call
1580 * lttng_ust_init() again to start over as a new PID.
1581 *
1582 * This is meant for fork() calls where tracing is active in the child
1583 * between the fork and the following exec call (if there is any).
1584 */
1585 void ust_after_fork_child(sigset_t *restore_sigset)
1586 {
1587 if (URCU_TLS(lttng_ust_nest_count))
1588 return;
1589 DBG("process %d", getpid());
1590 /* Release urcu mutexes */
1591 rcu_bp_after_fork_child();
1592 lttng_ust_cleanup(0);
1593 lttng_context_vtid_reset();
1594 /* Release mutexes and reenable signals */
1595 ust_after_fork_common(restore_sigset);
1596 lttng_ust_init();
1597 }
1598
1599 void lttng_ust_sockinfo_session_enabled(void *owner)
1600 {
1601 struct sock_info *sock_info = owner;
1602 sock_info->statedump_pending = 1;
1603 }