Fix: take the ust lock around session iteration in statedump
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <dlfcn.h>
38 #include <urcu/uatomic.h>
39 #include <urcu/futex.h>
40 #include <urcu/compiler.h>
41
42 #include <lttng/ust-events.h>
43 #include <lttng/ust-abi.h>
44 #include <lttng/ust.h>
45 #include <lttng/ust-error.h>
46 #include <lttng/ust-ctl.h>
47 #include <urcu/tls-compat.h>
48 #include <ust-comm.h>
49 #include <usterr-signal-safe.h>
50 #include <helper.h>
51 #include "tracepoint-internal.h"
52 #include "lttng-tracer-core.h"
53 #include "compat.h"
54 #include "../libringbuffer/tlsfixup.h"
55 #include "lttng-ust-baddr.h"
56
57 /*
58 * Has the lttng-ust comm constructor been called?
59 */
60 static int initialized;
61
62 /*
63 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
64 * Held when handling a command, also held by fork() to deal with
65 * removal of threads, and by exit path.
66 */
67
68 /* Should the ust comm thread quit? */
69 static int lttng_ust_comm_should_quit;
70
71 /*
72 * Wait for either of these before continuing to the main
73 * program:
74 * - the register_done message from sessiond daemon
75 * (will let the sessiond daemon enable sessions before main
76 * starts.)
77 * - sessiond daemon is not reachable.
78 * - timeout (ensuring applications are resilient to session
79 * daemon problems).
80 */
81 static sem_t constructor_wait;
82 /*
83 * One count each for the global and the local (per-user) sessiond.
84 */
85 static int sem_count = { 2 };
86
87 /*
88 * Counting nesting within lttng-ust. Used to ensure that calling fork()
89 * from liblttng-ust does not execute the pre/post fork handlers.
90 */
91 static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
92
93 /*
94 * Info about socket and associated listener thread.
95 */
96 struct sock_info {
97 const char *name;
98 pthread_t ust_listener; /* listener thread */
99 int root_handle;
100 int constructor_sem_posted;
101 int allowed;
102 int global;
103 int thread_active;
104
105 char sock_path[PATH_MAX];
106 int socket;
107 int notify_socket;
108
109 char wait_shm_path[PATH_MAX];
110 char *wait_shm_mmap;
111 /* Keep track of lazy state dump not performed yet. */
112 int statedump_pending;
113 };
114
115 /* Socket from app (connect) to session daemon (listen) for communication */
116 struct sock_info global_apps = {
117 .name = "global",
118 .global = 1,
119
120 .root_handle = -1,
121 .allowed = 1,
122 .thread_active = 0,
123
124 .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
125 .socket = -1,
126 .notify_socket = -1,
127
128 .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
129
130 .statedump_pending = 0,
131 };
132
133 /* TODO: allow global_apps_sock_path override */
134
135 struct sock_info local_apps = {
136 .name = "local",
137 .global = 0,
138 .root_handle = -1,
139 .allowed = 0, /* Check setuid bit first */
140 .thread_active = 0,
141
142 .socket = -1,
143 .notify_socket = -1,
144
145 .statedump_pending = 0,
146 };
147
148 static int wait_poll_fallback;
149
150 static const char *cmd_name_mapping[] = {
151 [ LTTNG_UST_RELEASE ] = "Release",
152 [ LTTNG_UST_SESSION ] = "Create Session",
153 [ LTTNG_UST_TRACER_VERSION ] = "Get Tracer Version",
154
155 [ LTTNG_UST_TRACEPOINT_LIST ] = "Create Tracepoint List",
156 [ LTTNG_UST_WAIT_QUIESCENT ] = "Wait for Quiescent State",
157 [ LTTNG_UST_REGISTER_DONE ] = "Registration Done",
158 [ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
159
160 /* Session FD commands */
161 [ LTTNG_UST_CHANNEL ] = "Create Channel",
162 [ LTTNG_UST_SESSION_START ] = "Start Session",
163 [ LTTNG_UST_SESSION_STOP ] = "Stop Session",
164
165 /* Channel FD commands */
166 [ LTTNG_UST_STREAM ] = "Create Stream",
167 [ LTTNG_UST_EVENT ] = "Create Event",
168
169 /* Event and Channel FD commands */
170 [ LTTNG_UST_CONTEXT ] = "Create Context",
171 [ LTTNG_UST_FLUSH_BUFFER ] = "Flush Buffer",
172
173 /* Event, Channel and Session commands */
174 [ LTTNG_UST_ENABLE ] = "Enable",
175 [ LTTNG_UST_DISABLE ] = "Disable",
176
177 /* Tracepoint list commands */
178 [ LTTNG_UST_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
179 [ LTTNG_UST_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
180
181 /* Event FD commands */
182 [ LTTNG_UST_FILTER ] = "Create Filter",
183 [ LTTNG_UST_EXCLUSION ] = "Add exclusions to event",
184 };
185
186 static const char *str_timeout;
187 static int got_timeout_env;
188
189 extern void lttng_ring_buffer_client_overwrite_init(void);
190 extern void lttng_ring_buffer_client_overwrite_rt_init(void);
191 extern void lttng_ring_buffer_client_discard_init(void);
192 extern void lttng_ring_buffer_client_discard_rt_init(void);
193 extern void lttng_ring_buffer_metadata_client_init(void);
194 extern void lttng_ring_buffer_client_overwrite_exit(void);
195 extern void lttng_ring_buffer_client_overwrite_rt_exit(void);
196 extern void lttng_ring_buffer_client_discard_exit(void);
197 extern void lttng_ring_buffer_client_discard_rt_exit(void);
198 extern void lttng_ring_buffer_metadata_client_exit(void);
199
200 /*
201 * Returns the HOME directory path. Caller MUST NOT free(3) the returned
202 * pointer.
203 */
204 static
205 const char *get_lttng_home_dir(void)
206 {
207 const char *val;
208
209 val = (const char *) getenv("LTTNG_HOME");
210 if (val != NULL) {
211 return val;
212 }
213 return (const char *) getenv("HOME");
214 }
215
216 /*
217 * Force a read (implying a TLS fixup for dlopen) of TLS variables.
218 */
219 static
220 void lttng_fixup_nest_count_tls(void)
221 {
222 asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
223 }
224
225 int lttng_get_notify_socket(void *owner)
226 {
227 struct sock_info *info = owner;
228
229 return info->notify_socket;
230 }
231
232 static
233 void print_cmd(int cmd, int handle)
234 {
235 const char *cmd_name = "Unknown";
236
237 if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
238 && cmd_name_mapping[cmd]) {
239 cmd_name = cmd_name_mapping[cmd];
240 }
241 DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
242 cmd_name, cmd,
243 lttng_ust_obj_get_name(handle), handle);
244 }
245
246 static
247 int setup_local_apps(void)
248 {
249 const char *home_dir;
250 uid_t uid;
251
252 uid = getuid();
253 /*
254 * Disallow per-user tracing for setuid binaries.
255 */
256 if (uid != geteuid()) {
257 assert(local_apps.allowed == 0);
258 return 0;
259 }
260 home_dir = get_lttng_home_dir();
261 if (!home_dir) {
262 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
263 assert(local_apps.allowed == 0);
264 return -ENOENT;
265 }
266 local_apps.allowed = 1;
267 snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
268 home_dir,
269 LTTNG_DEFAULT_HOME_RUNDIR,
270 LTTNG_UST_SOCK_FILENAME);
271 snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
272 LTTNG_UST_WAIT_FILENAME,
273 uid);
274 return 0;
275 }
276
277 /*
278 * Get notify_sock timeout, in ms.
279 * -1: wait forever. 0: don't wait. >0: timeout, in ms.
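 * The value is read from the LTTNG_UST_REGISTER_TIMEOUT environment
 * variable; e.g. LTTNG_UST_REGISTER_TIMEOUT=100 gives the session daemon
 * 100 ms to complete registration before the application proceeds.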
280 */
281 static
282 long get_timeout(void)
283 {
284 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
285
286 if (!got_timeout_env) {
287 str_timeout = getenv("LTTNG_UST_REGISTER_TIMEOUT");
288 got_timeout_env = 1;
289 }
290 if (str_timeout)
291 constructor_delay_ms = strtol(str_timeout, NULL, 10);
292 return constructor_delay_ms;
293 }
294
295 static
296 long get_notify_sock_timeout(void)
297 {
298 return get_timeout();
299 }
300
301 /*
302 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
303 */
304 static
305 int get_constructor_timeout(struct timespec *constructor_timeout)
306 {
307 long constructor_delay_ms;
308 int ret;
309
310 constructor_delay_ms = get_timeout();
311
312 switch (constructor_delay_ms) {
313 case -1:/* fall-through */
314 case 0:
315 return constructor_delay_ms;
316 default:
317 break;
318 }
319
320 /*
321 * If we are unable to find the current time, don't wait.
322 */
323 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
324 if (ret) {
325 return -1;
326 }
327 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
328 constructor_timeout->tv_nsec +=
329 (constructor_delay_ms % 1000UL) * 1000000UL;
330 if (constructor_timeout->tv_nsec >= 1000000000UL) {
331 constructor_timeout->tv_sec++;
332 constructor_timeout->tv_nsec -= 1000000000UL;
333 }
334 return 1;
335 }
336
337 static
338 int register_to_sessiond(int socket, enum ustctl_socket_type type)
339 {
340 return ustcomm_send_reg_msg(socket,
341 type,
342 CAA_BITS_PER_LONG,
343 lttng_alignof(uint8_t) * CHAR_BIT,
344 lttng_alignof(uint16_t) * CHAR_BIT,
345 lttng_alignof(uint32_t) * CHAR_BIT,
346 lttng_alignof(uint64_t) * CHAR_BIT,
347 lttng_alignof(unsigned long) * CHAR_BIT);
348 }
349
350 static
351 int send_reply(int sock, struct ustcomm_ust_reply *lur)
352 {
353 ssize_t len;
354
355 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
356 switch (len) {
357 case sizeof(*lur):
358 DBG("message successfully sent");
359 return 0;
360 default:
361 if (len == -ECONNRESET) {
362 DBG("remote end closed connection");
363 return 0;
364 }
365 if (len < 0)
366 return len;
367 DBG("incorrect message size: %zd", len);
368 return -EINVAL;
369 }
370 }
371
372 static
373 int handle_register_done(struct sock_info *sock_info)
374 {
375 int ret;
376
377 if (sock_info->constructor_sem_posted)
378 return 0;
379 sock_info->constructor_sem_posted = 1;
380 if (uatomic_read(&sem_count) <= 0) {
381 return 0;
382 }
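/*
 * sem_count is decremented once per sock_info (global and per-user);
 * the decrement that brings it to zero posts the constructor
 * semaphore, letting lttng_ust_init() stop waiting.
 */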
383 ret = uatomic_add_return(&sem_count, -1);
384 if (ret == 0) {
385 ret = sem_post(&constructor_wait);
386 assert(!ret);
387 }
388 return 0;
389 }
390
391 /*
392 * Only execute pending statedump after the constructor semaphore has
393 * been posted by each listener thread. This means statedump will only
394 * be performed after the "registration done" command is received from
395 * each session daemon the application is connected to.
396 *
397 * This ensures we don't run into deadlock issues with the dynamic
398 * loader mutex, which is held while the constructor is called and
399 * waiting on the constructor semaphore. All operations requiring this
400 * dynamic loader lock need to be postponed using this mechanism.
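 *
 * Note that the statedump takes the ust lock itself (after the dynamic
 * loader lock) while iterating on sessions, which is why it must be
 * triggered here, outside of the ust lock held by handle_message().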
401 */
402 static
403 void handle_pending_statedump(struct sock_info *sock_info)
404 {
405 int ctor_passed = sock_info->constructor_sem_posted;
406
407 if (ctor_passed && sock_info->statedump_pending) {
408 sock_info->statedump_pending = 0;
409 lttng_handle_pending_statedump(sock_info);
410 }
411 }
412
413 static
414 int handle_message(struct sock_info *sock_info,
415 int sock, struct ustcomm_ust_msg *lum)
416 {
417 int ret = 0;
418 const struct lttng_ust_objd_ops *ops;
419 struct ustcomm_ust_reply lur;
420 union ust_args args;
421 ssize_t len;
422
423 ust_lock();
424
425 memset(&lur, 0, sizeof(lur));
426
427 if (lttng_ust_comm_should_quit) {
428 ret = -LTTNG_UST_ERR_EXITING;
429 goto end;
430 }
431
432 ops = objd_ops(lum->handle);
433 if (!ops) {
434 ret = -ENOENT;
435 goto end;
436 }
437
438 switch (lum->cmd) {
439 case LTTNG_UST_REGISTER_DONE:
440 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
441 ret = handle_register_done(sock_info);
442 else
443 ret = -EINVAL;
444 break;
445 case LTTNG_UST_RELEASE:
446 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
447 ret = -EPERM;
448 else
449 ret = lttng_ust_objd_unref(lum->handle, 1);
450 break;
451 case LTTNG_UST_FILTER:
452 {
453 /* Receive filter data */
454 struct lttng_ust_filter_bytecode_node *bytecode;
455
456 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
457 ERR("Filter data size is too large: %u bytes",
458 lum->u.filter.data_size);
459 ret = -EINVAL;
460 goto error;
461 }
462
463 if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
464 ERR("Filter reloc offset %u is not within data",
465 lum->u.filter.reloc_offset);
466 ret = -EINVAL;
467 goto error;
468 }
469
470 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
471 if (!bytecode) {
472 ret = -ENOMEM;
473 goto error;
474 }
475 len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
476 lum->u.filter.data_size);
477 switch (len) {
478 case 0: /* orderly shutdown */
479 ret = 0;
480 free(bytecode);
481 goto error;
482 default:
483 if (len == lum->u.filter.data_size) {
484 DBG("filter data received");
485 break;
486 } else if (len < 0) {
487 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
488 if (len == -ECONNRESET) {
489 ERR("%s remote end closed connection", sock_info->name);
490 ret = len;
491 free(bytecode);
492 goto error;
493 }
494 ret = len;
495 free(bytecode);
496 goto end;
497 } else {
498 DBG("incorrect filter data message size: %zd", len);
499 ret = -EINVAL;
500 free(bytecode);
501 goto end;
502 }
503 }
504 bytecode->bc.len = lum->u.filter.data_size;
505 bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
506 bytecode->bc.seqnum = lum->u.filter.seqnum;
507 if (ops->cmd) {
508 ret = ops->cmd(lum->handle, lum->cmd,
509 (unsigned long) bytecode,
510 &args, sock_info);
511 if (ret) {
512 free(bytecode);
513 }
514 /* don't free bytecode if everything went fine. */
515 } else {
516 ret = -ENOSYS;
517 free(bytecode);
518 }
519 break;
520 }
521 case LTTNG_UST_EXCLUSION:
522 {
523 /* Receive exclusion names */
524 struct lttng_ust_excluder_node *node;
525 unsigned int count;
526
527 count = lum->u.exclusion.count;
528 if (count == 0) {
529 /* There are no names to read */
530 ret = 0;
531 goto error;
532 }
533 node = zmalloc(sizeof(*node) +
534 count * LTTNG_UST_SYM_NAME_LEN);
535 if (!node) {
536 ret = -ENOMEM;
537 goto error;
538 }
539 node->excluder.count = count;
540 len = ustcomm_recv_unix_sock(sock, node->excluder.names,
541 count * LTTNG_UST_SYM_NAME_LEN);
542 switch (len) {
543 case 0: /* orderly shutdown */
544 ret = 0;
545 free(node);
546 goto error;
547 default:
548 if (len == count * LTTNG_UST_SYM_NAME_LEN) {
549 DBG("Exclusion data received");
550 break;
551 } else if (len < 0) {
552 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
553 if (len == -ECONNRESET) {
554 ERR("%s remote end closed connection", sock_info->name);
555 ret = len;
556 free(node);
557 goto error;
558 }
559 ret = len;
560 free(node);
561 goto end;
562 } else {
563 DBG("Incorrect exclusion data message size: %zd", len);
564 ret = -EINVAL;
565 free(node);
566 goto end;
567 }
568 }
569 if (ops->cmd) {
570 ret = ops->cmd(lum->handle, lum->cmd,
571 (unsigned long) node,
572 &args, sock_info);
573 if (ret) {
574 free(node);
575 }
576 /* Don't free exclusion data if everything went fine. */
577 } else {
578 ret = -ENOSYS;
579 free(node);
580 }
581 break;
582 }
583 case LTTNG_UST_CHANNEL:
584 {
585 void *chan_data;
586 int wakeup_fd;
587
588 len = ustcomm_recv_channel_from_sessiond(sock,
589 &chan_data, lum->u.channel.len,
590 &wakeup_fd);
591 switch (len) {
592 case 0: /* orderly shutdown */
593 ret = 0;
594 goto error;
595 default:
596 if (len == lum->u.channel.len) {
597 DBG("channel data received");
598 break;
599 } else if (len < 0) {
600 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
601 if (len == -ECONNRESET) {
602 ERR("%s remote end closed connection", sock_info->name);
603 ret = len;
604 goto error;
605 }
606 ret = len;
607 goto end;
608 } else {
609 DBG("incorrect channel data message size: %zd", len);
610 ret = -EINVAL;
611 goto end;
612 }
613 }
614 args.channel.chan_data = chan_data;
615 args.channel.wakeup_fd = wakeup_fd;
616 if (ops->cmd)
617 ret = ops->cmd(lum->handle, lum->cmd,
618 (unsigned long) &lum->u,
619 &args, sock_info);
620 else
621 ret = -ENOSYS;
622 break;
623 }
624 case LTTNG_UST_STREAM:
625 {
626 /* Receive shm_fd, wakeup_fd */
627 ret = ustcomm_recv_stream_from_sessiond(sock,
628 &lum->u.stream.len,
629 &args.stream.shm_fd,
630 &args.stream.wakeup_fd);
631 if (ret) {
632 goto end;
633 }
634 if (ops->cmd)
635 ret = ops->cmd(lum->handle, lum->cmd,
636 (unsigned long) &lum->u,
637 &args, sock_info);
638 else
639 ret = -ENOSYS;
640 break;
641 }
642 default:
643 if (ops->cmd)
644 ret = ops->cmd(lum->handle, lum->cmd,
645 (unsigned long) &lum->u,
646 &args, sock_info);
647 else
648 ret = -ENOSYS;
649 break;
650 }
651
652 end:
653 lur.handle = lum->handle;
654 lur.cmd = lum->cmd;
655 lur.ret_val = ret;
656 if (ret >= 0) {
657 lur.ret_code = LTTNG_UST_OK;
658 } else {
659 /*
660 * Use -LTTNG_UST_ERR as wildcard for UST internal
661 * errors that are not caused by the transport, except if
662 * we already have a more precise error message to
663 * report.
664 */
665 if (ret > -LTTNG_UST_ERR) {
666 /* Translate code to UST error. */
667 switch (ret) {
668 case -EEXIST:
669 lur.ret_code = -LTTNG_UST_ERR_EXIST;
670 break;
671 case -EINVAL:
672 lur.ret_code = -LTTNG_UST_ERR_INVAL;
673 break;
674 case -ENOENT:
675 lur.ret_code = -LTTNG_UST_ERR_NOENT;
676 break;
677 case -EPERM:
678 lur.ret_code = -LTTNG_UST_ERR_PERM;
679 break;
680 case -ENOSYS:
681 lur.ret_code = -LTTNG_UST_ERR_NOSYS;
682 break;
683 default:
684 lur.ret_code = -LTTNG_UST_ERR;
685 break;
686 }
687 } else {
688 lur.ret_code = ret;
689 }
690 }
691 if (ret >= 0) {
692 switch (lum->cmd) {
693 case LTTNG_UST_TRACER_VERSION:
694 lur.u.version = lum->u.version;
695 break;
696 case LTTNG_UST_TRACEPOINT_LIST_GET:
697 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
698 break;
699 }
700 }
701 DBG("Return value: %d", lur.ret_val);
702 ret = send_reply(sock, &lur);
703 if (ret < 0) {
704 DBG("error sending reply");
705 goto error;
706 }
707
708 /*
709 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
710 * after the reply.
711 */
712 if (lur.ret_code == LTTNG_UST_OK) {
713 switch (lum->cmd) {
714 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
715 len = ustcomm_send_unix_sock(sock,
716 &args.field_list.entry,
717 sizeof(args.field_list.entry));
718 if (len < 0) {
719 ret = len;
720 goto error;
721 }
722 if (len != sizeof(args.field_list.entry)) {
723 ret = -EINVAL;
724 goto error;
725 }
726 }
727 }
728
729 error:
730 ust_unlock();
731
732 /*
733 * Perform delayed statedump operations outside of the UST
734 * lock. We need to take the dynamic loader lock before we take
735 * the UST lock internally within handle_pending_statedump().
736 */
737 handle_pending_statedump(sock_info);
738
739 return ret;
740 }
741
742 static
743 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
744 {
745 int ret;
746
747 if (sock_info->root_handle != -1) {
748 ret = lttng_ust_objd_unref(sock_info->root_handle, 1);
749 if (ret) {
750 ERR("Error unref root handle");
751 }
752 sock_info->root_handle = -1;
753 }
754 sock_info->constructor_sem_posted = 0;
755
756 /*
757 * wait_shm_mmap, socket and notify socket are used by listener
758 * threads outside of the ust lock, so we cannot tear them down
759 * ourselves, because we cannot join on these threads. Leave
760 * responsibility of cleaning up these resources to the OS
761 * process exit.
762 */
763 if (exiting)
764 return;
765
766 if (sock_info->socket != -1) {
767 ret = ustcomm_close_unix_sock(sock_info->socket);
768 if (ret) {
769 ERR("Error closing ust cmd socket");
770 }
771 sock_info->socket = -1;
772 }
773 if (sock_info->notify_socket != -1) {
774 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
775 if (ret) {
776 ERR("Error closing ust notify socket");
777 }
778 sock_info->notify_socket = -1;
779 }
780 if (sock_info->wait_shm_mmap) {
781 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
782 if (ret) {
783 ERR("Error unmapping wait shm");
784 }
785 sock_info->wait_shm_mmap = NULL;
786 }
787 }
788
789 /*
790 * Using fork to set umask in the child process (not multi-thread safe).
791 * We deal with the shm_open vs ftruncate race (happening when the
792 * sessiond owns the shm and does not let everybody modify it, to ensure
793 * safety against shm_unlink) by simply letting the mmap fail and
794 * retrying after a few seconds.
795 * For global shm, everybody has rw access to it until the sessiond
796 * starts.
797 */
798 static
799 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
800 {
801 int wait_shm_fd, ret;
802 pid_t pid;
803
804 /*
805 * Try to open read-only.
806 */
807 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
808 if (wait_shm_fd >= 0) {
809 int32_t tmp_read;
810 ssize_t len;
811 size_t bytes_read = 0;
812
813 /*
814 * Try to read the fd. If unable to do so, try opening
815 * it in write mode.
816 */
817 do {
818 len = read(wait_shm_fd,
819 &((char *) &tmp_read)[bytes_read],
820 sizeof(tmp_read) - bytes_read);
821 if (len > 0) {
822 bytes_read += len;
823 }
824 } while ((len < 0 && errno == EINTR)
825 || (len > 0 && bytes_read < sizeof(tmp_read)));
826 if (bytes_read != sizeof(tmp_read)) {
827 ret = close(wait_shm_fd);
828 if (ret) {
829 ERR("close wait_shm_fd");
830 }
831 goto open_write;
832 }
833 goto end;
834 } else if (wait_shm_fd < 0 && errno != ENOENT) {
835 /*
836 * Read-only open did not work, and it's not because the
837 * entry was not present. It's a failure that prohibits
838 * using shm.
839 */
840 ERR("Error opening shm %s", sock_info->wait_shm_path);
841 goto end;
842 }
843
844 open_write:
845 /*
846 * If the open failed because the file did not exist, or because
847 * the file was not truncated yet, try creating it ourselves.
848 */
849 URCU_TLS(lttng_ust_nest_count)++;
850 pid = fork();
851 URCU_TLS(lttng_ust_nest_count)--;
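/*
 * The nest count bracketing around fork() prevents the ust_before_fork()
 * and ust_after_fork_*() handlers from running for this internal fork
 * (they return early when lttng_ust_nest_count is non-zero).
 */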
852 if (pid > 0) {
853 int status;
854
855 /*
856 * Parent: wait for child to return, in which case the
857 * shared memory map will have been created.
858 */
859 pid = wait(&status);
860 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
861 wait_shm_fd = -1;
862 goto end;
863 }
864 /*
865 * Try to open read-only again after creation.
866 */
867 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
868 if (wait_shm_fd < 0) {
869 /*
870 * Read-only open did not work. It's a failure
871 * that prohibits using shm.
872 */
873 ERR("Error opening shm %s", sock_info->wait_shm_path);
874 goto end;
875 }
876 goto end;
877 } else if (pid == 0) {
878 int create_mode;
879
880 /* Child */
881 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
882 if (sock_info->global)
883 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
884 /*
885 * We're alone in a child process, so we can modify the
886 * process-wide umask.
887 */
888 umask(~create_mode);
889 /*
890 * Try creating shm (or get rw access).
891 * We don't do an exclusive open, because we allow other
892 * processes to create+ftruncate it concurrently.
893 */
894 wait_shm_fd = shm_open(sock_info->wait_shm_path,
895 O_RDWR | O_CREAT, create_mode);
896 if (wait_shm_fd >= 0) {
897 ret = ftruncate(wait_shm_fd, mmap_size);
898 if (ret) {
899 PERROR("ftruncate");
900 _exit(EXIT_FAILURE);
901 }
902 _exit(EXIT_SUCCESS);
903 }
904 /*
905 * For local shm, we need to have rw access to accept
906 * opening it: this means the local sessiond will be
907 * able to wake us up. For global shm, we open it even
908 * if rw access is not granted, because the root.root
909 * sessiond will be able to override all rights and wake
910 * us up.
911 */
912 if (!sock_info->global && errno != EACCES) {
913 ERR("Error opening shm %s", sock_info->wait_shm_path);
914 _exit(EXIT_FAILURE);
915 }
916 /*
917 * The shm exists, but we cannot open it RW. Report
918 * success.
919 */
920 _exit(EXIT_SUCCESS);
921 } else {
922 return -1;
923 }
924 end:
925 if (wait_shm_fd >= 0 && !sock_info->global) {
926 struct stat statbuf;
927
928 /*
929 * Ensure that our user is the owner of the shm file for
930 * local shm. If we do not own the file, it means our
931 * sessiond will not have access to wake us up (there is
932 * probably a rogue process trying to fake our
933 * sessiond). Fall back to the polling method in this case.
934 */
935 ret = fstat(wait_shm_fd, &statbuf);
936 if (ret) {
937 PERROR("fstat");
938 goto error_close;
939 }
940 if (statbuf.st_uid != getuid())
941 goto error_close;
942 }
943 return wait_shm_fd;
944
945 error_close:
946 ret = close(wait_shm_fd);
947 if (ret) {
948 PERROR("Error closing fd");
949 }
950 return -1;
951 }
952
953 static
954 char *get_map_shm(struct sock_info *sock_info)
955 {
956 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
957 int wait_shm_fd, ret;
958 char *wait_shm_mmap;
959
960 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
961 if (wait_shm_fd < 0) {
962 goto error;
963 }
964 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
965 MAP_SHARED, wait_shm_fd, 0);
966 /* close shm fd immediately after taking the mmap reference */
967 ret = close(wait_shm_fd);
968 if (ret) {
969 PERROR("Error closing fd");
970 }
971 if (wait_shm_mmap == MAP_FAILED) {
972 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
973 goto error;
974 }
975 return wait_shm_mmap;
976
977 error:
978 return NULL;
979 }
980
981 static
982 void wait_for_sessiond(struct sock_info *sock_info)
983 {
984 int ret;
985
986 ust_lock();
987 if (lttng_ust_comm_should_quit) {
988 goto quit;
989 }
990 if (wait_poll_fallback) {
991 goto error;
992 }
993 if (!sock_info->wait_shm_mmap) {
994 sock_info->wait_shm_mmap = get_map_shm(sock_info);
995 if (!sock_info->wait_shm_mmap)
996 goto error;
997 }
998 ust_unlock();
999
1000 DBG("Waiting for %s apps sessiond", sock_info->name);
1001 /* Wait for futex wakeup */
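/*
 * The wait shm holds a single 32-bit futex word: while it reads 0, no
 * session daemon is available, so block in FUTEX_WAIT until a sessiond
 * updates the word and wakes us; otherwise return right away and let
 * the caller retry the connection.
 */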
1002 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
1003 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
1004 FUTEX_WAIT, 0, NULL, NULL, 0);
1005 if (ret < 0) {
1006 if (errno == EFAULT) {
1007 wait_poll_fallback = 1;
1008 DBG(
1009 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
1010 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
1011 "Please upgrade your kernel "
1012 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
1013 "mainline). LTTng-UST will use polling mode fallback.");
1014 if (ust_debug())
1015 PERROR("futex");
1016 }
1017 }
1018 }
1019 return;
1020
1021 quit:
1022 ust_unlock();
1023 return;
1024
1025 error:
1026 ust_unlock();
1027 return;
1028 }
1029
1030 /*
1031 * This thread does not allocate any resources, except within
1032 * handle_message(), under mutex protection. This mutex protects against
1033 * fork and exit.
1034 * The only other time it allocates resources is at socket connection, which
1035 * is also protected by the mutex.
1036 */
1037 static
1038 void *ust_listener_thread(void *arg)
1039 {
1040 struct sock_info *sock_info = arg;
1041 int sock, ret, prev_connect_failed = 0, has_waited = 0;
1042 long timeout;
1043
1044 /* Restart trying to connect to the session daemon */
1045 restart:
1046 if (prev_connect_failed) {
1047 /* Wait for sessiond availability (wait shm futex, or poll fallback) */
1048 wait_for_sessiond(sock_info);
1049 if (has_waited) {
1050 has_waited = 0;
1051 /*
1052 * Sleep for 5 seconds before retrying after a
1053 * sequence of failure / wait / failure. This
1054 * deals with a killed or broken session daemon.
1055 */
1056 sleep(5);
1057 }
1058 has_waited = 1;
1059 prev_connect_failed = 0;
1060 }
1061
1062 if (sock_info->socket != -1) {
1063 ret = ustcomm_close_unix_sock(sock_info->socket);
1064 if (ret) {
1065 ERR("Error closing %s ust cmd socket",
1066 sock_info->name);
1067 }
1068 sock_info->socket = -1;
1069 }
1070 if (sock_info->notify_socket != -1) {
1071 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
1072 if (ret) {
1073 ERR("Error closing %s ust notify socket",
1074 sock_info->name);
1075 }
1076 sock_info->notify_socket = -1;
1077 }
1078
1079 /*
1080 * Register. We need to perform both the connect and send the
1081 * registration message before doing the next connect; otherwise
1082 * we may reach the unix socket connect queue maximum limit and block
1083 * on the 2nd connect while the session daemon is awaiting the
1084 * registration message of the first connect.
1085 */
1086 /* Connect cmd socket */
1087 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1088 if (ret < 0) {
1089 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1090 prev_connect_failed = 1;
1091
1092 ust_lock();
1093
1094 if (lttng_ust_comm_should_quit) {
1095 goto quit;
1096 }
1097
1098 /*
1099 * If we cannot find the sessiond daemon, don't delay
1100 * constructor execution.
1101 */
1102 ret = handle_register_done(sock_info);
1103 assert(!ret);
1104 ust_unlock();
1105 goto restart;
1106 }
1107 sock_info->socket = ret;
1108
1109 ust_lock();
1110
1111 if (lttng_ust_comm_should_quit) {
1112 goto quit;
1113 }
1114
1115 /*
1116 * Create only one root handle per listener thread for the whole
1117 * process lifetime, so we ensure we get the ID which is statically
1118 * assigned to the root handle.
1119 */
1120 if (sock_info->root_handle == -1) {
1121 ret = lttng_abi_create_root_handle();
1122 if (ret < 0) {
1123 ERR("Error creating root handle");
1124 goto quit;
1125 }
1126 sock_info->root_handle = ret;
1127 }
1128
1129 ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
1130 if (ret < 0) {
1131 ERR("Error registering to %s ust cmd socket",
1132 sock_info->name);
1133 prev_connect_failed = 1;
1134 /*
1135 * If we cannot register to the sessiond daemon, don't
1136 * delay constructor execution.
1137 */
1138 ret = handle_register_done(sock_info);
1139 assert(!ret);
1140 ust_unlock();
1141 goto restart;
1142 }
1143
1144 ust_unlock();
1145
1146 /* Connect notify socket */
1147 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1148 if (ret < 0) {
1149 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1150 prev_connect_failed = 1;
1151
1152 ust_lock();
1153
1154 if (lttng_ust_comm_should_quit) {
1155 goto quit;
1156 }
1157
1158 /*
1159 * If we cannot find the sessiond daemon, don't delay
1160 * constructor execution.
1161 */
1162 ret = handle_register_done(sock_info);
1163 assert(!ret);
1164 ust_unlock();
1165 goto restart;
1166 }
1167 sock_info->notify_socket = ret;
1168
1169 timeout = get_notify_sock_timeout();
1170 if (timeout >= 0) {
1171 /*
1172 * Give at least 10ms to sessiond to reply to
1173 * notifications.
1174 */
1175 if (timeout < 10)
1176 timeout = 10;
1177 ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
1178 timeout);
1179 if (ret < 0) {
1180 WARN("Error setting socket receive timeout");
1181 }
1182 ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
1183 timeout);
1184 if (ret < 0) {
1185 WARN("Error setting socket send timeout");
1186 }
1187 } else if (timeout < -1) {
1188 WARN("Unsupported timeout value %ld", timeout);
1189 }
1190
1191 ust_lock();
1192
1193 if (lttng_ust_comm_should_quit) {
1194 goto quit;
1195 }
1196
1197 ret = register_to_sessiond(sock_info->notify_socket,
1198 USTCTL_SOCKET_NOTIFY);
1199 if (ret < 0) {
1200 ERR("Error registering to %s ust notify socket",
1201 sock_info->name);
1202 prev_connect_failed = 1;
1203 /*
1204 * If we cannot register to the sessiond daemon, don't
1205 * delay constructor execution.
1206 */
1207 ret = handle_register_done(sock_info);
1208 assert(!ret);
1209 ust_unlock();
1210 goto restart;
1211 }
1212 sock = sock_info->socket;
1213
1214 ust_unlock();
1215
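/*
 * From this point on, commands from the session daemon are received on
 * the cmd socket; the notify socket registered above is exposed through
 * lttng_get_notify_socket() for notifications sent outside of this loop.
 */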
1216 for (;;) {
1217 ssize_t len;
1218 struct ustcomm_ust_msg lum;
1219
1220 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
1221 switch (len) {
1222 case 0: /* orderly shutdown */
1223 DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
1224 ust_lock();
1225 if (lttng_ust_comm_should_quit) {
1226 goto quit;
1227 }
1228 /*
1229 * Either the sessiond has shut down or has refused us by closing the socket.
1230 * In either case, we don't want to delay constructor execution,
1231 * and we need to wait before retrying.
1232 */
1233 prev_connect_failed = 1;
1234 /*
1235 * If we cannot register to the sessiond daemon, don't
1236 * delay constructor execution.
1237 */
1238 ret = handle_register_done(sock_info);
1239 assert(!ret);
1240 ust_unlock();
1241 goto end;
1242 case sizeof(lum):
1243 print_cmd(lum.cmd, lum.handle);
1244 ret = handle_message(sock_info, sock, &lum);
1245 if (ret) {
1246 ERR("Error handling message for %s socket", sock_info->name);
1247 }
1248 continue;
1249 default:
1250 if (len < 0) {
1251 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
1252 } else {
1253 DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
1254 }
1255 if (len == -ECONNRESET) {
1256 DBG("%s remote end closed connection", sock_info->name);
1257 goto end;
1258 }
1259 goto end;
1260 }
1261
1262 }
1263 end:
1264 ust_lock();
1265 if (lttng_ust_comm_should_quit) {
1266 goto quit;
1267 }
1268 /* Cleanup socket handles before trying to reconnect */
1269 lttng_ust_objd_table_owner_cleanup(sock_info);
1270 ust_unlock();
1271 goto restart; /* try to reconnect */
1272
1273 quit:
1274 sock_info->thread_active = 0;
1275 ust_unlock();
1276 return NULL;
1277 }
1278
1279 /*
1280 * lttng-ust constructor: spawns the global and per-user listener threads,
1281 * then waits (subject to the registration timeout) for sessiond registration.
1282 */
1283 void __attribute__((constructor)) lttng_ust_init(void)
1284 {
1285 struct timespec constructor_timeout;
1286 sigset_t sig_all_blocked, orig_parent_mask;
1287 pthread_attr_t thread_attr;
1288 int timeout_mode;
1289 int ret;
1290
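/*
 * lttng_ust_init() can also be called explicitly (e.g. from
 * ust_after_fork_child()); the atomic exchange ensures the
 * initialization body runs only once per process instance.
 */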
1291 if (uatomic_xchg(&initialized, 1) == 1)
1292 return;
1293
1294 /*
1295 * Fixup interdependency between TLS fixup mutex (which happens
1296 * to be the dynamic linker mutex) and ust_lock, taken within
1297 * the ust lock.
1298 */
1299 lttng_fixup_ringbuffer_tls();
1300 lttng_fixup_vtid_tls();
1301 lttng_fixup_nest_count_tls();
1302 lttng_fixup_procname_tls();
1303
1304 /*
1305 * We want precise control over the order in which we construct
1306 * our sub-libraries vs starting to receive commands from
1307 * sessiond (otherwise leading to errors when trying to create
1308 * sessions before the init functions are completed).
1309 */
1310 init_usterr();
1311 init_tracepoint();
1312 lttng_ring_buffer_metadata_client_init();
1313 lttng_ring_buffer_client_overwrite_init();
1314 lttng_ring_buffer_client_overwrite_rt_init();
1315 lttng_ring_buffer_client_discard_init();
1316 lttng_ring_buffer_client_discard_rt_init();
1317 lttng_context_init();
1318
1319 timeout_mode = get_constructor_timeout(&constructor_timeout);
1320
1321 ret = sem_init(&constructor_wait, 0, 0);
1322 assert(!ret);
1323
1324 ret = setup_local_apps();
1325 if (ret) {
1326 DBG("local apps setup returned %d", ret);
1327 }
1328
1329 /* A new thread created by pthread_create inherits the signal mask
1330 * from the parent. To avoid any signal being received by the
1331 * listener thread, we block all signals temporarily in the parent,
1332 * while we create the listener thread.
1333 */
1334 sigfillset(&sig_all_blocked);
1335 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1336 if (ret) {
1337 ERR("pthread_sigmask: %s", strerror(ret));
1338 }
1339
1340 ret = pthread_attr_init(&thread_attr);
1341 if (ret) {
1342 ERR("pthread_attr_init: %s", strerror(ret));
1343 }
1344 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
1345 if (ret) {
1346 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
1347 }
1348
1349 ust_lock();
1350 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
1351 ust_listener_thread, &global_apps);
1352 if (ret) {
1353 ERR("pthread_create global: %s", strerror(ret));
1354 }
1355 global_apps.thread_active = 1;
1356 ust_unlock();
1357
1358 if (local_apps.allowed) {
1359 ust_lock();
1360 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
1361 ust_listener_thread, &local_apps);
1362 if (ret) {
1363 ERR("pthread_create local: %s", strerror(ret));
1364 }
1365 local_apps.thread_active = 1;
1366 ust_unlock();
1367 } else {
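/*
 * Per-user tracing is disallowed (setuid binary or HOME not set):
 * immediately flag the local registration as done so the constructor
 * does not wait for a per-user sessiond.
 */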
1368 handle_register_done(&local_apps);
1369 }
1370 ret = pthread_attr_destroy(&thread_attr);
1371 if (ret) {
1372 ERR("pthread_attr_destroy: %s", strerror(ret));
1373 }
1374
1375 /* Restore original signal mask in parent */
1376 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1377 if (ret) {
1378 ERR("pthread_sigmask: %s", strerror(ret));
1379 }
1380
1381 switch (timeout_mode) {
1382 case 1: /* timeout wait */
1383 do {
1384 ret = sem_timedwait(&constructor_wait,
1385 &constructor_timeout);
1386 } while (ret < 0 && errno == EINTR);
1387 if (ret < 0 && errno == ETIMEDOUT) {
1388 ERR("Timed out waiting for lttng-sessiond");
1389 } else {
1390 assert(!ret);
1391 }
1392 break;
1393 case -1:/* wait forever */
1394 do {
1395 ret = sem_wait(&constructor_wait);
1396 } while (ret < 0 && errno == EINTR);
1397 assert(!ret);
1398 break;
1399 case 0: /* no timeout */
1400 break;
1401 }
1402 }
1403
1404 static
1405 void lttng_ust_cleanup(int exiting)
1406 {
1407 cleanup_sock_info(&global_apps, exiting);
1408 if (local_apps.allowed) {
1409 cleanup_sock_info(&local_apps, exiting);
1410 }
1411 /*
1412 * The teardown operations in this function all affect data structures
1413 * accessed under the UST lock by the listener threads. This
1414 * lock, along with the lttng_ust_comm_should_quit flag, ensures
1415 * that none of these threads are accessing this data at this
1416 * point.
1417 */
1418 lttng_ust_abi_exit();
1419 lttng_ust_events_exit();
1420 lttng_context_exit();
1421 lttng_ring_buffer_client_discard_rt_exit();
1422 lttng_ring_buffer_client_discard_exit();
1423 lttng_ring_buffer_client_overwrite_rt_exit();
1424 lttng_ring_buffer_client_overwrite_exit();
1425 lttng_ring_buffer_metadata_client_exit();
1426 exit_tracepoint();
1427 if (!exiting) {
1428 /* Reinitialize values for fork */
1429 sem_count = 2;
1430 lttng_ust_comm_should_quit = 0;
1431 initialized = 0;
1432 }
1433 }
1434
1435 void __attribute__((destructor)) lttng_ust_exit(void)
1436 {
1437 int ret;
1438
1439 /*
1440 * Using pthread_cancel here because:
1441 * A) we don't want to hang application teardown.
1442 * B) the thread is not allocating any resources.
1443 */
1444
1445 /*
1446 * Require the communication thread to quit. Synchronize with
1447 * mutexes to ensure it is not in a mutex critical section when
1448 * pthread_cancel is later called.
1449 */
1450 ust_lock();
1451 lttng_ust_comm_should_quit = 1;
1452
1453 /* cancel threads */
1454 if (global_apps.thread_active) {
1455 ret = pthread_cancel(global_apps.ust_listener);
1456 if (ret) {
1457 ERR("Error cancelling global ust listener thread: %s",
1458 strerror(ret));
1459 } else {
1460 global_apps.thread_active = 0;
1461 }
1462 }
1463 if (local_apps.thread_active) {
1464 ret = pthread_cancel(local_apps.ust_listener);
1465 if (ret) {
1466 ERR("Error cancelling local ust listener thread: %s",
1467 strerror(ret));
1468 } else {
1469 local_apps.thread_active = 0;
1470 }
1471 }
1472 ust_unlock();
1473
1474 /*
1475 * Do NOT join threads: use of sys_futex makes it impossible to
1476 * join the threads without using async-cancel, but async-cancel
1477 * is delivered by a signal, which could hit the target thread
1478 * anywhere in its code path, including while the ust_lock() is
1479 * held, causing a deadlock for the other thread. Let the OS
1480 * clean up the threads if they are stalled in a syscall.
1481 */
1482 lttng_ust_cleanup(1);
1483 }
1484
1485 /*
1486 * We exclude the worker threads across fork and clone (except
1487 * CLONE_VM), because these system calls only keep the forking thread
1488 * running in the child. Therefore, we don't want to call fork or clone
1489 * in the middle of a tracepoint or UST tracing state modification.
1490 * Holding this mutex protects these structures across fork and clone.
1491 */
1492 void ust_before_fork(sigset_t *save_sigset)
1493 {
1494 /*
1495 * Disable signals. This avoids having the child intervene
1496 * before it is properly set up for tracing. It is safer to
1497 * disable all signals, because then we know we are not breaking
1498 * anything by restoring the original mask.
1499 */
1500 sigset_t all_sigs;
1501 int ret;
1502
1503 if (URCU_TLS(lttng_ust_nest_count))
1504 return;
1505 /* Disable signals */
1506 sigfillset(&all_sigs);
1507 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1508 if (ret == -1) {
1509 PERROR("sigprocmask");
1510 }
1511 ust_lock();
1512 rcu_bp_before_fork();
1513 }
1514
1515 static void ust_after_fork_common(sigset_t *restore_sigset)
1516 {
1517 int ret;
1518
1519 DBG("process %d", getpid());
1520 ust_unlock();
1521 /* Restore signals */
1522 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1523 if (ret == -1) {
1524 PERROR("sigprocmask");
1525 }
1526 }
1527
1528 void ust_after_fork_parent(sigset_t *restore_sigset)
1529 {
1530 if (URCU_TLS(lttng_ust_nest_count))
1531 return;
1532 DBG("process %d", getpid());
1533 rcu_bp_after_fork_parent();
1534 /* Release mutexes and reenable signals */
1535 ust_after_fork_common(restore_sigset);
1536 }
1537
1538 /*
1539 * After fork, in the child, we need to clean up all the leftover state,
1540 * except the worker threads, which have already magically disappeared
1541 * thanks to the weird Linux fork semantics. After tidying up, we call
1542 * lttng_ust_init() again to start over as a new PID.
1543 *
1544 * This is meant for forks() that have tracing active in the child between
1545 * the fork and the following exec call (if there is any).
1546 */
1547 void ust_after_fork_child(sigset_t *restore_sigset)
1548 {
1549 if (URCU_TLS(lttng_ust_nest_count))
1550 return;
1551 DBG("process %d", getpid());
1552 /* Release urcu mutexes */
1553 rcu_bp_after_fork_child();
1554 lttng_ust_cleanup(0);
1555 lttng_context_vtid_reset();
1556 /* Release mutexes and reenable signals */
1557 ust_after_fork_common(restore_sigset);
1558 lttng_ust_init();
1559 }
1560
1561 void lttng_ust_sockinfo_session_enabled(void *owner)
1562 {
1563 struct sock_info *sock_info = owner;
1564 sock_info->statedump_pending = 1;
1565 }