Fix: lttng perf counter deadlock
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #define _GNU_SOURCE
24 #include <sys/types.h>
25 #include <sys/socket.h>
26 #include <sys/mman.h>
27 #include <sys/stat.h>
28 #include <sys/types.h>
29 #include <sys/wait.h>
30 #include <fcntl.h>
31 #include <unistd.h>
32 #include <errno.h>
33 #include <pthread.h>
34 #include <semaphore.h>
35 #include <time.h>
36 #include <assert.h>
37 #include <signal.h>
38 #include <limits.h>
39 #include <urcu/uatomic.h>
40 #include <urcu/futex.h>
41 #include <urcu/compiler.h>
42
43 #include <lttng/ust-events.h>
44 #include <lttng/ust-abi.h>
45 #include <lttng/ust.h>
46 #include <lttng/ust-error.h>
47 #include <lttng/ust-ctl.h>
48 #include <urcu/tls-compat.h>
49 #include <ust-comm.h>
50 #include <ust-fd.h>
51 #include <usterr-signal-safe.h>
52 #include <helper.h>
53 #include "tracepoint-internal.h"
54 #include "lttng-tracer-core.h"
55 #include "compat.h"
56 #include "../libringbuffer/rb-init.h"
57 #include "lttng-ust-statedump.h"
58 #include "clock.h"
59 #include "../libringbuffer/getcpu.h"
60 #include "getenv.h"
61
62 /*
63 * Has lttng ust comm constructor been called ?
64 */
65 static int initialized;
66
67 /*
68 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
69 * Held when handling a command, also held by fork() to deal with
70 * removal of threads, and by exit path.
71 *
72 * The UST lock is the centralized mutex across UST tracing control and
73 * probe registration.
74 *
75 * ust_exit_mutex must never nest in ust_mutex.
76 *
77 * ust_fork_mutex must never nest in ust_mutex.
78 *
79 * ust_mutex_nest is a per-thread nesting counter, allowing the perf
80 * counter lazy initialization called by events within the statedump,
81 * which traces while the ust_mutex is held.
82 *
83 * ust_lock nests within the dynamic loader lock (within glibc) because
84 * it is taken within the library constructor.
85 *
86 * The ust fd tracker lock nests within the ust_mutex.
87 */
88 static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
89
90 /* Allow nesting the ust_mutex within the same thread. */
91 static DEFINE_URCU_TLS(int, ust_mutex_nest);
92
93 /*
94 * ust_exit_mutex protects thread_active variable wrt thread exit. It
95 * cannot be done by ust_mutex because pthread_cancel(), which takes an
96 * internal libc lock, cannot nest within ust_mutex.
97 *
98 * It never nests within a ust_mutex.
99 */
100 static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
101
102 /*
103 * ust_fork_mutex protects base address statedump tracing against forks. It
104 * prevents the dynamic loader lock from being taken (by base address statedump
105 * tracing) while a fork is happening, thus preventing deadlock issues with
106 * the dynamic loader lock.
107 */
108 static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
109
110 /* Should the ust comm thread quit ? */
111 static int lttng_ust_comm_should_quit;
112
113 /*
114 * This variable can be tested by applications to check whether
115 * lttng-ust is loaded. They simply have to define their own
116 * "lttng_ust_loaded" weak symbol, and test it. It is set to 1 by the
117 * library constructor.
118 */
119 int lttng_ust_loaded __attribute__((weak));
120
121 /*
122 * Return 0 on success, -1 if should quit.
123 * The lock is taken in both cases.
124 * Signal-safe.
125 */
126 int ust_lock(void)
127 {
128 sigset_t sig_all_blocked, orig_mask;
129 int ret, oldstate;
130
131 ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
132 if (ret) {
133 ERR("pthread_setcancelstate: %s", strerror(ret));
134 }
135 if (oldstate != PTHREAD_CANCEL_ENABLE) {
136 ERR("pthread_setcancelstate: unexpected oldstate");
137 }
138 sigfillset(&sig_all_blocked);
139 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
140 if (ret) {
141 ERR("pthread_sigmask: %s", strerror(ret));
142 }
143 if (!URCU_TLS(ust_mutex_nest)++)
144 pthread_mutex_lock(&ust_mutex);
145 ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
146 if (ret) {
147 ERR("pthread_sigmask: %s", strerror(ret));
148 }
149 if (lttng_ust_comm_should_quit) {
150 return -1;
151 } else {
152 return 0;
153 }
154 }
155
156 /*
157 * ust_lock_nocheck() can be used in constructors/destructors, because
158 * they are already nested within the dynamic loader lock, and therefore
159 * have exclusive access against execution of liblttng-ust destructor.
160 * Signal-safe.
161 */
162 void ust_lock_nocheck(void)
163 {
164 sigset_t sig_all_blocked, orig_mask;
165 int ret, oldstate;
166
167 ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
168 if (ret) {
169 ERR("pthread_setcancelstate: %s", strerror(ret));
170 }
171 if (oldstate != PTHREAD_CANCEL_ENABLE) {
172 ERR("pthread_setcancelstate: unexpected oldstate");
173 }
174 sigfillset(&sig_all_blocked);
175 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
176 if (ret) {
177 ERR("pthread_sigmask: %s", strerror(ret));
178 }
179 if (!URCU_TLS(ust_mutex_nest)++)
180 pthread_mutex_lock(&ust_mutex);
181 ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
182 if (ret) {
183 ERR("pthread_sigmask: %s", strerror(ret));
184 }
185 }
186
187 /*
188 * Signal-safe.
189 */
190 void ust_unlock(void)
191 {
192 sigset_t sig_all_blocked, orig_mask;
193 int ret, oldstate;
194
195 sigfillset(&sig_all_blocked);
196 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
197 if (ret) {
198 ERR("pthread_sigmask: %s", strerror(ret));
199 }
200 if (!--URCU_TLS(ust_mutex_nest))
201 pthread_mutex_unlock(&ust_mutex);
202 ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
203 if (ret) {
204 ERR("pthread_sigmask: %s", strerror(ret));
205 }
206 ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
207 if (ret) {
208 ERR("pthread_setcancelstate: %s", strerror(ret));
209 }
210 if (oldstate != PTHREAD_CANCEL_DISABLE) {
211 ERR("pthread_setcancelstate: unexpected oldstate");
212 }
213 }
214
215 /*
216 * Wait for either of these before continuing to the main
217 * program:
218 * - the register_done message from sessiond daemon
219 * (will let the sessiond daemon enable sessions before main
220 * starts.)
221 * - sessiond daemon is not reachable.
222 * - timeout (ensuring applications are resilient to session
223 * daemon problems).
224 */
225 static sem_t constructor_wait;
226 /*
227 * Two counts (registration done + initial statedump) for each of the global and local sessiond.
228 */
229 enum {
230 sem_count_initial_value = 4,
231 };
232
233 static int sem_count = sem_count_initial_value;
234
235 /*
236 * Counting nesting within lttng-ust. Used to ensure that calling fork()
237 * from liblttng-ust does not execute the pre/post fork handlers.
238 */
239 static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
240
241 /*
242 * Info about socket and associated listener thread.
243 */
244 struct sock_info {
245 const char *name;
246 pthread_t ust_listener; /* listener thread */
247 int root_handle;
248 int registration_done;
249 int allowed;
250 int global;
251 int thread_active;
252
253 char sock_path[PATH_MAX];
254 int socket;
255 int notify_socket;
256
257 char wait_shm_path[PATH_MAX];
258 char *wait_shm_mmap;
259 /* Keep track of lazy state dump not performed yet. */
260 int statedump_pending;
261 int initial_statedump_done;
262 };
263
264 /* Socket from app (connect) to session daemon (listen) for communication */
265 struct sock_info global_apps = {
266 .name = "global",
267 .global = 1,
268
269 .root_handle = -1,
270 .registration_done = 0,
271 .allowed = 0,
272 .thread_active = 0,
273
274 .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
275 .socket = -1,
276 .notify_socket = -1,
277
278 .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
279
280 .statedump_pending = 0,
281 .initial_statedump_done = 0,
282 };
283
284 /* TODO: allow global_apps_sock_path override */
285
286 struct sock_info local_apps = {
287 .name = "local",
288 .global = 0,
289 .root_handle = -1,
290 .registration_done = 0,
291 .allowed = 0, /* Check setuid bit first */
292 .thread_active = 0,
293
294 .socket = -1,
295 .notify_socket = -1,
296
297 .statedump_pending = 0,
298 .initial_statedump_done = 0,
299 };
300
301 static int wait_poll_fallback;
302
303 static const char *cmd_name_mapping[] = {
304 [ LTTNG_UST_RELEASE ] = "Release",
305 [ LTTNG_UST_SESSION ] = "Create Session",
306 [ LTTNG_UST_TRACER_VERSION ] = "Get Tracer Version",
307
308 [ LTTNG_UST_TRACEPOINT_LIST ] = "Create Tracepoint List",
309 [ LTTNG_UST_WAIT_QUIESCENT ] = "Wait for Quiescent State",
310 [ LTTNG_UST_REGISTER_DONE ] = "Registration Done",
311 [ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
312
313 /* Session FD commands */
314 [ LTTNG_UST_CHANNEL ] = "Create Channel",
315 [ LTTNG_UST_SESSION_START ] = "Start Session",
316 [ LTTNG_UST_SESSION_STOP ] = "Stop Session",
317
318 /* Channel FD commands */
319 [ LTTNG_UST_STREAM ] = "Create Stream",
320 [ LTTNG_UST_EVENT ] = "Create Event",
321
322 /* Event and Channel FD commands */
323 [ LTTNG_UST_CONTEXT ] = "Create Context",
324 [ LTTNG_UST_FLUSH_BUFFER ] = "Flush Buffer",
325
326 /* Event, Channel and Session commands */
327 [ LTTNG_UST_ENABLE ] = "Enable",
328 [ LTTNG_UST_DISABLE ] = "Disable",
329
330 /* Tracepoint list commands */
331 [ LTTNG_UST_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
332 [ LTTNG_UST_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
333
334 /* Event FD commands */
335 [ LTTNG_UST_FILTER ] = "Create Filter",
336 [ LTTNG_UST_EXCLUSION ] = "Add exclusions to event",
337 };
338
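/* Cached result of the LTTNG_UST_REGISTER_TIMEOUT environment variable lookup. */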
339 static const char *str_timeout;
340 static int got_timeout_env;
341
342 extern void lttng_ring_buffer_client_overwrite_init(void);
343 extern void lttng_ring_buffer_client_overwrite_rt_init(void);
344 extern void lttng_ring_buffer_client_discard_init(void);
345 extern void lttng_ring_buffer_client_discard_rt_init(void);
346 extern void lttng_ring_buffer_metadata_client_init(void);
347 extern void lttng_ring_buffer_client_overwrite_exit(void);
348 extern void lttng_ring_buffer_client_overwrite_rt_exit(void);
349 extern void lttng_ring_buffer_client_discard_exit(void);
350 extern void lttng_ring_buffer_client_discard_rt_exit(void);
351 extern void lttng_ring_buffer_metadata_client_exit(void);
352
353 static char *get_map_shm(struct sock_info *sock_info);
354
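/*
 * Wrapper around read(2) which restarts on EINTR and accumulates partial
 * reads. Returns the number of bytes read on success, 0 on end of file,
 * or -1 with errno set on error.
 */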
355 ssize_t lttng_ust_read(int fd, void *buf, size_t len)
356 {
357 ssize_t ret;
358 size_t copied = 0, to_copy = len;
359
360 do {
361 ret = read(fd, buf + copied, to_copy);
362 if (ret > 0) {
363 copied += ret;
364 to_copy -= ret;
365 }
366 } while ((ret > 0 && to_copy > 0)
367 || (ret < 0 && errno == EINTR));
368 if (ret > 0) {
369 ret = copied;
370 }
371 return ret;
372 }
373 /*
374 * Returns the HOME directory path. Caller MUST NOT free(3) the returned
375 * pointer.
376 */
377 static
378 const char *get_lttng_home_dir(void)
379 {
380 const char *val;
381
382 val = (const char *) lttng_getenv("LTTNG_HOME");
383 if (val != NULL) {
384 return val;
385 }
386 return (const char *) lttng_getenv("HOME");
387 }
388
389 /*
390 * Force a read (imply TLS fixup for dlopen) of TLS variables.
391 */
392 static
393 void lttng_fixup_nest_count_tls(void)
394 {
395 asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
396 }
397
398 static
399 void lttng_fixup_ust_mutex_nest_tls(void)
400 {
401 asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
402 }
403
404 /*
405 * Fixup urcu bp TLS.
406 */
407 static
408 void lttng_fixup_urcu_bp_tls(void)
409 {
410 rcu_read_lock();
411 rcu_read_unlock();
412 }
413
414 void lttng_ust_fixup_tls(void)
415 {
416 lttng_fixup_urcu_bp_tls();
417 lttng_fixup_ringbuffer_tls();
418 lttng_fixup_vtid_tls();
419 lttng_fixup_nest_count_tls();
420 lttng_fixup_procname_tls();
421 lttng_fixup_ust_mutex_nest_tls();
422 lttng_ust_fixup_perf_counter_tls();
423 lttng_ust_fixup_fd_tracker_tls();
424 }
425
426 int lttng_get_notify_socket(void *owner)
427 {
428 struct sock_info *info = owner;
429
430 return info->notify_socket;
431 }
432
433 static
434 void print_cmd(int cmd, int handle)
435 {
436 const char *cmd_name = "Unknown";
437
438 if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
439 && cmd_name_mapping[cmd]) {
440 cmd_name = cmd_name_mapping[cmd];
441 }
442 DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
443 cmd_name, cmd,
444 lttng_ust_obj_get_name(handle), handle);
445 }
446
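/*
 * Map the global apps wait shm. On failure, system-wide (global) tracing
 * is disabled for this process.
 */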
447 static
448 int setup_global_apps(void)
449 {
450 int ret = 0;
451 assert(!global_apps.wait_shm_mmap);
452
453 global_apps.wait_shm_mmap = get_map_shm(&global_apps);
454 if (!global_apps.wait_shm_mmap) {
455 WARN("Unable to get map shm for global apps. Disabling LTTng-UST global tracing.");
456 global_apps.allowed = 0;
457 ret = -EIO;
458 goto error;
459 }
460
461 global_apps.allowed = 1;
462 error:
463 return ret;
464 }
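
/*
 * Set up per-user (local) tracing: skipped for setuid binaries, resolves
 * the LTTNG_HOME/HOME directory, builds the command socket and wait shm
 * paths, and maps the wait shm. On failure, per-user tracing is disabled
 * for this process.
 */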
465 static
466 int setup_local_apps(void)
467 {
468 int ret = 0;
469 const char *home_dir;
470 uid_t uid;
471
472 assert(!local_apps.wait_shm_mmap);
473
474 uid = getuid();
475 /*
476 * Disallow per-user tracing for setuid binaries.
477 */
478 if (uid != geteuid()) {
479 assert(local_apps.allowed == 0);
480 ret = 0;
481 goto end;
482 }
483 home_dir = get_lttng_home_dir();
484 if (!home_dir) {
485 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
486 assert(local_apps.allowed == 0);
487 ret = -ENOENT;
488 goto end;
489 }
490 local_apps.allowed = 1;
491 snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
492 home_dir,
493 LTTNG_DEFAULT_HOME_RUNDIR,
494 LTTNG_UST_SOCK_FILENAME);
495 snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
496 LTTNG_UST_WAIT_FILENAME,
497 uid);
498
499 local_apps.wait_shm_mmap = get_map_shm(&local_apps);
500 if (!local_apps.wait_shm_mmap) {
501 WARN("Unable to get map shm for local apps. Disabling LTTng-UST per-user tracing.");
502 local_apps.allowed = 0;
503 ret = -EIO;
504 goto end;
505 }
506 end:
507 return ret;
508 }
509
510 /*
511 * Get socket timeout, in ms.
512 * -1: wait forever. 0: don't wait. >0: timeout, in ms.
513 */
514 static
515 long get_timeout(void)
516 {
517 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
518
519 if (!got_timeout_env) {
520 str_timeout = lttng_getenv("LTTNG_UST_REGISTER_TIMEOUT");
521 got_timeout_env = 1;
522 }
523 if (str_timeout)
524 constructor_delay_ms = strtol(str_timeout, NULL, 10);
525 /* All negative values are considered as "-1". */
526 if (constructor_delay_ms < -1)
527 constructor_delay_ms = -1;
528 return constructor_delay_ms;
529 }
530
531 /* Timeout for notify socket send and recv. */
532 static
533 long get_notify_sock_timeout(void)
534 {
535 return get_timeout();
536 }
537
538 /* Timeout for connecting to cmd and notify sockets. */
539 static
540 long get_connect_sock_timeout(void)
541 {
542 return get_timeout();
543 }
544
545 /*
546 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
547 */
548 static
549 int get_constructor_timeout(struct timespec *constructor_timeout)
550 {
551 long constructor_delay_ms;
552 int ret;
553
554 constructor_delay_ms = get_timeout();
555
556 switch (constructor_delay_ms) {
557 case -1:/* fall-through */
558 case 0:
559 return constructor_delay_ms;
560 default:
561 break;
562 }
563
564 /*
565 * If we are unable to find the current time, don't wait.
566 */
567 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
568 if (ret) {
569 /* Don't wait. */
570 return 0;
571 }
572 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
573 constructor_timeout->tv_nsec +=
574 (constructor_delay_ms % 1000UL) * 1000000UL;
575 if (constructor_timeout->tv_nsec >= 1000000000UL) {
576 constructor_timeout->tv_sec++;
577 constructor_timeout->tv_nsec -= 1000000000UL;
578 }
579 /* Timeout wait (constructor_delay_ms). */
580 return 1;
581 }
582
583 static
584 void get_allow_blocking(void)
585 {
586 const char *str_allow_blocking =
587 lttng_getenv("LTTNG_UST_ALLOW_BLOCKING");
588
589 if (str_allow_blocking) {
590 DBG("%s environment variable is set",
591 "LTTNG_UST_ALLOW_BLOCKING");
592 lttng_ust_ringbuffer_set_allow_blocking();
593 }
594 }
595
596 static
597 int register_to_sessiond(int socket, enum ustctl_socket_type type)
598 {
599 return ustcomm_send_reg_msg(socket,
600 type,
601 CAA_BITS_PER_LONG,
602 lttng_alignof(uint8_t) * CHAR_BIT,
603 lttng_alignof(uint16_t) * CHAR_BIT,
604 lttng_alignof(uint32_t) * CHAR_BIT,
605 lttng_alignof(uint64_t) * CHAR_BIT,
606 lttng_alignof(unsigned long) * CHAR_BIT);
607 }
608
609 static
610 int send_reply(int sock, struct ustcomm_ust_reply *lur)
611 {
612 ssize_t len;
613
614 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
615 switch (len) {
616 case sizeof(*lur):
617 DBG("message successfully sent");
618 return 0;
619 default:
620 if (len == -ECONNRESET) {
621 DBG("remote end closed connection");
622 return 0;
623 }
624 if (len < 0)
625 return len;
626 DBG("incorrect message size: %zd", len);
627 return -EINVAL;
628 }
629 }
630
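/*
 * Decrement the constructor semaphore count. When it reaches zero, post
 * constructor_wait so lttng_ust_init() stops waiting and returns to the
 * application.
 */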
631 static
632 void decrement_sem_count(unsigned int count)
633 {
634 int ret;
635
636 assert(uatomic_read(&sem_count) >= count);
637
638 if (uatomic_read(&sem_count) <= 0) {
639 return;
640 }
641
642 ret = uatomic_add_return(&sem_count, -count);
643 if (ret == 0) {
644 ret = sem_post(&constructor_wait);
645 assert(!ret);
646 }
647 }
648
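/*
 * Handle the "registration done" command: release one semaphore count for
 * the registration itself, and another one right away if no statedump is
 * pending for this sock_info.
 */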
649 static
650 int handle_register_done(struct sock_info *sock_info)
651 {
652 if (sock_info->registration_done)
653 return 0;
654 sock_info->registration_done = 1;
655
656 decrement_sem_count(1);
657 if (!sock_info->statedump_pending) {
658 sock_info->initial_statedump_done = 1;
659 decrement_sem_count(1);
660 }
661
662 return 0;
663 }
664
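/*
 * Called when registration to a session daemon fails: mark both the
 * registration and the initial statedump as done, so the application
 * constructor is not delayed waiting on this session daemon.
 */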
665 static
666 int handle_register_failed(struct sock_info *sock_info)
667 {
668 if (sock_info->registration_done)
669 return 0;
670 sock_info->registration_done = 1;
671 sock_info->initial_statedump_done = 1;
672
673 decrement_sem_count(2);
674
675 return 0;
676 }
677
678 /*
679 * Only execute pending statedump after the constructor semaphore has
680 * been posted by the current listener thread. This means statedump will
681 * only be performed after the "registration done" command is received
682 * from this thread's session daemon.
683 *
684 * This ensures we don't run into deadlock issues with the dynamic
685 * loader mutex, which is held while the constructor is called and
686 * waiting on the constructor semaphore. All operations requiring this
687 * dynamic loader lock need to be postponed using this mechanism.
688 *
689 * In a scenario with two session daemons connected to the application,
690 * it is possible that the first listener thread which receives the
691 * registration done command issues its statedump while the dynamic
692 * loader lock is still held by the application constructor waiting on
693 * the semaphore. It will however be allowed to proceed when the
694 * second session daemon sends the registration done command to the
695 * second listener thread. This situation therefore does not produce
696 * a deadlock.
697 */
698 static
699 void handle_pending_statedump(struct sock_info *sock_info)
700 {
701 if (sock_info->registration_done && sock_info->statedump_pending) {
702 sock_info->statedump_pending = 0;
703 pthread_mutex_lock(&ust_fork_mutex);
704 lttng_handle_pending_statedump(sock_info);
705 pthread_mutex_unlock(&ust_fork_mutex);
706
707 if (!sock_info->initial_statedump_done) {
708 sock_info->initial_statedump_done = 1;
709 decrement_sem_count(1);
710 }
711 }
712 }
713
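/*
 * Receive and dispatch one command from the session daemon, under the ust
 * lock. Commands carrying extra payload (filter bytecode, event exclusions,
 * channel data, stream file descriptors, application context names) receive
 * it here before invoking the object ops, then a reply is sent back on the
 * command socket. Pending statedump work is performed outside of the ust
 * lock.
 */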
714 static
715 int handle_message(struct sock_info *sock_info,
716 int sock, struct ustcomm_ust_msg *lum)
717 {
718 int ret = 0;
719 const struct lttng_ust_objd_ops *ops;
720 struct ustcomm_ust_reply lur;
721 union ust_args args;
722 char ctxstr[LTTNG_UST_SYM_NAME_LEN]; /* App context string. */
723 ssize_t len;
724
725 memset(&lur, 0, sizeof(lur));
726
727 if (ust_lock()) {
728 ret = -LTTNG_UST_ERR_EXITING;
729 goto error;
730 }
731
732 ops = objd_ops(lum->handle);
733 if (!ops) {
734 ret = -ENOENT;
735 goto error;
736 }
737
738 switch (lum->cmd) {
739 case LTTNG_UST_REGISTER_DONE:
740 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
741 ret = handle_register_done(sock_info);
742 else
743 ret = -EINVAL;
744 break;
745 case LTTNG_UST_RELEASE:
746 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
747 ret = -EPERM;
748 else
749 ret = lttng_ust_objd_unref(lum->handle, 1);
750 break;
751 case LTTNG_UST_FILTER:
752 {
753 /* Receive filter data */
754 struct lttng_ust_filter_bytecode_node *bytecode;
755
756 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
757 ERR("Filter data size is too large: %u bytes",
758 lum->u.filter.data_size);
759 ret = -EINVAL;
760 goto error;
761 }
762
763 if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
764 ERR("Filter reloc offset %u is not within data",
765 lum->u.filter.reloc_offset);
766 ret = -EINVAL;
767 goto error;
768 }
769
770 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
771 if (!bytecode) {
772 ret = -ENOMEM;
773 goto error;
774 }
775 len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
776 lum->u.filter.data_size);
777 switch (len) {
778 case 0: /* orderly shutdown */
779 ret = 0;
780 free(bytecode);
781 goto error;
782 default:
783 if (len == lum->u.filter.data_size) {
784 DBG("filter data received");
785 break;
786 } else if (len < 0) {
787 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
788 if (len == -ECONNRESET) {
789 ERR("%s remote end closed connection", sock_info->name);
790 ret = len;
791 free(bytecode);
792 goto error;
793 }
794 ret = len;
795 free(bytecode);
796 goto error;
797 } else {
798 DBG("incorrect filter data message size: %zd", len);
799 ret = -EINVAL;
800 free(bytecode);
801 goto error;
802 }
803 }
804 bytecode->bc.len = lum->u.filter.data_size;
805 bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
806 bytecode->bc.seqnum = lum->u.filter.seqnum;
807 if (ops->cmd) {
808 ret = ops->cmd(lum->handle, lum->cmd,
809 (unsigned long) bytecode,
810 &args, sock_info);
811 if (ret) {
812 free(bytecode);
813 }
814 /* don't free bytecode if everything went fine. */
815 } else {
816 ret = -ENOSYS;
817 free(bytecode);
818 }
819 break;
820 }
821 case LTTNG_UST_EXCLUSION:
822 {
823 /* Receive exclusion names */
824 struct lttng_ust_excluder_node *node;
825 unsigned int count;
826
827 count = lum->u.exclusion.count;
828 if (count == 0) {
829 /* There are no names to read */
830 ret = 0;
831 goto error;
832 }
833 node = zmalloc(sizeof(*node) +
834 count * LTTNG_UST_SYM_NAME_LEN);
835 if (!node) {
836 ret = -ENOMEM;
837 goto error;
838 }
839 node->excluder.count = count;
840 len = ustcomm_recv_unix_sock(sock, node->excluder.names,
841 count * LTTNG_UST_SYM_NAME_LEN);
842 switch (len) {
843 case 0: /* orderly shutdown */
844 ret = 0;
845 free(node);
846 goto error;
847 default:
848 if (len == count * LTTNG_UST_SYM_NAME_LEN) {
849 DBG("Exclusion data received");
850 break;
851 } else if (len < 0) {
852 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
853 if (len == -ECONNRESET) {
854 ERR("%s remote end closed connection", sock_info->name);
855 ret = len;
856 free(node);
857 goto error;
858 }
859 ret = len;
860 free(node);
861 goto error;
862 } else {
863 DBG("Incorrect exclusion data message size: %zd", len);
864 ret = -EINVAL;
865 free(node);
866 goto error;
867 }
868 }
869 if (ops->cmd) {
870 ret = ops->cmd(lum->handle, lum->cmd,
871 (unsigned long) node,
872 &args, sock_info);
873 if (ret) {
874 free(node);
875 }
876 /* Don't free exclusion data if everything went fine. */
877 } else {
878 ret = -ENOSYS;
879 free(node);
880 }
881 break;
882 }
883 case LTTNG_UST_CHANNEL:
884 {
885 void *chan_data;
886 int wakeup_fd;
887
888 len = ustcomm_recv_channel_from_sessiond(sock,
889 &chan_data, lum->u.channel.len,
890 &wakeup_fd);
891 switch (len) {
892 case 0: /* orderly shutdown */
893 ret = 0;
894 goto error;
895 default:
896 if (len == lum->u.channel.len) {
897 DBG("channel data received");
898 break;
899 } else if (len < 0) {
900 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
901 if (len == -ECONNRESET) {
902 ERR("%s remote end closed connection", sock_info->name);
903 ret = len;
904 goto error;
905 }
906 ret = len;
907 goto error;
908 } else {
909 DBG("incorrect channel data message size: %zd", len);
910 ret = -EINVAL;
911 goto error;
912 }
913 }
914 args.channel.chan_data = chan_data;
915 args.channel.wakeup_fd = wakeup_fd;
916 if (ops->cmd)
917 ret = ops->cmd(lum->handle, lum->cmd,
918 (unsigned long) &lum->u,
919 &args, sock_info);
920 else
921 ret = -ENOSYS;
922 break;
923 }
924 case LTTNG_UST_STREAM:
925 {
926 /* Receive shm_fd, wakeup_fd */
927 ret = ustcomm_recv_stream_from_sessiond(sock,
928 NULL,
929 &args.stream.shm_fd,
930 &args.stream.wakeup_fd);
931 if (ret) {
932 goto error;
933 }
934
935 if (ops->cmd)
936 ret = ops->cmd(lum->handle, lum->cmd,
937 (unsigned long) &lum->u,
938 &args, sock_info);
939 else
940 ret = -ENOSYS;
941 break;
942 }
943 case LTTNG_UST_CONTEXT:
944 switch (lum->u.context.ctx) {
945 case LTTNG_UST_CONTEXT_APP_CONTEXT:
946 {
947 char *p;
948 size_t ctxlen, recvlen;
949
950 ctxlen = strlen("$app.") + lum->u.context.u.app_ctx.provider_name_len - 1
951 + strlen(":") + lum->u.context.u.app_ctx.ctx_name_len;
952 if (ctxlen >= LTTNG_UST_SYM_NAME_LEN) {
953 ERR("Application context string length size is too large: %zu bytes",
954 ctxlen);
955 ret = -EINVAL;
956 goto error;
957 }
958 strcpy(ctxstr, "$app.");
959 p = &ctxstr[strlen("$app.")];
960 recvlen = ctxlen - strlen("$app.");
961 len = ustcomm_recv_unix_sock(sock, p, recvlen);
962 switch (len) {
963 case 0: /* orderly shutdown */
964 ret = 0;
965 goto error;
966 default:
967 if (len == recvlen) {
968 DBG("app context data received");
969 break;
970 } else if (len < 0) {
971 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
972 if (len == -ECONNRESET) {
973 ERR("%s remote end closed connection", sock_info->name);
974 ret = len;
975 goto error;
976 }
977 ret = len;
978 goto error;
979 } else {
980 DBG("incorrect app context data message size: %zd", len);
981 ret = -EINVAL;
982 goto error;
983 }
984 }
985 /* Put : between provider and ctxname. */
986 p[lum->u.context.u.app_ctx.provider_name_len - 1] = ':';
987 args.app_context.ctxname = ctxstr;
988 break;
989 }
990 default:
991 break;
992 }
993 if (ops->cmd) {
994 ret = ops->cmd(lum->handle, lum->cmd,
995 (unsigned long) &lum->u,
996 &args, sock_info);
997 } else {
998 ret = -ENOSYS;
999 }
1000 break;
1001 default:
1002 if (ops->cmd)
1003 ret = ops->cmd(lum->handle, lum->cmd,
1004 (unsigned long) &lum->u,
1005 &args, sock_info);
1006 else
1007 ret = -ENOSYS;
1008 break;
1009 }
1010
1011 lur.handle = lum->handle;
1012 lur.cmd = lum->cmd;
1013 lur.ret_val = ret;
1014 if (ret >= 0) {
1015 lur.ret_code = LTTNG_UST_OK;
1016 } else {
1017 /*
1018 * Use -LTTNG_UST_ERR as wildcard for UST internal
1019 * error that are not caused by the transport, except if
1020 * we already have a more precise error message to
1021 * report.
1022 */
1023 if (ret > -LTTNG_UST_ERR) {
1024 /* Translate code to UST error. */
1025 switch (ret) {
1026 case -EEXIST:
1027 lur.ret_code = -LTTNG_UST_ERR_EXIST;
1028 break;
1029 case -EINVAL:
1030 lur.ret_code = -LTTNG_UST_ERR_INVAL;
1031 break;
1032 case -ENOENT:
1033 lur.ret_code = -LTTNG_UST_ERR_NOENT;
1034 break;
1035 case -EPERM:
1036 lur.ret_code = -LTTNG_UST_ERR_PERM;
1037 break;
1038 case -ENOSYS:
1039 lur.ret_code = -LTTNG_UST_ERR_NOSYS;
1040 break;
1041 default:
1042 lur.ret_code = -LTTNG_UST_ERR;
1043 break;
1044 }
1045 } else {
1046 lur.ret_code = ret;
1047 }
1048 }
1049 if (ret >= 0) {
1050 switch (lum->cmd) {
1051 case LTTNG_UST_TRACER_VERSION:
1052 lur.u.version = lum->u.version;
1053 break;
1054 case LTTNG_UST_TRACEPOINT_LIST_GET:
1055 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
1056 break;
1057 }
1058 }
1059 DBG("Return value: %d", lur.ret_val);
1060
1061 ust_unlock();
1062
1063 /*
1064 * Perform delayed statedump operations outside of the UST
1065 * lock. We need to take the dynamic loader lock before we take
1066 * the UST lock internally within handle_pending_statedump().
1067 */
1068 handle_pending_statedump(sock_info);
1069
1070 if (ust_lock()) {
1071 ret = -LTTNG_UST_ERR_EXITING;
1072 goto error;
1073 }
1074
1075 ret = send_reply(sock, &lur);
1076 if (ret < 0) {
1077 DBG("error sending reply");
1078 goto error;
1079 }
1080
1081 /*
1082 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
1083 * after the reply.
1084 */
1085 if (lur.ret_code == LTTNG_UST_OK) {
1086 switch (lum->cmd) {
1087 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
1088 len = ustcomm_send_unix_sock(sock,
1089 &args.field_list.entry,
1090 sizeof(args.field_list.entry));
1091 if (len < 0) {
1092 ret = len;
1093 goto error;
1094 }
1095 if (len != sizeof(args.field_list.entry)) {
1096 ret = -EINVAL;
1097 goto error;
1098 }
1099 }
1100 }
1101
1102 error:
1103 ust_unlock();
1104
1105 return ret;
1106 }
1107
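/*
 * Release the per-sock_info resources (root handle, sockets, wait shm
 * mapping). See the comment below about what is intentionally left to OS
 * process teardown on the exit path.
 */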
1108 static
1109 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
1110 {
1111 int ret;
1112
1113 if (sock_info->root_handle != -1) {
1114 ret = lttng_ust_objd_unref(sock_info->root_handle, 1);
1115 if (ret) {
1116 ERR("Error unref root handle");
1117 }
1118 sock_info->root_handle = -1;
1119 }
1120 sock_info->registration_done = 0;
1121 sock_info->initial_statedump_done = 0;
1122
1123 /*
1124 * wait_shm_mmap, socket and notify socket are used by listener
1125 * threads outside of the ust lock, so we cannot tear them down
1126 * ourselves, because we cannot join on these threads. Leave
1127 * responsibility of cleaning up these resources to the OS
1128 * process exit.
1129 */
1130 if (exiting)
1131 return;
1132
1133 if (sock_info->socket != -1) {
1134 ret = ustcomm_close_unix_sock(sock_info->socket);
1135 if (ret) {
1136 ERR("Error closing ust cmd socket");
1137 }
1138 sock_info->socket = -1;
1139 }
1140 if (sock_info->notify_socket != -1) {
1141 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
1142 if (ret) {
1143 ERR("Error closing ust notify socket");
1144 }
1145 sock_info->notify_socket = -1;
1146 }
1147 if (sock_info->wait_shm_mmap) {
1148 long page_size;
1149
1150 page_size = sysconf(_SC_PAGE_SIZE);
1151 if (page_size <= 0) {
1152 if (!page_size) {
1153 errno = EINVAL;
1154 }
1155 PERROR("Error in sysconf(_SC_PAGE_SIZE)");
1156 } else {
1157 ret = munmap(sock_info->wait_shm_mmap, page_size);
1158 if (ret) {
1159 ERR("Error unmapping wait shm");
1160 }
1161 }
1162 sock_info->wait_shm_mmap = NULL;
1163 }
1164 }
1165
1166 /*
1167 * Using fork to set the umask in a child process, because umask is process-wide (not multi-thread safe).
1168 * We deal with the shm_open vs ftruncate race (happening when the
1169 * sessiond owns the shm and does not let everybody modify it, to ensure
1170 * safety against shm_unlink) by simply letting the mmap fail and
1171 * retrying after a few seconds.
1172 * For global shm, everybody has rw access to it until the sessiond
1173 * starts.
1174 */
1175 static
1176 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
1177 {
1178 int wait_shm_fd, ret;
1179 pid_t pid;
1180
1181 /*
1182 * Try to open read-only.
1183 */
1184 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
1185 if (wait_shm_fd >= 0) {
1186 int32_t tmp_read;
1187 ssize_t len;
1188 size_t bytes_read = 0;
1189
1190 /*
1191 * Try to read the fd. If unable to do so, try opening
1192 * it in write mode.
1193 */
1194 do {
1195 len = read(wait_shm_fd,
1196 &((char *) &tmp_read)[bytes_read],
1197 sizeof(tmp_read) - bytes_read);
1198 if (len > 0) {
1199 bytes_read += len;
1200 }
1201 } while ((len < 0 && errno == EINTR)
1202 || (len > 0 && bytes_read < sizeof(tmp_read)));
1203 if (bytes_read != sizeof(tmp_read)) {
1204 ret = close(wait_shm_fd);
1205 if (ret) {
1206 ERR("close wait_shm_fd");
1207 }
1208 goto open_write;
1209 }
1210 goto end;
1211 } else if (wait_shm_fd < 0 && errno != ENOENT) {
1212 /*
1213 * Read-only open did not work, and it's not because the
1214 * entry was not present. It's a failure that prohibits
1215 * using shm.
1216 */
1217 ERR("Error opening shm %s", sock_info->wait_shm_path);
1218 goto end;
1219 }
1220
1221 open_write:
1222 /*
1223 * If the open failed because the file did not exist, or because
1224 * the file was not truncated yet, try creating it ourselves.
1225 */
1226 URCU_TLS(lttng_ust_nest_count)++;
1227 pid = fork();
1228 URCU_TLS(lttng_ust_nest_count)--;
1229 if (pid > 0) {
1230 int status;
1231
1232 /*
1233 * Parent: wait for child to return, in which case the
1234 * shared memory map will have been created.
1235 */
1236 pid = wait(&status);
1237 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
1238 wait_shm_fd = -1;
1239 goto end;
1240 }
1241 /*
1242 * Try to open read-only again after creation.
1243 */
1244 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
1245 if (wait_shm_fd < 0) {
1246 /*
1247 * Read-only open did not work. It's a failure
1248 * that prohibits using shm.
1249 */
1250 ERR("Error opening shm %s", sock_info->wait_shm_path);
1251 goto end;
1252 }
1253 goto end;
1254 } else if (pid == 0) {
1255 int create_mode;
1256
1257 /* Child */
1258 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
1259 if (sock_info->global)
1260 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
1261 /*
1262 * We're alone in a child process, so we can modify the
1263 * process-wide umask.
1264 */
1265 umask(~create_mode);
1266 /*
1267 * Try creating shm (or get rw access).
1268 * We don't do an exclusive open, because we allow other
1269 * processes to create+ftruncate it concurrently.
1270 */
1271 wait_shm_fd = shm_open(sock_info->wait_shm_path,
1272 O_RDWR | O_CREAT, create_mode);
1273 if (wait_shm_fd >= 0) {
1274 ret = ftruncate(wait_shm_fd, mmap_size);
1275 if (ret) {
1276 PERROR("ftruncate");
1277 _exit(EXIT_FAILURE);
1278 }
1279 _exit(EXIT_SUCCESS);
1280 }
1281 /*
1282 * For local shm, we need to have rw access to accept
1283 * opening it: this means the local sessiond will be
1284 * able to wake us up. For global shm, we open it even
1285 * if rw access is not granted, because the root.root
1286 * sessiond will be able to override all rights and wake
1287 * us up.
1288 */
1289 if (!sock_info->global && errno != EACCES) {
1290 ERR("Error opening shm %s", sock_info->wait_shm_path);
1291 _exit(EXIT_FAILURE);
1292 }
1293 /*
1294 * The shm exists, but we cannot open it RW. Report
1295 * success.
1296 */
1297 _exit(EXIT_SUCCESS);
1298 } else {
1299 return -1;
1300 }
1301 end:
1302 if (wait_shm_fd >= 0 && !sock_info->global) {
1303 struct stat statbuf;
1304
1305 /*
1306 * Ensure that our user is the owner of the shm file for
1307 * local shm. If we do not own the file, it means our
1308 * sessiond will not have access to wake us up (there is
1309 * probably a rogue process trying to fake our
1310 * sessiond). Fall back to the polling method in this case.
1311 */
1312 ret = fstat(wait_shm_fd, &statbuf);
1313 if (ret) {
1314 PERROR("fstat");
1315 goto error_close;
1316 }
1317 if (statbuf.st_uid != getuid())
1318 goto error_close;
1319 }
1320 return wait_shm_fd;
1321
1322 error_close:
1323 ret = close(wait_shm_fd);
1324 if (ret) {
1325 PERROR("Error closing fd");
1326 }
1327 return -1;
1328 }
1329
1330 static
1331 char *get_map_shm(struct sock_info *sock_info)
1332 {
1333 long page_size;
1334 int wait_shm_fd, ret;
1335 char *wait_shm_mmap;
1336
1337 page_size = sysconf(_SC_PAGE_SIZE);
1338 if (page_size <= 0) {
1339 if (!page_size) {
1340 errno = EINVAL;
1341 }
1342 PERROR("Error in sysconf(_SC_PAGE_SIZE)");
1343 goto error;
1344 }
1345
1346 lttng_ust_lock_fd_tracker();
1347 wait_shm_fd = get_wait_shm(sock_info, page_size);
1348 if (wait_shm_fd < 0) {
1349 lttng_ust_unlock_fd_tracker();
1350 goto error;
1351 }
1352
1353 ret = lttng_ust_add_fd_to_tracker(wait_shm_fd);
1354 if (ret < 0) {
1355 ret = close(wait_shm_fd);
1356 if (ret) {
1357 PERROR("Error closing fd");
1358 }
1359 lttng_ust_unlock_fd_tracker();
1360 goto error;
1361 }
1362
1363 wait_shm_fd = ret;
1364 lttng_ust_unlock_fd_tracker();
1365
1366 wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
1367 MAP_SHARED, wait_shm_fd, 0);
1368
1369 /* close shm fd immediately after taking the mmap reference */
1370 lttng_ust_lock_fd_tracker();
1371 ret = close(wait_shm_fd);
1372 if (!ret) {
1373 lttng_ust_delete_fd_from_tracker(wait_shm_fd);
1374 } else {
1375 PERROR("Error closing fd");
1376 }
1377 lttng_ust_unlock_fd_tracker();
1378
1379 if (wait_shm_mmap == MAP_FAILED) {
1380 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
1381 goto error;
1382 }
1383 return wait_shm_mmap;
1384
1385 error:
1386 return NULL;
1387 }
1388
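/*
 * Wait for a session daemon to become available by sleeping on a futex
 * located in the wait shm page, unless poll-mode fallback is in effect or
 * the listener should quit.
 */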
1389 static
1390 void wait_for_sessiond(struct sock_info *sock_info)
1391 {
1392 /* Use ust_lock to check if we should quit. */
1393 if (ust_lock()) {
1394 goto quit;
1395 }
1396 if (wait_poll_fallback) {
1397 goto error;
1398 }
1399 ust_unlock();
1400
1401 assert(sock_info->wait_shm_mmap);
1402
1403 DBG("Waiting for %s apps sessiond", sock_info->name);
1404 /* Wait for futex wakeup */
1405 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
1406 goto end_wait;
1407
1408 while (futex_async((int32_t *) sock_info->wait_shm_mmap,
1409 FUTEX_WAIT, 0, NULL, NULL, 0)) {
1410 switch (errno) {
1411 case EWOULDBLOCK:
1412 /* Value already changed. */
1413 goto end_wait;
1414 case EINTR:
1415 /* Retry if interrupted by signal. */
1416 break; /* Get out of switch. */
1417 case EFAULT:
1418 wait_poll_fallback = 1;
1419 DBG(
1420 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
1421 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
1422 "Please upgrade your kernel "
1423 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
1424 "mainline). LTTng-UST will use polling mode fallback.");
1425 if (ust_debug())
1426 PERROR("futex");
1427 goto end_wait;
1428 }
1429 }
1430 end_wait:
1431 return;
1432
1433 quit:
1434 ust_unlock();
1435 return;
1436
1437 error:
1438 ust_unlock();
1439 return;
1440 }
1441
1442 /*
1443 * This thread does not allocate any resource, except within
1444 * handle_message, within mutex protection. This mutex protects against
1445 * fork and exit.
1446 * The other moment it allocates resources is at socket connection, which
1447 * is also protected by the mutex.
1448 */
1449 static
1450 void *ust_listener_thread(void *arg)
1451 {
1452 struct sock_info *sock_info = arg;
1453 int sock, ret, prev_connect_failed = 0, has_waited = 0, fd;
1454 long timeout;
1455
1456 lttng_ust_fixup_tls();
1457 /*
1458 * If available, add '-ust' to the end of this thread's
1459 * process name
1460 */
1461 ret = lttng_ust_setustprocname();
1462 if (ret) {
1463 ERR("Unable to set UST process name");
1464 }
1465
1466 /* Restart trying to connect to the session daemon */
1467 restart:
1468 if (prev_connect_failed) {
1469 /* Wait for sessiond availability with pipe */
1470 wait_for_sessiond(sock_info);
1471 if (has_waited) {
1472 has_waited = 0;
1473 /*
1474 * Sleep for 5 seconds before retrying after a
1475 * sequence of failure / wait / failure. This
1476 * deals with a killed or broken session daemon.
1477 */
1478 sleep(5);
1479 } else {
1480 has_waited = 1;
1481 }
1482 prev_connect_failed = 0;
1483 }
1484
1485 if (ust_lock()) {
1486 goto quit;
1487 }
1488
1489 if (sock_info->socket != -1) {
1490 /* FD tracker is updated by ustcomm_close_unix_sock() */
1491 ret = ustcomm_close_unix_sock(sock_info->socket);
1492 if (ret) {
1493 ERR("Error closing %s ust cmd socket",
1494 sock_info->name);
1495 }
1496 sock_info->socket = -1;
1497 }
1498 if (sock_info->notify_socket != -1) {
1499 /* FD tracker is updated by ustcomm_close_unix_sock() */
1500 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
1501 if (ret) {
1502 ERR("Error closing %s ust notify socket",
1503 sock_info->name);
1504 }
1505 sock_info->notify_socket = -1;
1506 }
1507
1508
1509 /*
1510 * Register. We need to perform both connect and sending
1511 * registration message before doing the next connect otherwise
1512 * we may reach unix socket connect queue max limits and block
1513 * on the 2nd connect while the session daemon is awaiting the
1514 * first connect registration message.
1515 */
1516 /* Connect cmd socket */
1517 lttng_ust_lock_fd_tracker();
1518 ret = ustcomm_connect_unix_sock(sock_info->sock_path,
1519 get_connect_sock_timeout());
1520 if (ret < 0) {
1521 lttng_ust_unlock_fd_tracker();
1522 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1523 prev_connect_failed = 1;
1524
1525 /*
1526 * If we cannot find the sessiond daemon, don't delay
1527 * constructor execution.
1528 */
1529 ret = handle_register_failed(sock_info);
1530 assert(!ret);
1531 ust_unlock();
1532 goto restart;
1533 }
1534 fd = ret;
1535 ret = lttng_ust_add_fd_to_tracker(fd);
1536 if (ret < 0) {
1537 ret = close(fd);
1538 if (ret) {
1539 PERROR("close on sock_info->socket");
1540 }
1541 ret = -1;
1542 lttng_ust_unlock_fd_tracker();
1543 ust_unlock();
1544 goto quit;
1545 }
1546
1547 sock_info->socket = ret;
1548 lttng_ust_unlock_fd_tracker();
1549
1550 ust_unlock();
1551 /*
1552 * Unlock/relock ust lock because connect is blocking (with
1553 * timeout). Don't delay constructors on the ust lock for too
1554 * long.
1555 */
1556 if (ust_lock()) {
1557 goto quit;
1558 }
1559
1560 /*
1561 * Create only one root handle per listener thread for the whole
1562 * process lifetime, so we ensure we get the ID which is statically
1563 * assigned to the root handle.
1564 */
1565 if (sock_info->root_handle == -1) {
1566 ret = lttng_abi_create_root_handle();
1567 if (ret < 0) {
1568 ERR("Error creating root handle");
1569 goto quit;
1570 }
1571 sock_info->root_handle = ret;
1572 }
1573
1574 ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
1575 if (ret < 0) {
1576 ERR("Error registering to %s ust cmd socket",
1577 sock_info->name);
1578 prev_connect_failed = 1;
1579 /*
1580 * If we cannot register to the sessiond daemon, don't
1581 * delay constructor execution.
1582 */
1583 ret = handle_register_failed(sock_info);
1584 assert(!ret);
1585 ust_unlock();
1586 goto restart;
1587 }
1588
1589 ust_unlock();
1590 /*
1591 * Unlock/relock ust lock because connect is blocking (with
1592 * timeout). Don't delay constructors on the ust lock for too
1593 * long.
1594 */
1595 if (ust_lock()) {
1596 goto quit;
1597 }
1598
1599 /* Connect notify socket */
1600 lttng_ust_lock_fd_tracker();
1601 ret = ustcomm_connect_unix_sock(sock_info->sock_path,
1602 get_connect_sock_timeout());
1603 if (ret < 0) {
1604 lttng_ust_unlock_fd_tracker();
1605 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1606 prev_connect_failed = 1;
1607
1608 /*
1609 * If we cannot find the sessiond daemon, don't delay
1610 * constructor execution.
1611 */
1612 ret = handle_register_failed(sock_info);
1613 assert(!ret);
1614 ust_unlock();
1615 goto restart;
1616 }
1617
1618 fd = ret;
1619 ret = lttng_ust_add_fd_to_tracker(fd);
1620 if (ret < 0) {
1621 ret = close(fd);
1622 if (ret) {
1623 PERROR("close on sock_info->notify_socket");
1624 }
1625 ret = -1;
1626 lttng_ust_unlock_fd_tracker();
1627 ust_unlock();
1628 goto quit;
1629 }
1630
1631 sock_info->notify_socket = ret;
1632 lttng_ust_unlock_fd_tracker();
1633
1634 ust_unlock();
1635 /*
1636 * Unlock/relock ust lock because connect is blocking (with
1637 * timeout). Don't delay constructors on the ust lock for too
1638 * long.
1639 */
1640 if (ust_lock()) {
1641 goto quit;
1642 }
1643
1644 timeout = get_notify_sock_timeout();
1645 if (timeout >= 0) {
1646 /*
1647 * Give at least 10ms to sessiond to reply to
1648 * notifications.
1649 */
1650 if (timeout < 10)
1651 timeout = 10;
1652 ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
1653 timeout);
1654 if (ret < 0) {
1655 WARN("Error setting socket receive timeout");
1656 }
1657 ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
1658 timeout);
1659 if (ret < 0) {
1660 WARN("Error setting socket send timeout");
1661 }
1662 } else if (timeout < -1) {
1663 WARN("Unsupported timeout value %ld", timeout);
1664 }
1665
1666 ret = register_to_sessiond(sock_info->notify_socket,
1667 USTCTL_SOCKET_NOTIFY);
1668 if (ret < 0) {
1669 ERR("Error registering to %s ust notify socket",
1670 sock_info->name);
1671 prev_connect_failed = 1;
1672 /*
1673 * If we cannot register to the sessiond daemon, don't
1674 * delay constructor execution.
1675 */
1676 ret = handle_register_failed(sock_info);
1677 assert(!ret);
1678 ust_unlock();
1679 goto restart;
1680 }
1681 sock = sock_info->socket;
1682
1683 ust_unlock();
1684
1685 for (;;) {
1686 ssize_t len;
1687 struct ustcomm_ust_msg lum;
1688
1689 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
1690 switch (len) {
1691 case 0: /* orderly shutdown */
1692 DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
1693 if (ust_lock()) {
1694 goto quit;
1695 }
1696 /*
1697 * Either sessiond has shutdown or refused us by closing the socket.
1698 * In either case, we don't want to delay constructor execution,
1699 * and we need to wait before retry.
1700 */
1701 prev_connect_failed = 1;
1702 /*
1703 * If we cannot register to the sessiond daemon, don't
1704 * delay constructor execution.
1705 */
1706 ret = handle_register_failed(sock_info);
1707 assert(!ret);
1708 ust_unlock();
1709 goto end;
1710 case sizeof(lum):
1711 print_cmd(lum.cmd, lum.handle);
1712 ret = handle_message(sock_info, sock, &lum);
1713 if (ret) {
1714 ERR("Error handling message for %s socket",
1715 sock_info->name);
1716 /*
1717 * Close socket if protocol error is
1718 * detected.
1719 */
1720 goto end;
1721 }
1722 continue;
1723 default:
1724 if (len < 0) {
1725 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
1726 } else {
1727 DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
1728 }
1729 if (len == -ECONNRESET) {
1730 DBG("%s remote end closed connection", sock_info->name);
1731 goto end;
1732 }
1733 goto end;
1734 }
1735
1736 }
1737 end:
1738 if (ust_lock()) {
1739 goto quit;
1740 }
1741 /* Cleanup socket handles before trying to reconnect */
1742 lttng_ust_objd_table_owner_cleanup(sock_info);
1743 ust_unlock();
1744 goto restart; /* try to reconnect */
1745
1746 quit:
1747 ust_unlock();
1748
1749 pthread_mutex_lock(&ust_exit_mutex);
1750 sock_info->thread_active = 0;
1751 pthread_mutex_unlock(&ust_exit_mutex);
1752 return NULL;
1753 }
1754
1755 /*
1756 * Weak symbol to call when the ust malloc wrapper is not loaded.
1757 */
1758 __attribute__((weak))
1759 void lttng_ust_malloc_wrapper_init(void)
1760 {
1761 }
1762
1763 /*
1764 * sessiond monitoring thread: monitor presence of global and per-user
1765 * sessiond by polling the application common named pipe.
1766 */
1767 void __attribute__((constructor)) lttng_ust_init(void)
1768 {
1769 struct timespec constructor_timeout;
1770 sigset_t sig_all_blocked, orig_parent_mask;
1771 pthread_attr_t thread_attr;
1772 int timeout_mode;
1773 int ret;
1774
1775 if (uatomic_xchg(&initialized, 1) == 1)
1776 return;
1777
1778 /*
1779 * Fixup interdependency between TLS fixup mutex (which happens
1780 * to be the dynamic linker mutex) and ust_lock, taken within
1781 * the ust lock.
1782 */
1783 lttng_ust_fixup_tls();
1784
1785 lttng_ust_loaded = 1;
1786
1787 /*
1788 * We want precise control over the order in which we construct
1789 * our sub-libraries vs starting to receive commands from
1790 * sessiond (otherwise leading to errors when trying to create
1791 * sessiond before the init functions are completed).
1792 */
1793 init_usterr();
1794 lttng_ust_getenv_init(); /* Needs init_usterr() to be completed. */
1795 init_tracepoint();
1796 lttng_ust_init_fd_tracker();
1797 lttng_ust_clock_init();
1798 lttng_ust_getcpu_init();
1799 lttng_ust_statedump_init();
1800 lttng_ring_buffer_metadata_client_init();
1801 lttng_ring_buffer_client_overwrite_init();
1802 lttng_ring_buffer_client_overwrite_rt_init();
1803 lttng_ring_buffer_client_discard_init();
1804 lttng_ring_buffer_client_discard_rt_init();
1805 lttng_perf_counter_init();
1806 /*
1807 * Invoke ust malloc wrapper init before starting other threads.
1808 */
1809 lttng_ust_malloc_wrapper_init();
1810
1811 timeout_mode = get_constructor_timeout(&constructor_timeout);
1812
1813 get_allow_blocking();
1814
1815 ret = sem_init(&constructor_wait, 0, 0);
1816 if (ret) {
1817 PERROR("sem_init");
1818 }
1819
1820 ret = setup_global_apps();
1821 if (ret) {
1822 assert(global_apps.allowed == 0);
1823 DBG("global apps setup returned %d", ret);
1824 }
1825
1826 ret = setup_local_apps();
1827 if (ret) {
1828 assert(local_apps.allowed == 0);
1829 DBG("local apps setup returned %d", ret);
1830 }
1831
1832 /* A new thread created by pthread_create inherits the signal mask
1833 * from the parent. To avoid any signal being received by the
1834 * listener threads, we block all signals temporarily in the parent,
1835 * while we create the listener threads.
1836 */
1837 sigfillset(&sig_all_blocked);
1838 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1839 if (ret) {
1840 ERR("pthread_sigmask: %s", strerror(ret));
1841 }
1842
1843 ret = pthread_attr_init(&thread_attr);
1844 if (ret) {
1845 ERR("pthread_attr_init: %s", strerror(ret));
1846 }
1847 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
1848 if (ret) {
1849 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
1850 }
1851
1852 if (global_apps.allowed) {
1853 pthread_mutex_lock(&ust_exit_mutex);
1854 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
1855 ust_listener_thread, &global_apps);
1856 if (ret) {
1857 ERR("pthread_create global: %s", strerror(ret));
1858 }
1859 global_apps.thread_active = 1;
1860 pthread_mutex_unlock(&ust_exit_mutex);
1861 } else {
1862 handle_register_done(&global_apps);
1863 }
1864
1865 if (local_apps.allowed) {
1866 pthread_mutex_lock(&ust_exit_mutex);
1867 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
1868 ust_listener_thread, &local_apps);
1869 if (ret) {
1870 ERR("pthread_create local: %s", strerror(ret));
1871 }
1872 local_apps.thread_active = 1;
1873 pthread_mutex_unlock(&ust_exit_mutex);
1874 } else {
1875 handle_register_done(&local_apps);
1876 }
1877 ret = pthread_attr_destroy(&thread_attr);
1878 if (ret) {
1879 ERR("pthread_attr_destroy: %s", strerror(ret));
1880 }
1881
1882 /* Restore original signal mask in parent */
1883 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1884 if (ret) {
1885 ERR("pthread_sigmask: %s", strerror(ret));
1886 }
1887
1888 switch (timeout_mode) {
1889 case 1: /* timeout wait */
1890 do {
1891 ret = sem_timedwait(&constructor_wait,
1892 &constructor_timeout);
1893 } while (ret < 0 && errno == EINTR);
1894 if (ret < 0) {
1895 switch (errno) {
1896 case ETIMEDOUT:
1897 ERR("Timed out waiting for lttng-sessiond");
1898 break;
1899 case EINVAL:
1900 PERROR("sem_timedwait");
1901 break;
1902 default:
1903 ERR("Unexpected error \"%s\" returned by sem_timedwait",
1904 strerror(errno));
1905 }
1906 }
1907 break;
1908 case -1:/* wait forever */
1909 do {
1910 ret = sem_wait(&constructor_wait);
1911 } while (ret < 0 && errno == EINTR);
1912 if (ret < 0) {
1913 switch (errno) {
1914 case EINVAL:
1915 PERROR("sem_wait");
1916 break;
1917 default:
1918 ERR("Unexpected error \"%s\" returned by sem_wait",
1919 strerror(errno));
1920 }
1921 }
1922 break;
1923 case 0: /* no timeout */
1924 break;
1925 }
1926 }
1927
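/*
 * Tear down tracer state. When called with exiting=0 (fork path), counters
 * and flags are reset so lttng_ust_init() can run again in the child
 * process.
 */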
1928 static
1929 void lttng_ust_cleanup(int exiting)
1930 {
1931 cleanup_sock_info(&global_apps, exiting);
1932 cleanup_sock_info(&local_apps, exiting);
1933 local_apps.allowed = 0;
1934 global_apps.allowed = 0;
1935 /*
1936 * The teardown operations in this function all affect data structures
1937 * accessed under the UST lock by the listener thread. This
1938 * lock, along with the lttng_ust_comm_should_quit flag, ensure
1939 * that none of these threads are accessing this data at this
1940 * point.
1941 */
1942 lttng_ust_abi_exit();
1943 lttng_ust_events_exit();
1944 lttng_perf_counter_exit();
1945 lttng_ring_buffer_client_discard_rt_exit();
1946 lttng_ring_buffer_client_discard_exit();
1947 lttng_ring_buffer_client_overwrite_rt_exit();
1948 lttng_ring_buffer_client_overwrite_exit();
1949 lttng_ring_buffer_metadata_client_exit();
1950 lttng_ust_statedump_destroy();
1951 exit_tracepoint();
1952 if (!exiting) {
1953 /* Reinitialize values for fork */
1954 sem_count = sem_count_initial_value;
1955 lttng_ust_comm_should_quit = 0;
1956 initialized = 0;
1957 }
1958 }
1959
1960 void __attribute__((destructor)) lttng_ust_exit(void)
1961 {
1962 int ret;
1963
1964 /*
1965 * Using pthread_cancel here because:
1966 * A) we don't want to hang application teardown.
1967 * B) the thread is not allocating any resource.
1968 */
1969
1970 /*
1971 * Require the communication thread to quit. Synchronize with
1972 * mutexes to ensure it is not in a mutex critical section when
1973 * pthread_cancel is later called.
1974 */
1975 ust_lock_nocheck();
1976 lttng_ust_comm_should_quit = 1;
1977 ust_unlock();
1978
1979 pthread_mutex_lock(&ust_exit_mutex);
1980 /* cancel threads */
1981 if (global_apps.thread_active) {
1982 ret = pthread_cancel(global_apps.ust_listener);
1983 if (ret) {
1984 ERR("Error cancelling global ust listener thread: %s",
1985 strerror(ret));
1986 } else {
1987 global_apps.thread_active = 0;
1988 }
1989 }
1990 if (local_apps.thread_active) {
1991 ret = pthread_cancel(local_apps.ust_listener);
1992 if (ret) {
1993 ERR("Error cancelling local ust listener thread: %s",
1994 strerror(ret));
1995 } else {
1996 local_apps.thread_active = 0;
1997 }
1998 }
1999 pthread_mutex_unlock(&ust_exit_mutex);
2000
2001 /*
2002 * Do NOT join threads: use of sys_futex makes it impossible to
2003 * join the threads without using async-cancel, but async-cancel
2004 * is delivered by a signal, which could hit the target thread
2005 * anywhere in its code path, including while the ust_lock() is
2006 * held, causing a deadlock for the other thread. Let the OS
2007 * clean up the threads if they are stalled in a syscall.
2008 */
2009 lttng_ust_cleanup(1);
2010 }
2011
2012 /*
2013 * We exclude the worker threads across fork and clone (except
2014 * CLONE_VM), because these system calls only keep the forking thread
2015 * running in the child. Therefore, we don't want to call fork or clone
2016 * in the middle of a tracepoint or ust tracing state modification.
2017 * Holding this mutex protects these structures across fork and clone.
2018 */
2019 void ust_before_fork(sigset_t *save_sigset)
2020 {
2021 /*
2022 * Disable signals. This is to avoid that the child intervenes
2023 * before it is properly setup for tracing. It is safer to
2024 * disable all signals, because then we know we are not breaking
2025 * anything by restoring the original mask.
2026 */
2027 sigset_t all_sigs;
2028 int ret;
2029
2030 /* Fixup lttng-ust TLS. */
2031 lttng_ust_fixup_tls();
2032
2033 if (URCU_TLS(lttng_ust_nest_count))
2034 return;
2035 /* Disable signals */
2036 sigfillset(&all_sigs);
2037 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
2038 if (ret == -1) {
2039 PERROR("sigprocmask");
2040 }
2041
2042 pthread_mutex_lock(&ust_fork_mutex);
2043
2044 ust_lock_nocheck();
2045 rcu_bp_before_fork();
2046 lttng_ust_lock_fd_tracker();
2047 lttng_perf_lock();
2048 }
2049
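/*
 * Common post-fork path for parent and child: release the locks taken in
 * ust_before_fork() (perf, fd tracker, ust lock, ust_fork_mutex) and
 * restore the saved signal mask.
 */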
2050 static void ust_after_fork_common(sigset_t *restore_sigset)
2051 {
2052 int ret;
2053
2054 DBG("process %d", getpid());
2055 lttng_perf_unlock();
2056 lttng_ust_unlock_fd_tracker();
2057 ust_unlock();
2058
2059 pthread_mutex_unlock(&ust_fork_mutex);
2060
2061 /* Restore signals */
2062 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
2063 if (ret == -1) {
2064 PERROR("sigprocmask");
2065 }
2066 }
2067
2068 void ust_after_fork_parent(sigset_t *restore_sigset)
2069 {
2070 if (URCU_TLS(lttng_ust_nest_count))
2071 return;
2072 DBG("process %d", getpid());
2073 rcu_bp_after_fork_parent();
2074 /* Release mutexes and reenable signals */
2075 ust_after_fork_common(restore_sigset);
2076 }
2077
2078 /*
2079 * After fork, in the child, we need to cleanup all the leftover state,
2080 * except the worker thread which already magically disappeared thanks
2081 * to the weird Linux fork semantics. After tidying up, we call
2082 * lttng_ust_init() again to start over as a new PID.
2083 *
2084 * This is meant for fork() calls that have tracing in the child between the
2085 * fork and following exec call (if there is any).
2086 */
2087 void ust_after_fork_child(sigset_t *restore_sigset)
2088 {
2089 if (URCU_TLS(lttng_ust_nest_count))
2090 return;
2091 lttng_context_vpid_reset();
2092 lttng_context_vtid_reset();
2093 lttng_context_procname_reset();
2094 DBG("process %d", getpid());
2095 /* Release urcu mutexes */
2096 rcu_bp_after_fork_child();
2097 lttng_ust_cleanup(0);
2098 /* Release mutexes and reenable signals */
2099 ust_after_fork_common(restore_sigset);
2100 lttng_ust_init();
2101 }
2102
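/*
 * Flag a statedump as pending for the sock_info owning this session, so the
 * listener thread performs it outside of the ust lock.
 */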
2103 void lttng_ust_sockinfo_session_enabled(void *owner)
2104 {
2105 struct sock_info *sock_info = owner;
2106 sock_info->statedump_pending = 1;
2107 }