Fix: add urcu-bp TLS fixup
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1/*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#define _LGPL_SOURCE
23#include <sys/types.h>
24#include <sys/socket.h>
25#include <sys/mman.h>
26#include <sys/stat.h>
27#include <sys/types.h>
28#include <sys/wait.h>
29#include <fcntl.h>
30#include <unistd.h>
31#include <errno.h>
32#include <pthread.h>
33#include <semaphore.h>
34#include <time.h>
35#include <assert.h>
36#include <signal.h>
37#include <urcu/uatomic.h>
38#include <urcu/futex.h>
39#include <urcu/compiler.h>
40
41#include <lttng/ust-events.h>
42#include <lttng/ust-abi.h>
43#include <lttng/ust.h>
44#include <lttng/ust-error.h>
45#include <lttng/ust-ctl.h>
46#include <urcu/tls-compat.h>
47#include <ust-comm.h>
48#include <usterr-signal-safe.h>
49#include <helper.h>
50#include "tracepoint-internal.h"
51#include "lttng-tracer-core.h"
52#include "compat.h"
53#include "../libringbuffer/tlsfixup.h"
54#include "lttng-ust-baddr.h"
55
56/*
57 * Has lttng ust comm constructor been called ?
58 */
59static int initialized;
60
61/*
62 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
63 * Held when handling a command, also held by fork() to deal with
64 * removal of threads, and by exit path.
65 *
66 * The UST lock is the centralized mutex across UST tracing control and
67 * probe registration.
68 *
69 * ust_exit_mutex must never nest in ust_mutex.
70 */
71static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
72
73/*
74 * ust_exit_mutex protects thread_active variable wrt thread exit. It
75 * cannot be done by ust_mutex because pthread_cancel(), which takes an
76 * internal libc lock, cannot nest within ust_mutex.
77 *
78 * It never nests within a ust_mutex.
79 */
80static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
81
82/* Should the ust comm thread quit ? */
83static int lttng_ust_comm_should_quit;
84
85/*
86 * Return 0 on success, -1 if should quit.
87 * The lock is taken in both cases.
88 */
89int ust_lock(void)
90{
91 pthread_mutex_lock(&ust_mutex);
92 if (lttng_ust_comm_should_quit) {
93 return -1;
94 } else {
95 return 0;
96 }
97}
98
99/*
100 * ust_lock_nocheck() can be used in constructors/destructors, because
101 * they are already nested within the dynamic loader lock, and therefore
102 * have exclusive access against execution of liblttng-ust destructor.
103 */
104void ust_lock_nocheck(void)
105{
106 pthread_mutex_lock(&ust_mutex);
107}
108
109void ust_unlock(void)
110{
111 pthread_mutex_unlock(&ust_mutex);
112}
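/*
 * Typical calling pattern, as used by the command handlers and
 * listener threads below. ust_lock() leaves the mutex held even when
 * it returns -1, so every exit path must still call ust_unlock():
 *
 *	if (ust_lock()) {
 *		ret = -LTTNG_UST_ERR_EXITING;
 *		goto end;
 *	}
 *	... critical section ...
 * end:
 *	ust_unlock();
 */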
113
114/*
115 * Wait for either of these before continuing to the main
116 * program:
117 * - the register_done message from sessiond daemon
118 * (will let the sessiond daemon enable sessions before main
119 * starts.)
120 * - sessiond daemon is not reachable.
121 * - timeout (ensuring applications are resilient to session
122 * daemon problems).
123 */
124static sem_t constructor_wait;
125/*
126 * Doing this for both the global and local sessiond.
127 */
128static int sem_count = { 2 };
129
130/*
131 * Counting nesting within lttng-ust. Used to ensure that calling fork()
132 * from liblttng-ust does not execute the pre/post fork handlers.
133 */
134static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
135
136/*
137 * Info about socket and associated listener thread.
138 */
139struct sock_info {
140 const char *name;
141 pthread_t ust_listener; /* listener thread */
142 int root_handle;
143 int constructor_sem_posted;
144 int allowed;
145 int global;
146 int thread_active;
147
148 char sock_path[PATH_MAX];
149 int socket;
150 int notify_socket;
151
152 char wait_shm_path[PATH_MAX];
153 char *wait_shm_mmap;
154 /* Keep track of lazy state dump not performed yet. */
155 int statedump_pending;
156};
157
158/* Socket from app (connect) to session daemon (listen) for communication */
159struct sock_info global_apps = {
160 .name = "global",
161 .global = 1,
162
163 .root_handle = -1,
164 .allowed = 1,
165 .thread_active = 0,
166
167 .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
168 .socket = -1,
169 .notify_socket = -1,
170
171 .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
172
173 .statedump_pending = 0,
174};
175
176/* TODO: allow global_apps_sock_path override */
177
178struct sock_info local_apps = {
179 .name = "local",
180 .global = 0,
181 .root_handle = -1,
182 .allowed = 0, /* Check setuid bit first */
183 .thread_active = 0,
184
185 .socket = -1,
186 .notify_socket = -1,
187
188 .statedump_pending = 0,
189};
190
191static int wait_poll_fallback;
192
193static const char *cmd_name_mapping[] = {
194 [ LTTNG_UST_RELEASE ] = "Release",
195 [ LTTNG_UST_SESSION ] = "Create Session",
196 [ LTTNG_UST_TRACER_VERSION ] = "Get Tracer Version",
197
198 [ LTTNG_UST_TRACEPOINT_LIST ] = "Create Tracepoint List",
199 [ LTTNG_UST_WAIT_QUIESCENT ] = "Wait for Quiescent State",
200 [ LTTNG_UST_REGISTER_DONE ] = "Registration Done",
201 [ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
202
203 /* Session FD commands */
204 [ LTTNG_UST_CHANNEL ] = "Create Channel",
205 [ LTTNG_UST_SESSION_START ] = "Start Session",
206 [ LTTNG_UST_SESSION_STOP ] = "Stop Session",
207
208 /* Channel FD commands */
209 [ LTTNG_UST_STREAM ] = "Create Stream",
210 [ LTTNG_UST_EVENT ] = "Create Event",
211
212 /* Event and Channel FD commands */
213 [ LTTNG_UST_CONTEXT ] = "Create Context",
214 [ LTTNG_UST_FLUSH_BUFFER ] = "Flush Buffer",
215
216 /* Event, Channel and Session commands */
217 [ LTTNG_UST_ENABLE ] = "Enable",
218 [ LTTNG_UST_DISABLE ] = "Disable",
219
220 /* Tracepoint list commands */
221 [ LTTNG_UST_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
222 [ LTTNG_UST_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
223
224 /* Event FD commands */
225 [ LTTNG_UST_FILTER ] = "Create Filter",
226 [ LTTNG_UST_EXCLUSION ] = "Add exclusions to event",
227};
228
229static const char *str_timeout;
230static int got_timeout_env;
231
232extern void lttng_ring_buffer_client_overwrite_init(void);
233extern void lttng_ring_buffer_client_overwrite_rt_init(void);
234extern void lttng_ring_buffer_client_discard_init(void);
235extern void lttng_ring_buffer_client_discard_rt_init(void);
236extern void lttng_ring_buffer_metadata_client_init(void);
237extern void lttng_ring_buffer_client_overwrite_exit(void);
238extern void lttng_ring_buffer_client_overwrite_rt_exit(void);
239extern void lttng_ring_buffer_client_discard_exit(void);
240extern void lttng_ring_buffer_client_discard_rt_exit(void);
241extern void lttng_ring_buffer_metadata_client_exit(void);
242
243/*
244 * Returns the HOME directory path. Caller MUST NOT free(3) the returned
245 * pointer.
246 */
247static
248const char *get_lttng_home_dir(void)
249{
250 const char *val;
251
252 val = (const char *) getenv("LTTNG_HOME");
253 if (val != NULL) {
254 return val;
255 }
256 return (const char *) getenv("HOME");
257}
258
259/*
260 * Force a read (implies a TLS fixup for dlopen) of TLS variables.
261 */
262static
263void lttng_fixup_nest_count_tls(void)
264{
265 asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
266}
267
268/*
269 * Fixup urcu bp TLS.
270 */
271static
272void lttng_fixup_urcu_bp_tls(void)
273{
274 rcu_read_lock();
275 rcu_read_unlock();
276}
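/*
 * Taking and releasing a urcu-bp read-side lock here forces liburcu-bp
 * to set up its per-thread TLS state while the constructor still runs
 * under the dynamic loader lock, avoiding the TLS-fixup vs ust_lock
 * ordering issue described in lttng_ust_init() below.
 */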
277
278int lttng_get_notify_socket(void *owner)
279{
280 struct sock_info *info = owner;
281
282 return info->notify_socket;
283}
284
285static
286void print_cmd(int cmd, int handle)
287{
288 const char *cmd_name = "Unknown";
289
290 if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
291 && cmd_name_mapping[cmd]) {
292 cmd_name = cmd_name_mapping[cmd];
293 }
294 DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
295 cmd_name, cmd,
296 lttng_ust_obj_get_name(handle), handle);
297}
298
299static
300int setup_local_apps(void)
301{
302 const char *home_dir;
303 uid_t uid;
304
305 uid = getuid();
306 /*
307 * Disallow per-user tracing for setuid binaries.
308 */
309 if (uid != geteuid()) {
310 assert(local_apps.allowed == 0);
311 return 0;
312 }
313 home_dir = get_lttng_home_dir();
314 if (!home_dir) {
315 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
316 assert(local_apps.allowed == 0);
317 return -ENOENT;
318 }
319 local_apps.allowed = 1;
320 snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
321 home_dir,
322 LTTNG_DEFAULT_HOME_RUNDIR,
323 LTTNG_UST_SOCK_FILENAME);
324 snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
325 LTTNG_UST_WAIT_FILENAME,
326 uid);
327 return 0;
328}
329
330/*
331 * Get notify_sock timeout, in ms.
332 * -1: don't wait. 0: wait forever. >0: timeout, in ms.
333 */
334static
335long get_timeout(void)
336{
337 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
338
339 if (!got_timeout_env) {
340 str_timeout = getenv("LTTNG_UST_REGISTER_TIMEOUT");
341 got_timeout_env = 1;
342 }
343 if (str_timeout)
344 constructor_delay_ms = strtol(str_timeout, NULL, 10);
345 return constructor_delay_ms;
346}
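/*
 * Examples, following the semantics documented above:
 * LTTNG_UST_REGISTER_TIMEOUT=3000 waits up to 3 seconds,
 * LTTNG_UST_REGISTER_TIMEOUT=0 waits forever, and
 * LTTNG_UST_REGISTER_TIMEOUT=-1 skips the wait. The value is parsed
 * with strtol() as a base-10 number of milliseconds.
 */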
347
348static
349long get_notify_sock_timeout(void)
350{
351 return get_timeout();
352}
353
354/*
355 * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
356 */
357static
358int get_constructor_timeout(struct timespec *constructor_timeout)
359{
360 long constructor_delay_ms;
361 int ret;
362
363 constructor_delay_ms = get_timeout();
364
365 switch (constructor_delay_ms) {
366 case -1:/* fall-through */
367 case 0:
368 return constructor_delay_ms;
369 default:
370 break;
371 }
372
373 /*
374 * If we are unable to find the current time, don't wait.
375 */
376 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
377 if (ret) {
378 return -1;
379 }
380 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
381 constructor_timeout->tv_nsec +=
382 (constructor_delay_ms % 1000UL) * 1000000UL;
383 if (constructor_timeout->tv_nsec >= 1000000000UL) {
384 constructor_timeout->tv_sec++;
385 constructor_timeout->tv_nsec -= 1000000000UL;
386 }
387 return 1;
388}
389
390static
391int register_to_sessiond(int socket, enum ustctl_socket_type type)
392{
393 return ustcomm_send_reg_msg(socket,
394 type,
395 CAA_BITS_PER_LONG,
396 lttng_alignof(uint8_t) * CHAR_BIT,
397 lttng_alignof(uint16_t) * CHAR_BIT,
398 lttng_alignof(uint32_t) * CHAR_BIT,
399 lttng_alignof(uint64_t) * CHAR_BIT,
400 lttng_alignof(unsigned long) * CHAR_BIT);
401}
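/*
 * The registration message above carries this process' word size
 * (CAA_BITS_PER_LONG) and the bit alignment of the basic integer
 * types, presumably so the session daemon can interpret data coming
 * from this application correctly.
 */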
402
403static
404int send_reply(int sock, struct ustcomm_ust_reply *lur)
405{
406 ssize_t len;
407
408 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
409 switch (len) {
410 case sizeof(*lur):
411 DBG("message successfully sent");
412 return 0;
413 default:
414 if (len == -ECONNRESET) {
415 DBG("remote end closed connection");
416 return 0;
417 }
418 if (len < 0)
419 return len;
420 DBG("incorrect message size: %zd", len);
421 return -EINVAL;
422 }
423}
424
425static
426int handle_register_done(struct sock_info *sock_info)
427{
428 int ret;
429
430 if (sock_info->constructor_sem_posted)
431 return 0;
432 sock_info->constructor_sem_posted = 1;
433 if (uatomic_read(&sem_count) <= 0) {
434 return 0;
435 }
436 ret = uatomic_add_return(&sem_count, -1);
437 if (ret == 0) {
438 ret = sem_post(&constructor_wait);
439 assert(!ret);
440 }
441 return 0;
442}
443
444/*
445 * Only execute pending statedump after the constructor semaphore has
446 * been posted by each listener thread. This means statedump will only
447 * be performed after the "registration done" command is received from
448 * each session daemon the application is connected to.
449 *
450 * This ensures we don't run into deadlock issues with the dynamic
451 * loader mutex, which is held while the constructor is called and
452 * waiting on the constructor semaphore. All operations requiring this
453 * dynamic loader lock need to be postponed using this mechanism.
454 */
455static
456void handle_pending_statedump(struct sock_info *sock_info)
457{
458 int ctor_passed = sock_info->constructor_sem_posted;
459
460 if (ctor_passed && sock_info->statedump_pending) {
461 sock_info->statedump_pending = 0;
462 lttng_handle_pending_statedump(sock_info);
463 }
464}
465
466static
467int handle_message(struct sock_info *sock_info,
468 int sock, struct ustcomm_ust_msg *lum)
469{
470 int ret = 0;
471 const struct lttng_ust_objd_ops *ops;
472 struct ustcomm_ust_reply lur;
473 union ust_args args;
474 ssize_t len;
475
476 memset(&lur, 0, sizeof(lur));
477
478 if (ust_lock()) {
479 ret = -LTTNG_UST_ERR_EXITING;
480 goto end;
481 }
482
483 ops = objd_ops(lum->handle);
484 if (!ops) {
485 ret = -ENOENT;
486 goto end;
487 }
488
489 switch (lum->cmd) {
490 case LTTNG_UST_REGISTER_DONE:
491 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
492 ret = handle_register_done(sock_info);
493 else
494 ret = -EINVAL;
495 break;
496 case LTTNG_UST_RELEASE:
497 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
498 ret = -EPERM;
499 else
500 ret = lttng_ust_objd_unref(lum->handle, 1);
501 break;
502 case LTTNG_UST_FILTER:
503 {
504 /* Receive filter data */
505 struct lttng_ust_filter_bytecode_node *bytecode;
506
507 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
508 ERR("Filter data size is too large: %u bytes",
509 lum->u.filter.data_size);
510 ret = -EINVAL;
511 goto error;
512 }
513
514 if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
515 ERR("Filter reloc offset %u is not within data",
516 lum->u.filter.reloc_offset);
517 ret = -EINVAL;
518 goto error;
519 }
520
521 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
522 if (!bytecode) {
523 ret = -ENOMEM;
524 goto error;
525 }
526 len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
527 lum->u.filter.data_size);
528 switch (len) {
529 case 0: /* orderly shutdown */
530 ret = 0;
531 free(bytecode);
532 goto error;
533 default:
534 if (len == lum->u.filter.data_size) {
535 DBG("filter data received");
536 break;
537 } else if (len < 0) {
538 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
539 if (len == -ECONNRESET) {
540 ERR("%s remote end closed connection", sock_info->name);
541 ret = len;
542 free(bytecode);
543 goto error;
544 }
545 ret = len;
546 free(bytecode);
547 goto end;
548 } else {
549 DBG("incorrect filter data message size: %zd", len);
550 ret = -EINVAL;
551 free(bytecode);
552 goto end;
553 }
554 }
555 bytecode->bc.len = lum->u.filter.data_size;
556 bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
557 bytecode->bc.seqnum = lum->u.filter.seqnum;
558 if (ops->cmd) {
559 ret = ops->cmd(lum->handle, lum->cmd,
560 (unsigned long) bytecode,
561 &args, sock_info);
562 if (ret) {
563 free(bytecode);
564 }
565 /* don't free bytecode if everything went fine. */
566 } else {
567 ret = -ENOSYS;
568 free(bytecode);
569 }
570 break;
571 }
572 case LTTNG_UST_EXCLUSION:
573 {
574 /* Receive exclusion names */
575 struct lttng_ust_excluder_node *node;
576 unsigned int count;
577
578 count = lum->u.exclusion.count;
579 if (count == 0) {
580 /* There are no names to read */
581 ret = 0;
582 goto error;
583 }
584 node = zmalloc(sizeof(*node) +
585 count * LTTNG_UST_SYM_NAME_LEN);
586 if (!node) {
587 ret = -ENOMEM;
588 goto error;
589 }
590 node->excluder.count = count;
591 len = ustcomm_recv_unix_sock(sock, node->excluder.names,
592 count * LTTNG_UST_SYM_NAME_LEN);
593 switch (len) {
594 case 0: /* orderly shutdown */
595 ret = 0;
596 free(node);
597 goto error;
598 default:
599 if (len == count * LTTNG_UST_SYM_NAME_LEN) {
600 DBG("Exclusion data received");
601 break;
602 } else if (len < 0) {
603 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
604 if (len == -ECONNRESET) {
605 ERR("%s remote end closed connection", sock_info->name);
606 ret = len;
607 free(node);
608 goto error;
609 }
610 ret = len;
611 free(node);
612 goto end;
613 } else {
614 DBG("Incorrect exclusion data message size: %zd", len);
615 ret = -EINVAL;
616 free(node);
617 goto end;
618 }
619 }
620 if (ops->cmd) {
621 ret = ops->cmd(lum->handle, lum->cmd,
622 (unsigned long) node,
623 &args, sock_info);
624 if (ret) {
625 free(node);
626 }
627 /* Don't free exclusion data if everything went fine. */
628 } else {
629 ret = -ENOSYS;
630 free(node);
631 }
632 break;
633 }
634 case LTTNG_UST_CHANNEL:
635 {
636 void *chan_data;
637 int wakeup_fd;
638
639 len = ustcomm_recv_channel_from_sessiond(sock,
640 &chan_data, lum->u.channel.len,
641 &wakeup_fd);
642 switch (len) {
643 case 0: /* orderly shutdown */
644 ret = 0;
645 goto error;
646 default:
647 if (len == lum->u.channel.len) {
648 DBG("channel data received");
649 break;
650 } else if (len < 0) {
651 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
652 if (len == -ECONNRESET) {
653 ERR("%s remote end closed connection", sock_info->name);
654 ret = len;
655 goto error;
656 }
657 ret = len;
658 goto end;
659 } else {
660 DBG("incorrect channel data message size: %zd", len);
661 ret = -EINVAL;
662 goto end;
663 }
664 }
665 args.channel.chan_data = chan_data;
666 args.channel.wakeup_fd = wakeup_fd;
667 if (ops->cmd)
668 ret = ops->cmd(lum->handle, lum->cmd,
669 (unsigned long) &lum->u,
670 &args, sock_info);
671 else
672 ret = -ENOSYS;
673 break;
674 }
675 case LTTNG_UST_STREAM:
676 {
677 /* Receive shm_fd, wakeup_fd */
678 ret = ustcomm_recv_stream_from_sessiond(sock,
679 &lum->u.stream.len,
680 &args.stream.shm_fd,
681 &args.stream.wakeup_fd);
682 if (ret) {
683 goto end;
684 }
685 if (ops->cmd)
686 ret = ops->cmd(lum->handle, lum->cmd,
687 (unsigned long) &lum->u,
688 &args, sock_info);
689 else
690 ret = -ENOSYS;
691 break;
692 }
693 default:
694 if (ops->cmd)
695 ret = ops->cmd(lum->handle, lum->cmd,
696 (unsigned long) &lum->u,
697 &args, sock_info);
698 else
699 ret = -ENOSYS;
700 break;
701 }
702
703end:
704 lur.handle = lum->handle;
705 lur.cmd = lum->cmd;
706 lur.ret_val = ret;
707 if (ret >= 0) {
708 lur.ret_code = LTTNG_UST_OK;
709 } else {
710 /*
711 * Use -LTTNG_UST_ERR as wildcard for UST internal
712 * error that are not caused by the transport, except if
713 * we already have a more precise error message to
714 * report.
715 */
716 if (ret > -LTTNG_UST_ERR) {
717 /* Translate code to UST error. */
718 switch (ret) {
719 case -EEXIST:
720 lur.ret_code = -LTTNG_UST_ERR_EXIST;
721 break;
722 case -EINVAL:
723 lur.ret_code = -LTTNG_UST_ERR_INVAL;
724 break;
725 case -ENOENT:
726 lur.ret_code = -LTTNG_UST_ERR_NOENT;
727 break;
728 case -EPERM:
729 lur.ret_code = -LTTNG_UST_ERR_PERM;
730 break;
731 case -ENOSYS:
732 lur.ret_code = -LTTNG_UST_ERR_NOSYS;
733 break;
734 default:
735 lur.ret_code = -LTTNG_UST_ERR;
736 break;
737 }
738 } else {
739 lur.ret_code = ret;
740 }
741 }
742 if (ret >= 0) {
743 switch (lum->cmd) {
744 case LTTNG_UST_TRACER_VERSION:
745 lur.u.version = lum->u.version;
746 break;
747 case LTTNG_UST_TRACEPOINT_LIST_GET:
748 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
749 break;
750 }
751 }
752 DBG("Return value: %d", lur.ret_val);
753 ret = send_reply(sock, &lur);
754 if (ret < 0) {
755 DBG("error sending reply");
756 goto error;
757 }
758
759 /*
760 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
761 * after the reply.
762 */
763 if (lur.ret_code == LTTNG_UST_OK) {
764 switch (lum->cmd) {
765 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
766 len = ustcomm_send_unix_sock(sock,
767 &args.field_list.entry,
768 sizeof(args.field_list.entry));
769 if (len < 0) {
770 ret = len;
771 goto error;
772 }
773 if (len != sizeof(args.field_list.entry)) {
774 ret = -EINVAL;
775 goto error;
776 }
777 }
778 }
779
780error:
781 ust_unlock();
782
783 /*
784 * Perform delayed statedump operations outside of the UST
785 * lock. We need to take the dynamic loader lock before we take
786 * the UST lock internally within handle_pending_statedump().
787 */
788 handle_pending_statedump(sock_info);
789
790 return ret;
791}
792
793static
794void cleanup_sock_info(struct sock_info *sock_info, int exiting)
795{
796 int ret;
797
798 if (sock_info->root_handle != -1) {
799 ret = lttng_ust_objd_unref(sock_info->root_handle, 1);
800 if (ret) {
801 ERR("Error unref root handle");
802 }
803 sock_info->root_handle = -1;
804 }
805 sock_info->constructor_sem_posted = 0;
806
807 /*
808 * wait_shm_mmap, socket and notify socket are used by listener
809 * threads outside of the ust lock, so we cannot tear them down
810 * ourselves, because we cannot join on these threads. Leave
811 * responsibility of cleaning up these resources to the OS
812 * process exit.
813 */
814 if (exiting)
815 return;
816
817 if (sock_info->socket != -1) {
818 ret = ustcomm_close_unix_sock(sock_info->socket);
819 if (ret) {
820 ERR("Error closing ust cmd socket");
821 }
822 sock_info->socket = -1;
823 }
824 if (sock_info->notify_socket != -1) {
825 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
826 if (ret) {
827 ERR("Error closing ust notify socket");
828 }
829 sock_info->notify_socket = -1;
830 }
831 if (sock_info->wait_shm_mmap) {
832 long page_size;
833
834 page_size = sysconf(_SC_PAGE_SIZE);
835 if (page_size > 0) {
836 ret = munmap(sock_info->wait_shm_mmap, page_size);
837 if (ret) {
838 ERR("Error unmapping wait shm");
839 }
840 }
841 sock_info->wait_shm_mmap = NULL;
842 }
843}
844
845/*
846 * Using fork to set umask in the child process (not multi-thread safe).
847 * We deal with the shm_open vs ftruncate race (happening when the
848 * sessiond owns the shm and does not let everybody modify it, to ensure
849 * safety against shm_unlink) by simply letting the mmap fail and
850 * retrying after a few seconds.
851 * For global shm, everybody has rw access to it until the sessiond
852 * starts.
853 */
854static
855int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
856{
857 int wait_shm_fd, ret;
858 pid_t pid;
859
860 /*
861 * Try to open read-only.
862 */
863 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
864 if (wait_shm_fd >= 0) {
865 int32_t tmp_read;
866 ssize_t len;
867 size_t bytes_read = 0;
868
869 /*
870 * Try to read the fd. If unable to do so, try opening
871 * it in write mode.
872 */
873 do {
874 len = read(wait_shm_fd,
875 &((char *) &tmp_read)[bytes_read],
876 sizeof(tmp_read) - bytes_read);
877 if (len > 0) {
878 bytes_read += len;
879 }
880 } while ((len < 0 && errno == EINTR)
881 || (len > 0 && bytes_read < sizeof(tmp_read)));
882 if (bytes_read != sizeof(tmp_read)) {
883 ret = close(wait_shm_fd);
884 if (ret) {
885 ERR("close wait_shm_fd");
886 }
887 goto open_write;
888 }
889 goto end;
890 } else if (wait_shm_fd < 0 && errno != ENOENT) {
891 /*
892 * Read-only open did not work, and it's not because the
893 * entry was not present. It's a failure that prohibits
894 * using shm.
895 */
896 ERR("Error opening shm %s", sock_info->wait_shm_path);
897 goto end;
898 }
899
900open_write:
901 /*
902 * If the open failed because the file did not exist, or because
903 * the file was not truncated yet, try creating it ourselves.
904 */
905 URCU_TLS(lttng_ust_nest_count)++;
906 pid = fork();
907 URCU_TLS(lttng_ust_nest_count)--;
908 if (pid > 0) {
909 int status;
910
911 /*
912 * Parent: wait for child to return, in which case the
913 * shared memory map will have been created.
914 */
915 pid = wait(&status);
916 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
917 wait_shm_fd = -1;
918 goto end;
919 }
920 /*
921 * Try to open read-only again after creation.
922 */
923 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
924 if (wait_shm_fd < 0) {
925 /*
926 * Read-only open did not work. It's a failure
927 * that prohibits using shm.
928 */
929 ERR("Error opening shm %s", sock_info->wait_shm_path);
930 goto end;
931 }
932 goto end;
933 } else if (pid == 0) {
934 int create_mode;
935
936 /* Child */
937 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
938 if (sock_info->global)
939 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
940 /*
941 * We're alone in a child process, so we can modify the
942 * process-wide umask.
943 */
944 umask(~create_mode);
945 /*
946 * Try creating shm (or get rw access).
947 * We don't do an exclusive open, because we allow other
948 * processes to create+ftruncate it concurrently.
949 */
950 wait_shm_fd = shm_open(sock_info->wait_shm_path,
951 O_RDWR | O_CREAT, create_mode);
952 if (wait_shm_fd >= 0) {
953 ret = ftruncate(wait_shm_fd, mmap_size);
954 if (ret) {
955 PERROR("ftruncate");
956 _exit(EXIT_FAILURE);
957 }
958 _exit(EXIT_SUCCESS);
959 }
960 /*
961 * For local shm, we need to have rw access to accept
962 * opening it: this means the local sessiond will be
963 * able to wake us up. For global shm, we open it even
964 * if rw access is not granted, because the root.root
965 * sessiond will be able to override all rights and wake
966 * us up.
967 */
968 if (!sock_info->global && errno != EACCES) {
969 ERR("Error opening shm %s", sock_info->wait_shm_path);
970 _exit(EXIT_FAILURE);
971 }
972 /*
973 * The shm exists, but we cannot open it RW. Report
974 * success.
58d4b2a2 975 */
976 _exit(EXIT_SUCCESS);
977 } else {
978 return -1;
979 }
980end:
981 if (wait_shm_fd >= 0 && !sock_info->global) {
982 struct stat statbuf;
983
984 /*
985 * Ensure that our user is the owner of the shm file for
986 * local shm. If we do not own the file, it means our
987 * sessiond will not have access to wake us up (there is
988 * probably a rogue process trying to fake our
989 * sessiond). Fallback to polling method in this case.
990 */
991 ret = fstat(wait_shm_fd, &statbuf);
992 if (ret) {
993 PERROR("fstat");
994 goto error_close;
995 }
996 if (statbuf.st_uid != getuid())
997 goto error_close;
998 }
999 return wait_shm_fd;
1000
1001error_close:
1002 ret = close(wait_shm_fd);
1003 if (ret) {
1004 PERROR("Error closing fd");
1005 }
1006 return -1;
1007}
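/*
 * Summary: get_wait_shm() returns a file descriptor on the wait shm
 * opened read-only, or -1 on failure. For per-user (non-global)
 * tracing the shm is only trusted when it is owned by our own uid,
 * otherwise the caller falls back to the polling method.
 */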
1008
1009static
1010char *get_map_shm(struct sock_info *sock_info)
1011{
1012 long page_size;
1013 int wait_shm_fd, ret;
1014 char *wait_shm_mmap;
1015
1016 page_size = sysconf(_SC_PAGE_SIZE);
1017 if (page_size < 0) {
1018 goto error;
1019 }
1020
1021 wait_shm_fd = get_wait_shm(sock_info, page_size);
1022 if (wait_shm_fd < 0) {
1023 goto error;
1024 }
1025 wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
1026 MAP_SHARED, wait_shm_fd, 0);
1027 /* close shm fd immediately after taking the mmap reference */
1028 ret = close(wait_shm_fd);
1029 if (ret) {
1030 PERROR("Error closing fd");
1031 }
1032 if (wait_shm_mmap == MAP_FAILED) {
1033 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
1034 goto error;
1035 }
1036 return wait_shm_mmap;
1037
1038error:
1039 return NULL;
1040}
1041
1042static
1043void wait_for_sessiond(struct sock_info *sock_info)
1044{
1045 int ret;
1046
1047 if (ust_lock()) {
1048 goto quit;
1049 }
1050 if (wait_poll_fallback) {
1051 goto error;
1052 }
1053 if (!sock_info->wait_shm_mmap) {
1054 sock_info->wait_shm_mmap = get_map_shm(sock_info);
1055 if (!sock_info->wait_shm_mmap)
1056 goto error;
1057 }
1058 ust_unlock();
1059
1060 DBG("Waiting for %s apps sessiond", sock_info->name);
1061 /* Wait for futex wakeup */
1062 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
1063 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
1064 FUTEX_WAIT, 0, NULL, NULL, 0);
1065 if (ret < 0) {
1066 if (errno == EFAULT) {
1067 wait_poll_fallback = 1;
1068 DBG(
1069"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
1070"do not support FUTEX_WAKE on read-only memory mappings correctly. "
1071"Please upgrade your kernel "
1072"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
1073"mainline). LTTng-UST will use polling mode fallback.");
1074 if (ust_debug())
1075 PERROR("futex");
1076 }
1077 }
1078 }
1079 return;
1080
1081quit:
1082 ust_unlock();
1083 return;
1084
1085error:
1086 ust_unlock();
1087 return;
1088}
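/*
 * The 32-bit word at the start of the wait shm is used as a futex: we
 * block in FUTEX_WAIT while it reads 0. The session daemon is assumed
 * to set it non-zero and issue FUTEX_WAKE once it is ready to accept
 * registrations; only the waiting side is visible in this file.
 */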
1089
1090/*
1091 * This thread does not allocate any resource, except within
1092 * handle_message, within mutex protection. This mutex protects against
1093 * fork and exit.
1094 * The other time it allocates resources is at socket connection, which
1095 * is also protected by the mutex.
1096 */
1097static
1098void *ust_listener_thread(void *arg)
1099{
1100 struct sock_info *sock_info = arg;
1101 int sock, ret, prev_connect_failed = 0, has_waited = 0;
1102 long timeout;
1103
1104 /* Restart trying to connect to the session daemon */
1105restart:
1106 if (prev_connect_failed) {
1107 /* Wait for sessiond availability with pipe */
1108 wait_for_sessiond(sock_info);
1109 if (has_waited) {
1110 has_waited = 0;
1111 /*
1112 * Sleep for 5 seconds before retrying after a
1113 * sequence of failure / wait / failure. This
1114 * deals with a killed or broken session daemon.
1115 */
1116 sleep(5);
1117 }
1118 has_waited = 1;
1119 prev_connect_failed = 0;
1120 }
1121
1122 if (sock_info->socket != -1) {
1123 ret = ustcomm_close_unix_sock(sock_info->socket);
1124 if (ret) {
1125 ERR("Error closing %s ust cmd socket",
1126 sock_info->name);
1127 }
1128 sock_info->socket = -1;
1129 }
1130 if (sock_info->notify_socket != -1) {
1131 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
1132 if (ret) {
1133 ERR("Error closing %s ust notify socket",
1134 sock_info->name);
1135 }
1136 sock_info->notify_socket = -1;
1137 }
1138
1139 /*
1140 * Register. We need to perform both connect and sending
1141 * registration message before doing the next connect otherwise
1142 * we may reach unix socket connect queue max limits and block
1143 * on the 2nd connect while the session daemon is awaiting the
1144 * first connect registration message.
1145 */
1146 /* Connect cmd socket */
1147 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1148 if (ret < 0) {
1149 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1150 prev_connect_failed = 1;
1151
1152 if (ust_lock()) {
1153 goto quit;
1154 }
1155
1156 /*
1157 * If we cannot find the sessiond daemon, don't delay
1158 * constructor execution.
e3426ddc 1159 */
1160 ret = handle_register_done(sock_info);
1161 assert(!ret);
1162 ust_unlock();
1163 goto restart;
1164 }
1165 sock_info->socket = ret;
1166
1167 if (ust_lock()) {
1168 goto quit;
1169 }
1170
1171 /*
1172 * Create only one root handle per listener thread for the whole
1173 * process lifetime, so we ensure we get ID which is statically
1174 * assigned to the root handle.
1175 */
1176 if (sock_info->root_handle == -1) {
1177 ret = lttng_abi_create_root_handle();
1178 if (ret < 0) {
1179 ERR("Error creating root handle");
1180 goto quit;
1181 }
1182 sock_info->root_handle = ret;
1183 }
1184
1185 ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
1186 if (ret < 0) {
1187 ERR("Error registering to %s ust cmd socket",
1188 sock_info->name);
1189 prev_connect_failed = 1;
1190 /*
1191 * If we cannot register to the sessiond daemon, don't
1192 * delay constructor execution.
1193 */
1194 ret = handle_register_done(sock_info);
1195 assert(!ret);
1196 ust_unlock();
1197 goto restart;
1198 }
1199
1200 ust_unlock();
1201
1202 /* Connect notify socket */
1203 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1204 if (ret < 0) {
1205 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1206 prev_connect_failed = 1;
1207
1208 if (ust_lock()) {
1209 goto quit;
1210 }
1211
1212 /*
1213 * If we cannot find the sessiond daemon, don't delay
1214 * constructor execution.
1215 */
1216 ret = handle_register_done(sock_info);
1217 assert(!ret);
1218 ust_unlock();
1219 goto restart;
1220 }
1221 sock_info->notify_socket = ret;
1222
1223 timeout = get_notify_sock_timeout();
1224 if (timeout >= 0) {
1225 /*
1226 * Give at least 10ms to sessiond to reply to
1227 * notifications.
1228 */
1229 if (timeout < 10)
1230 timeout = 10;
1231 ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
1232 timeout);
1233 if (ret < 0) {
1234 WARN("Error setting socket receive timeout");
1235 }
1236 ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
1237 timeout);
1238 if (ret < 0) {
1239 WARN("Error setting socket send timeout");
1240 }
1241 } else if (timeout < -1) {
1242 WARN("Unsupported timeout value %ld", timeout);
1243 }
1244
1245 if (ust_lock()) {
1246 goto quit;
1247 }
1248
1249 ret = register_to_sessiond(sock_info->notify_socket,
1250 USTCTL_SOCKET_NOTIFY);
1251 if (ret < 0) {
1252 ERR("Error registering to %s ust notify socket",
1253 sock_info->name);
1254 prev_connect_failed = 1;
1255 /*
1256 * If we cannot register to the sessiond daemon, don't
1257 * delay constructor execution.
1258 */
1259 ret = handle_register_done(sock_info);
1260 assert(!ret);
1261 ust_unlock();
1262 goto restart;
1263 }
1264 sock = sock_info->socket;
1265
1266 ust_unlock();
1267
1268 for (;;) {
1269 ssize_t len;
57773204 1270 struct ustcomm_ust_msg lum;
1271
1272 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
1273 switch (len) {
1274 case 0: /* orderly shutdown */
1275 DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
1276 if (ust_lock()) {
1277 goto quit;
1278 }
1279 /*
1280 * Either sessiond has shutdown or refused us by closing the socket.
1281 * In either case, we don't want to delay construction execution,
1282 * and we need to wait before retry.
1283 */
1284 prev_connect_failed = 1;
1285 /*
1286 * If we cannot register to the sessiond daemon, don't
1287 * delay constructor execution.
1288 */
1289 ret = handle_register_done(sock_info);
1290 assert(!ret);
1291 ust_unlock();
1292 goto end;
1293 case sizeof(lum):
1294 print_cmd(lum.cmd, lum.handle);
1295 ret = handle_message(sock_info, sock, &lum);
1296 if (ret) {
1297 ERR("Error handling message for %s socket", sock_info->name);
1298 }
1299 continue;
1300 default:
1301 if (len < 0) {
1302 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
1303 } else {
1304 DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
1305 }
1306 if (len == -ECONNRESET) {
1307 DBG("%s remote end closed connection", sock_info->name);
1308 goto end;
1309 }
1310 goto end;
1311 }
1312
1313 }
1314end:
1315 if (ust_lock()) {
1316 goto quit;
1317 }
1318 /* Cleanup socket handles before trying to reconnect */
1319 lttng_ust_objd_table_owner_cleanup(sock_info);
1320 ust_unlock();
1321 goto restart; /* try to reconnect */
1322
1323quit:
1324 ust_unlock();
1325
1326 pthread_mutex_lock(&ust_exit_mutex);
1327 sock_info->thread_active = 0;
1328 pthread_mutex_unlock(&ust_exit_mutex);
1329 return NULL;
1330}
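/*
 * In short: each listener thread connects a command socket and a
 * notify socket to its session daemon, registers on both, then
 * services one ustcomm_ust_msg at a time until the connection drops,
 * after which it cleans up its object table and reconnects.
 */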
1331
1332/*
1333 * Weak symbol to call when the ust malloc wrapper is not loaded.
1334 */
1335__attribute__((weak))
1336void lttng_ust_malloc_wrapper_init(void)
1337{
1338}
1339
1340/*
1341 * sessiond monitoring thread: monitor presence of global and per-user
1342 * sessiond by polling the application common named pipe.
1343 */
1344void __attribute__((constructor)) lttng_ust_init(void)
1345{
1346 struct timespec constructor_timeout;
1347 sigset_t sig_all_blocked, orig_parent_mask;
1348 pthread_attr_t thread_attr;
1349 int timeout_mode;
1350 int ret;
1351
1352 if (uatomic_xchg(&initialized, 1) == 1)
1353 return;
1354
1355 /*
1356 * Fixup interdependency between TLS fixup mutex (which happens
1357 * to be the dynamic linker mutex) and ust_lock, taken within
1358 * the ust lock.
1359 */
1360 lttng_fixup_urcu_bp_tls();
1361 lttng_fixup_ringbuffer_tls();
1362 lttng_fixup_vtid_tls();
1363 lttng_fixup_nest_count_tls();
1364 lttng_fixup_procname_tls();
1365
1366 /*
1367 * We want precise control over the order in which we construct
1368 * our sub-libraries vs starting to receive commands from
1369 * sessiond (otherwise leading to errors when trying to create
1370 * sessiond before the init functions are completed).
1371 */
1372 init_usterr();
1373 init_tracepoint();
1374 lttng_ust_baddr_statedump_init();
1375 lttng_ring_buffer_metadata_client_init();
1376 lttng_ring_buffer_client_overwrite_init();
1377 lttng_ring_buffer_client_overwrite_rt_init();
1378 lttng_ring_buffer_client_discard_init();
1379 lttng_ring_buffer_client_discard_rt_init();
1380 lttng_context_init();
1381 /*
1382 * Invoke ust malloc wrapper init before starting other threads.
1383 */
1384 lttng_ust_malloc_wrapper_init();
1385
1386 timeout_mode = get_constructor_timeout(&constructor_timeout);
1387
1388 ret = sem_init(&constructor_wait, 0, 0);
1389 assert(!ret);
1390
1391 ret = setup_local_apps();
1392 if (ret) {
1393 DBG("local apps setup returned %d", ret);
1394 }
1395
1396 /* A new thread created by pthread_create inherits the signal mask
1397 * from the parent. To avoid any signal being received by the
1398 * listener thread, we block all signals temporarily in the parent,
1399 * while we create the listener thread.
1400 */
1401 sigfillset(&sig_all_blocked);
1402 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1403 if (ret) {
1404 ERR("pthread_sigmask: %s", strerror(ret));
1405 }
1406
1879f67f
MG
1407 ret = pthread_attr_init(&thread_attr);
1408 if (ret) {
1409 ERR("pthread_attr_init: %s", strerror(ret));
1410 }
1411 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
1412 if (ret) {
1413 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
1414 }
1415
1416 pthread_mutex_lock(&ust_exit_mutex);
1417 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
1418 ust_listener_thread, &global_apps);
1419 if (ret) {
1420 ERR("pthread_create global: %s", strerror(ret));
1421 }
1422 global_apps.thread_active = 1;
1423 pthread_mutex_unlock(&ust_exit_mutex);
1424
1425 if (local_apps.allowed) {
1426 pthread_mutex_lock(&ust_exit_mutex);
1427 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
1428 ust_listener_thread, &local_apps);
1429 if (ret) {
1430 ERR("pthread_create local: %s", strerror(ret));
1431 }
1432 local_apps.thread_active = 1;
1433 pthread_mutex_unlock(&ust_exit_mutex);
1434 } else {
1435 handle_register_done(&local_apps);
1436 }
1437 ret = pthread_attr_destroy(&thread_attr);
1438 if (ret) {
1439 ERR("pthread_attr_destroy: %s", strerror(ret));
1440 }
1441
1442 /* Restore original signal mask in parent */
1443 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1444 if (ret) {
1445 ERR("pthread_sigmask: %s", strerror(ret));
1446 }
1447
1448 switch (timeout_mode) {
1449 case 1: /* timeout wait */
1450 do {
1451 ret = sem_timedwait(&constructor_wait,
1452 &constructor_timeout);
1453 } while (ret < 0 && errno == EINTR);
1454 if (ret < 0 && errno == ETIMEDOUT) {
1455 ERR("Timed out waiting for lttng-sessiond");
1456 } else {
1457 assert(!ret);
1458 }
1459 break;
1460 case -1:/* wait forever */
1461 do {
1462 ret = sem_wait(&constructor_wait);
1463 } while (ret < 0 && errno == EINTR);
1464 assert(!ret);
1465 break;
1466 case 0: /* no timeout */
1467 break;
1468 }
1469}
1470
1471static
1472void lttng_ust_cleanup(int exiting)
1473{
1474 cleanup_sock_info(&global_apps, exiting);
1475 if (local_apps.allowed) {
1476 cleanup_sock_info(&local_apps, exiting);
1477 }
1478 /*
1479 * The teardown in this function all affect data structures
1480 * accessed under the UST lock by the listener thread. This
1481 * lock, along with the lttng_ust_comm_should_quit flag, ensure
1482 * that none of these threads are accessing this data at this
1483 * point.
1484 */
1485 lttng_ust_abi_exit();
1486 lttng_ust_events_exit();
1487 lttng_context_exit();
1488 lttng_ring_buffer_client_discard_rt_exit();
1489 lttng_ring_buffer_client_discard_exit();
1490 lttng_ring_buffer_client_overwrite_rt_exit();
1491 lttng_ring_buffer_client_overwrite_exit();
1492 lttng_ring_buffer_metadata_client_exit();
1493 lttng_ust_baddr_statedump_destroy();
1494 exit_tracepoint();
1495 if (!exiting) {
1496 /* Reinitialize values for fork */
1497 sem_count = 2;
1498 lttng_ust_comm_should_quit = 0;
1499 initialized = 0;
1500 }
1501}
1502
1503void __attribute__((destructor)) lttng_ust_exit(void)
1504{
1505 int ret;
1506
1507 /*
1508 * Using pthread_cancel here because:
1509 * A) we don't want to hang application teardown.
1510 * B) the thread is not allocating any resource.
1511 */
1512
1513 /*
1514 * Require the communication thread to quit. Synchronize with
1515 * mutexes to ensure it is not in a mutex critical section when
1516 * pthread_cancel is later called.
1517 */
1518 ust_lock_nocheck();
1519 lttng_ust_comm_should_quit = 1;
1520 ust_unlock();
1521
1522 pthread_mutex_lock(&ust_exit_mutex);
1523 /* cancel threads */
1524 if (global_apps.thread_active) {
1525 ret = pthread_cancel(global_apps.ust_listener);
1526 if (ret) {
1527 ERR("Error cancelling global ust listener thread: %s",
1528 strerror(ret));
1529 } else {
1530 global_apps.thread_active = 0;
1531 }
1532 }
1533 if (local_apps.thread_active) {
1534 ret = pthread_cancel(local_apps.ust_listener);
1535 if (ret) {
1536 ERR("Error cancelling local ust listener thread: %s",
1537 strerror(ret));
1538 } else {
1539 local_apps.thread_active = 0;
1540 }
1541 }
1542 pthread_mutex_unlock(&ust_exit_mutex);
1543
1544 /*
1545 * Do NOT join threads: use of sys_futex makes it impossible to
1546 * join the threads without using async-cancel, but async-cancel
1547 * is delivered by a signal, which could hit the target thread
1548 * anywhere in its code path, including while the ust_lock() is
1549 * held, causing a deadlock for the other thread. Let the OS
1550 * cleanup the threads if there are stalled in a syscall.
1551 */
1552 lttng_ust_cleanup(1);
1553}
1554
1555/*
1556 * We exclude the worker threads across fork and clone (except
1557 * CLONE_VM), because these system calls only keep the forking thread
1558 * running in the child. Therefore, we don't want to call fork or clone
1559 * in the middle of a tracepoint or ust tracing state modification.
1560 * Holding this mutex protects these structures across fork and clone.
1561 */
1562void ust_before_fork(sigset_t *save_sigset)
1563{
1564 /*
1565 * Disable signals. This is to avoid that the child intervenes
1566 * before it is properly setup for tracing. It is safer to
1567 * disable all signals, because then we know we are not breaking
1568 * anything by restoring the original mask.
1569 */
1570 sigset_t all_sigs;
1571 int ret;
1572
1573 if (URCU_TLS(lttng_ust_nest_count))
1574 return;
1575 /* Disable signals */
1576 sigfillset(&all_sigs);
1577 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1578 if (ret == -1) {
1579 PERROR("sigprocmask");
1580 }
1581 ust_lock_nocheck();
1582 rcu_bp_before_fork();
1583}
1584
1585static void ust_after_fork_common(sigset_t *restore_sigset)
1586{
1587 int ret;
1588
1589 DBG("process %d", getpid());
1590 ust_unlock();
e822f505 1591 /* Restore signals */
1592 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1593 if (ret == -1) {
1594 PERROR("sigprocmask");
1595 }
1596}
1597
1598void ust_after_fork_parent(sigset_t *restore_sigset)
1599{
1600 if (URCU_TLS(lttng_ust_nest_count))
1601 return;
1602 DBG("process %d", getpid());
1603 rcu_bp_after_fork_parent();
1604 /* Release mutexes and reenable signals */
1605 ust_after_fork_common(restore_sigset);
1606}
1607
1608/*
1609 * After fork, in the child, we need to cleanup all the leftover state,
1610 * except the worker thread which already magically disappeared thanks
1611 * to the weird Linux fork semantics. After tidying up, we call
1612 * lttng_ust_init() again to start over as a new PID.
1613 *
1614 * This is meant for forks() that have tracing in the child between the
1615 * fork and following exec call (if there is any).
1616 */
1617void ust_after_fork_child(sigset_t *restore_sigset)
1618{
1619 if (URCU_TLS(lttng_ust_nest_count))
1620 return;
1621 DBG("process %d", getpid());
1622 /* Release urcu mutexes */
1623 rcu_bp_after_fork_child();
1624 lttng_ust_cleanup(0);
1625 lttng_context_vtid_reset();
1626 /* Release mutexes and reenable signals */
1627 ust_after_fork_common(restore_sigset);
1628 lttng_ust_init();
1629}
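/*
 * Note: ust_before_fork()/ust_after_fork_*() are meant to be called
 * around fork()/clone() by a wrapper (typically the liblttng-ust-fork
 * preload library); nothing in this file installs them automatically.
 */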
1630
1631void lttng_ust_sockinfo_session_enabled(void *owner)
1632{
1633 struct sock_info *sock_info = owner;
1634 sock_info->statedump_pending = 1;
1635}