Centralize locking with ust_lock, fork handling
[lttng-ust.git] / libust/lttng-ust-comm.c
/*
 * lttng-ust-comm.c
 *
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <assert.h>
#include <signal.h>
#include <limits.h>	/* PATH_MAX */
#include <stdio.h>	/* snprintf */
#include <stdlib.h>	/* getenv, strtol */
#include <string.h>	/* memset */
#include <urcu/uatomic.h>

#include <lttng-ust-comm.h>
#include <ust/usterr-signal-safe.h>
#include <ust/lttng-ust-abi.h>
#include <ust/tracepoint.h>
#include <ust/tracepoint-internal.h>
#include <ust/ust.h>

/*
 * Has the lttng-ust comm constructor been called?
 */
static int initialized;

/*
 * The ust_lock/ust_unlock pair serializes the communication threads:
 * it is held while handling a command, across fork() (to cope with
 * listener thread removal in the child), and on the exit path.
 */

/* Should the ust comm thread quit? */
static int lttng_ust_comm_should_quit;

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from the sessiond daemon
 *   (this lets the sessiond enable sessions before main()
 *   starts),
 * - the sessiond daemon being unreachable,
 * - a timeout (keeping applications resilient to session
 *   daemon problems).
 */
static sem_t constructor_wait;
/*
 * One count for the global sessiond, one for the local (per-user)
 * sessiond.
 */
static int sem_count = { 2 };

/*
 * Info about socket and associated listener thread.
 */
struct sock_info {
	const char *name;
	char sock_path[PATH_MAX];
	int socket;
	pthread_t ust_listener;	/* listener thread */
	int root_handle;
	int constructor_sem_posted;
	int allowed;
};

/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
	.socket = -1,
	.root_handle = -1,
	.allowed = 1,
};

/* TODO: allow global_apps_sock_path override */

struct sock_info local_apps = {
	.name = "local",
	.socket = -1,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */
};

extern void ltt_ring_buffer_client_overwrite_init(void);
extern void ltt_ring_buffer_client_discard_init(void);
extern void ltt_ring_buffer_metadata_client_init(void);
extern void ltt_ring_buffer_client_overwrite_exit(void);
extern void ltt_ring_buffer_client_discard_exit(void);
extern void ltt_ring_buffer_metadata_client_exit(void);

static
int setup_local_apps(void)
{
	const char *home_dir;

	/*
	 * Disallow per-user tracing for setuid binaries.
	 */
	if (getuid() != geteuid()) {
		local_apps.allowed = 0;
		return 0;
	} else {
		local_apps.allowed = 1;
	}
	home_dir = (const char *) getenv("HOME");
	if (!home_dir)
		return -ENOENT;
	snprintf(local_apps.sock_path, PATH_MAX,
		 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
	return 0;
}
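
/*
 * Note: the setuid check above matters because the per-user socket
 * path is derived from the (user-controlled) HOME environment
 * variable; honoring it in a setuid binary would let an unprivileged
 * user point a privileged process at an arbitrary socket.
 * DEFAULT_HOME_APPS_UNIX_SOCK is assumed here to be a printf-style
 * template with a single %s for the home directory (see
 * lttng-ust-comm.h for the exact definition).
 */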

static
int register_app_to_sessiond(int socket)
{
	ssize_t ret;
	int prctl_ret;
	struct {
		uint32_t major;
		uint32_t minor;
		pid_t pid;
		pid_t ppid;
		uid_t uid;
		gid_t gid;
		char name[16];	/* process name */
	} reg_msg;

	reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
	reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
	reg_msg.pid = getpid();
	reg_msg.ppid = getppid();
	reg_msg.uid = getuid();
	reg_msg.gid = getgid();
	prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0);
	if (prctl_ret) {
		ERR("Error executing prctl");
		return -errno;
	}

	ret = lttcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
	if (ret >= 0 && ret != sizeof(reg_msg))
		return -EIO;
	return ret;
}
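
/*
 * The registration message above is read verbatim by the sessiond, so
 * both sides must agree on the struct layout. This is safe only
 * because the transport is an AF_UNIX socket on the same host: native
 * endianness and ABI padding match by construction. The major/minor
 * version fields lead the message, letting the sessiond check
 * compatibility before interpreting the rest.
 */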

static
int send_reply(int sock, struct lttcomm_ust_reply *lur)
{
	ssize_t len;

	len = lttcomm_send_unix_sock(sock, lur, sizeof(*lur));
	switch (len) {
	case sizeof(*lur):
		DBG("message successfully sent");
		return 0;
	case -1:
		if (errno == ECONNRESET) {
			DBG("remote end closed connection");
			return 0;
		}
		return -1;
	default:
		ERR("incorrect message size: %zd", len);
		return -1;
	}
}

static
int handle_register_done(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->constructor_sem_posted)
		return 0;
	sock_info->constructor_sem_posted = 1;
	ret = uatomic_add_return(&sem_count, -1);
	if (ret == 0) {
		ret = sem_post(&constructor_wait);
		assert(!ret);
	}
	return 0;
}
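
/*
 * Constructor handshake: sem_count starts at 2, one count per
 * sessiond (global and per-user). Each sock_info decrements it
 * exactly once, either on a successful REGISTER_DONE or when its
 * sessiond turns out to be unreachable or disallowed. Only the
 * decrement that reaches zero posts constructor_wait, releasing the
 * application's main().
 */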

static
int handle_message(struct sock_info *sock_info,
		int sock, struct lttcomm_ust_msg *lum)
{
	int ret = 0;
	const struct objd_ops *ops;
	struct lttcomm_ust_reply lur;

	ust_lock();

	memset(&lur, 0, sizeof(lur));

	if (lttng_ust_comm_should_quit) {
		ret = -EPERM;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = objd_unref(lum->handle);
		break;
	default:
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u);
		else
			ret = -ENOSYS;
		break;
	}

end:
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = LTTCOMM_OK;
	} else {
		lur.ret_code = LTTCOMM_SESSION_FAIL;
	}
	ret = send_reply(sock, &lur);

	ust_unlock();
	return ret;
}

static
void cleanup_sock_info(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}
	if (sock_info->root_handle != -1) {
		ret = objd_unref(sock_info->root_handle);
		if (ret) {
			ERR("Error unref %s apps root handle", sock_info->name);
		}
		sock_info->root_handle = -1;
	}
}

/*
 * This thread does not allocate any resource, except within
 * handle_message, within mutex protection. This mutex protects
 * against fork and exit.
 * The only other place it allocates resources is at socket
 * connection, which is also protected by the mutex.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret;

	/* Restart trying to connect to the session daemon */
restart:
	ust_lock();

	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}

	/* Check for sessiond availability with pipe TODO */

	/* Register */
	ret = lttcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		ERR("Error connecting to %s apps socket", sock_info->name);
		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		sleep(5);
		goto restart;
	}

	sock_info->socket = sock = ret;

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret < 0) {
			ERR("Error creating root handle");
			ust_unlock();
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_app_to_sessiond(sock);
	if (ret < 0) {
		ERR("Error registering to %s apps socket", sock_info->name);
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		sleep(5);
		goto restart;
	}
	ust_unlock();

	for (;;) {
		ssize_t len;
		struct lttcomm_ust_msg lum;

		len = lttcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s ltt-sessiond has performed an orderly shutdown", sock_info->name);
			goto end;
		case sizeof(lum):
			DBG("message received");
			ret = handle_message(sock_info, sock, &lum);
			if (ret < 0) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		case -1:
			if (errno == ECONNRESET) {
				ERR("%s remote end closed connection", sock_info->name);
				goto end;
			}
			goto end;
		default:
			ERR("incorrect message size (%s socket): %zd", sock_info->name, len);
			continue;
		}
	}
end:
	goto restart;	/* try to reconnect */
quit:
	return NULL;
}

/*
 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
 */
static
int get_timeout(struct timespec *constructor_timeout)
{
	long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
	char *str_delay;
	int ret;

	str_delay = getenv("UST_REGISTER_TIMEOUT");
	if (str_delay) {
		constructor_delay_ms = strtol(str_delay, NULL, 10);
	}

	switch (constructor_delay_ms) {
	case -1:/* fall-through */
	case 0:
		return constructor_delay_ms;
	default:
		break;
	}

	/*
	 * If we are unable to find the current time, don't wait.
	 */
	ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
	if (ret) {
		return 0;
	}
	constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
	constructor_timeout->tv_nsec +=
		(constructor_delay_ms % 1000UL) * 1000000UL;
	if (constructor_timeout->tv_nsec >= 1000000000UL) {
		constructor_timeout->tv_sec++;
		constructor_timeout->tv_nsec -= 1000000000UL;
	}
	return 1;
}
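
/*
 * Example: controlling the registration wait from the environment
 * (values in milliseconds; -1 and 0 are special, as above):
 *
 *	UST_REGISTER_TIMEOUT=3000 ./myapp	# wait up to 3 seconds
 *	UST_REGISTER_TIMEOUT=-1 ./myapp		# block until sessiond answers
 *	UST_REGISTER_TIMEOUT=0 ./myapp		# never delay application start
 *
 * "./myapp" stands for any instrumented application.
 */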

/*
 * sessiond monitoring thread: monitor presence of global and per-user
 * sessiond by polling the application common named pipe.
 */
/* TODO */

void __attribute__((constructor)) lttng_ust_init(void)
{
	struct timespec constructor_timeout;
	int timeout_mode;
	int ret;

	if (uatomic_xchg(&initialized, 1) == 1)
		return;

	/*
	 * We want precise control over the order in which we construct
	 * our sub-libraries vs starting to receive commands from
	 * sessiond (otherwise leading to errors when trying to create
	 * sessions before the init functions are completed).
	 */
	init_usterr();
	init_tracepoint();
	ltt_ring_buffer_metadata_client_init();
	ltt_ring_buffer_client_overwrite_init();
	ltt_ring_buffer_client_discard_init();

	timeout_mode = get_timeout(&constructor_timeout);

	ret = sem_init(&constructor_wait, 0, 0);
	assert(!ret);

	ret = setup_local_apps();
	if (ret) {
		ERR("Error setting up local apps");
	}
	ret = pthread_create(&global_apps.ust_listener, NULL,
			ust_listener_thread, &global_apps);

	if (local_apps.allowed) {
		ret = pthread_create(&local_apps.ust_listener, NULL,
				ust_listener_thread, &local_apps);
	} else {
		handle_register_done(&local_apps);
	}

	switch (timeout_mode) {
	case 1:	/* timeout wait */
		do {
			ret = sem_timedwait(&constructor_wait,
					&constructor_timeout);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0 && errno == ETIMEDOUT) {
			ERR("Timed out waiting for ltt-sessiond");
		} else {
			assert(!ret);
		}
		break;
	case -1:/* wait forever */
		do {
			ret = sem_wait(&constructor_wait);
		} while (ret < 0 && errno == EINTR);
		assert(!ret);
		break;
	case 0:	/* don't wait */
		break;
	}
}

static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps);
	if (local_apps.allowed) {
		cleanup_sock_info(&local_apps);
	}
	lttng_ust_abi_exit();
	ltt_events_exit();
	ltt_ring_buffer_client_discard_exit();
	ltt_ring_buffer_client_overwrite_exit();
	ltt_ring_buffer_metadata_client_exit();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}

void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the threads are not allocating any resources.
	 */

	/*
	 * Ask the communication threads to quit. Synchronize with
	 * mutexes to ensure they are not within a mutex critical
	 * section when pthread_cancel is later called.
	 */
	ust_lock();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	ret = pthread_cancel(global_apps.ust_listener);
	if (ret) {
		ERR("Error cancelling global ust listener thread");
	}
	if (local_apps.allowed) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread");
		}
	}
	lttng_ust_cleanup(1);
}

/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of a tracepoint or ust tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(ust_fork_info_t *fork_info)
{
	/*
	 * Disable signals. This keeps the child from running signal
	 * handlers before it is properly set up for tracing. It is
	 * safer to block all signals, because we then know we are not
	 * breaking anything by restoring the original mask afterwards.
	 */
	sigset_t all_sigs;
	int ret;

	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
	ust_lock();
	rcu_bp_before_fork();
}

static void ust_after_fork_common(ust_fork_info_t *fork_info)
{
	int ret;

	DBG("process %d", getpid());
	ust_unlock();
	/* Restore signals */
	ret = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
}

void ust_after_fork_parent(ust_fork_info_t *fork_info)
{
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}

/*
 * After fork, in the child, we need to clean up all the leftover
 * state, except for the worker threads, which already magically
 * disappeared thanks to the weird Linux fork semantics. After tidying
 * up, we call lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for fork() calls that trace in the child between the
 * fork and the following exec call (if there is any).
 */
void ust_after_fork_child(ust_fork_info_t *fork_info)
{
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	lttng_ust_cleanup(0);
	lttng_ust_init();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}
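
/*
 * Usage sketch (not part of this file): how a fork() wrapper is
 * expected to drive the hooks above. In lttng-ust this job is done by
 * the LD_PRELOAD-ed libustfork wrapper; "real_fork" and the dlsym
 * lookup below are illustrative assumptions, not this file's API.
 *
 *	#define _GNU_SOURCE
 *	#include <dlfcn.h>
 *	#include <ust/ust.h>
 *
 *	pid_t fork(void)
 *	{
 *		static pid_t (*real_fork)(void);
 *		ust_fork_info_t fork_info;
 *		pid_t pid;
 *
 *		if (!real_fork)
 *			real_fork = (pid_t (*)(void)) dlsym(RTLD_NEXT, "fork");
 *		ust_before_fork(&fork_info);	// blocks signals, takes ust_lock
 *		pid = real_fork();
 *		if (pid == 0)
 *			ust_after_fork_child(&fork_info);	// re-init tracing for new PID
 *		else
 *			ust_after_fork_parent(&fork_info);	// unlock, restore signals
 *		return pid;
 *	}
 */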