Clean-up: use a define for support thread count
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <paths.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <urcu/uatomic.h>
#include <unistd.h>
#include <ctype.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/compat/getenv.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/session-config.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "channel.h"
#include "cmd.h"
#include "consumer.h"
#include "context.h"
#include "event.h"
#include "kernel.h"
#include "kernel-consumer.h"
#include "modprobe.h"
#include "shm.h"
#include "ust-ctl.h"
#include "ust-consumer.h"
#include "utils.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "agent-thread.h"
#include "save.h"
#include "load-session-thread.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "rotation-thread.h"
#include "lttng-syscall.h"
#include "agent.h"
#include "ht-cleanup.h"
#include "sessiond-config.h"
#include "sessiond-timer.h"

static const char *help_msg =
#ifdef LTTNG_EMBED_HELP
#include <lttng-sessiond.8.h>
#else
NULL
#endif
;

const char *progname;
static pid_t ppid;		/* Parent PID for --sig-parent option */
static pid_t child_ppid;	/* Internal parent PID used with daemonize. */
static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;

static struct lttng_kernel_tracer_version kernel_tracer_version;
static struct lttng_kernel_tracer_abi_version kernel_tracer_abi_version;

/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.channel_rotate_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.channel_rotate_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.channel_rotate_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", required_argument, 0, 'c' },
	{ "apps-sock", required_argument, 0, 'a' },
	{ "kconsumerd-cmd-sock", required_argument, 0, '\0' },
	{ "kconsumerd-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-err-sock", required_argument, 0, '\0' },
	{ "consumerd32-path", required_argument, 0, '\0' },
	{ "consumerd32-libdir", required_argument, 0, '\0' },
	{ "consumerd64-path", required_argument, 0, '\0' },
	{ "consumerd64-libdir", required_argument, 0, '\0' },
	{ "daemonize", no_argument, 0, 'd' },
	{ "background", no_argument, 0, 'b' },
	{ "sig-parent", no_argument, 0, 'S' },
	{ "help", no_argument, 0, 'h' },
	{ "group", required_argument, 0, 'g' },
	{ "version", no_argument, 0, 'V' },
	{ "quiet", no_argument, 0, 'q' },
	{ "verbose", no_argument, 0, 'v' },
	{ "verbose-consumer", no_argument, 0, '\0' },
	{ "no-kernel", no_argument, 0, '\0' },
	{ "pidfile", required_argument, 0, 'p' },
	{ "agent-tcp-port", required_argument, 0, '\0' },
	{ "config", required_argument, 0, 'f' },
	{ "load", required_argument, 0, 'l' },
	{ "kmod-probes", required_argument, 0, '\0' },
	{ "extra-kmod-probes", required_argument, 0, '\0' },
	{ NULL, 0, 0, 0 }
};

struct sessiond_config config;

/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };

/* Shared between threads */
static int dispatch_thread_exit;

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t agent_reg_thread;
static pthread_t load_session_thread;
static pthread_t notification_thread;
static pthread_t rotation_thread;
static pthread_t timer_thread;

/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. The thread_manage_apps
 * thread receives, down the line, the new application sockets and monitors
 * them for any I/O error or clean close that triggers an unregistration of
 * the application.
 */
static struct ust_cmd_queue ust_cmd_queue;

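/*
 * Illustrative sketch of the wake/wait scheme described above (not part of
 * the control flow of this file; the exact enqueue call and node field names
 * are assumptions, see futex.c/.h and the two threads below for the
 * authoritative usage):
 *
 *   Producer (thread_registration_apps), after filling in ust_cmd:
 *       cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail,
 *               &ust_cmd->node);
 *       futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   Consumer (thread_dispatch_ust_registration):
 *       futex_nto1_prepare(&ust_cmd_queue.futex);
 *       ...dequeue and process every queued node...
 *       futex_nto1_wait(&ust_cmd_queue.futex);
 */
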
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

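/*
 * Typical iteration pattern over the session list (illustration only; see
 * update_kernel_poll() further down for a real use of this discipline):
 *
 *   session_lock_list();
 *   cds_list_for_each_entry(session, &session_list_ptr->head, list) {
 *           session_lock(session);
 *           ...work on the session...
 *           session_unlock(session);
 *   }
 *   session_unlock_list();
 */
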
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *module_proc_lttng = "/proc/lttng";

/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd arrives
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;

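/*
 * Typical state check performed on behalf of a client command (illustrative
 * sketch only; the actual checks live in the command-processing code, and the
 * error code shown is an assumption):
 *
 *   if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
 *           ret = LTTNG_ERR_NO_KERNCONSUMERD;
 *           goto error;
 *   }
 */
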
/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;

/* Am I root or not. */
int is_root;			/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/* Load session thread information to operate. */
struct load_session_thread_data *load_info;

/* Notification thread handle. */
struct notification_thread_handle *notification_thread_handle;

/* Rotation thread handle. */
struct rotation_thread_handle *rotation_thread_handle;

/* Global hash tables */
struct lttng_ht *agent_apps_ht_by_sock = NULL;

/*
 * The initialization of the session daemon is done in multiple phases.
 *
 * While all threads are launched near-simultaneously, only some of them
 * are needed to ensure the session daemon can start to respond to client
 * requests.
 *
 * There are two important guarantees that we wish to offer with respect
 * to the initialisation of the session daemon:
 *   - When the daemonize/background launcher process exits, the sessiond
 *     is fully able to respond to client requests,
 *   - Auto-loaded sessions are visible to clients.
 *
 * In order to achieve this, a number of support threads have to be launched
 * to allow the "client" thread to function properly. Moreover, since the
 * "load session" thread needs the client thread, we must provide a way
 * for the "load session" thread to know that the "client" thread is up
 * and running.
 *
 * Hence, the support threads decrement the lttng_sessiond_ready counter
 * while the "client" thread waits for it to reach 0. Once the "client" thread
 * unblocks, it posts the message_thread_ready semaphore which allows the
 * "load session" thread to progress.
 *
 * This implies that the "load session" thread is the last to be initialized
 * and will explicitly call sessiond_signal_parents(), which signals the
 * parents that the session daemon is fully initialized.
 *
 * The four (4) support threads are:
 *   - agent_thread
 *   - notification_thread
 *   - rotation_thread
 *   - health_thread
 */
#define NR_LTTNG_SESSIOND_SUPPORT_THREADS 4
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_SUPPORT_THREADS;

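/*
 * Sketch of the readiness handshake described above (illustration only; the
 * real waiting code lives in the client and load-session threads, and the
 * semaphore field name is an assumption taken from load-session-thread.h):
 *
 *   Support thread, once operational:
 *       sessiond_notify_ready();
 *
 *   Client thread, before accepting commands:
 *       while (uatomic_read(&lttng_sessiond_ready) != 0) {
 *               ...wait for the remaining support threads...
 *       }
 *       sem_post(&load_info->message_thread_ready);
 *
 *   Load-session thread, once auto-loaded sessions are visible:
 *       sessiond_signal_parents();
 */
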
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	return (fd == thread_quit_pipe[0] && (events & LPOLLIN)) ? 1 : 0;
}

/* Notify parents that we are ready for cmd and health check */
LTTNG_HIDDEN
void sessiond_signal_parents(void)
{
	/*
	 * Notify parent pid that we are ready to accept commands
	 * for the client side. This ppid is the one from the
	 * external process that spawned us.
	 */
	if (config.sig_parent) {
		kill(ppid, SIGUSR1);
	}

	/*
	 * Notify the parent of the fork() process that we are
	 * ready.
	 */
	if (config.daemonize || config.background) {
		kill(child_ppid, SIGUSR1);
	}
}

LTTNG_HIDDEN
void sessiond_notify_ready(void)
{
	/*
	 * The _return variant is used since the implied memory barriers are
	 * required.
	 */
	(void) uatomic_sub_return(&lttng_sessiond_ready, 1);
}

static
int __sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size,
		int *a_pipe)
{
	int ret;

	assert(events);

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, a_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	return __sessiond_set_thread_pollset(events, size, thread_quit_pipe);
}

/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int __init_thread_quit_pipe(int *a_pipe)
{
	int ret, i;

	ret = pipe(a_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(a_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}

static int init_thread_quit_pipe(void)
{
	return __init_thread_quit_pipe(thread_quit_pipe);
}

/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}

/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
	if (kconsumer_data.channel_monitor_pipe >= 0) {
		ret = close(kconsumer_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("kernel consumer channel monitor pipe close");
		}
	}
	if (ustconsumer32_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer32_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd32 channel monitor pipe close");
		}
	}
	if (ustconsumer64_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer64_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd64 channel monitor pipe close");
		}
	}
	if (kconsumer_data.channel_rotate_pipe >= 0) {
		ret = close(kconsumer_data.channel_rotate_pipe);
		if (ret < 0) {
			PERROR("kernel consumer channel rotate pipe close");
		}
	}
	if (ustconsumer32_data.channel_rotate_pipe >= 0) {
		ret = close(ustconsumer32_data.channel_rotate_pipe);
		if (ret < 0) {
			PERROR("UST consumerd32 channel rotate pipe close");
		}
	}
	if (ustconsumer64_data.channel_rotate_pipe >= 0) {
		ret = close(ustconsumer64_data.channel_rotate_pipe);
		if (ret < 0) {
			PERROR("UST consumerd64 channel rotate pipe close");
		}
	}
}

/*
 * Wait on consumer process termination.
 *
 * Needs to be called with the consumer data lock held or from a context
 * ensuring no concurrent access to the data (e.g. cleanup).
 */
static void wait_consumer(struct consumer_data *consumer_data)
{
	pid_t ret;
	int status;

	if (consumer_data->pid <= 0) {
		return;
	}

	DBG("Waiting for complete teardown of consumerd (PID: %d)",
			consumer_data->pid);
	ret = waitpid(consumer_data->pid, &status, 0);
	if (ret == -1) {
		PERROR("consumerd waitpid pid: %d", consumer_data->pid);
	} else if (!WIFEXITED(status)) {
		ERR("consumerd termination with error: %d",
				WEXITSTATUS(status));
	}
	consumer_data->pid = 0;
}

/*
 * Cleanup the session daemon's data structures.
 */
static void sessiond_cleanup(void)
{
	int ret;
	struct ltt_session *sess, *stmp;

	DBG("Cleanup sessiond");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If config.pid_file_path.value is undefined, the default file will be
	 * wiped when removing the rundir.
	 */
	if (config.pid_file_path.value) {
		ret = remove(config.pid_file_path.value);
		if (ret < 0) {
			PERROR("remove pidfile %s", config.pid_file_path.value);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s",
			config.rundir.value);

	/* sessiond */
	DBG("Removing %s", config.pid_file_path.value);
	(void) unlink(config.pid_file_path.value);

	DBG("Removing %s", config.agent_port_file_path.value);
	(void) unlink(config.agent_port_file_path.value);

	/* kconsumerd */
	DBG("Removing %s", kconsumer_data.err_unix_sock_path);
	(void) unlink(kconsumer_data.err_unix_sock_path);

	DBG("Removing directory %s", config.kconsumerd_path.value);
	(void) rmdir(config.kconsumerd_path.value);

	/* ust consumerd 32 */
	DBG("Removing %s", config.consumerd32_err_unix_sock_path.value);
	(void) unlink(config.consumerd32_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd32_path.value);
	(void) rmdir(config.consumerd32_path.value);

	/* ust consumerd 64 */
	DBG("Removing %s", config.consumerd64_err_unix_sock_path.value);
	(void) unlink(config.consumerd64_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd64_path.value);
	(void) rmdir(config.consumerd64_path.value);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL sessions */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1],
					notification_thread_handle);
		}
	}

	wait_consumer(&kconsumer_data);
	wait_consumer(&ustconsumer64_data);
	wait_consumer(&ustconsumer32_data);

	DBG("Cleaning up all agent apps");
	agent_app_ht_clean();

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !config.no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
		free(syscall_table);
	}

	close_consumer_sockets();

	if (load_info) {
		load_session_destroy_data(load_info);
		free(load_info);
	}

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */
}

/*
 * Cleanup the daemon's option data structures.
 */
static void sessiond_cleanup_options(void)
{
	DBG("Cleaning up options");

	sessiond_config_fini(&config);

	run_as_destroy_worker();
}

/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}

/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(config.wait_shm_path.value, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}

/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return 0 on success, negative value on error.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx,
	const void *payload_buf, size_t payload_len,
	const void *cmd_header_buf, size_t cmd_header_len)
{
	int ret = 0;
	const size_t header_len = sizeof(struct lttcomm_lttng_msg);
	const size_t cmd_header_offset = header_len;
	const size_t payload_offset = cmd_header_offset + cmd_header_len;
	const size_t total_msg_size = header_len + cmd_header_len + payload_len;

	cmd_ctx->llm = zmalloc(total_msg_size);

	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto end;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
	cmd_ctx->llm->cmd_header_size = cmd_header_len;
	cmd_ctx->llm->data_size = payload_len;
	cmd_ctx->lttng_msg_size = total_msg_size;

	/* Copy command header */
	if (cmd_header_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + cmd_header_offset, cmd_header_buf,
				cmd_header_len);
	}

	/* Copy payload */
	if (payload_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + payload_offset, payload_buf,
				payload_len);
	}

end:
	return ret;
}

/*
 * Version of setup_lttng_msg() without command header.
 */
static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
	void *payload_buf, size_t payload_len)
{
	return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
}
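
/*
 * Resulting buffer layout built by setup_lttng_msg() (illustration only):
 *
 *   +--------------------------+----------------+-----------------+
 *   | struct lttcomm_lttng_msg | command header | command payload |
 *   +--------------------------+----------------+-----------------+
 *   0                          header_len       header_len +
 *                                               cmd_header_len
 *
 * cmd_header_size and data_size in the common header let the receiver locate
 * both variable-length parts of the reply.
 */
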
/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel streams and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_streams(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}

/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		struct ust_app *app;

		session_lock(sess);
		if (!sess->ust_session) {
			goto unlock_session;
		}

		rcu_read_lock();
		assert(app_sock >= 0);
		app = ust_app_find_by_sock(app_sock);
		if (app == NULL) {
			/*
			 * The application may already have been unregistered,
			 * so this is possible; simply stop the update.
			 */
			DBG3("UST app update failed to find app sock %d",
					app_sock);
			goto unlock_rcu;
		}
		ust_app_global_update(sess->ust_session, app);
	unlock_rcu:
		rcu_read_unlock();
	unlock_session:
		session_unlock(sess);
	}
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which could
	 * free non-NULL pointers, so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (revents & LPOLLIN) {
				if (pollfd == kernel_poll_pipe[0]) {
					(void) lttng_read(kernel_poll_pipe[0],
							&tmp, 1);
					/*
					 * Ret value is useless here; if this pipe gets any
					 * action, an update is required anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Adding kernel stream to
					 * kernel session and updating the kernel consumer
					 */
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}

/*
 * Signal the consumer data's pthread condition to report a consumer
 * management thread state change to the waiter.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value; it's the
	 * waiter's job to correctly interpret this condition variable
	 * associated with the consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}

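/*
 * Waiter-side counterpart (illustrative sketch only; the actual wait, with a
 * timeout, is performed by the code that spawns the consumer thread):
 *
 *   pthread_mutex_lock(&consumer_data->cond_mutex);
 *   while (consumer_data->consumer_thread_is_ready == 0) {
 *           pthread_cond_wait(&consumer_data->cond,
 *                   &consumer_data->cond_mutex);
 *   }
 *   pthread_mutex_unlock(&consumer_data->cond_mutex);
 *
 * The loop guards against spurious wakeups; a negative value of
 * consumer_thread_is_ready indicates a bootstrap error.
 */
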
1d4b027a 1141/*
3bd1e081 1142 * This thread manage the consumer error sent back to the session daemon.
1d4b027a 1143 */
3bd1e081 1144static void *thread_manage_consumer(void *data)
1d4b027a 1145{
42fc1d0b 1146 int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
5eb91c98 1147 uint32_t revents, nb_fd;
1d4b027a 1148 enum lttcomm_return_code code;
5eb91c98 1149 struct lttng_poll_event events;
3bd1e081 1150 struct consumer_data *consumer_data = data;
b3530820 1151 struct consumer_socket *cmd_socket_wrapper = NULL;
1d4b027a 1152
3bd1e081 1153 DBG("[thread] Manage consumer started");
1d4b027a 1154
34c1e15a
MD
1155 rcu_register_thread();
1156 rcu_thread_online();
1157
6c71277b 1158 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
927ca06a 1159
855060f8 1160 health_code_update();
9449cc75 1161
5eb91c98 1162 /*
331744e3
JD
1163 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
1164 * metadata_sock. Nothing more will be added to this poll set.
5eb91c98 1165 */
331744e3 1166 ret = sessiond_set_thread_pollset(&events, 3);
5eb91c98 1167 if (ret < 0) {
76d7553f 1168 goto error_poll;
5eb91c98 1169 }
273ea72c 1170
edb8b045
DG
1171 /*
1172 * The error socket here is already in a listening state which was done
1173 * just before spawning this thread to avoid a race between the consumer
1174 * daemon exec trying to connect and the listen() call.
1175 */
3bd1e081 1176 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
5eb91c98
DG
1177 if (ret < 0) {
1178 goto error;
1179 }
1180
840cb59c 1181 health_code_update();
44a5e5eb 1182
331744e3 1183 /* Infinite blocking call, waiting for transmission */
88f2b785 1184restart:
a78af745 1185 health_poll_entry();
8ac94142 1186
e547b070 1187 if (testpoint(sessiond_thread_manage_consumer)) {
6993eeb3
CB
1188 goto error;
1189 }
8ac94142 1190
5eb91c98 1191 ret = lttng_poll_wait(&events, -1);
a78af745 1192 health_poll_exit();
273ea72c 1193 if (ret < 0) {
88f2b785
MD
1194 /*
1195 * Restart interrupted system call.
1196 */
1197 if (errno == EINTR) {
1198 goto restart;
1199 }
273ea72c
DG
1200 goto error;
1201 }
1202
0d9c5d77
DG
1203 nb_fd = ret;
1204
5eb91c98
DG
1205 for (i = 0; i < nb_fd; i++) {
1206 /* Fetch once the poll data */
1207 revents = LTTNG_POLL_GETEV(&events, i);
1208 pollfd = LTTNG_POLL_GETFD(&events, i);
1209
840cb59c 1210 health_code_update();
44a5e5eb 1211
fd20dac9
MD
1212 if (!revents) {
1213 /* No activity for this FD (poll implementation). */
1214 continue;
1215 }
1216
5eb91c98 1217 /* Thread quit pipe has been closed. Killing thread. */
d0b96690 1218 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
5eb91c98 1219 if (ret) {
139ac872
MD
1220 err = 0;
1221 goto exit;
5eb91c98
DG
1222 }
1223
1224 /* Event on the registration socket */
3bd1e081 1225 if (pollfd == consumer_data->err_sock) {
03e43155
MD
1226 if (revents & LPOLLIN) {
1227 continue;
1228 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3bd1e081 1229 ERR("consumer err socket poll error");
5eb91c98 1230 goto error;
03e43155
MD
1231 } else {
1232 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
1233 goto error;
5eb91c98
DG
1234 }
1235 }
273ea72c
DG
1236 }
1237
3bd1e081 1238 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
1d4b027a
DG
1239 if (sock < 0) {
1240 goto error;
1241 }
1242
b662582b
DG
1243 /*
1244 * Set the CLOEXEC flag. Return code is useless because either way, the
1245 * show must go on.
1246 */
1247 (void) utils_set_fd_cloexec(sock);
1248
840cb59c 1249 health_code_update();
44a5e5eb 1250
3bd1e081 1251 DBG2("Receiving code from consumer err_sock");
ee0b0061 1252
712ea556 1253 /* Getting status code from kconsumerd */
54d01ffb
DG
1254 ret = lttcomm_recv_unix_sock(sock, &code,
1255 sizeof(enum lttcomm_return_code));
1d4b027a
DG
1256 if (ret <= 0) {
1257 goto error;
1258 }
1259
840cb59c 1260 health_code_update();
b3530820 1261 if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
3bd1e081 1262 ERR("consumer error when waiting for SOCK_READY : %s",
1d4b027a
DG
1263 lttcomm_get_readable_code(-code));
1264 goto error;
1265 }
1266
b3530820
JG
1267 /* Connect both command and metadata sockets. */
1268 consumer_data->cmd_sock =
1269 lttcomm_connect_unix_sock(
1270 consumer_data->cmd_unix_sock_path);
1271 consumer_data->metadata_fd =
1272 lttcomm_connect_unix_sock(
1273 consumer_data->cmd_unix_sock_path);
1274 if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
1275 PERROR("consumer connect cmd socket");
1276 /* On error, signal condition and quit. */
1277 signal_consumer_condition(consumer_data, -1);
1278 goto error;
1279 }
1280
1281 consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
1282
1283 /* Create metadata socket lock. */
1284 consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
1285 if (consumer_data->metadata_sock.lock == NULL) {
1286 PERROR("zmalloc pthread mutex");
1287 goto error;
1288 }
1289 pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
1290
1291 DBG("Consumer command socket ready (fd: %d", consumer_data->cmd_sock);
1292 DBG("Consumer metadata socket ready (fd: %d)",
1293 consumer_data->metadata_fd);
1294
1295 /*
1296 * Remove the consumerd error sock since we've established a connection.
1297 */
3bd1e081 1298 ret = lttng_poll_del(&events, consumer_data->err_sock);
72079cae 1299 if (ret < 0) {
72079cae
DG
1300 goto error;
1301 }
1302
331744e3 1303 /* Add new accepted error socket. */
5eb91c98
DG
1304 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
1305 if (ret < 0) {
72079cae 1306 goto error;
5eb91c98
DG
1307 }
1308
331744e3 1309 /* Add metadata socket that is successfully connected. */
4ce514c4 1310 ret = lttng_poll_add(&events, consumer_data->metadata_fd,
331744e3
JD
1311 LPOLLIN | LPOLLRDHUP);
1312 if (ret < 0) {
1313 goto error;
1314 }
1315
840cb59c 1316 health_code_update();
44a5e5eb 1317
b3530820 1318 /*
62c43103
JD
1319 * Transfer the write-end of the channel monitoring and rotate pipe
1320 * to the consumer by issuing a SET_CHANNEL_MONITOR_PIPE and
1321 * SET_CHANNEL_ROTATE_PIPE commands.
b3530820
JG
1322 */
1323 cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
1324 if (!cmd_socket_wrapper) {
1325 goto error;
1326 }
3e4dc117 1327 cmd_socket_wrapper->lock = &consumer_data->lock;
b3530820
JG
1328
1329 ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
1330 consumer_data->channel_monitor_pipe);
1331 if (ret) {
1332 goto error;
1333 }
62c43103
JD
1334
1335 ret = consumer_send_channel_rotate_pipe(cmd_socket_wrapper,
1336 consumer_data->channel_rotate_pipe);
1337 if (ret) {
1338 goto error;
1339 }
1340
b3530820
JG
1341 /* Discard the socket wrapper as it is no longer needed. */
1342 consumer_destroy_socket(cmd_socket_wrapper);
1343 cmd_socket_wrapper = NULL;
1344
1345 /* The thread is completely initialized, signal that it is ready. */
1346 signal_consumer_condition(consumer_data, 1);
1347
331744e3 1348 /* Infinite blocking call, waiting for transmission */
88f2b785 1349restart_poll:
331744e3 1350 while (1) {
42fc1d0b
DG
1351 health_code_update();
1352
1353 /* Exit the thread because the thread quit pipe has been triggered. */
1354 if (should_quit) {
1355 /* Not a health error. */
1356 err = 0;
1357 goto exit;
1358 }
1359
331744e3
JD
1360 health_poll_entry();
1361 ret = lttng_poll_wait(&events, -1);
1362 health_poll_exit();
1363 if (ret < 0) {
1364 /*
1365 * Restart interrupted system call.
1366 */
1367 if (errno == EINTR) {
1368 goto restart_poll;
1369 }
1370 goto error;
88f2b785 1371 }
72079cae 1372
331744e3 1373 nb_fd = ret;
0d9c5d77 1374
331744e3
JD
1375 for (i = 0; i < nb_fd; i++) {
1376 /* Fetch once the poll data */
1377 revents = LTTNG_POLL_GETEV(&events, i);
1378 pollfd = LTTNG_POLL_GETFD(&events, i);
5eb91c98 1379
331744e3 1380 health_code_update();
44a5e5eb 1381
fd20dac9
MD
1382 if (!revents) {
1383 /* No activity for this FD (poll implementation). */
1384 continue;
1385 }
1386
42fc1d0b
DG
1387 /*
1388 * Thread quit pipe has been triggered, flag that we should stop
1389 * but continue the current loop to handle potential data from
1390 * consumer.
1391 */
1392 should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);
5eb91c98 1393
331744e3
JD
1394 if (pollfd == sock) {
1395 /* Event on the consumerd socket */
03e43155
MD
1396 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
1397 && !(revents & LPOLLIN)) {
331744e3
JD
1398 ERR("consumer err socket second poll error");
1399 goto error;
1400 }
1401 health_code_update();
1402 /* Wait for any kconsumerd error */
1403 ret = lttcomm_recv_unix_sock(sock, &code,
1404 sizeof(enum lttcomm_return_code));
1405 if (ret <= 0) {
1406 ERR("consumer closed the command socket");
1407 goto error;
1408 }
1409
1410 ERR("consumer return code : %s",
1411 lttcomm_get_readable_code(-code));
1412
1413 goto exit;
4ce514c4 1414 } else if (pollfd == consumer_data->metadata_fd) {
03e43155
MD
1415 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
1416 && !(revents & LPOLLIN)) {
1417 ERR("consumer err metadata socket second poll error");
1418 goto error;
1419 }
331744e3
JD
1420 /* UST metadata requests */
1421 ret = ust_consumer_metadata_request(
1422 &consumer_data->metadata_sock);
1423 if (ret < 0) {
1424 ERR("Handling metadata request");
1425 goto error;
1426 }
5eb91c98 1427 }
42fc1d0b 1428 /* No need for an else branch all FDs are tested prior. */
5eb91c98 1429 }
331744e3 1430 health_code_update();
5eb91c98
DG
1431 }
1432
139ac872 1433exit:
1d4b027a 1434error:
fdadac08
DG
1435 /*
1436 * We lock here because we are about to close the sockets and some other
92db7cdc
DG
1437 * thread might be using them so get exclusive access which will abort all
1438 * other consumer command by other threads.
fdadac08
DG
1439 */
1440 pthread_mutex_lock(&consumer_data->lock);
1441
5c827ce0
DG
1442 /* Immediately set the consumerd state to stopped */
1443 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1444 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1445 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1446 consumer_data->type == LTTNG_CONSUMER32_UST) {
1447 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1448 } else {
1449 /* Code flow error... */
1450 assert(0);
1451 }
1452
76d7553f
MD
1453 if (consumer_data->err_sock >= 0) {
1454 ret = close(consumer_data->err_sock);
1455 if (ret) {
1456 PERROR("close");
1457 }
a76cbd9f 1458 consumer_data->err_sock = -1;
76d7553f
MD
1459 }
1460 if (consumer_data->cmd_sock >= 0) {
1461 ret = close(consumer_data->cmd_sock);
1462 if (ret) {
1463 PERROR("close");
1464 }
a76cbd9f 1465 consumer_data->cmd_sock = -1;
76d7553f 1466 }
96544455
SS
1467 if (consumer_data->metadata_sock.fd_ptr &&
1468 *consumer_data->metadata_sock.fd_ptr >= 0) {
9363801e 1469 ret = close(*consumer_data->metadata_sock.fd_ptr);
331744e3
JD
1470 if (ret) {
1471 PERROR("close");
1472 }
1473 }
76d7553f
MD
1474 if (sock >= 0) {
1475 ret = close(sock);
1476 if (ret) {
1477 PERROR("close");
1478 }
1479 }
273ea72c 1480
3bd1e081
MD
1481 unlink(consumer_data->err_unix_sock_path);
1482 unlink(consumer_data->cmd_unix_sock_path);
fdadac08 1483 pthread_mutex_unlock(&consumer_data->lock);
92db7cdc 1484
fdadac08 1485 /* Cleanup metadata socket mutex. */
96544455
SS
1486 if (consumer_data->metadata_sock.lock) {
1487 pthread_mutex_destroy(consumer_data->metadata_sock.lock);
1488 free(consumer_data->metadata_sock.lock);
1489 }
5eb91c98 1490 lttng_poll_clean(&events);
b3530820
JG
1491
1492 if (cmd_socket_wrapper) {
1493 consumer_destroy_socket(cmd_socket_wrapper);
1494 }
76d7553f 1495error_poll:
139ac872 1496 if (err) {
840cb59c 1497 health_error();
139ac872
MD
1498 ERR("Health error occurred in %s", __func__);
1499 }
8782cc74 1500 health_unregister(health_sessiond);
76d7553f 1501 DBG("consumer thread cleanup completed");
0177d773 1502
34c1e15a
MD
1503 rcu_thread_offline();
1504 rcu_unregister_thread();
1505
5eb91c98 1506 return NULL;
099e26bd
DG
1507}
1508
/*
 * This thread receives application command sockets (FDs) on the
 * apps_cmd_pipe and waits (polls) on them until they are closed
 * or an error occurs.
 *
 * At that point, it flushes the data (tracing and metadata) associated
 * with this application and tears down ust app sessions and other
 * associated data structures through ust_app_unregister().
 *
 * Note that this thread never sends commands to the applications
 * through the command sockets; it merely listens for hang-ups
 * and errors on those sockets and cleans up as they occur.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Apps thread return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & LPOLLIN) {
					int sock;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * Since this is a command socket (write then read),
					 * we only monitor the error events of the socket.
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else {
					ERR("Unknown poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}

/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid; we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0, else a negative value being the errno of the failed
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function, being a shutdown state of the thread.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return (int) ret;
}

f45e313d
DG
1718/*
1719 * Sanitize the wait queue of the dispatch registration thread, that is, remove
1720 * invalid nodes from it. This avoids memory leaks when the UST notify socket
1721 * of an application is never received.
1722 */
1723static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
1724{
1725 int ret, nb_fd = 0, i;
1726 unsigned int fd_added = 0;
1727 struct lttng_poll_event events;
1728 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1729
1730 assert(wait_queue);
1731
1732 lttng_poll_init(&events);
1733
1734 /* Just skip everything for an empty queue. */
1735 if (!wait_queue->count) {
1736 goto end;
1737 }
1738
1739 ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
1740 if (ret < 0) {
1741 goto error_create;
1742 }
1743
1744 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1745 &wait_queue->head, head) {
1746 assert(wait_node->app);
1747 ret = lttng_poll_add(&events, wait_node->app->sock,
1748 LPOLLHUP | LPOLLERR);
1749 if (ret < 0) {
1750 goto error;
1751 }
1752
1753 fd_added = 1;
1754 }
1755
1756 if (!fd_added) {
1757 goto end;
1758 }
1759
1760 /*
1761 * Poll but don't block so we can quickly identify the faulty events and
1762 * clean them afterwards from the wait queue.
1763 */
1764 ret = lttng_poll_wait(&events, 0);
1765 if (ret < 0) {
1766 goto error;
1767 }
1768 nb_fd = ret;
1769
1770 for (i = 0; i < nb_fd; i++) {
1771 /* Get faulty FD. */
1772 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
1773 int pollfd = LTTNG_POLL_GETFD(&events, i);
1774
fd20dac9
MD
1775 if (!revents) {
1776 /* No activity for this FD (poll implementation). */
1777 continue;
1778 }
1779
f45e313d
DG
1780 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1781 &wait_queue->head, head) {
1782 if (pollfd == wait_node->app->sock &&
1783 (revents & (LPOLLHUP | LPOLLERR))) {
1784 cds_list_del(&wait_node->head);
1785 wait_queue->count--;
1786 ust_app_destroy(wait_node->app);
1787 free(wait_node);
48b40bcf
JG
1788 /*
1789 * Silence warning of use-after-free in
1790 * cds_list_for_each_entry_safe which uses
1791 * __typeof__(*wait_node).
1792 */
1793 wait_node = NULL;
f45e313d 1794 break;
03e43155
MD
1795 } else {
1796 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
1797 goto error;
f45e313d
DG
1798 }
1799 }
1800 }
1801
1802 if (nb_fd > 0) {
1803 		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
1804 }
1805
1806end:
1807 lttng_poll_clean(&events);
1808 return;
1809
1810error:
1811 lttng_poll_clean(&events);
1812error_create:
1813 ERR("Unable to sanitize wait queue");
1814 return;
1815}
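/*
 * For reference, a minimal sketch of the wait queue types handled above,
 * inferred from their use here (the authoritative definitions live elsewhere
 * in the session daemon sources; the exact layout is an assumption):
 */
#if 0
struct ust_reg_wait_node {
	struct ust_app *app;		/* Application waiting for its notify socket. */
	struct cds_list_head head;	/* Node in the wait queue list. */
};

struct ust_reg_wait_queue {
	unsigned long count;		/* Number of nodes currently queued. */
	struct cds_list_head head;	/* List of ust_reg_wait_node. */
};
#endif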
1816
099e26bd
DG
1817/*
1818 * Dispatch request from the registration threads to the application
1819 * communication thread.
1820 */
1821static void *thread_dispatch_ust_registration(void *data)
1822{
12e2b881 1823 int ret, err = -1;
8bdee6e2 1824 struct cds_wfcq_node *node;
099e26bd 1825 struct ust_command *ust_cmd = NULL;
f45e313d
DG
1826 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1827 struct ust_reg_wait_queue wait_queue = {
1828 .count = 0,
1829 };
d0b96690 1830
967e3668
MD
1831 rcu_register_thread();
1832
6c71277b 1833 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
12e2b881 1834
9ad42ec1
MD
1835 if (testpoint(sessiond_thread_app_reg_dispatch)) {
1836 goto error_testpoint;
1837 }
1838
12e2b881
MD
1839 health_code_update();
1840
f45e313d 1841 CDS_INIT_LIST_HEAD(&wait_queue.head);
099e26bd
DG
1842
1843 DBG("[thread] Dispatch UST command started");
1844
0ed3b1a8 1845 for (;;) {
12e2b881
MD
1846 health_code_update();
1847
099e26bd
DG
1848 /* Atomically prepare the queue futex */
1849 futex_nto1_prepare(&ust_cmd_queue.futex);
1850
0ed3b1a8
MD
1851 if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
1852 break;
1853 }
1854
099e26bd 1855 do {
d0b96690 1856 struct ust_app *app = NULL;
7972aab2 1857 ust_cmd = NULL;
d0b96690 1858
f45e313d
DG
1859 /*
1860 			 * Make sure we don't have node(s) that have hung up before receiving
1861 			 * the notify socket. This cleans the list in order to avoid memory
1862 			 * leaks from notify sockets that are never seen.
1863 */
1864 sanitize_wait_queue(&wait_queue);
1865
12e2b881 1866 health_code_update();
099e26bd 1867 /* Dequeue command for registration */
8bdee6e2 1868 node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
099e26bd 1869 if (node == NULL) {
00a17c97 1870 DBG("Woken up but nothing in the UST command queue");
099e26bd
DG
1871 /* Continue thread execution */
1872 break;
1873 }
1874
1875 ust_cmd = caa_container_of(node, struct ust_command, node);
1876
2f50c8a3
DG
1877 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1878 " gid:%d sock:%d name:%s (version %d.%d)",
1879 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1880 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1881 ust_cmd->sock, ust_cmd->reg_msg.name,
1882 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
d0b96690
DG
1883
1884 if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
1885 wait_node = zmalloc(sizeof(*wait_node));
1886 if (!wait_node) {
1887 PERROR("zmalloc wait_node dispatch");
020d7f60
DG
1888 ret = close(ust_cmd->sock);
1889 if (ret < 0) {
1890 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1891 }
51dec90d 1892 lttng_fd_put(LTTNG_FD_APPS, 1);
7972aab2 1893 free(ust_cmd);
d0b96690
DG
1894 goto error;
1895 }
1896 CDS_INIT_LIST_HEAD(&wait_node->head);
1897
1898 /* Create application object if socket is CMD. */
1899 wait_node->app = ust_app_create(&ust_cmd->reg_msg,
1900 ust_cmd->sock);
1901 if (!wait_node->app) {
1902 ret = close(ust_cmd->sock);
1903 if (ret < 0) {
1904 PERROR("close ust sock dispatch %d", ust_cmd->sock);
6620da75 1905 }
51dec90d 1906 lttng_fd_put(LTTNG_FD_APPS, 1);
d88aee68 1907 free(wait_node);
7972aab2 1908 free(ust_cmd);
d0b96690
DG
1909 continue;
1910 }
1911 /*
1912 * Add application to the wait queue so we can set the notify
1913 * socket before putting this object in the global ht.
1914 */
f45e313d
DG
1915 cds_list_add(&wait_node->head, &wait_queue.head);
1916 wait_queue.count++;
d0b96690 1917
7972aab2 1918 free(ust_cmd);
d0b96690
DG
1919 /*
1920 * We have to continue here since we don't have the notify
1921 * socket and the application MUST be added to the hash table
1922 * only at that moment.
1923 */
1924 continue;
1925 } else {
1926 /*
1927 * Look for the application in the local wait queue and set the
1928 * notify socket if found.
1929 */
d88aee68 1930 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
f45e313d 1931 &wait_queue.head, head) {
12e2b881 1932 health_code_update();
d0b96690
DG
1933 if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
1934 wait_node->app->notify_sock = ust_cmd->sock;
1935 cds_list_del(&wait_node->head);
f45e313d 1936 wait_queue.count--;
d0b96690
DG
1937 app = wait_node->app;
1938 free(wait_node);
1939 DBG3("UST app notify socket %d is set", ust_cmd->sock);
1940 break;
1941 }
1942 }
020d7f60
DG
1943
1944 /*
1945 				 * With no application at this stage, the received socket is
1946 				 * basically useless, so close it before we free the cmd data
1947 				 * structure for good.
1948 */
1949 if (!app) {
1950 ret = close(ust_cmd->sock);
1951 if (ret < 0) {
1952 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1953 }
51dec90d 1954 lttng_fd_put(LTTNG_FD_APPS, 1);
020d7f60 1955 }
7972aab2 1956 free(ust_cmd);
d0b96690
DG
1957 }
1958
1959 if (app) {
d0b96690
DG
1960 /*
1961 * @session_lock_list
1962 *
1963 * Lock the global session list so from the register up to the
1964 * registration done message, no thread can see the application
1965 * and change its state.
1966 */
1967 session_lock_list();
1968 rcu_read_lock();
d88aee68 1969
d0b96690
DG
1970 /*
1971 * Add application to the global hash table. This needs to be
1972 * done before the update to the UST registry can locate the
1973 * application.
1974 */
1975 ust_app_add(app);
d88aee68
DG
1976
1977 /* Set app version. This call will print an error if needed. */
1978 (void) ust_app_version(app);
1979
1980 /* Send notify socket through the notify pipe. */
1981 ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
1982 app->notify_sock);
1983 if (ret < 0) {
1984 rcu_read_unlock();
1985 session_unlock_list();
b85dc84c
DG
1986 /*
1987 					 * No notify thread, stop the UST tracing. However, this is
1988 					 * not an internal error of this thread, so set
1989 					 * the health error code to a normal exit.
1990 */
1991 err = 0;
d88aee68 1992 goto error;
6620da75 1993 }
d88aee68 1994
d0b96690
DG
1995 /*
1996 * Update newly registered application with the tracing
1997 * registry info already enabled information.
1998 */
1999 update_ust_app(app->sock);
d88aee68
DG
2000
2001 /*
2002 * Don't care about return value. Let the manage apps threads
2003 * handle app unregistration upon socket close.
2004 */
fb45065e 2005 (void) ust_app_register_done(app);
d88aee68
DG
2006
2007 /*
2008 * Even if the application socket has been closed, send the app
2009 * to the thread and unregistration will take place at that
2010 * place.
2011 */
2012 ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
d0b96690 2013 if (ret < 0) {
d88aee68
DG
2014 rcu_read_unlock();
2015 session_unlock_list();
b85dc84c
DG
2016 /*
2017 					 * No apps thread, stop the UST tracing. However, this is
2018 					 * not an internal error of this thread, so set
2019 					 * the health error code to a normal exit.
2020 */
2021 err = 0;
d88aee68 2022 goto error;
d0b96690 2023 }
d88aee68 2024
d0b96690
DG
2025 rcu_read_unlock();
2026 session_unlock_list();
099e26bd 2027 }
099e26bd
DG
2028 } while (node != NULL);
2029
12e2b881 2030 health_poll_entry();
099e26bd
DG
2031 /* Futex wait on queue. Blocking call on futex() */
2032 futex_nto1_wait(&ust_cmd_queue.futex);
12e2b881 2033 health_poll_exit();
099e26bd 2034 }
12e2b881
MD
2035 /* Normal exit, no error */
2036 err = 0;
099e26bd
DG
2037
2038error:
d88aee68
DG
2039 /* Clean up wait queue. */
2040 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
f45e313d 2041 &wait_queue.head, head) {
d88aee68 2042 cds_list_del(&wait_node->head);
f45e313d 2043 wait_queue.count--;
d88aee68
DG
2044 free(wait_node);
2045 }
2046
772b8f4d
MD
2047 /* Empty command queue. */
2048 for (;;) {
2049 /* Dequeue command for registration */
2050 node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
2051 if (node == NULL) {
2052 break;
2053 }
2054 ust_cmd = caa_container_of(node, struct ust_command, node);
2055 ret = close(ust_cmd->sock);
2056 if (ret < 0) {
2057 PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
2058 }
2059 lttng_fd_put(LTTNG_FD_APPS, 1);
2060 free(ust_cmd);
2061 }
2062
9ad42ec1 2063error_testpoint:
099e26bd 2064 DBG("Dispatch thread dying");
12e2b881
MD
2065 if (err) {
2066 health_error();
2067 ERR("Health error occurred in %s", __func__);
2068 }
8782cc74 2069 health_unregister(health_sessiond);
967e3668 2070 rcu_unregister_thread();
099e26bd
DG
2071 return NULL;
2072}
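/*
 * Dispatch flow recap (descriptive only, summarized from the code above):
 *
 * 1) A USTCTL_SOCKET_CMD registration creates the ust_app object and parks
 *    it on the local wait queue until its notify socket shows up.
 * 2) The matching notify socket (same pid) completes the pair; the app is
 *    added to the global hash table, its version is probed, and the notify
 *    and command sockets are handed to the notify and apps threads.
 * 3) ust_app_register_done() is sent while the session list lock is held so
 *    no other thread can see the application in a half-registered state.
 */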
2073
2074/*
2075 * This thread manages application registration.
2076 */
2077static void *thread_registration_apps(void *data)
1d4b027a 2078{
139ac872 2079 int sock = -1, i, ret, pollfd, err = -1;
5eb91c98
DG
2080 uint32_t revents, nb_fd;
2081 struct lttng_poll_event events;
099e26bd
DG
2082 /*
2083 * Get allocated in this thread, enqueued to a global queue, dequeued and
2084 * freed in the manage apps thread.
2085 */
2086 struct ust_command *ust_cmd = NULL;
1d4b027a 2087
099e26bd 2088 DBG("[thread] Manage application registration started");
1d4b027a 2089
6c71277b 2090 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
927ca06a 2091
e547b070 2092 if (testpoint(sessiond_thread_registration_apps)) {
6993eeb3
CB
2093 goto error_testpoint;
2094 }
8ac94142 2095
1d4b027a
DG
2096 ret = lttcomm_listen_unix_sock(apps_sock);
2097 if (ret < 0) {
76d7553f 2098 goto error_listen;
1d4b027a
DG
2099 }
2100
5eb91c98
DG
2101 /*
2102 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
2103 * more will be added to this poll set.
2104 */
d0b96690 2105 ret = sessiond_set_thread_pollset(&events, 2);
5eb91c98 2106 if (ret < 0) {
76d7553f 2107 goto error_create_poll;
5eb91c98 2108 }
273ea72c 2109
5eb91c98
DG
2110 /* Add the application registration socket */
2111 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
2112 if (ret < 0) {
76d7553f 2113 goto error_poll_add;
5eb91c98 2114 }
273ea72c 2115
1d4b027a 2116 /* Notify all applications to register */
0fdd1e2c
DG
2117 ret = notify_ust_apps(1);
2118 if (ret < 0) {
2119 ERR("Failed to notify applications or create the wait shared memory.\n"
54d01ffb
DG
2120 				"Execution continues but there might be problems for already\n"
2121 				"running applications that wish to register.");
0fdd1e2c 2122 }
1d4b027a
DG
2123
2124 while (1) {
2125 DBG("Accepting application registration");
273ea72c
DG
2126
2127 		/* Infinite blocking call, waiting for transmission */
88f2b785 2128 restart:
a78af745 2129 health_poll_entry();
5eb91c98 2130 ret = lttng_poll_wait(&events, -1);
a78af745 2131 health_poll_exit();
273ea72c 2132 if (ret < 0) {
88f2b785
MD
2133 /*
2134 * Restart interrupted system call.
2135 */
2136 if (errno == EINTR) {
2137 goto restart;
2138 }
273ea72c
DG
2139 goto error;
2140 }
2141
0d9c5d77
DG
2142 nb_fd = ret;
2143
5eb91c98 2144 for (i = 0; i < nb_fd; i++) {
840cb59c 2145 health_code_update();
139ac872 2146
5eb91c98
DG
2147 /* Fetch once the poll data */
2148 revents = LTTNG_POLL_GETEV(&events, i);
2149 pollfd = LTTNG_POLL_GETFD(&events, i);
273ea72c 2150
fd20dac9
MD
2151 if (!revents) {
2152 /* No activity for this FD (poll implementation). */
2153 continue;
2154 }
2155
5eb91c98 2156 /* Thread quit pipe has been closed. Killing thread. */
d0b96690 2157 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
5eb91c98 2158 if (ret) {
139ac872
MD
2159 err = 0;
2160 goto exit;
90014c57 2161 }
1d4b027a 2162
5eb91c98
DG
2163 /* Event on the registration socket */
2164 if (pollfd == apps_sock) {
03e43155 2165 if (revents & LPOLLIN) {
5eb91c98
DG
2166 sock = lttcomm_accept_unix_sock(apps_sock);
2167 if (sock < 0) {
2168 goto error;
2169 }
099e26bd 2170
16c5c8fa
DG
2171 /*
2172 					 * Set socket timeout for both receiving and sending.
2173 * app_socket_timeout is in seconds, whereas
2174 * lttcomm_setsockopt_rcv_timeout and
2175 * lttcomm_setsockopt_snd_timeout expect msec as
2176 * parameter.
2177 */
e6142f2e 2178 if (config.app_socket_timeout >= 0) {
28ce0ff2 2179 (void) lttcomm_setsockopt_rcv_timeout(sock,
e6142f2e 2180 config.app_socket_timeout * 1000);
28ce0ff2 2181 (void) lttcomm_setsockopt_snd_timeout(sock,
e6142f2e 2182 config.app_socket_timeout * 1000);
28ce0ff2 2183 }
16c5c8fa 2184
b662582b
DG
2185 /*
2186 * Set the CLOEXEC flag. Return code is useless because
2187 * either way, the show must go on.
2188 */
2189 (void) utils_set_fd_cloexec(sock);
2190
5eb91c98 2191 /* Create UST registration command for enqueuing */
ba7f0ae5 2192 ust_cmd = zmalloc(sizeof(struct ust_command));
5eb91c98 2193 if (ust_cmd == NULL) {
76d7553f 2194 PERROR("ust command zmalloc");
41ed8e47
MD
2195 ret = close(sock);
2196 if (ret) {
2197 PERROR("close");
2198 }
5eb91c98
DG
2199 goto error;
2200 }
1d4b027a 2201
5eb91c98
DG
2202 /*
2203 * Using message-based transmissions to ensure we don't
2204 * have to deal with partially received messages.
2205 */
4063050c
MD
2206 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2207 if (ret < 0) {
2208 ERR("Exhausted file descriptors allowed for applications.");
2209 free(ust_cmd);
2210 ret = close(sock);
2211 if (ret) {
2212 PERROR("close");
2213 }
2214 sock = -1;
2215 continue;
2216 }
d88aee68 2217
840cb59c 2218 health_code_update();
d0b96690
DG
2219 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
2220 if (ret < 0) {
5eb91c98 2221 free(ust_cmd);
d0b96690 2222 /* Close socket of the application. */
76d7553f
MD
2223 ret = close(sock);
2224 if (ret) {
2225 PERROR("close");
2226 }
4063050c 2227 lttng_fd_put(LTTNG_FD_APPS, 1);
76d7553f 2228 sock = -1;
5eb91c98
DG
2229 continue;
2230 }
840cb59c 2231 health_code_update();
099e26bd 2232
5eb91c98 2233 ust_cmd->sock = sock;
34a2494f 2234 sock = -1;
099e26bd 2235
5eb91c98
DG
2236 DBG("UST registration received with pid:%d ppid:%d uid:%d"
2237 " gid:%d sock:%d name:%s (version %d.%d)",
2238 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
2239 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
2240 ust_cmd->sock, ust_cmd->reg_msg.name,
2241 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
54d01ffb 2242
5eb91c98
DG
2243 /*
2244 					 * Lock-free enqueue of the registration request. The red pill
54d01ffb 2245 					 * has been taken! This app will be part of the *system*.
5eb91c98 2246 */
8bdee6e2 2247 cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail, &ust_cmd->node);
5eb91c98
DG
2248
2249 /*
2250 * Wake the registration queue futex. Implicit memory
8bdee6e2 2251 * barrier with the exchange in cds_wfcq_enqueue.
5eb91c98
DG
2252 */
2253 futex_nto1_wake(&ust_cmd_queue.futex);
03e43155
MD
2254 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2255 ERR("Register apps socket poll error");
2256 goto error;
2257 } else {
2258 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
2259 goto error;
5eb91c98
DG
2260 }
2261 }
90014c57 2262 }
1d4b027a
DG
2263 }
2264
139ac872 2265exit:
1d4b027a 2266error:
0fdd1e2c
DG
2267 /* Notify that the registration thread is gone */
2268 notify_ust_apps(0);
2269
a4b35e07 2270 if (apps_sock >= 0) {
76d7553f
MD
2271 ret = close(apps_sock);
2272 if (ret) {
2273 PERROR("close");
2274 }
a4b35e07 2275 }
46c3f085 2276 if (sock >= 0) {
76d7553f
MD
2277 ret = close(sock);
2278 if (ret) {
2279 PERROR("close");
2280 }
4063050c 2281 lttng_fd_put(LTTNG_FD_APPS, 1);
a4b35e07 2282 }
e6142f2e 2283 unlink(config.apps_unix_sock_path.value);
0fdd1e2c 2284
76d7553f 2285error_poll_add:
5eb91c98 2286 lttng_poll_clean(&events);
76d7553f
MD
2287error_listen:
2288error_create_poll:
6993eeb3 2289error_testpoint:
76d7553f 2290 DBG("UST Registration thread cleanup complete");
9ad42ec1
MD
2291 if (err) {
2292 health_error();
2293 ERR("Health error occurred in %s", __func__);
2294 }
8782cc74 2295 health_unregister(health_sessiond);
5eb91c98 2296
1d4b027a
DG
2297 return NULL;
2298}
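/*
 * Registration thread recap (descriptive only, summarized from the code
 * above): accept() the new application socket, apply the configured
 * send/receive timeouts and the close-on-exec flag, account for the FD with
 * lttng_fd_get(), read the registration message, then enqueue a ust_command
 * on the lock-free queue and wake the dispatch thread through its futex.
 */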
2299
8c0faa1d 2300/*
3bd1e081 2301 * Start the thread_manage_consumer. This must be done after the lttng-consumerd
d063d709 2302 * exec or it will fail.
8c0faa1d 2303 */
3bd1e081 2304static int spawn_consumer_thread(struct consumer_data *consumer_data)
8c0faa1d 2305{
a23ec3a7 2306 int ret, clock_ret;
ee0b0061
DG
2307 struct timespec timeout;
2308
13a7bce3
JG
2309 /*
2310 * Make sure we set the readiness flag to 0 because we are NOT ready.
2311 * This access to consumer_thread_is_ready does not need to be
2312 * protected by consumer_data.cond_mutex (yet) since the consumer
2313 * management thread has not been started at this point.
2314 */
a23ec3a7 2315 consumer_data->consumer_thread_is_ready = 0;
8c0faa1d 2316
a23ec3a7
DG
2317 /* Setup pthread condition */
2318 ret = pthread_condattr_init(&consumer_data->condattr);
4a15001e 2319 if (ret) {
a23ec3a7
DG
2320 errno = ret;
2321 PERROR("pthread_condattr_init consumer data");
2322 goto error;
2323 }
2324
2325 /*
2326 * Set the monotonic clock in order to make sure we DO NOT jump in time
2327 * between the clock_gettime() call and the timedwait call. See bug #324
2328 * for a more details and how we noticed it.
2329 */
2330 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
4a15001e 2331 if (ret) {
a23ec3a7
DG
2332 errno = ret;
2333 PERROR("pthread_condattr_setclock consumer data");
ee0b0061
DG
2334 goto error;
2335 }
8c0faa1d 2336
a23ec3a7 2337 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
4a15001e 2338 if (ret) {
a23ec3a7
DG
2339 errno = ret;
2340 PERROR("pthread_cond_init consumer data");
2341 goto error;
2342 }
2343
1a1a34b4
MJ
2344 ret = pthread_create(&consumer_data->thread, default_pthread_attr(),
2345 thread_manage_consumer, consumer_data);
4a15001e
MD
2346 if (ret) {
2347 errno = ret;
3bd1e081 2348 PERROR("pthread_create consumer");
ee0b0061 2349 ret = -1;
8c0faa1d
DG
2350 goto error;
2351 }
2352
a23ec3a7
DG
2353 /* We are about to wait on a pthread condition */
2354 pthread_mutex_lock(&consumer_data->cond_mutex);
2355
ee0b0061 2356 /* Get time for sem_timedwait absolute timeout */
389fbf04 2357 clock_ret = lttng_clock_gettime(CLOCK_MONOTONIC, &timeout);
a23ec3a7
DG
2358 /*
2359 * Set the timeout for the condition timed wait even if the clock gettime
2360 	 * call fails, since we might loop on that call and we want to avoid
2361 	 * incrementing the timeout too many times.
2362 */
2363 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
2364
2365 /*
2366 	 * The following loop COULD be skipped in some conditions, which is why we
2367 * set ret to 0 in order to make sure at least one round of the loop is
2368 * done.
2369 */
2370 ret = 0;
2371
2372 /*
2373 	 * Loop until the condition is reached or a timeout occurs. Note
2374 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2375 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2376 * possible. This loop does not take any chances and works with both of
2377 * them.
2378 */
2379 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2380 if (clock_ret < 0) {
2381 PERROR("clock_gettime spawn consumer");
2382 /* Infinite wait for the consumerd thread to be ready */
2383 ret = pthread_cond_wait(&consumer_data->cond,
2384 &consumer_data->cond_mutex);
2385 } else {
2386 ret = pthread_cond_timedwait(&consumer_data->cond,
2387 &consumer_data->cond_mutex, &timeout);
2388 }
ee0b0061 2389 }
8c0faa1d 2390
a23ec3a7
DG
2391 /* Release the pthread condition */
2392 pthread_mutex_unlock(&consumer_data->cond_mutex);
2393
2394 if (ret != 0) {
2395 errno = ret;
2396 if (ret == ETIMEDOUT) {
4282f9a3
DG
2397 int pth_ret;
2398
ee0b0061
DG
2399 /*
2400 * Call has timed out so we kill the kconsumerd_thread and return
2401 * an error.
2402 */
a23ec3a7
DG
2403 ERR("Condition timed out. The consumer thread was never ready."
2404 " Killing it");
4282f9a3
DG
2405 pth_ret = pthread_cancel(consumer_data->thread);
2406 if (pth_ret < 0) {
3bd1e081 2407 PERROR("pthread_cancel consumer thread");
ee0b0061
DG
2408 }
2409 } else {
a23ec3a7 2410 PERROR("pthread_cond_wait failed consumer thread");
ee0b0061 2411 }
4282f9a3
DG
2412 /* Caller is expecting a negative value on failure. */
2413 ret = -1;
ee0b0061
DG
2414 goto error;
2415 }
2416
3bd1e081
MD
2417 pthread_mutex_lock(&consumer_data->pid_mutex);
2418 if (consumer_data->pid == 0) {
a23ec3a7 2419 ERR("Consumerd did not start");
3bd1e081 2420 pthread_mutex_unlock(&consumer_data->pid_mutex);
712ea556
DG
2421 goto error;
2422 }
3bd1e081 2423 pthread_mutex_unlock(&consumer_data->pid_mutex);
712ea556 2424
8c0faa1d
DG
2425 return 0;
2426
2427error:
2428 return ret;
2429}
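/*
 * The readiness handshake above binds the condition variable to
 * CLOCK_MONOTONIC so that a wall-clock jump cannot shorten or extend the
 * timed wait. A self-contained sketch of that pattern follows; it is an
 * illustration only (names and the one-shot structure are assumptions), not
 * lttng-tools code.
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready_cond;
static int ready;

/* Wait at most timeout_sec for 'ready' to be raised by another thread. */
static int wait_for_ready(unsigned int timeout_sec)
{
	int ret = 0;
	struct timespec ts;
	pthread_condattr_t attr;

	pthread_condattr_init(&attr);
	pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
	pthread_cond_init(&ready_cond, &attr);

	/* Timeout is expressed on the same (monotonic) clock as the condvar. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&ready_lock);
	while (!ready && ret != ETIMEDOUT) {
		ret = pthread_cond_timedwait(&ready_cond, &ready_lock, &ts);
	}
	pthread_mutex_unlock(&ready_lock);

	return ret == ETIMEDOUT ? -1 : 0;
}
#endif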
2430
d9800920 2431/*
3bd1e081 2432 * Join consumer thread
d9800920 2433 */
3bd1e081 2434static int join_consumer_thread(struct consumer_data *consumer_data)
cf3af59e
MD
2435{
2436 void *status;
cf3af59e 2437
e8209f6b
DG
2438 /* Consumer pid must be a real one. */
2439 if (consumer_data->pid > 0) {
c617c0c6 2440 int ret;
3bd1e081 2441 ret = kill(consumer_data->pid, SIGTERM);
cf3af59e 2442 if (ret) {
4a15001e 2443 PERROR("Error killing consumer daemon");
cf3af59e
MD
2444 return ret;
2445 }
3bd1e081 2446 return pthread_join(consumer_data->thread, &status);
cf3af59e
MD
2447 } else {
2448 return 0;
2449 }
2450}
2451
8c0faa1d 2452/*
3bd1e081 2453 * Fork and exec a consumer daemon (consumerd).
8c0faa1d 2454 *
d063d709 2455 * Return the pid on success, else -1.
8c0faa1d 2456 */
3bd1e081 2457static pid_t spawn_consumerd(struct consumer_data *consumer_data)
8c0faa1d
DG
2458{
2459 int ret;
2460 pid_t pid;
94c55f17 2461 const char *consumer_to_use;
53086306 2462 const char *verbosity;
94c55f17 2463 struct stat st;
8c0faa1d 2464
3bd1e081 2465 DBG("Spawning consumerd");
c49dc785 2466
8c0faa1d
DG
2467 pid = fork();
2468 if (pid == 0) {
2469 /*
3bd1e081 2470 * Exec consumerd.
8c0faa1d 2471 */
e6142f2e 2472 if (config.verbose_consumer) {
53086306 2473 verbosity = "--verbose";
4421f712 2474 } else if (lttng_opt_quiet) {
53086306 2475 verbosity = "--quiet";
4421f712
DG
2476 } else {
2477 verbosity = "";
53086306 2478 }
4421f712 2479
3bd1e081
MD
2480 switch (consumer_data->type) {
2481 case LTTNG_CONSUMER_KERNEL:
94c55f17 2482 /*
c7704d57
DG
2483 * Find out which consumerd to execute. We will first try the
2484 * 64-bit path, then the sessiond's installation directory, and
2485 			 * fall back on the 32-bit one.
94c55f17 2486 */
63a799e8 2487 DBG3("Looking for a kernel consumer at these locations:");
59ee5091 2488 DBG3(" 1) %s", config.consumerd64_bin_path.value ? : "NULL");
e6142f2e 2489 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, DEFAULT_CONSUMERD_FILE);
59ee5091 2490 DBG3(" 3) %s", config.consumerd32_bin_path.value ? : "NULL");
e6142f2e 2491 if (stat(config.consumerd64_bin_path.value, &st) == 0) {
63a799e8 2492 DBG3("Found location #1");
e6142f2e
JG
2493 consumer_to_use = config.consumerd64_bin_path.value;
2494 } else if (stat(INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE, &st) == 0) {
63a799e8 2495 DBG3("Found location #2");
e6142f2e
JG
2496 consumer_to_use = INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE;
2497 } else if (stat(config.consumerd32_bin_path.value, &st) == 0) {
63a799e8 2498 DBG3("Found location #3");
e6142f2e 2499 consumer_to_use = config.consumerd32_bin_path.value;
94c55f17 2500 } else {
63a799e8 2501 DBG("Could not find any valid consumerd executable");
4282f9a3 2502 ret = -EINVAL;
3d678709 2503 goto error;
94c55f17
AM
2504 }
2505 DBG("Using kernel consumer at: %s", consumer_to_use);
6e2cc8d8 2506 (void) execl(consumer_to_use,
94c55f17
AM
2507 "lttng-consumerd", verbosity, "-k",
2508 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2509 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
e6142f2e 2510 "--group", config.tracing_group_name.value,
94c55f17 2511 NULL);
3bd1e081 2512 break;
7753dea8
MD
2513 case LTTNG_CONSUMER64_UST:
2514 {
46b23495 2515 if (config.consumerd64_lib_dir.value) {
8f4905da
MD
2516 char *tmp;
2517 size_t tmplen;
ddaec4a2 2518 char *tmpnew;
8f4905da 2519
e8fa9fb0 2520 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
8f4905da
MD
2521 if (!tmp) {
2522 tmp = "";
2523 }
d222983e 2524 tmplen = strlen(config.consumerd64_lib_dir.value) + 1 /* : */ + strlen(tmp);
8f4905da
MD
2525 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2526 if (!tmpnew) {
2527 ret = -ENOMEM;
2528 goto error;
2529 }
e6142f2e 2530 strcat(tmpnew, config.consumerd64_lib_dir.value);
8f4905da
MD
2531 if (tmp[0] != '\0') {
2532 strcat(tmpnew, ":");
2533 strcat(tmpnew, tmp);
2534 }
d222983e 2535 ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
ddaec4a2 2536 free(tmpnew);
8f4905da
MD
2537 if (ret) {
2538 ret = -errno;
2539 goto error;
2540 }
2541 }
e6142f2e
JG
2542 DBG("Using 64-bit UST consumer at: %s", config.consumerd64_bin_path.value);
2543 (void) execl(config.consumerd64_bin_path.value, "lttng-consumerd", verbosity, "-u",
7753dea8
MD
2544 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2545 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
e6142f2e 2546 "--group", config.tracing_group_name.value,
7753dea8 2547 NULL);
3bd1e081 2548 break;
7753dea8
MD
2549 }
2550 case LTTNG_CONSUMER32_UST:
2551 {
46b23495 2552 if (config.consumerd32_lib_dir.value) {
8f4905da
MD
2553 char *tmp;
2554 size_t tmplen;
ddaec4a2 2555 char *tmpnew;
8f4905da 2556
e8fa9fb0 2557 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
8f4905da
MD
2558 if (!tmp) {
2559 tmp = "";
2560 }
d222983e 2561 tmplen = strlen(config.consumerd32_lib_dir.value) + 1 /* : */ + strlen(tmp);
8f4905da
MD
2562 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2563 if (!tmpnew) {
2564 ret = -ENOMEM;
2565 goto error;
2566 }
e6142f2e 2567 strcat(tmpnew, config.consumerd32_lib_dir.value);
8f4905da
MD
2568 if (tmp[0] != '\0') {
2569 strcat(tmpnew, ":");
2570 strcat(tmpnew, tmp);
2571 }
d222983e 2572 ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
ddaec4a2 2573 free(tmpnew);
8f4905da
MD
2574 if (ret) {
2575 ret = -errno;
2576 goto error;
2577 }
2578 }
e6142f2e
JG
2579 DBG("Using 32-bit UST consumer at: %s", config.consumerd32_bin_path.value);
2580 (void) execl(config.consumerd32_bin_path.value, "lttng-consumerd", verbosity, "-u",
7753dea8
MD
2581 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2582 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
e6142f2e 2583 "--group", config.tracing_group_name.value,
7753dea8
MD
2584 NULL);
2585 break;
2586 }
3bd1e081 2587 default:
b60e7183 2588 ERR("unknown consumer type");
e9d6b496 2589 errno = 0;
3bd1e081 2590 }
8c0faa1d 2591 if (errno != 0) {
4282f9a3 2592 PERROR("Consumer execl()");
8c0faa1d 2593 }
4282f9a3 2594 /* Reaching this point, we got a failure on our execl(). */
8c0faa1d
DG
2595 exit(EXIT_FAILURE);
2596 } else if (pid > 0) {
2597 ret = pid;
8c0faa1d 2598 } else {
76d7553f 2599 PERROR("start consumer fork");
8c0faa1d 2600 ret = -errno;
8c0faa1d 2601 }
8f4905da 2602error:
8c0faa1d
DG
2603 return ret;
2604}
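/*
 * For illustration (kernel consumer case, verbose output; socket paths and
 * group name depend on the configuration), the execl() above amounts to
 * running:
 *
 *   lttng-consumerd --verbose -k \
 *       --consumerd-cmd-sock <cmd_unix_sock_path> \
 *       --consumerd-err-sock <err_unix_sock_path> \
 *       --group <tracing_group_name>
 */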
2605
693bd40b 2606/*
3bd1e081 2607 * Spawn the consumerd daemon and session daemon thread.
693bd40b 2608 */
3bd1e081 2609static int start_consumerd(struct consumer_data *consumer_data)
693bd40b 2610{
c617c0c6 2611 int ret;
edb8b045
DG
2612
2613 /*
2614 * Set the listen() state on the socket since there is a possible race
2615 	 * between the exec() of the consumer daemon and this call if placed in the
2616 * consumer thread. See bug #366 for more details.
2617 */
2618 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2619 if (ret < 0) {
2620 goto error;
2621 }
693bd40b 2622
3bd1e081
MD
2623 pthread_mutex_lock(&consumer_data->pid_mutex);
2624 if (consumer_data->pid != 0) {
2625 pthread_mutex_unlock(&consumer_data->pid_mutex);
c49dc785
DG
2626 goto end;
2627 }
693bd40b 2628
3bd1e081 2629 ret = spawn_consumerd(consumer_data);
c49dc785 2630 if (ret < 0) {
3bd1e081
MD
2631 ERR("Spawning consumerd failed");
2632 pthread_mutex_unlock(&consumer_data->pid_mutex);
c49dc785 2633 goto error;
693bd40b 2634 }
c49dc785 2635
3bd1e081
MD
2636 /* Setting up the consumer_data pid */
2637 consumer_data->pid = ret;
48842b30 2638 DBG2("Consumer pid %d", consumer_data->pid);
3bd1e081 2639 pthread_mutex_unlock(&consumer_data->pid_mutex);
693bd40b 2640
3bd1e081
MD
2641 DBG2("Spawning consumer control thread");
2642 ret = spawn_consumer_thread(consumer_data);
693bd40b 2643 if (ret < 0) {
3bd1e081 2644 ERR("Fatal error spawning consumer control thread");
693bd40b
DG
2645 goto error;
2646 }
2647
c49dc785 2648end:
693bd40b
DG
2649 return 0;
2650
2651error:
331744e3 2652 /* Cleanup already created sockets on error. */
edb8b045 2653 if (consumer_data->err_sock >= 0) {
c617c0c6
MD
2654 int err;
2655
edb8b045
DG
2656 err = close(consumer_data->err_sock);
2657 if (err < 0) {
2658 PERROR("close consumer data error socket");
2659 }
2660 }
693bd40b
DG
2661 return ret;
2662}
2663
b73401da 2664/*
096102bd 2665 * Setup necessary data for kernel tracer action.
b73401da 2666 */
096102bd 2667static int init_kernel_tracer(void)
b73401da
DG
2668{
2669 int ret;
b73401da 2670
096102bd
DG
2671 /* Modprobe lttng kernel modules */
2672 ret = modprobe_lttng_control();
b73401da 2673 if (ret < 0) {
b73401da
DG
2674 goto error;
2675 }
2676
096102bd
DG
2677 /* Open debugfs lttng */
2678 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2679 if (kernel_tracer_fd < 0) {
2680 DBG("Failed to open %s", module_proc_lttng);
2f77fc4b 2681 goto error_open;
54d01ffb
DG
2682 }
2683
2f77fc4b 2684 /* Validate kernel version */
88076e89
JD
2685 ret = kernel_validate_version(kernel_tracer_fd, &kernel_tracer_version,
2686 &kernel_tracer_abi_version);
2f77fc4b
DG
2687 if (ret < 0) {
2688 goto error_version;
b551a063 2689 }
54d01ffb 2690
2f77fc4b
DG
2691 ret = modprobe_lttng_data();
2692 if (ret < 0) {
2693 goto error_modules;
54d01ffb
DG
2694 }
2695
6e21424e
JR
2696 ret = kernel_supports_ring_buffer_snapshot_sample_positions(
2697 kernel_tracer_fd);
2698 if (ret < 0) {
2699 goto error_modules;
2700 }
2701
2702 if (ret < 1) {
2703 WARN("Kernel tracer does not support buffer monitoring. "
2704 "The monitoring timer of channels in the kernel domain "
2705 "will be set to 0 (disabled).");
2706 }
2707
2f77fc4b
DG
2708 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2709 return 0;
2710
2711error_version:
2712 modprobe_remove_lttng_control();
2713 ret = close(kernel_tracer_fd);
2714 if (ret) {
2715 PERROR("close");
b551a063 2716 }
2f77fc4b 2717 kernel_tracer_fd = -1;
f73fabfd 2718 return LTTNG_ERR_KERN_VERSION;
b551a063 2719
2f77fc4b
DG
2720error_modules:
2721 ret = close(kernel_tracer_fd);
2722 if (ret) {
2723 PERROR("close");
b551a063 2724 }
54d01ffb 2725
2f77fc4b
DG
2726error_open:
2727 modprobe_remove_lttng_control();
54d01ffb
DG
2728
2729error:
2f77fc4b
DG
2730 WARN("No kernel tracer available");
2731 kernel_tracer_fd = -1;
2732 if (!is_root) {
f73fabfd 2733 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2f77fc4b 2734 } else {
f73fabfd 2735 return LTTNG_ERR_KERN_NA;
2f77fc4b 2736 }
54d01ffb
DG
2737}
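/*
 * Kernel tracer bring-up recap (descriptive only): the control modules are
 * loaded first, the lttng proc file (module_proc_lttng) is opened, the
 * tracer and ABI versions are validated, and only then are the data modules
 * loaded. Any failure unwinds in reverse order and leaves kernel_tracer_fd
 * at -1 so the session daemon keeps running without kernel tracing support.
 */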
2738
2f77fc4b 2739
54d01ffb 2740/*
2f77fc4b
DG
2741 * Copy consumer output from the tracing session to the domain session. The
2742 * function also applies the appropriate per-domain modification to the
2743 * trace files' destination directory.
36b588ed
MD
2744 *
2745 * Should *NOT* be called with RCU read-side lock held.
54d01ffb 2746 */
2f77fc4b 2747static int copy_session_consumer(int domain, struct ltt_session *session)
54d01ffb
DG
2748{
2749 int ret;
2f77fc4b
DG
2750 const char *dir_name;
2751 struct consumer_output *consumer;
2752
2753 assert(session);
2754 assert(session->consumer);
54d01ffb 2755
b551a063
DG
2756 switch (domain) {
2757 case LTTNG_DOMAIN_KERNEL:
2f77fc4b 2758 DBG3("Copying tracing session consumer output in kernel session");
09a90bcd
DG
2759 /*
2760 * XXX: We should audit the session creation and what this function
2761 * does "extra" in order to avoid a destroy since this function is used
2762 * in the domain session creation (kernel and ust) only. Same for UST
2763 * domain.
2764 */
2765 if (session->kernel_session->consumer) {
6addfa37 2766 consumer_output_put(session->kernel_session->consumer);
09a90bcd 2767 }
2f77fc4b
DG
2768 session->kernel_session->consumer =
2769 consumer_copy_output(session->consumer);
2770 /* Ease our life a bit for the next part */
2771 consumer = session->kernel_session->consumer;
2772 dir_name = DEFAULT_KERNEL_TRACE_DIR;
b551a063 2773 break;
f20baf8e 2774 case LTTNG_DOMAIN_JUL:
5cdb6027 2775 case LTTNG_DOMAIN_LOG4J:
0e115563 2776 case LTTNG_DOMAIN_PYTHON:
b551a063 2777 case LTTNG_DOMAIN_UST:
2f77fc4b 2778 DBG3("Copying tracing session consumer output in UST session");
09a90bcd 2779 if (session->ust_session->consumer) {
6addfa37 2780 consumer_output_put(session->ust_session->consumer);
09a90bcd 2781 }
2f77fc4b
DG
2782 session->ust_session->consumer =
2783 consumer_copy_output(session->consumer);
2784 /* Ease our life a bit for the next part */
2785 consumer = session->ust_session->consumer;
2786 dir_name = DEFAULT_UST_TRACE_DIR;
b551a063
DG
2787 break;
2788 default:
f73fabfd 2789 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
54d01ffb
DG
2790 goto error;
2791 }
2792
2f77fc4b 2793 /* Append correct directory to subdir */
c30ce0b3
CB
2794 strncat(consumer->subdir, dir_name,
2795 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2f77fc4b
DG
2796 DBG3("Copy session consumer subdir %s", consumer->subdir);
2797
f73fabfd 2798 ret = LTTNG_OK;
54d01ffb
DG
2799
2800error:
2801 return ret;
2802}
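/*
 * Illustration (directory names are assumptions based on the
 * DEFAULT_*_TRACE_DIR defines): for a session whose consumer subdir is
 * "my-session-20180101-120000", the kernel domain would end up with a
 * subdir such as "my-session-20180101-120000/kernel" and the UST domain
 * with ".../ust", giving each domain its own trace destination directory.
 */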
2803
00e2e675 2804/*
2f77fc4b 2805 * Create an UST session and add it to the session ust list.
36b588ed
MD
2806 *
2807 * Should *NOT* be called with RCU read-side lock held.
00e2e675 2808 */
2f77fc4b
DG
2809static int create_ust_session(struct ltt_session *session,
2810 struct lttng_domain *domain)
00e2e675
DG
2811{
2812 int ret;
2f77fc4b 2813 struct ltt_ust_session *lus = NULL;
00e2e675 2814
a4b92340 2815 assert(session);
2f77fc4b
DG
2816 assert(domain);
2817 assert(session->consumer);
a4b92340 2818
2f77fc4b 2819 switch (domain->type) {
f20baf8e 2820 case LTTNG_DOMAIN_JUL:
5cdb6027 2821 case LTTNG_DOMAIN_LOG4J:
0e115563 2822 case LTTNG_DOMAIN_PYTHON:
2f77fc4b
DG
2823 case LTTNG_DOMAIN_UST:
2824 break;
2825 default:
2826 ERR("Unknown UST domain on create session %d", domain->type);
f73fabfd 2827 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
00e2e675
DG
2828 goto error;
2829 }
2830
2f77fc4b
DG
2831 DBG("Creating UST session");
2832
dec56f6c 2833 lus = trace_ust_create_session(session->id);
2f77fc4b 2834 if (lus == NULL) {
f73fabfd 2835 ret = LTTNG_ERR_UST_SESS_FAIL;
a4b92340
DG
2836 goto error;
2837 }
2838
2f77fc4b
DG
2839 lus->uid = session->uid;
2840 lus->gid = session->gid;
2bba9e53 2841 lus->output_traces = session->output_traces;
27babd3a 2842 lus->snapshot_mode = session->snapshot_mode;
ecc48a90 2843 lus->live_timer_interval = session->live_timer;
2f77fc4b 2844 session->ust_session = lus;
d7ba1388 2845 if (session->shm_path[0]) {
3d071855
MD
2846 strncpy(lus->root_shm_path, session->shm_path,
2847 sizeof(lus->root_shm_path));
2848 lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
d7ba1388
MD
2849 strncpy(lus->shm_path, session->shm_path,
2850 sizeof(lus->shm_path));
2851 lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
2852 strncat(lus->shm_path, "/ust",
2853 sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
2854 }
2f77fc4b
DG
2855 /* Copy session output to the newly created UST session */
2856 ret = copy_session_consumer(domain->type, session);
f73fabfd 2857 if (ret != LTTNG_OK) {
00e2e675
DG
2858 goto error;
2859 }
2860
f73fabfd 2861 return LTTNG_OK;
00e2e675
DG
2862
2863error:
2f77fc4b
DG
2864 free(lus);
2865 session->ust_session = NULL;
00e2e675
DG
2866 return ret;
2867}
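/*
 * Illustration of the shm_path handling above: if the session was created
 * with a shm_path of "/dev/shm/my-session", lus->root_shm_path keeps
 * "/dev/shm/my-session" while lus->shm_path becomes
 * "/dev/shm/my-session/ust". (The example path is hypothetical; only the
 * "/ust" suffix comes from the code above.)
 */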
2868
2869/*
2f77fc4b 2870 * Create a kernel tracer session then create the default channel.
00e2e675 2871 */
2f77fc4b 2872static int create_kernel_session(struct ltt_session *session)
00e2e675
DG
2873{
2874 int ret;
a4b92340 2875
2f77fc4b 2876 DBG("Creating kernel session");
00e2e675 2877
2f77fc4b
DG
2878 ret = kernel_create_session(session, kernel_tracer_fd);
2879 if (ret < 0) {
f73fabfd 2880 ret = LTTNG_ERR_KERN_SESS_FAIL;
00e2e675
DG
2881 goto error;
2882 }
2883
2f77fc4b
DG
2884 /* Code flow safety */
2885 assert(session->kernel_session);
2886
2887 /* Copy session output to the newly created Kernel session */
2888 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
f73fabfd 2889 if (ret != LTTNG_OK) {
a4b92340
DG
2890 goto error;
2891 }
2892
2f77fc4b
DG
2893 session->kernel_session->uid = session->uid;
2894 session->kernel_session->gid = session->gid;
2bba9e53 2895 session->kernel_session->output_traces = session->output_traces;
27babd3a 2896 session->kernel_session->snapshot_mode = session->snapshot_mode;
00e2e675 2897
f73fabfd 2898 return LTTNG_OK;
00e2e675 2899
2f77fc4b
DG
2900error:
2901 trace_kernel_destroy_session(session->kernel_session);
2902 session->kernel_session = NULL;
2903 return ret;
2904}
00e2e675 2905
2f77fc4b
DG
2906/*
2907 * Count the number of sessions permitted by uid/gid.
2908 */
2909static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2910{
2911 unsigned int i = 0;
2912 struct ltt_session *session;
07424f16 2913
2f77fc4b
DG
2914 	DBG("Counting number of available sessions for UID %d GID %d",
2915 uid, gid);
2916 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
00e2e675 2917 /*
2f77fc4b 2918 * Only list the sessions the user can control.
00e2e675 2919 */
2f77fc4b
DG
2920 if (!session_access_ok(session, uid, gid)) {
2921 continue;
2922 }
2923 i++;
a4b92340 2924 }
2f77fc4b 2925 return i;
00e2e675
DG
2926}
2927
5c408ad8
JD
2928/*
2929 * Check if the current kernel tracer supports the session rotation feature.
2930 * Return 1 if it does, 0 otherwise.
2931 */
2932static int check_rotate_compatible(void)
2933{
2934 int ret = 1;
2935
2936 if (kernel_tracer_version.major != 2 || kernel_tracer_version.minor < 11) {
2937 DBG("Kernel tracer version is not compatible with the rotation feature");
2938 ret = 0;
2939 }
2940
2941 return ret;
2942}
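/*
 * Illustrative use (a sketch, not taken from this file): a command handler
 * would typically gate kernel-domain rotation on this check. The error
 * handling below is an assumption.
 */
#if 0
	if (session->kernel_session && !check_rotate_compatible()) {
		DBG("Session rotation refused: unsupported kernel tracer version");
		ret = -1;	/* A real handler would return a proper LTTNG_ERR_* code. */
		goto error;
	}
#endif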
2943
54d01ffb
DG
2944/*
2945 * Process the command requested by the lttng client within the command
2946 * context structure. This function makes sure that the return structure (llm)
2947 * is set and ready for transmission before returning.
2948 *
2949 * Return any error encountered or 0 for success.
53a80697
MD
2950 *
2951 * "sock" is only used for special-case var. len data.
36b588ed
MD
2952 *
2953 * Should *NOT* be called with RCU read-side lock held.
54d01ffb 2954 */
53a80697
MD
2955static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2956 int *sock_error)
54d01ffb 2957{
f73fabfd 2958 int ret = LTTNG_OK;
44d3bd01 2959 int need_tracing_session = 1;
2e09ba09 2960 int need_domain;
54d01ffb
DG
2961
2962 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2963
3745d315
MD
2964 assert(!rcu_read_ongoing());
2965
53a80697
MD
2966 *sock_error = 0;
2967
2e09ba09
MD
2968 switch (cmd_ctx->lsm->cmd_type) {
2969 case LTTNG_CREATE_SESSION:
27babd3a 2970 case LTTNG_CREATE_SESSION_SNAPSHOT:
ecc48a90 2971 case LTTNG_CREATE_SESSION_LIVE:
2e09ba09
MD
2972 case LTTNG_DESTROY_SESSION:
2973 case LTTNG_LIST_SESSIONS:
2974 case LTTNG_LIST_DOMAINS:
2975 case LTTNG_START_TRACE:
2976 case LTTNG_STOP_TRACE:
6d805429 2977 case LTTNG_DATA_PENDING:
da3c9ec1
DG
2978 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2979 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2980 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2981 case LTTNG_SNAPSHOT_RECORD:
fb198a11 2982 case LTTNG_SAVE_SESSION:
d7ba1388 2983 case LTTNG_SET_SESSION_SHM_PATH:
eded6438 2984 case LTTNG_REGENERATE_METADATA:
c2561365 2985 case LTTNG_REGENERATE_STATEDUMP:
b3530820
JG
2986 case LTTNG_REGISTER_TRIGGER:
2987 case LTTNG_UNREGISTER_TRIGGER:
5c408ad8 2988 case LTTNG_ROTATE_SESSION:
d68c9a04
JD
2989 case LTTNG_ROTATION_GET_INFO:
2990 case LTTNG_SESSION_GET_CURRENT_OUTPUT:
259c2674 2991 case LTTNG_ROTATION_SET_SCHEDULE:
329f3443
JD
2992 case LTTNG_ROTATION_SCHEDULE_GET_TIMER_PERIOD:
2993 case LTTNG_ROTATION_SCHEDULE_GET_SIZE:
2e09ba09 2994 need_domain = 0;
3aace903 2995 break;
2e09ba09
MD
2996 default:
2997 need_domain = 1;
2998 }
2999
e6142f2e 3000 if (config.no_kernel && need_domain
2e09ba09 3001 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
531d29f9 3002 if (!is_root) {
f73fabfd 3003 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
531d29f9 3004 } else {
f73fabfd 3005 ret = LTTNG_ERR_KERN_NA;
531d29f9 3006 }
4fba7219
DG
3007 goto error;
3008 }
3009
8d3113b2
DG
3010 /* Deny register consumer if we already have a spawned consumer. */
3011 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
3012 pthread_mutex_lock(&kconsumer_data.pid_mutex);
3013 if (kconsumer_data.pid > 0) {
f73fabfd 3014 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
fa317f24 3015 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
8d3113b2
DG
3016 goto error;
3017 }
3018 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
3019 }
3020
54d01ffb
DG
3021 /*
3022 	 * Check for commands that don't need to allocate a returned payload. We do
44d3bd01 3023 * this here so we don't have to make the call for no payload at each
54d01ffb
DG
3024 * command.
3025 */
3026 switch(cmd_ctx->lsm->cmd_type) {
3027 case LTTNG_LIST_SESSIONS:
3028 case LTTNG_LIST_TRACEPOINTS:
f37d259d 3029 case LTTNG_LIST_TRACEPOINT_FIELDS:
54d01ffb
DG
3030 case LTTNG_LIST_DOMAINS:
3031 case LTTNG_LIST_CHANNELS:
3032 case LTTNG_LIST_EVENTS:
834978fd 3033 case LTTNG_LIST_SYSCALLS:
a5dfbb9d 3034 case LTTNG_LIST_TRACKER_PIDS:
5cd0780d 3035 case LTTNG_DATA_PENDING:
5c408ad8 3036 case LTTNG_ROTATE_SESSION:
d68c9a04 3037 case LTTNG_ROTATION_GET_INFO:
329f3443
JD
3038 case LTTNG_ROTATION_SCHEDULE_GET_TIMER_PERIOD:
3039 case LTTNG_ROTATION_SCHEDULE_GET_SIZE:
54d01ffb
DG
3040 break;
3041 default:
3042 /* Setup lttng message with no payload */
6e10c9b9 3043 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
54d01ffb
DG
3044 if (ret < 0) {
3045 /* This label does not try to unlock the session */
3046 goto init_setup_error;
3047 }
3048 }
3049
3050 /* Commands that DO NOT need a session. */
3051 switch (cmd_ctx->lsm->cmd_type) {
54d01ffb 3052 case LTTNG_CREATE_SESSION:
27babd3a 3053 case LTTNG_CREATE_SESSION_SNAPSHOT:
ecc48a90 3054 case LTTNG_CREATE_SESSION_LIVE:
54d01ffb
DG
3055 case LTTNG_LIST_SESSIONS:
3056 case LTTNG_LIST_TRACEPOINTS:
834978fd 3057 case LTTNG_LIST_SYSCALLS:
f37d259d 3058 case LTTNG_LIST_TRACEPOINT_FIELDS:
fb198a11 3059 case LTTNG_SAVE_SESSION:
b3530820
JG
3060 case LTTNG_REGISTER_TRIGGER:
3061 case LTTNG_UNREGISTER_TRIGGER:
44d3bd01 3062 need_tracing_session = 0;
54d01ffb
DG
3063 break;
3064 default:
3065 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
256a5576
MD
3066 /*
3067 * We keep the session list lock across _all_ commands
3068 * for now, because the per-session lock does not
3069 * handle teardown properly.
3070 */
74babd95 3071 session_lock_list();
54d01ffb
DG
3072 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
3073 if (cmd_ctx->session == NULL) {
bba2d65f 3074 ret = LTTNG_ERR_SESS_NOT_FOUND;
54d01ffb
DG
3075 goto error;
3076 } else {
3077 /* Acquire lock for the session */
3078 session_lock(cmd_ctx->session);