Launch the client management thread using lttng_thread
lttng-tools.git: src/bin/lttng-sessiond/client.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #include <stddef.h>
21 #include <pthread.h>
22 #include <signal.h>
23 #include <sys/stat.h>
24 #include <common/compat/getenv.h>
25 #include <common/unix.h>
26 #include <common/utils.h>
27 #include <lttng/userspace-probe-internal.h>
28 #include <lttng/event-internal.h>
29
30 #include "client.h"
31 #include "lttng-sessiond.h"
32 #include "cmd.h"
33 #include "kernel.h"
34 #include "save.h"
35 #include "health-sessiond.h"
36 #include "testpoint.h"
37 #include "utils.h"
38
39 static bool is_root;
40
41 static struct thread_state {
42 pthread_cond_t cond;
43 pthread_mutex_t lock;
44 bool is_running;
45 } thread_state = {
46 .cond = PTHREAD_COND_INITIALIZER,
47 .lock = PTHREAD_MUTEX_INITIALIZER,
48 .is_running = false
49 };
50
51 void set_thread_state_running(void)
52 {
53 pthread_mutex_lock(&thread_state.lock);
54 thread_state.is_running = true;
55 pthread_cond_broadcast(&thread_state.cond);
56 pthread_mutex_unlock(&thread_state.lock);
57 }
58
59 static void wait_thread_state_running(void)
60 {
61 pthread_mutex_lock(&thread_state.lock);
62 while (!thread_state.is_running) {
63 pthread_cond_wait(&thread_state.cond,
64 &thread_state.lock);
65 }
66 pthread_mutex_unlock(&thread_state.lock);
67 }
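
/*
 * Illustrative sketch (not part of the original file): the pair above
 * implements a startup handshake. The client management thread calls
 * set_thread_state_running() once it is ready to serve commands, while the
 * launching code blocks in wait_thread_state_running() before reporting the
 * thread as started. The function names below are hypothetical.
 */
#if 0
static void *client_thread_entry_example(void *data)
{
	/* ... create the client socket and start listening ... */

	/* Unblock the launcher only once the socket is actually usable. */
	set_thread_state_running();

	for (;;) {
		/* ... accept and process client commands ... */
	}
	return NULL;
}

static int launch_client_thread_example(void)
{
	pthread_t thread;
	int ret;

	ret = pthread_create(&thread, default_pthread_attr(),
			client_thread_entry_example, NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create clients");
		return -1;
	}
	/* Do not return before the client thread can serve requests. */
	wait_thread_state_running();
	return 0;
}
#endif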
68
69 /*
70 * Setup the outgoing data buffer for the response (llm) by allocating the
71 * right amount of memory and copying the original information from the lsm
72 * structure.
73 *
74 * Return 0 on success, negative value on error.
75 */
76 static int setup_lttng_msg(struct command_ctx *cmd_ctx,
77 const void *payload_buf, size_t payload_len,
78 const void *cmd_header_buf, size_t cmd_header_len)
79 {
80 int ret = 0;
81 const size_t header_len = sizeof(struct lttcomm_lttng_msg);
82 const size_t cmd_header_offset = header_len;
83 const size_t payload_offset = cmd_header_offset + cmd_header_len;
84 const size_t total_msg_size = header_len + cmd_header_len + payload_len;
85
86 cmd_ctx->llm = zmalloc(total_msg_size);
87
88 if (cmd_ctx->llm == NULL) {
89 PERROR("zmalloc");
90 ret = -ENOMEM;
91 goto end;
92 }
93
94 /* Copy common data */
95 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
96 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
97 cmd_ctx->llm->cmd_header_size = cmd_header_len;
98 cmd_ctx->llm->data_size = payload_len;
99 cmd_ctx->lttng_msg_size = total_msg_size;
100
101 /* Copy command header */
102 if (cmd_header_len) {
103 memcpy(((uint8_t *) cmd_ctx->llm) + cmd_header_offset, cmd_header_buf,
104 cmd_header_len);
105 }
106
107 /* Copy payload */
108 if (payload_len) {
109 memcpy(((uint8_t *) cmd_ctx->llm) + payload_offset, payload_buf,
110 payload_len);
111 }
112
113 end:
114 return ret;
115 }
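
/*
 * Illustrative note (not part of the original file): the buffer built by
 * setup_lttng_msg() is laid out as
 *
 *   | struct lttcomm_lttng_msg | command header | payload |
 *
 * with cmd_header_size and data_size describing the two variable-length
 * parts. The sketch below mirrors how the LTTNG_LIST_EVENTS handler further
 * down in this file fills both parts; the local variables are placeholders.
 */
#if 0
struct lttcomm_event_command_header cmd_header;
struct lttng_event *events = NULL;	/* as returned by cmd_list_events() */
size_t total_size = 0;			/* size of the serialized event list */
ssize_t nb_event = 0;

memset(&cmd_header, 0, sizeof(cmd_header));
cmd_header.nb_events = nb_event;
ret = setup_lttng_msg(cmd_ctx, events, total_size,
		&cmd_header, sizeof(cmd_header));

/* Handlers that only return a payload use the thin wrapper instead. */
ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events, total_size);
#endif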
116
117 /*
118 * Start thread_manage_consumer. This must be done after an lttng-consumerd
119 * exec, otherwise it will fail.
120 */
121 static int spawn_consumer_thread(struct consumer_data *consumer_data)
122 {
123 int ret, clock_ret;
124 struct timespec timeout;
125
126 /*
127 * Make sure we set the readiness flag to 0 because we are NOT ready.
128 * This access to consumer_thread_is_ready does not need to be
129 * protected by consumer_data.cond_mutex (yet) since the consumer
130 * management thread has not been started at this point.
131 */
132 consumer_data->consumer_thread_is_ready = 0;
133
134 /* Setup pthread condition */
135 ret = pthread_condattr_init(&consumer_data->condattr);
136 if (ret) {
137 errno = ret;
138 PERROR("pthread_condattr_init consumer data");
139 goto error;
140 }
141
142 /*
143 * Set the monotonic clock in order to make sure we DO NOT jump in time
144 * between the clock_gettime() call and the timedwait call. See bug #324
145 * for more details on how we noticed it.
146 */
147 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
148 if (ret) {
149 errno = ret;
150 PERROR("pthread_condattr_setclock consumer data");
151 goto error;
152 }
153
154 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
155 if (ret) {
156 errno = ret;
157 PERROR("pthread_cond_init consumer data");
158 goto error;
159 }
160
161 ret = pthread_create(&consumer_data->thread, default_pthread_attr(),
162 thread_manage_consumer, consumer_data);
163 if (ret) {
164 errno = ret;
165 PERROR("pthread_create consumer");
166 ret = -1;
167 goto error;
168 }
169
170 /* We are about to wait on a pthread condition */
171 pthread_mutex_lock(&consumer_data->cond_mutex);
172
173 /* Get time for sem_timedwait absolute timeout */
174 clock_ret = lttng_clock_gettime(CLOCK_MONOTONIC, &timeout);
175 /*
176 * Set the timeout for the condition timed wait even if the clock gettime
177 * call fails, since we might loop on that call and we want to avoid
178 * incrementing the timeout too many times.
179 */
180 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
181
182 /*
183 * The following loop could be skipped under some conditions, so we set ret
184 * to 0 beforehand to make sure at least one round of the loop is
185 * done.
186 */
187 ret = 0;
188
189 /*
190 * Loop until the condition is reached or a timeout occurs. Note
191 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
192 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
193 * possible. This loop does not take any chances and works with both of
194 * them.
195 */
196 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
197 if (clock_ret < 0) {
198 PERROR("clock_gettime spawn consumer");
199 /* Infinite wait for the consumerd thread to be ready */
200 ret = pthread_cond_wait(&consumer_data->cond,
201 &consumer_data->cond_mutex);
202 } else {
203 ret = pthread_cond_timedwait(&consumer_data->cond,
204 &consumer_data->cond_mutex, &timeout);
205 }
206 }
207
208 /* Release the pthread condition */
209 pthread_mutex_unlock(&consumer_data->cond_mutex);
210
211 if (ret != 0) {
212 errno = ret;
213 if (ret == ETIMEDOUT) {
214 int pth_ret;
215
216 /*
217 * Call has timed out so we kill the kconsumerd_thread and return
218 * an error.
219 */
220 ERR("Condition timed out. The consumer thread was never ready."
221 " Killing it");
222 pth_ret = pthread_cancel(consumer_data->thread);
223 if (pth_ret < 0) {
224 PERROR("pthread_cancel consumer thread");
225 }
226 } else {
227 PERROR("pthread_cond_wait failed consumer thread");
228 }
229 /* Caller is expecting a negative value on failure. */
230 ret = -1;
231 goto error;
232 }
233
234 pthread_mutex_lock(&consumer_data->pid_mutex);
235 if (consumer_data->pid == 0) {
236 ERR("Consumerd did not start");
237 pthread_mutex_unlock(&consumer_data->pid_mutex);
238 goto error;
239 }
240 pthread_mutex_unlock(&consumer_data->pid_mutex);
241
242 return 0;
243
244 error:
245 return ret;
246 }
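
/*
 * Illustrative sketch (not part of the original file): stripped of the
 * consumer-specific state, the CLOCK_MONOTONIC + pthread_cond_timedwait()
 * pattern used above looks like the function below. The condition variable
 * must have been initialized with a condattr whose clock is CLOCK_MONOTONIC
 * for the timeout to be immune to wall-clock adjustments.
 */
#if 0
static int wait_ready_example(pthread_cond_t *cond, pthread_mutex_t *lock,
		const int *ready_flag)
{
	int ret = 0;
	struct timespec timeout;

	if (clock_gettime(CLOCK_MONOTONIC, &timeout)) {
		PERROR("clock_gettime");
		return -1;
	}
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	pthread_mutex_lock(lock);
	/* Loop tolerates spurious wake-ups and EINTR, as in the code above. */
	while (!*ready_flag && ret != ETIMEDOUT) {
		ret = pthread_cond_timedwait(cond, lock, &timeout);
	}
	pthread_mutex_unlock(lock);

	return ret == ETIMEDOUT ? -1 : 0;
}
#endif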
247
248 /*
249 * Fork and exec a consumer daemon (consumerd).
250 *
251 * Return the consumerd pid on success, a negative value on error.
252 */
253 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
254 {
255 int ret;
256 pid_t pid;
257 const char *consumer_to_use;
258 const char *verbosity;
259 struct stat st;
260
261 DBG("Spawning consumerd");
262
263 pid = fork();
264 if (pid == 0) {
265 /*
266 * Exec consumerd.
267 */
268 if (config.verbose_consumer) {
269 verbosity = "--verbose";
270 } else if (lttng_opt_quiet) {
271 verbosity = "--quiet";
272 } else {
273 verbosity = "";
274 }
275
276 switch (consumer_data->type) {
277 case LTTNG_CONSUMER_KERNEL:
278 /*
279 * Find out which consumerd to execute. We will first try the
280 * 64-bit path, then the sessiond's installation directory, and
281 * fall back on the 32-bit one.
282 */
283 DBG3("Looking for a kernel consumer at these locations:");
284 DBG3(" 1) %s", config.consumerd64_bin_path.value ? : "NULL");
285 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, DEFAULT_CONSUMERD_FILE);
286 DBG3(" 3) %s", config.consumerd32_bin_path.value ? : "NULL");
287 if (config.consumerd64_bin_path.value && stat(config.consumerd64_bin_path.value, &st) == 0) {
288 DBG3("Found location #1");
289 consumer_to_use = config.consumerd64_bin_path.value;
290 } else if (stat(INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE, &st) == 0) {
291 DBG3("Found location #2");
292 consumer_to_use = INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE;
293 } else if (config.consumerd32_bin_path.value &&
294 stat(config.consumerd32_bin_path.value, &st) == 0) {
295 DBG3("Found location #3");
296 consumer_to_use = config.consumerd32_bin_path.value;
297 } else {
298 DBG("Could not find any valid consumerd executable");
299 ret = -EINVAL;
300 goto error;
301 }
302 DBG("Using kernel consumer at: %s", consumer_to_use);
303 (void) execl(consumer_to_use,
304 "lttng-consumerd", verbosity, "-k",
305 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
306 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
307 "--group", config.tracing_group_name.value,
308 NULL);
309 break;
310 case LTTNG_CONSUMER64_UST:
311 {
312 if (config.consumerd64_lib_dir.value) {
313 char *tmp;
314 size_t tmplen;
315 char *tmpnew;
316
317 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
318 if (!tmp) {
319 tmp = "";
320 }
321 tmplen = strlen(config.consumerd64_lib_dir.value) + 1 /* : */ + strlen(tmp);
322 tmpnew = zmalloc(tmplen + 1 /* \0 */);
323 if (!tmpnew) {
324 ret = -ENOMEM;
325 goto error;
326 }
327 strcat(tmpnew, config.consumerd64_lib_dir.value);
328 if (tmp[0] != '\0') {
329 strcat(tmpnew, ":");
330 strcat(tmpnew, tmp);
331 }
332 ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
333 free(tmpnew);
334 if (ret) {
335 ret = -errno;
336 goto error;
337 }
338 }
339 DBG("Using 64-bit UST consumer at: %s", config.consumerd64_bin_path.value);
340 (void) execl(config.consumerd64_bin_path.value, "lttng-consumerd", verbosity, "-u",
341 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
342 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
343 "--group", config.tracing_group_name.value,
344 NULL);
345 break;
346 }
347 case LTTNG_CONSUMER32_UST:
348 {
349 if (config.consumerd32_lib_dir.value) {
350 char *tmp;
351 size_t tmplen;
352 char *tmpnew;
353
354 tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
355 if (!tmp) {
356 tmp = "";
357 }
358 tmplen = strlen(config.consumerd32_lib_dir.value) + 1 /* : */ + strlen(tmp);
359 tmpnew = zmalloc(tmplen + 1 /* \0 */);
360 if (!tmpnew) {
361 ret = -ENOMEM;
362 goto error;
363 }
364 strcat(tmpnew, config.consumerd32_lib_dir.value);
365 if (tmp[0] != '\0') {
366 strcat(tmpnew, ":");
367 strcat(tmpnew, tmp);
368 }
369 ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
370 free(tmpnew);
371 if (ret) {
372 ret = -errno;
373 goto error;
374 }
375 }
376 DBG("Using 32-bit UST consumer at: %s", config.consumerd32_bin_path.value);
377 (void) execl(config.consumerd32_bin_path.value, "lttng-consumerd", verbosity, "-u",
378 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
379 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
380 "--group", config.tracing_group_name.value,
381 NULL);
382 break;
383 }
384 default:
385 ERR("unknown consumer type");
386 errno = 0;
387 }
388 if (errno != 0) {
389 PERROR("Consumer execl()");
390 }
391 /* Reaching this point, we got a failure on our execl(). */
392 exit(EXIT_FAILURE);
393 } else if (pid > 0) {
394 ret = pid;
395 } else {
396 PERROR("start consumer fork");
397 ret = -errno;
398 }
399 error:
400 return ret;
401 }
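
/*
 * Illustrative sketch (not part of the original file): the 64-bit and 32-bit
 * UST branches above duplicate the same LD_LIBRARY_PATH prepending logic.
 * The hypothetical helper below captures that logic; it does not exist in
 * lttng-tools.
 */
#if 0
static int prepend_ld_library_path_example(const char *dir)
{
	int ret;
	const char *current;
	char *merged;
	size_t merged_len;

	current = lttng_secure_getenv("LD_LIBRARY_PATH");
	if (!current) {
		current = "";
	}
	merged_len = strlen(dir) + 1 /* : */ + strlen(current) + 1 /* \0 */;
	merged = zmalloc(merged_len);
	if (!merged) {
		return -ENOMEM;
	}
	strcat(merged, dir);
	if (current[0] != '\0') {
		strcat(merged, ":");
		strcat(merged, current);
	}
	ret = setenv("LD_LIBRARY_PATH", merged, 1);
	free(merged);
	return ret ? -errno : 0;
}
#endif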
402
403 /*
404 * Spawn the consumer daemon (consumerd) and the thread that manages it.
405 */
406 static int start_consumerd(struct consumer_data *consumer_data)
407 {
408 int ret;
409
410 /*
411 * Set the listen() state on the socket since there is a possible race
412 * between the exec() of the consumer daemon and this call if placed in the
413 * consumer thread. See bug #366 for more details.
414 */
415 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
416 if (ret < 0) {
417 goto error;
418 }
419
420 pthread_mutex_lock(&consumer_data->pid_mutex);
421 if (consumer_data->pid != 0) {
422 pthread_mutex_unlock(&consumer_data->pid_mutex);
423 goto end;
424 }
425
426 ret = spawn_consumerd(consumer_data);
427 if (ret < 0) {
428 ERR("Spawning consumerd failed");
429 pthread_mutex_unlock(&consumer_data->pid_mutex);
430 goto error;
431 }
432
433 /* Setting up the consumer_data pid */
434 consumer_data->pid = ret;
435 DBG2("Consumer pid %d", consumer_data->pid);
436 pthread_mutex_unlock(&consumer_data->pid_mutex);
437
438 DBG2("Spawning consumer control thread");
439 ret = spawn_consumer_thread(consumer_data);
440 if (ret < 0) {
441 ERR("Fatal error spawning consumer control thread");
442 goto error;
443 }
444
445 end:
446 return 0;
447
448 error:
449 /* Cleanup already created sockets on error. */
450 if (consumer_data->err_sock >= 0) {
451 int err;
452
453 err = close(consumer_data->err_sock);
454 if (err < 0) {
455 PERROR("close consumer data error socket");
456 }
457 }
458 return ret;
459 }
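
/*
 * Illustrative sketch (not part of the original file): the reason listen()
 * must be called before spawning consumerd is that the child's connect()
 * would otherwise race against the parent's listen() and fail with
 * ECONNREFUSED. In plain POSIX terms (requires <sys/socket.h> and
 * <sys/un.h>), the ordering looks like this:
 */
#if 0
static int listen_then_spawn_example(const char *sock_path)
{
	int sock;
	struct sockaddr_un addr;

	sock = socket(AF_UNIX, SOCK_STREAM, 0);
	if (sock < 0) {
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);
	if (bind(sock, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
			listen(sock, 10) < 0) {
		close(sock);
		return -1;
	}
	/*
	 * Only now is it safe to fork()/exec() the peer: its connect() will
	 * queue on the listen backlog instead of being refused.
	 */
	return 0;
}
#endif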
460
461 /*
462 * Copy consumer output from the tracing session to the domain session. The
463 * function also applies the right modifications on a per-domain basis to the
464 * trace files' destination directory.
465 *
466 * Should *NOT* be called with RCU read-side lock held.
467 */
468 static int copy_session_consumer(int domain, struct ltt_session *session)
469 {
470 int ret;
471 const char *dir_name;
472 struct consumer_output *consumer;
473
474 assert(session);
475 assert(session->consumer);
476
477 switch (domain) {
478 case LTTNG_DOMAIN_KERNEL:
479 DBG3("Copying tracing session consumer output in kernel session");
480 /*
481 * XXX: We should audit the session creation and what this function
482 * does "extra" in order to avoid a destroy since this function is used
483 * in the domain session creation (kernel and ust) only. Same for UST
484 * domain.
485 */
486 if (session->kernel_session->consumer) {
487 consumer_output_put(session->kernel_session->consumer);
488 }
489 session->kernel_session->consumer =
490 consumer_copy_output(session->consumer);
491 /* Ease our life a bit for the next part */
492 consumer = session->kernel_session->consumer;
493 dir_name = DEFAULT_KERNEL_TRACE_DIR;
494 break;
495 case LTTNG_DOMAIN_JUL:
496 case LTTNG_DOMAIN_LOG4J:
497 case LTTNG_DOMAIN_PYTHON:
498 case LTTNG_DOMAIN_UST:
499 DBG3("Copying tracing session consumer output in UST session");
500 if (session->ust_session->consumer) {
501 consumer_output_put(session->ust_session->consumer);
502 }
503 session->ust_session->consumer =
504 consumer_copy_output(session->consumer);
505 /* Ease our life a bit for the next part */
506 consumer = session->ust_session->consumer;
507 dir_name = DEFAULT_UST_TRACE_DIR;
508 break;
509 default:
510 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
511 goto error;
512 }
513
514 /* Append correct directory to subdir */
515 strncat(consumer->subdir, dir_name,
516 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
517 DBG3("Copy session consumer subdir %s", consumer->subdir);
518
519 ret = LTTNG_OK;
520
521 error:
522 return ret;
523 }
524
525 /*
526 * Create a UST session and add it to the session ust list.
527 *
528 * Should *NOT* be called with RCU read-side lock held.
529 */
530 static int create_ust_session(struct ltt_session *session,
531 struct lttng_domain *domain)
532 {
533 int ret;
534 struct ltt_ust_session *lus = NULL;
535
536 assert(session);
537 assert(domain);
538 assert(session->consumer);
539
540 switch (domain->type) {
541 case LTTNG_DOMAIN_JUL:
542 case LTTNG_DOMAIN_LOG4J:
543 case LTTNG_DOMAIN_PYTHON:
544 case LTTNG_DOMAIN_UST:
545 break;
546 default:
547 ERR("Unknown UST domain on create session %d", domain->type);
548 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
549 goto error;
550 }
551
552 DBG("Creating UST session");
553
554 lus = trace_ust_create_session(session->id);
555 if (lus == NULL) {
556 ret = LTTNG_ERR_UST_SESS_FAIL;
557 goto error;
558 }
559
560 lus->uid = session->uid;
561 lus->gid = session->gid;
562 lus->output_traces = session->output_traces;
563 lus->snapshot_mode = session->snapshot_mode;
564 lus->live_timer_interval = session->live_timer;
565 session->ust_session = lus;
566 if (session->shm_path[0]) {
567 strncpy(lus->root_shm_path, session->shm_path,
568 sizeof(lus->root_shm_path));
569 lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
570 strncpy(lus->shm_path, session->shm_path,
571 sizeof(lus->shm_path));
572 lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
573 strncat(lus->shm_path, "/ust",
574 sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
575 }
576 /* Copy session output to the newly created UST session */
577 ret = copy_session_consumer(domain->type, session);
578 if (ret != LTTNG_OK) {
579 goto error;
580 }
581
582 return LTTNG_OK;
583
584 error:
585 free(lus);
586 session->ust_session = NULL;
587 return ret;
588 }
589
590 /*
591 * Create a kernel tracer session then create the default channel.
592 */
593 static int create_kernel_session(struct ltt_session *session)
594 {
595 int ret;
596
597 DBG("Creating kernel session");
598
599 ret = kernel_create_session(session, kernel_tracer_fd);
600 if (ret < 0) {
601 ret = LTTNG_ERR_KERN_SESS_FAIL;
602 goto error;
603 }
604
605 /* Code flow safety */
606 assert(session->kernel_session);
607
608 /* Copy session output to the newly created Kernel session */
609 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
610 if (ret != LTTNG_OK) {
611 goto error;
612 }
613
614 session->kernel_session->uid = session->uid;
615 session->kernel_session->gid = session->gid;
616 session->kernel_session->output_traces = session->output_traces;
617 session->kernel_session->snapshot_mode = session->snapshot_mode;
618
619 return LTTNG_OK;
620
621 error:
622 trace_kernel_destroy_session(session->kernel_session);
623 session->kernel_session = NULL;
624 return ret;
625 }
626
627 /*
628 * Count the number of sessions accessible to the given uid/gid.
629 */
630 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
631 {
632 unsigned int i = 0;
633 struct ltt_session *session;
634 const struct ltt_session_list *session_list = session_get_list();
635
636 DBG("Counting number of available session for UID %d GID %d",
637 uid, gid);
638 cds_list_for_each_entry(session, &session_list->head, list) {
639 if (!session_get(session)) {
640 continue;
641 }
642 session_lock(session);
643 /* Only count the sessions the user can control. */
644 if (session_access_ok(session, uid, gid) &&
645 !session->destroyed) {
646 i++;
647 }
648 session_unlock(session);
649 session_put(session);
650 }
651 return i;
652 }
653
654 static int receive_userspace_probe(struct command_ctx *cmd_ctx, int sock,
655 int *sock_error, struct lttng_event *event)
656 {
657 int fd, ret;
658 struct lttng_userspace_probe_location *probe_location;
659 const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
660 struct lttng_dynamic_buffer probe_location_buffer;
661 struct lttng_buffer_view buffer_view;
662
663 /*
664 * Create a buffer to store the serialized version of the probe
665 * location.
666 */
667 lttng_dynamic_buffer_init(&probe_location_buffer);
668 ret = lttng_dynamic_buffer_set_size(&probe_location_buffer,
669 cmd_ctx->lsm->u.enable.userspace_probe_location_len);
670 if (ret) {
671 ret = LTTNG_ERR_NOMEM;
672 goto error;
673 }
674
675 /*
676 * Receive the probe location.
677 */
678 ret = lttcomm_recv_unix_sock(sock, probe_location_buffer.data,
679 probe_location_buffer.size);
680 if (ret <= 0) {
681 DBG("Nothing recv() from client var len data... continuing");
682 *sock_error = 1;
683 lttng_dynamic_buffer_reset(&probe_location_buffer);
684 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
685 goto error;
686 }
687
688 buffer_view = lttng_buffer_view_from_dynamic_buffer(
689 &probe_location_buffer, 0, probe_location_buffer.size);
690
691 /*
692 * Extract the probe location from the serialized version.
693 */
694 ret = lttng_userspace_probe_location_create_from_buffer(
695 &buffer_view, &probe_location);
696 if (ret < 0) {
697 WARN("Failed to create a userspace probe location from the received buffer");
698 lttng_dynamic_buffer_reset(&probe_location_buffer);
699 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
700 goto error;
701 }
702
703 /*
704 * Receive the file descriptor to the target binary from the client.
705 */
706 DBG("Receiving userspace probe target FD from client ...");
707 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
708 if (ret <= 0) {
709 DBG("Nothing recv() from client userspace probe fd... continuing");
710 *sock_error = 1;
711 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
712 goto error;
713 }
714
715 /*
716 * Set the file descriptor received from the client through the unix
717 * socket in the probe location.
718 */
719 lookup = lttng_userspace_probe_location_get_lookup_method(probe_location);
720 if (!lookup) {
721 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
722 goto error;
723 }
724
725 /*
726 * From the kernel tracer's perspective, all userspace probe event types
727 * are the same: a file and an offset.
728 */
729 switch (lttng_userspace_probe_location_lookup_method_get_type(lookup)) {
730 case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
731 ret = lttng_userspace_probe_location_function_set_binary_fd(
732 probe_location, fd);
733 break;
734 case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
735 ret = lttng_userspace_probe_location_tracepoint_set_binary_fd(
736 probe_location, fd);
737 break;
738 default:
739 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
740 goto error;
741 }
742
743 if (ret) {
744 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
745 goto error;
746 }
747
748 /* Attach the probe location to the event. */
749 ret = lttng_event_set_userspace_probe_location(event, probe_location);
750 if (ret) {
751 ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
752 goto error;
753 }
754
755 lttng_dynamic_buffer_reset(&probe_location_buffer);
756 error:
757 return ret;
758 }
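
/*
 * Illustrative sketch (not part of the original file):
 * lttcomm_recv_fds_unix_sock(), used above to receive the target binary's
 * file descriptor, is built on the standard SCM_RIGHTS ancillary-data
 * mechanism. A generic receive-side sketch of that mechanism (requires
 * <sys/socket.h>) looks like this:
 */
#if 0
static int recv_one_fd_example(int sock)
{
	int fd = -1;
	char dummy;
	char cmsg_buf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = &dummy, .iov_len = sizeof(dummy) };
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsg_buf;
	msg.msg_controllen = sizeof(cmsg_buf);

	if (recvmsg(sock, &msg, 0) <= 0) {
		return -1;
	}
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
				cmsg->cmsg_type == SCM_RIGHTS) {
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
			break;
		}
	}
	return fd;
}
#endif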
759
760 /*
761 * Join consumer thread
762 */
763 static int join_consumer_thread(struct consumer_data *consumer_data)
764 {
765 void *status;
766
767 /* Consumer pid must be a real one. */
768 if (consumer_data->pid > 0) {
769 int ret;
770 ret = kill(consumer_data->pid, SIGTERM);
771 if (ret) {
772 PERROR("Error killing consumer daemon");
773 return ret;
774 }
775 return pthread_join(consumer_data->thread, &status);
776 } else {
777 return 0;
778 }
779 }
780
781 /*
782 * Version of setup_lttng_msg() without command header.
783 */
784 static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
785 void *payload_buf, size_t payload_len)
786 {
787 return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
788 }
789
790 /*
791 * Free memory of a command context structure.
792 */
793 static void clean_command_ctx(struct command_ctx **cmd_ctx)
794 {
795 DBG("Clean command context structure");
796 if (*cmd_ctx) {
797 if ((*cmd_ctx)->llm) {
798 free((*cmd_ctx)->llm);
799 }
800 if ((*cmd_ctx)->lsm) {
801 free((*cmd_ctx)->lsm);
802 }
803 free(*cmd_ctx);
804 *cmd_ctx = NULL;
805 }
806 }
807
808 /*
809 * Check if the current kernel tracer supports the session rotation feature.
810 * Return 1 if it does, 0 otherwise.
811 */
812 static int check_rotate_compatible(void)
813 {
814 int ret = 1;
815
816 if (kernel_tracer_version.major != 2 || kernel_tracer_version.minor < 11) {
817 DBG("Kernel tracer version is not compatible with the rotation feature");
818 ret = 0;
819 }
820
821 return ret;
822 }
823
824 /*
825 * Send data on a unix socket using the liblttsessiondcomm API.
826 *
827 * Return lttcomm error code.
828 */
829 static int send_unix_sock(int sock, void *buf, size_t len)
830 {
831 /* Check valid length */
832 if (len == 0) {
833 return -1;
834 }
835
836 return lttcomm_send_unix_sock(sock, buf, len);
837 }
838
839 /*
840 * Process the command requested by the lttng client within the command
841 * context structure. This function makes sure that the return structure (llm)
842 * is set and ready for transmission before returning.
843 *
844 * Return any error encountered or 0 for success.
845 *
846 * "sock" is only used for special-case var. len data.
847 *
848 * Should *NOT* be called with RCU read-side lock held.
849 */
850 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
851 int *sock_error)
852 {
853 int ret = LTTNG_OK;
854 int need_tracing_session = 1;
855 int need_domain;
856
857 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
858
859 assert(!rcu_read_ongoing());
860
861 *sock_error = 0;
862
863 switch (cmd_ctx->lsm->cmd_type) {
864 case LTTNG_CREATE_SESSION:
865 case LTTNG_CREATE_SESSION_SNAPSHOT:
866 case LTTNG_CREATE_SESSION_LIVE:
867 case LTTNG_DESTROY_SESSION:
868 case LTTNG_LIST_SESSIONS:
869 case LTTNG_LIST_DOMAINS:
870 case LTTNG_START_TRACE:
871 case LTTNG_STOP_TRACE:
872 case LTTNG_DATA_PENDING:
873 case LTTNG_SNAPSHOT_ADD_OUTPUT:
874 case LTTNG_SNAPSHOT_DEL_OUTPUT:
875 case LTTNG_SNAPSHOT_LIST_OUTPUT:
876 case LTTNG_SNAPSHOT_RECORD:
877 case LTTNG_SAVE_SESSION:
878 case LTTNG_SET_SESSION_SHM_PATH:
879 case LTTNG_REGENERATE_METADATA:
880 case LTTNG_REGENERATE_STATEDUMP:
881 case LTTNG_REGISTER_TRIGGER:
882 case LTTNG_UNREGISTER_TRIGGER:
883 case LTTNG_ROTATE_SESSION:
884 case LTTNG_ROTATION_GET_INFO:
885 case LTTNG_ROTATION_SET_SCHEDULE:
886 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
887 need_domain = 0;
888 break;
889 default:
890 need_domain = 1;
891 }
892
893 if (config.no_kernel && need_domain
894 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
895 if (!is_root) {
896 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
897 } else {
898 ret = LTTNG_ERR_KERN_NA;
899 }
900 goto error;
901 }
902
903 /* Deny register consumer if we already have a spawned consumer. */
904 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
905 pthread_mutex_lock(&kconsumer_data.pid_mutex);
906 if (kconsumer_data.pid > 0) {
907 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
908 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
909 goto error;
910 }
911 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
912 }
913
914 /*
915 * Check for commands that don't need to allocate a returned payload. We do
916 * this here so we don't have to make the call for no payload at each
917 * command.
918 */
919 switch (cmd_ctx->lsm->cmd_type) {
920 case LTTNG_LIST_SESSIONS:
921 case LTTNG_LIST_TRACEPOINTS:
922 case LTTNG_LIST_TRACEPOINT_FIELDS:
923 case LTTNG_LIST_DOMAINS:
924 case LTTNG_LIST_CHANNELS:
925 case LTTNG_LIST_EVENTS:
926 case LTTNG_LIST_SYSCALLS:
927 case LTTNG_LIST_TRACKER_PIDS:
928 case LTTNG_DATA_PENDING:
929 case LTTNG_ROTATE_SESSION:
930 case LTTNG_ROTATION_GET_INFO:
931 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
932 break;
933 default:
934 /* Setup lttng message with no payload */
935 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
936 if (ret < 0) {
937 /* This label does not try to unlock the session */
938 goto init_setup_error;
939 }
940 }
941
942 /* Commands that DO NOT need a session. */
943 switch (cmd_ctx->lsm->cmd_type) {
944 case LTTNG_CREATE_SESSION:
945 case LTTNG_CREATE_SESSION_SNAPSHOT:
946 case LTTNG_CREATE_SESSION_LIVE:
947 case LTTNG_LIST_SESSIONS:
948 case LTTNG_LIST_TRACEPOINTS:
949 case LTTNG_LIST_SYSCALLS:
950 case LTTNG_LIST_TRACEPOINT_FIELDS:
951 case LTTNG_SAVE_SESSION:
952 case LTTNG_REGISTER_TRIGGER:
953 case LTTNG_UNREGISTER_TRIGGER:
954 need_tracing_session = 0;
955 break;
956 default:
957 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
958 /*
959 * We keep the session list lock across _all_ commands
960 * for now, because the per-session lock does not
961 * handle teardown properly.
962 */
963 session_lock_list();
964 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
965 if (cmd_ctx->session == NULL) {
966 ret = LTTNG_ERR_SESS_NOT_FOUND;
967 goto error;
968 } else {
969 /* Acquire lock for the session */
970 session_lock(cmd_ctx->session);
971 }
972 break;
973 }
974
975 /*
976 * Commands that need a valid session but should NOT create one if none
977 * exists. Instead of creating one and destroying it when the command is
978 * handled, process that right before so we save some round trip in useless
979 * code path.
980 */
981 switch (cmd_ctx->lsm->cmd_type) {
982 case LTTNG_DISABLE_CHANNEL:
983 case LTTNG_DISABLE_EVENT:
984 switch (cmd_ctx->lsm->domain.type) {
985 case LTTNG_DOMAIN_KERNEL:
986 if (!cmd_ctx->session->kernel_session) {
987 ret = LTTNG_ERR_NO_CHANNEL;
988 goto error;
989 }
990 break;
991 case LTTNG_DOMAIN_JUL:
992 case LTTNG_DOMAIN_LOG4J:
993 case LTTNG_DOMAIN_PYTHON:
994 case LTTNG_DOMAIN_UST:
995 if (!cmd_ctx->session->ust_session) {
996 ret = LTTNG_ERR_NO_CHANNEL;
997 goto error;
998 }
999 break;
1000 default:
1001 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
1002 goto error;
1003 }
1004 default:
1005 break;
1006 }
1007
1008 if (!need_domain) {
1009 goto skip_domain;
1010 }
1011
1012 /*
1013 * Check domain type for specific "pre-action".
1014 */
1015 switch (cmd_ctx->lsm->domain.type) {
1016 case LTTNG_DOMAIN_KERNEL:
1017 if (!is_root) {
1018 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
1019 goto error;
1020 }
1021
1022 /* Consumer is in an ERROR state. Report back to client */
1023 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
1024 ret = LTTNG_ERR_NO_KERNCONSUMERD;
1025 goto error;
1026 }
1027
1028 /* Need a session for kernel command */
1029 if (need_tracing_session) {
1030 if (cmd_ctx->session->kernel_session == NULL) {
1031 ret = create_kernel_session(cmd_ctx->session);
1032 if (ret < 0) {
1033 ret = LTTNG_ERR_KERN_SESS_FAIL;
1034 goto error;
1035 }
1036 }
1037
1038 /* Start the kernel consumer daemon */
1039 pthread_mutex_lock(&kconsumer_data.pid_mutex);
1040 if (kconsumer_data.pid == 0 &&
1041 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
1042 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
1043 ret = start_consumerd(&kconsumer_data);
1044 if (ret < 0) {
1045 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
1046 goto error;
1047 }
1048 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
1049 } else {
1050 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
1051 }
1052
1053 /*
1054 * The consumer was just spawned so we need to add the socket to
1055 * the consumer output of the session, if it exists.
1056 */
1057 ret = consumer_create_socket(&kconsumer_data,
1058 cmd_ctx->session->kernel_session->consumer);
1059 if (ret < 0) {
1060 goto error;
1061 }
1062 }
1063
1064 break;
1065 case LTTNG_DOMAIN_JUL:
1066 case LTTNG_DOMAIN_LOG4J:
1067 case LTTNG_DOMAIN_PYTHON:
1068 case LTTNG_DOMAIN_UST:
1069 {
1070 if (!ust_app_supported()) {
1071 ret = LTTNG_ERR_NO_UST;
1072 goto error;
1073 }
1074 /* Consumer is in an ERROR state. Report back to client */
1075 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
1076 ret = LTTNG_ERR_NO_USTCONSUMERD;
1077 goto error;
1078 }
1079
1080 if (need_tracing_session) {
1081 /* Create a UST session if none exists. */
1082 if (cmd_ctx->session->ust_session == NULL) {
1083 ret = create_ust_session(cmd_ctx->session,
1084 &cmd_ctx->lsm->domain);
1085 if (ret != LTTNG_OK) {
1086 goto error;
1087 }
1088 }
1089
1090 /* Start the UST consumer daemons */
1091 /* 64-bit */
1092 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
1093 if (config.consumerd64_bin_path.value &&
1094 ustconsumer64_data.pid == 0 &&
1095 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
1096 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
1097 ret = start_consumerd(&ustconsumer64_data);
1098 if (ret < 0) {
1099 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
1100 uatomic_set(&ust_consumerd64_fd, -EINVAL);
1101 goto error;
1102 }
1103
1104 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
1105 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
1106 } else {
1107 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
1108 }
1109
1110 /*
1111 * Setup socket for consumer 64 bit. No need for atomic access
1112 * since it was set above and can ONLY be set in this thread.
1113 */
1114 ret = consumer_create_socket(&ustconsumer64_data,
1115 cmd_ctx->session->ust_session->consumer);
1116 if (ret < 0) {
1117 goto error;
1118 }
1119
1120 /* 32-bit */
1121 pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
1122 if (config.consumerd32_bin_path.value &&
1123 ustconsumer32_data.pid == 0 &&
1124 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
1125 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
1126 ret = start_consumerd(&ustconsumer32_data);
1127 if (ret < 0) {
1128 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
1129 uatomic_set(&ust_consumerd32_fd, -EINVAL);
1130 goto error;
1131 }
1132
1133 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
1134 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
1135 } else {
1136 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
1137 }
1138
1139 /*
1140 * Setup socket for consumer 32 bit. No need for atomic access
1141 * since it was set above and can ONLY be set in this thread.
1142 */
1143 ret = consumer_create_socket(&ustconsumer32_data,
1144 cmd_ctx->session->ust_session->consumer);
1145 if (ret < 0) {
1146 goto error;
1147 }
1148 }
1149 break;
1150 }
1151 default:
1152 break;
1153 }
1154 skip_domain:
1155
1156 /* Validate the consumer daemon state when handling a start/stop trace command */
1157 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
1158 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
1159 switch (cmd_ctx->lsm->domain.type) {
1160 case LTTNG_DOMAIN_NONE:
1161 break;
1162 case LTTNG_DOMAIN_JUL:
1163 case LTTNG_DOMAIN_LOG4J:
1164 case LTTNG_DOMAIN_PYTHON:
1165 case LTTNG_DOMAIN_UST:
1166 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
1167 ret = LTTNG_ERR_NO_USTCONSUMERD;
1168 goto error;
1169 }
1170 break;
1171 case LTTNG_DOMAIN_KERNEL:
1172 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
1173 ret = LTTNG_ERR_NO_KERNCONSUMERD;
1174 goto error;
1175 }
1176 break;
1177 default:
1178 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
1179 goto error;
1180 }
1181 }
1182
1183 /*
1184 * Check that the UID or GID match that of the tracing session.
1185 * The root user can interact with all sessions.
1186 */
1187 if (need_tracing_session) {
1188 if (!session_access_ok(cmd_ctx->session,
1189 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
1190 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds)) ||
1191 cmd_ctx->session->destroyed) {
1192 ret = LTTNG_ERR_EPERM;
1193 goto error;
1194 }
1195 }
1196
1197 /*
1198 * Send relayd information to consumer as soon as we have a domain and a
1199 * session defined.
1200 */
1201 if (cmd_ctx->session && need_domain) {
1202 /*
1203 * Setup relayd if not done yet. If the relayd information was already
1204 * sent to the consumer, this call will gracefully return.
1205 */
1206 ret = cmd_setup_relayd(cmd_ctx->session);
1207 if (ret != LTTNG_OK) {
1208 goto error;
1209 }
1210 }
1211
1212 /* Process by command type */
1213 switch (cmd_ctx->lsm->cmd_type) {
1214 case LTTNG_ADD_CONTEXT:
1215 {
1216 /*
1217 * An LTTNG_ADD_CONTEXT command might have a supplementary
1218 * payload if the context being added is an application context.
1219 */
1220 if (cmd_ctx->lsm->u.context.ctx.ctx ==
1221 LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
1222 char *provider_name = NULL, *context_name = NULL;
1223 size_t provider_name_len =
1224 cmd_ctx->lsm->u.context.provider_name_len;
1225 size_t context_name_len =
1226 cmd_ctx->lsm->u.context.context_name_len;
1227
1228 if (provider_name_len == 0 || context_name_len == 0) {
1229 /*
1230 * Application provider and context names MUST
1231 * be provided.
1232 */
1233 ret = -LTTNG_ERR_INVALID;
1234 goto error;
1235 }
1236
1237 provider_name = zmalloc(provider_name_len + 1);
1238 if (!provider_name) {
1239 ret = -LTTNG_ERR_NOMEM;
1240 goto error;
1241 }
1242 cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name =
1243 provider_name;
1244
1245 context_name = zmalloc(context_name_len + 1);
1246 if (!context_name) {
1247 ret = -LTTNG_ERR_NOMEM;
1248 goto error_add_context;
1249 }
1250 cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name =
1251 context_name;
1252
1253 ret = lttcomm_recv_unix_sock(sock, provider_name,
1254 provider_name_len);
1255 if (ret < 0) {
1256 goto error_add_context;
1257 }
1258
1259 ret = lttcomm_recv_unix_sock(sock, context_name,
1260 context_name_len);
1261 if (ret < 0) {
1262 goto error_add_context;
1263 }
1264 }
1265
1266 /*
1267 * cmd_add_context assumes ownership of the provider and context
1268 * names.
1269 */
1270 ret = cmd_add_context(cmd_ctx->session,
1271 cmd_ctx->lsm->domain.type,
1272 cmd_ctx->lsm->u.context.channel_name,
1273 &cmd_ctx->lsm->u.context.ctx,
1274 kernel_poll_pipe[1]);
1275
1276 cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name = NULL;
1277 cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name = NULL;
1278 error_add_context:
1279 free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name);
1280 free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name);
1281 if (ret < 0) {
1282 goto error;
1283 }
1284 break;
1285 }
1286 case LTTNG_DISABLE_CHANNEL:
1287 {
1288 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
1289 cmd_ctx->lsm->u.disable.channel_name);
1290 break;
1291 }
1292 case LTTNG_DISABLE_EVENT:
1293 {
1294
1295 /*
1296 * FIXME: handle filter; for now we just receive the filter's
1297 * bytecode along with the filter expression which are sent by
1298 * liblttng-ctl and discard them.
1299 *
1300 * This fixes an issue where the client may block while sending
1301 * the filter payload and encounter an error because the session
1302 * daemon closes the socket without ever handling this data.
1303 */
1304 size_t count = cmd_ctx->lsm->u.disable.expression_len +
1305 cmd_ctx->lsm->u.disable.bytecode_len;
1306
1307 if (count) {
1308 char data[LTTNG_FILTER_MAX_LEN];
1309
1310 DBG("Discarding disable event command payload of size %zu", count);
1311 while (count) {
1312 ret = lttcomm_recv_unix_sock(sock, data,
1313 count > sizeof(data) ? sizeof(data) : count);
1314 if (ret < 0) {
1315 goto error;
1316 }
1317
1318 count -= (size_t) ret;
1319 }
1320 }
1321 /* FIXME: passing packed structure to non-packed pointer */
1322 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
1323 cmd_ctx->lsm->u.disable.channel_name,
1324 &cmd_ctx->lsm->u.disable.event);
1325 break;
1326 }
1327 case LTTNG_ENABLE_CHANNEL:
1328 {
1329 cmd_ctx->lsm->u.channel.chan.attr.extended.ptr =
1330 (struct lttng_channel_extended *) &cmd_ctx->lsm->u.channel.extended;
1331 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
1332 &cmd_ctx->lsm->u.channel.chan,
1333 kernel_poll_pipe[1]);
1334 break;
1335 }
1336 case LTTNG_TRACK_PID:
1337 {
1338 ret = cmd_track_pid(cmd_ctx->session,
1339 cmd_ctx->lsm->domain.type,
1340 cmd_ctx->lsm->u.pid_tracker.pid);
1341 break;
1342 }
1343 case LTTNG_UNTRACK_PID:
1344 {
1345 ret = cmd_untrack_pid(cmd_ctx->session,
1346 cmd_ctx->lsm->domain.type,
1347 cmd_ctx->lsm->u.pid_tracker.pid);
1348 break;
1349 }
1350 case LTTNG_ENABLE_EVENT:
1351 {
1352 struct lttng_event *ev = NULL;
1353 struct lttng_event_exclusion *exclusion = NULL;
1354 struct lttng_filter_bytecode *bytecode = NULL;
1355 char *filter_expression = NULL;
1356
1357 /* Handle exclusion events and receive it from the client. */
1358 if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
1359 size_t count = cmd_ctx->lsm->u.enable.exclusion_count;
1360
1361 exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
1362 (count * LTTNG_SYMBOL_NAME_LEN));
1363 if (!exclusion) {
1364 ret = LTTNG_ERR_EXCLUSION_NOMEM;
1365 goto error;
1366 }
1367
1368 DBG("Receiving var len exclusion event list from client ...");
1369 exclusion->count = count;
1370 ret = lttcomm_recv_unix_sock(sock, exclusion->names,
1371 count * LTTNG_SYMBOL_NAME_LEN);
1372 if (ret <= 0) {
1373 DBG("Nothing recv() from client var len data... continuing");
1374 *sock_error = 1;
1375 free(exclusion);
1376 ret = LTTNG_ERR_EXCLUSION_INVAL;
1377 goto error;
1378 }
1379 }
1380
1381 /* Get filter expression from client. */
1382 if (cmd_ctx->lsm->u.enable.expression_len > 0) {
1383 size_t expression_len =
1384 cmd_ctx->lsm->u.enable.expression_len;
1385
1386 if (expression_len > LTTNG_FILTER_MAX_LEN) {
1387 ret = LTTNG_ERR_FILTER_INVAL;
1388 free(exclusion);
1389 goto error;
1390 }
1391
1392 filter_expression = zmalloc(expression_len);
1393 if (!filter_expression) {
1394 free(exclusion);
1395 ret = LTTNG_ERR_FILTER_NOMEM;
1396 goto error;
1397 }
1398
1399 /* Receive var. len. data */
1400 DBG("Receiving var len filter's expression from client ...");
1401 ret = lttcomm_recv_unix_sock(sock, filter_expression,
1402 expression_len);
1403 if (ret <= 0) {
1404 DBG("Nothing recv() from client var len data... continuing");
1405 *sock_error = 1;
1406 free(filter_expression);
1407 free(exclusion);
1408 ret = LTTNG_ERR_FILTER_INVAL;
1409 goto error;
1410 }
1411 }
1412
1413 /* Handle filter and get bytecode from client. */
1414 if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
1415 size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
1416
1417 if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
1418 ret = LTTNG_ERR_FILTER_INVAL;
1419 free(filter_expression);
1420 free(exclusion);
1421 goto error;
1422 }
1423
1424 bytecode = zmalloc(bytecode_len);
1425 if (!bytecode) {
1426 free(filter_expression);
1427 free(exclusion);
1428 ret = LTTNG_ERR_FILTER_NOMEM;
1429 goto error;
1430 }
1431
1432 /* Receive var. len. data */
1433 DBG("Receiving var len filter's bytecode from client ...");
1434 ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
1435 if (ret <= 0) {
1436 DBG("Nothing recv() from client var len data... continuing");
1437 *sock_error = 1;
1438 free(filter_expression);
1439 free(bytecode);
1440 free(exclusion);
1441 ret = LTTNG_ERR_FILTER_INVAL;
1442 goto error;
1443 }
1444
1445 if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
1446 free(filter_expression);
1447 free(bytecode);
1448 free(exclusion);
1449 ret = LTTNG_ERR_FILTER_INVAL;
1450 goto error;
1451 }
1452 }
1453
1454 ev = lttng_event_copy(&cmd_ctx->lsm->u.enable.event);
1455 if (!ev) {
1456 DBG("Failed to copy event: %s",
1457 cmd_ctx->lsm->u.enable.event.name);
1458 free(filter_expression);
1459 free(bytecode);
1460 free(exclusion);
1461 ret = LTTNG_ERR_NOMEM;
1462 goto error;
1463 }
1464
1465
1466 if (cmd_ctx->lsm->u.enable.userspace_probe_location_len > 0) {
1467 /* Expect a userspace probe description. */
1468 ret = receive_userspace_probe(cmd_ctx, sock, sock_error, ev);
1469 if (ret) {
1470 free(filter_expression);
1471 free(bytecode);
1472 free(exclusion);
1473 lttng_event_destroy(ev);
1474 goto error;
1475 }
1476 }
1477
1478 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
1479 cmd_ctx->lsm->u.enable.channel_name,
1480 ev,
1481 filter_expression, bytecode, exclusion,
1482 kernel_poll_pipe[1]);
1483 lttng_event_destroy(ev);
1484 break;
1485 }
1486 case LTTNG_LIST_TRACEPOINTS:
1487 {
1488 struct lttng_event *events;
1489 ssize_t nb_events;
1490
1491 session_lock_list();
1492 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
1493 session_unlock_list();
1494 if (nb_events < 0) {
1495 /* Return value is a negative lttng_error_code. */
1496 ret = -nb_events;
1497 goto error;
1498 }
1499
1500 /*
1501 * Setup lttng message with payload size set to the event list size in
1502 * bytes and then copy list into the llm payload.
1503 */
1504 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
1505 sizeof(struct lttng_event) * nb_events);
1506 free(events);
1507
1508 if (ret < 0) {
1509 goto setup_error;
1510 }
1511
1512 ret = LTTNG_OK;
1513 break;
1514 }
1515 case LTTNG_LIST_TRACEPOINT_FIELDS:
1516 {
1517 struct lttng_event_field *fields;
1518 ssize_t nb_fields;
1519
1520 session_lock_list();
1521 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
1522 &fields);
1523 session_unlock_list();
1524 if (nb_fields < 0) {
1525 /* Return value is a negative lttng_error_code. */
1526 ret = -nb_fields;
1527 goto error;
1528 }
1529
1530 /*
1531 * Setup lttng message with payload size set to the field list size in
1532 * bytes and then copy list into the llm payload.
1533 */
1534 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, fields,
1535 sizeof(struct lttng_event_field) * nb_fields);
1536 free(fields);
1537
1538 if (ret < 0) {
1539 goto setup_error;
1540 }
1541
1542 ret = LTTNG_OK;
1543 break;
1544 }
1545 case LTTNG_LIST_SYSCALLS:
1546 {
1547 struct lttng_event *events;
1548 ssize_t nb_events;
1549
1550 nb_events = cmd_list_syscalls(&events);
1551 if (nb_events < 0) {
1552 /* Return value is a negative lttng_error_code. */
1553 ret = -nb_events;
1554 goto error;
1555 }
1556
1557 /*
1558 * Setup lttng message with payload size set to the event list size in
1559 * bytes and then copy list into the llm payload.
1560 */
1561 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
1562 sizeof(struct lttng_event) * nb_events);
1563 free(events);
1564
1565 if (ret < 0) {
1566 goto setup_error;
1567 }
1568
1569 ret = LTTNG_OK;
1570 break;
1571 }
1572 case LTTNG_LIST_TRACKER_PIDS:
1573 {
1574 int32_t *pids = NULL;
1575 ssize_t nr_pids;
1576
1577 nr_pids = cmd_list_tracker_pids(cmd_ctx->session,
1578 cmd_ctx->lsm->domain.type, &pids);
1579 if (nr_pids < 0) {
1580 /* Return value is a negative lttng_error_code. */
1581 ret = -nr_pids;
1582 goto error;
1583 }
1584
1585 /*
1586 * Setup lttng message with payload size set to the PID list size in
1587 * bytes and then copy list into the llm payload.
1588 */
1589 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, pids,
1590 sizeof(int32_t) * nr_pids);
1591 free(pids);
1592
1593 if (ret < 0) {
1594 goto setup_error;
1595 }
1596
1597 ret = LTTNG_OK;
1598 break;
1599 }
1600 case LTTNG_SET_CONSUMER_URI:
1601 {
1602 size_t nb_uri, len;
1603 struct lttng_uri *uris;
1604
1605 nb_uri = cmd_ctx->lsm->u.uri.size;
1606 len = nb_uri * sizeof(struct lttng_uri);
1607
1608 if (nb_uri == 0) {
1609 ret = LTTNG_ERR_INVALID;
1610 goto error;
1611 }
1612
1613 uris = zmalloc(len);
1614 if (uris == NULL) {
1615 ret = LTTNG_ERR_FATAL;
1616 goto error;
1617 }
1618
1619 /* Receive variable len data */
1620 DBG("Receiving %zu URI(s) from client ...", nb_uri);
1621 ret = lttcomm_recv_unix_sock(sock, uris, len);
1622 if (ret <= 0) {
1623 DBG("No URIs received from client... continuing");
1624 *sock_error = 1;
1625 ret = LTTNG_ERR_SESSION_FAIL;
1626 free(uris);
1627 goto error;
1628 }
1629
1630 ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
1631 free(uris);
1632 if (ret != LTTNG_OK) {
1633 goto error;
1634 }
1635
1636
1637 break;
1638 }
1639 case LTTNG_START_TRACE:
1640 {
1641 /*
1642 * On the first start, if we have a kernel session and we have
1643 * enabled time or size-based rotations, we have to make sure
1644 * the kernel tracer supports it.
1645 */
1646 if (!cmd_ctx->session->has_been_started &&
1647 cmd_ctx->session->kernel_session &&
1648 (cmd_ctx->session->rotate_timer_period ||
1649 cmd_ctx->session->rotate_size) &&
1650 !check_rotate_compatible()) {
1651 DBG("Kernel tracer version is not compatible with the rotation feature");
1652 ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
1653 goto error;
1654 }
1655 ret = cmd_start_trace(cmd_ctx->session);
1656 break;
1657 }
1658 case LTTNG_STOP_TRACE:
1659 {
1660 ret = cmd_stop_trace(cmd_ctx->session);
1661 break;
1662 }
1663 case LTTNG_CREATE_SESSION:
1664 {
1665 size_t nb_uri, len;
1666 struct lttng_uri *uris = NULL;
1667
1668 nb_uri = cmd_ctx->lsm->u.uri.size;
1669 len = nb_uri * sizeof(struct lttng_uri);
1670
1671 if (nb_uri > 0) {
1672 uris = zmalloc(len);
1673 if (uris == NULL) {
1674 ret = LTTNG_ERR_FATAL;
1675 goto error;
1676 }
1677
1678 /* Receive variable len data */
1679 DBG("Waiting for %zu URIs from client ...", nb_uri);
1680 ret = lttcomm_recv_unix_sock(sock, uris, len);
1681 if (ret <= 0) {
1682 DBG("No URIs received from client... continuing");
1683 *sock_error = 1;
1684 ret = LTTNG_ERR_SESSION_FAIL;
1685 free(uris);
1686 goto error;
1687 }
1688
1689 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
1690 DBG("Creating session with ONE network URI is a bad call");
1691 ret = LTTNG_ERR_SESSION_FAIL;
1692 free(uris);
1693 goto error;
1694 }
1695 }
1696
1697 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
1698 &cmd_ctx->creds, 0);
1699
1700 free(uris);
1701
1702 break;
1703 }
1704 case LTTNG_DESTROY_SESSION:
1705 {
1706 ret = cmd_destroy_session(cmd_ctx->session,
1707 notification_thread_handle);
1708 break;
1709 }
1710 case LTTNG_LIST_DOMAINS:
1711 {
1712 ssize_t nb_dom;
1713 struct lttng_domain *domains = NULL;
1714
1715 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
1716 if (nb_dom < 0) {
1717 /* Return value is a negative lttng_error_code. */
1718 ret = -nb_dom;
1719 goto error;
1720 }
1721
1722 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, domains,
1723 nb_dom * sizeof(struct lttng_domain));
1724 free(domains);
1725
1726 if (ret < 0) {
1727 goto setup_error;
1728 }
1729
1730 ret = LTTNG_OK;
1731 break;
1732 }
1733 case LTTNG_LIST_CHANNELS:
1734 {
1735 ssize_t payload_size;
1736 struct lttng_channel *channels = NULL;
1737
1738 payload_size = cmd_list_channels(cmd_ctx->lsm->domain.type,
1739 cmd_ctx->session, &channels);
1740 if (payload_size < 0) {
1741 /* Return value is a negative lttng_error_code. */
1742 ret = -payload_size;
1743 goto error;
1744 }
1745
1746 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, channels,
1747 payload_size);
1748 free(channels);
1749
1750 if (ret < 0) {
1751 goto setup_error;
1752 }
1753
1754 ret = LTTNG_OK;
1755 break;
1756 }
1757 case LTTNG_LIST_EVENTS:
1758 {
1759 ssize_t nb_event;
1760 struct lttng_event *events = NULL;
1761 struct lttcomm_event_command_header cmd_header;
1762 size_t total_size;
1763
1764 memset(&cmd_header, 0, sizeof(cmd_header));
1765 /* Extended infos are included at the end of events */
1766 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type,
1767 cmd_ctx->session, cmd_ctx->lsm->u.list.channel_name,
1768 &events, &total_size);
1769
1770 if (nb_event < 0) {
1771 /* Return value is a negative lttng_error_code. */
1772 ret = -nb_event;
1773 goto error;
1774 }
1775
1776 cmd_header.nb_events = nb_event;
1777 ret = setup_lttng_msg(cmd_ctx, events, total_size,
1778 &cmd_header, sizeof(cmd_header));
1779 free(events);
1780
1781 if (ret < 0) {
1782 goto setup_error;
1783 }
1784
1785 ret = LTTNG_OK;
1786 break;
1787 }
1788 case LTTNG_LIST_SESSIONS:
1789 {
1790 unsigned int nr_sessions;
1791 void *sessions_payload;
1792 size_t payload_len;
1793
1794 session_lock_list();
1795 nr_sessions = lttng_sessions_count(
1796 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
1797 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
1798 payload_len = sizeof(struct lttng_session) * nr_sessions;
1799 sessions_payload = zmalloc(payload_len);
1800
1801 if (!sessions_payload) {
1802 session_unlock_list();
1803 ret = -ENOMEM;
1804 goto setup_error;
1805 }
1806
1807 cmd_list_lttng_sessions(sessions_payload,
1808 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
1809 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
1810 session_unlock_list();
1811
1812 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, sessions_payload,
1813 payload_len);
1814 free(sessions_payload);
1815
1816 if (ret < 0) {
1817 goto setup_error;
1818 }
1819
1820 ret = LTTNG_OK;
1821 break;
1822 }
1823 case LTTNG_REGISTER_CONSUMER:
1824 {
1825 struct consumer_data *cdata;
1826
1827 switch (cmd_ctx->lsm->domain.type) {
1828 case LTTNG_DOMAIN_KERNEL:
1829 cdata = &kconsumer_data;
1830 break;
1831 default:
1832 ret = LTTNG_ERR_UND;
1833 goto error;
1834 }
1835
1836 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
1837 cmd_ctx->lsm->u.reg.path, cdata);
1838 break;
1839 }
1840 case LTTNG_DATA_PENDING:
1841 {
1842 int pending_ret;
1843 uint8_t pending_ret_byte;
1844
1845 pending_ret = cmd_data_pending(cmd_ctx->session);
1846
1847 /*
1848 * FIXME
1849 *
1850 * This function may return 0 or 1 to indicate whether or not
1851 * there is data pending. In case of error, it should return an
1852 * LTTNG_ERR code. However, some code paths may still return
1853 * a nondescript error code, which we handle by returning an
1854 * "unknown" error.
1855 */
1856 if (pending_ret == 0 || pending_ret == 1) {
1857 /*
1858 * ret will be set to LTTNG_OK at the end of
1859 * this function.
1860 */
1861 } else if (pending_ret < 0) {
1862 ret = LTTNG_ERR_UNK;
1863 goto setup_error;
1864 } else {
1865 ret = pending_ret;
1866 goto setup_error;
1867 }
1868
1869 pending_ret_byte = (uint8_t) pending_ret;
1870
1871 /* 1 byte to return whether or not data is pending */
1872 ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
1873 &pending_ret_byte, 1);
1874
1875 if (ret < 0) {
1876 goto setup_error;
1877 }
1878
1879 ret = LTTNG_OK;
1880 break;
1881 }
1882 case LTTNG_SNAPSHOT_ADD_OUTPUT:
1883 {
1884 struct lttcomm_lttng_output_id reply;
1885
1886 ret = cmd_snapshot_add_output(cmd_ctx->session,
1887 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
1888 if (ret != LTTNG_OK) {
1889 goto error;
1890 }
1891
1892 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &reply,
1893 sizeof(reply));
1894 if (ret < 0) {
1895 goto setup_error;
1896 }
1897
1898 /* Copy output list into message payload */
1899 ret = LTTNG_OK;
1900 break;
1901 }
1902 case LTTNG_SNAPSHOT_DEL_OUTPUT:
1903 {
1904 ret = cmd_snapshot_del_output(cmd_ctx->session,
1905 &cmd_ctx->lsm->u.snapshot_output.output);
1906 break;
1907 }
1908 case LTTNG_SNAPSHOT_LIST_OUTPUT:
1909 {
1910 ssize_t nb_output;
1911 struct lttng_snapshot_output *outputs = NULL;
1912
1913 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
1914 if (nb_output < 0) {
1915 ret = -nb_output;
1916 goto error;
1917 }
1918
1919 assert((nb_output > 0 && outputs) || nb_output == 0);
1920 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, outputs,
1921 nb_output * sizeof(struct lttng_snapshot_output));
1922 free(outputs);
1923
1924 if (ret < 0) {
1925 goto setup_error;
1926 }
1927
1928 ret = LTTNG_OK;
1929 break;
1930 }
1931 case LTTNG_SNAPSHOT_RECORD:
1932 {
1933 ret = cmd_snapshot_record(cmd_ctx->session,
1934 &cmd_ctx->lsm->u.snapshot_record.output,
1935 cmd_ctx->lsm->u.snapshot_record.wait);
1936 break;
1937 }
1938 case LTTNG_CREATE_SESSION_SNAPSHOT:
1939 {
1940 size_t nb_uri, len;
1941 struct lttng_uri *uris = NULL;
1942
1943 nb_uri = cmd_ctx->lsm->u.uri.size;
1944 len = nb_uri * sizeof(struct lttng_uri);
1945
1946 if (nb_uri > 0) {
1947 uris = zmalloc(len);
1948 if (uris == NULL) {
1949 ret = LTTNG_ERR_FATAL;
1950 goto error;
1951 }
1952
1953 /* Receive variable len data */
1954 DBG("Waiting for %zu URIs from client ...", nb_uri);
1955 ret = lttcomm_recv_unix_sock(sock, uris, len);
1956 if (ret <= 0) {
1957 DBG("No URIs received from client... continuing");
1958 *sock_error = 1;
1959 ret = LTTNG_ERR_SESSION_FAIL;
1960 free(uris);
1961 goto error;
1962 }
1963
1964 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
1965 DBG("Creating session with ONE network URI is a bad call");
1966 ret = LTTNG_ERR_SESSION_FAIL;
1967 free(uris);
1968 goto error;
1969 }
1970 }
1971
1972 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
1973 nb_uri, &cmd_ctx->creds);
1974 free(uris);
1975 break;
1976 }
1977 case LTTNG_CREATE_SESSION_LIVE:
1978 {
1979 size_t nb_uri, len;
1980 struct lttng_uri *uris = NULL;
1981
1982 nb_uri = cmd_ctx->lsm->u.uri.size;
1983 len = nb_uri * sizeof(struct lttng_uri);
1984
1985 if (nb_uri > 0) {
1986 uris = zmalloc(len);
1987 if (uris == NULL) {
1988 ret = LTTNG_ERR_FATAL;
1989 goto error;
1990 }
1991
1992 /* Receive variable len data */
1993 DBG("Waiting for %zu URIs from client ...", nb_uri);
1994 ret = lttcomm_recv_unix_sock(sock, uris, len);
1995 if (ret <= 0) {
1996 DBG("No URIs received from client... continuing");
1997 *sock_error = 1;
1998 ret = LTTNG_ERR_SESSION_FAIL;
1999 free(uris);
2000 goto error;
2001 }
2002
2003 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
2004 DBG("Creating session with ONE network URI is a bad call");
2005 ret = LTTNG_ERR_SESSION_FAIL;
2006 free(uris);
2007 goto error;
2008 }
2009 }
2010
2011 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
2012 nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
2013 free(uris);
2014 break;
2015 }
2016 case LTTNG_SAVE_SESSION:
2017 {
2018 ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
2019 &cmd_ctx->creds);
2020 break;
2021 }
2022 case LTTNG_SET_SESSION_SHM_PATH:
2023 {
2024 ret = cmd_set_session_shm_path(cmd_ctx->session,
2025 cmd_ctx->lsm->u.set_shm_path.shm_path);
2026 break;
2027 }
2028 case LTTNG_REGENERATE_METADATA:
2029 {
2030 ret = cmd_regenerate_metadata(cmd_ctx->session);
2031 break;
2032 }
2033 case LTTNG_REGENERATE_STATEDUMP:
2034 {
2035 ret = cmd_regenerate_statedump(cmd_ctx->session);
2036 break;
2037 }
2038 case LTTNG_REGISTER_TRIGGER:
2039 {
2040 ret = cmd_register_trigger(cmd_ctx, sock,
2041 notification_thread_handle);
2042 break;
2043 }
2044 case LTTNG_UNREGISTER_TRIGGER:
2045 {
2046 ret = cmd_unregister_trigger(cmd_ctx, sock,
2047 notification_thread_handle);
2048 break;
2049 }
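/*
 * Note: LTTNG_ROTATE_SESSION and LTTNG_ROTATION_SET_SCHEDULE below only
 * proceed when check_rotate_compatible() reports that the kernel tracer,
 * if the session uses one, supports the rotation feature.
 */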
2050 case LTTNG_ROTATE_SESSION:
2051 {
2052 struct lttng_rotate_session_return rotate_return;
2053
2054 DBG("Client rotate session \"%s\"", cmd_ctx->session->name);
2055
2056 memset(&rotate_return, 0, sizeof(rotate_return));
2057 if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
2058 DBG("Kernel tracer version is not compatible with the rotation feature");
2059 ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
2060 goto error;
2061 }
2062
2063 ret = cmd_rotate_session(cmd_ctx->session, &rotate_return);
2064 if (ret < 0) {
2065 ret = -ret;
2066 goto error;
2067 }
2068
2069 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &rotate_return,
2070 sizeof(rotate_return));
2071 if (ret < 0) {
2072 ret = -ret;
2073 goto error;
2074 }
2075
2076 ret = LTTNG_OK;
2077 break;
2078 }
2079 case LTTNG_ROTATION_GET_INFO:
2080 {
2081 struct lttng_rotation_get_info_return get_info_return;
2082
2083 memset(&get_info_return, 0, sizeof(get_info_return));
2084 ret = cmd_rotate_get_info(cmd_ctx->session, &get_info_return,
2085 cmd_ctx->lsm->u.get_rotation_info.rotation_id);
2086 if (ret < 0) {
2087 ret = -ret;
2088 goto error;
2089 }
2090
2091 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &get_info_return,
2092 sizeof(get_info_return));
2093 if (ret < 0) {
2094 ret = -ret;
2095 goto error;
2096 }
2097
2098 ret = LTTNG_OK;
2099 break;
2100 }
2101 case LTTNG_ROTATION_SET_SCHEDULE:
2102 {
2103 bool set_schedule;
2104 enum lttng_rotation_schedule_type schedule_type;
2105 uint64_t value;
2106
2107 if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
2108 DBG("Kernel tracer version does not support session rotations");
2109 ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
2110 goto error;
2111 }
2112
2113 set_schedule = cmd_ctx->lsm->u.rotation_set_schedule.set == 1;
2114 schedule_type = (enum lttng_rotation_schedule_type) cmd_ctx->lsm->u.rotation_set_schedule.type;
2115 value = cmd_ctx->lsm->u.rotation_set_schedule.value;
2116
2117 ret = cmd_rotation_set_schedule(cmd_ctx->session,
2118 set_schedule,
2119 schedule_type,
2120 value,
2121 notification_thread_handle);
2122 if (ret != LTTNG_OK) {
2123 goto error;
2124 }
2125
2126 break;
2127 }
2128 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
2129 {
2130 struct lttng_session_list_schedules_return schedules = {
2131 .periodic.set = !!cmd_ctx->session->rotate_timer_period,
2132 .periodic.value = cmd_ctx->session->rotate_timer_period,
2133 .size.set = !!cmd_ctx->session->rotate_size,
2134 .size.value = cmd_ctx->session->rotate_size,
2135 };
2136
2137 ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &schedules,
2138 sizeof(schedules));
2139 if (ret < 0) {
2140 ret = -ret;
2141 goto error;
2142 }
2143
2144 ret = LTTNG_OK;
2145 break;
2146 }
2147 default:
2148 ret = LTTNG_ERR_UND;
2149 break;
2150 }
2151
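/*
 * Common exit paths: 'error' makes sure an llm reply exists so the return
 * code can still be reported to the client, while 'setup_error' is taken
 * when even that allocation failed; it only releases the session
 * reference, its lock and, if needed, the session list lock.
 */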
2152 error:
2153 if (cmd_ctx->llm == NULL) {
2154 DBG("Missing llm structure. Allocating one.");
2155 if (setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0) < 0) {
2156 goto setup_error;
2157 }
2158 }
2159 /* Set return code */
2160 cmd_ctx->llm->ret_code = ret;
2161 setup_error:
2162 if (cmd_ctx->session) {
2163 session_unlock(cmd_ctx->session);
2164 session_put(cmd_ctx->session);
2165 }
2166 if (need_tracing_session) {
2167 session_unlock_list();
2168 }
2169 init_setup_error:
2170 assert(!rcu_read_ongoing());
2171 return ret;
2172 }
2173
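/*
 * Create the unix socket on which clients send their commands, at the
 * configured client_unix_sock_path. For reference, a client reaches it
 * with the usual AF_UNIX sequence, roughly what liblttng-ctl's
 * lttcomm_connect_unix_sock() amounts to (illustrative sketch only):
 *
 *   int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *   struct sockaddr_un addr = { .sun_family = AF_UNIX };
 *   strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);
 *   connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 */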
2174 static int create_client_sock(void)
2175 {
2176 int ret, client_sock;
2177 const mode_t old_umask = umask(0);
2178
2179 /* Create client tool unix socket */
2180 client_sock = lttcomm_create_unix_sock(config.client_unix_sock_path.value);
2181 if (client_sock < 0) {
2182 ERR("Create unix sock failed: %s", config.client_unix_sock_path.value);
2183 ret = -1;
2184 goto end;
2185 }
2186
2187 /* Set the cloexec flag */
2188 ret = utils_set_fd_cloexec(client_sock);
2189 if (ret < 0) {
2190 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
2191 "Continuing but note that the consumer daemon will have a "
2192 "reference to this socket on exec()", client_sock);
2193 }
2194
2195 /* File permission MUST be 660 */
2196 ret = chmod(config.client_unix_sock_path.value, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
2197 if (ret < 0) {
2198 ERR("Set file permissions failed: %s", config.client_unix_sock_path.value);
2199 PERROR("chmod");
2200 goto end;
2201 }
2202 DBG("Created client socket (fd = %i)", client_sock);
2203 ret = client_sock;
2204 end:
2205 umask(old_umask);
2206 return ret;
2207 }
2208
2209 static void cleanup_client_thread(void *data)
2210 {
2211 struct lttng_pipe *quit_pipe = data;
2212
2213 lttng_pipe_destroy(quit_pipe);
2214 }
2215
2216 /*
2217 * This thread manages all client requests using the unix client socket
2218 * for communication.
2219 */
2220 static void *thread_manage_clients(void *data)
2221 {
2222 int sock = -1, ret, i, pollfd, err = -1;
2223 int sock_error;
2224 uint32_t revents, nb_fd;
2225 struct command_ctx *cmd_ctx = NULL;
2226 struct lttng_poll_event events;
2227 int client_sock = -1;
2228 struct lttng_pipe *quit_pipe = data;
2229 const int thread_quit_pipe_fd = lttng_pipe_get_readfd(quit_pipe);
2230
2231 DBG("[thread] Manage client started");
2232
2233 is_root = (getuid() == 0);
2234
2235 client_sock = create_client_sock();
2236 if (client_sock < 0) {
2237 goto error_listen;
2238 }
2239
2240 rcu_register_thread();
2241
2242 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
2243
2244 health_code_update();
2245
2246 ret = lttcomm_listen_unix_sock(client_sock);
2247 if (ret < 0) {
2248 goto error_listen;
2249 }
2250
2251 /*
2252 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
2253 * more will be added to this poll set.
2254 */
2255 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
2256 if (ret < 0) {
2257 goto error_create_poll;
2258 }
2259
2260 /* Add the client command socket */
2261 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
2262 if (ret < 0) {
2263 goto error;
2264 }
2265
2266 /* Add thread quit pipe */
2267 ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN | LPOLLERR);
2268 if (ret < 0) {
2269 goto error;
2270 }
2271
2272 /* This testpoint is after we signal readiness to the parent. */
2273 if (testpoint(sessiond_thread_manage_clients)) {
2274 goto error;
2275 }
2276
2277 if (testpoint(sessiond_thread_manage_clients_before_loop)) {
2278 goto error;
2279 }
2280
2281 health_code_update();
2282
2283 /* Set state as running. */
2284 set_thread_state_running();
2285
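/*
 * Main command loop: wait for activity on the quit pipe or the client
 * socket, accept one connection, receive a single lttcomm_session_msg
 * along with the client's credentials, process it, send back the
 * lttcomm_lttng_msg reply and close the connection.
 */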
2286 while (1) {
2287 const struct cmd_completion_handler *cmd_completion_handler;
2288
2289 DBG("Accepting client command ...");
2290
2291 /* Infinite blocking call, waiting for transmission */
2292 restart:
2293 health_poll_entry();
2294 ret = lttng_poll_wait(&events, -1);
2295 health_poll_exit();
2296 if (ret < 0) {
2297 /*
2298 * Restart interrupted system call.
2299 */
2300 if (errno == EINTR) {
2301 goto restart;
2302 }
2303 goto error;
2304 }
2305
2306 nb_fd = ret;
2307
2308 for (i = 0; i < nb_fd; i++) {
2309 revents = LTTNG_POLL_GETEV(&events, i);
2310 pollfd = LTTNG_POLL_GETFD(&events, i);
2311
2312 health_code_update();
2313
2314 if (!revents) {
2315 /* No activity for this FD (poll implementation). */
2316 continue;
2317 }
2318
2319 if (pollfd == thread_quit_pipe_fd) {
2320 err = 0;
2321 goto exit;
2322 } else {
2323 /* Event on the client socket */
2324 if (revents & LPOLLIN) {
2325 continue;
2326 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2327 ERR("Client socket poll error");
2328 goto error;
2329 } else {
2330 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
2331 goto error;
2332 }
2333 }
2334 }
2335
2336 DBG("Wait for client response");
2337
2338 health_code_update();
2339
2340 sock = lttcomm_accept_unix_sock(client_sock);
2341 if (sock < 0) {
2342 goto error;
2343 }
2344
2345 /*
2346 * Set the CLOEXEC flag. Return code is useless because either way, the
2347 * show must go on.
2348 */
2349 (void) utils_set_fd_cloexec(sock);
2350
2351 /* Set socket option for credentials retrieval */
2352 ret = lttcomm_setsockopt_creds_unix_sock(sock);
2353 if (ret < 0) {
2354 goto error;
2355 }
2356
2357 /* Allocate context command to process the client request */
2358 cmd_ctx = zmalloc(sizeof(struct command_ctx));
2359 if (cmd_ctx == NULL) {
2360 PERROR("zmalloc cmd_ctx");
2361 goto error;
2362 }
2363
2364 /* Allocate data buffer for reception */
2365 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
2366 if (cmd_ctx->lsm == NULL) {
2367 PERROR("zmalloc cmd_ctx->lsm");
2368 goto error;
2369 }
2370
2371 cmd_ctx->llm = NULL;
2372 cmd_ctx->session = NULL;
2373
2374 health_code_update();
2375
2376 /*
2377 * Data is received from the lttng client. The struct
2378 * lttcomm_session_msg (lsm) contains the command and data request of
2379 * the client.
2380 */
2381 DBG("Receiving data from client ...");
2382 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
2383 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
2384 if (ret <= 0) {
2385 DBG("Nothing recv() from client... continuing");
2386 ret = close(sock);
2387 if (ret) {
2388 PERROR("close");
2389 }
2390 sock = -1;
2391 clean_command_ctx(&cmd_ctx);
2392 continue;
2393 }
2394
2395 health_code_update();
2396
2397 // TODO: Validate cmd_ctx, including sanity checks, for
2398 // security purposes.
2399
2400 rcu_thread_online();
2401 /*
2402 * This function dispatches the work to the kernel or userspace tracer
2403 * libs and fills the lttcomm_lttng_msg data structure with all the
2404 * information needed by the client. The command context struct contains
2405 * everything this function may need.
2406 */
2407 ret = process_client_msg(cmd_ctx, sock, &sock_error);
2408 rcu_thread_offline();
2409 if (ret < 0) {
2410 ret = close(sock);
2411 if (ret) {
2412 PERROR("close");
2413 }
2414 sock = -1;
2415 /*
2416 * TODO: Inform client somehow of the fatal error. At
2417 * this point, ret < 0 means that a zmalloc failed
2418 * (ENOMEM). Error detected but still accept
2419 * command, unless a socket error has been
2420 * detected.
2421 */
2422 clean_command_ctx(&cmd_ctx);
2423 continue;
2424 }
2425
2426 cmd_completion_handler = cmd_pop_completion_handler();
2427 if (cmd_completion_handler) {
2428 enum lttng_error_code completion_code;
2429
2430 completion_code = cmd_completion_handler->run(
2431 cmd_completion_handler->data);
2432 if (completion_code != LTTNG_OK) {
2433 clean_command_ctx(&cmd_ctx);
2434 continue;
2435 }
2436 }
2437
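/*
 * Send the reply built by process_client_msg() as a single message of
 * cmd_ctx->lttng_msg_size bytes, then close this command connection.
 */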
2438 health_code_update();
2439
2440 DBG("Sending response (size: %d, retcode: %s (%d))",
2441 cmd_ctx->lttng_msg_size,
2442 lttng_strerror(-cmd_ctx->llm->ret_code),
2443 cmd_ctx->llm->ret_code);
2444 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
2445 if (ret < 0) {
2446 ERR("Failed to send data back to client");
2447 }
2448
2449 /* End of transmission */
2450 ret = close(sock);
2451 if (ret) {
2452 PERROR("close");
2453 }
2454 sock = -1;
2455
2456 clean_command_ctx(&cmd_ctx);
2457
2458 health_code_update();
2459 }
2460
2461 exit:
2462 error:
2463 if (sock >= 0) {
2464 ret = close(sock);
2465 if (ret) {
2466 PERROR("close");
2467 }
2468 }
2469
2470 lttng_poll_clean(&events);
2471 clean_command_ctx(&cmd_ctx);
2472
2473 error_listen:
2474 error_create_poll:
2475 unlink(config.client_unix_sock_path.value);
2476 if (client_sock >= 0) {
2477 ret = close(client_sock);
2478 if (ret) {
2479 PERROR("close");
2480 }
2481 }
2482
2483 if (err) {
2484 health_error();
2485 ERR("Health error occurred in %s", __func__);
2486 }
2487
2488 health_unregister(health_sessiond);
2489
2490 DBG("Client thread dying");
2491
2492 rcu_unregister_thread();
2493
2494 /*
2495 * Since we are creating the consumer threads, we own them, so we need
2496 * to join them before our thread exits.
2497 */
2498 ret = join_consumer_thread(&kconsumer_data);
2499 if (ret) {
2500 errno = ret;
2501 PERROR("join_consumer");
2502 }
2503
2504 ret = join_consumer_thread(&ustconsumer32_data);
2505 if (ret) {
2506 errno = ret;
2507 PERROR("join_consumer ust32");
2508 }
2509
2510 ret = join_consumer_thread(&ustconsumer64_data);
2511 if (ret) {
2512 errno = ret;
2513 PERROR("join_consumer ust64");
2514 }
2515 return NULL;
2516 }
2517
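/*
 * Shutdown callback passed to lttng_thread_create(): writing to the quit
 * pipe makes the poll() on thread_quit_pipe_fd return, which causes the
 * client thread to leave its command loop and exit.
 */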
2518 static
2519 bool shutdown_client_thread(void *thread_data)
2520 {
2521 struct lttng_pipe *client_quit_pipe = thread_data;
2522 const int write_fd = lttng_pipe_get_writefd(client_quit_pipe);
2523
2524 return notify_thread_pipe(write_fd) == 1;
2525 }
2526
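/*
 * Launch the client management thread and wait, through
 * wait_thread_state_running(), until it has signalled that it is ready to
 * serve commands. A minimal usage sketch, assuming the caller later stops
 * the thread through the lttng_thread API (hypothetical call site, not
 * part of this file):
 *
 *   struct lttng_thread *client_thread = launch_client_thread();
 *   if (!client_thread) {
 *           goto stop_all_threads;
 *   }
 *   ...
 *   lttng_thread_shutdown(client_thread);
 *   lttng_thread_put(client_thread);
 */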
2527 struct lttng_thread *launch_client_thread(void)
2528 {
2529 struct lttng_pipe *client_quit_pipe;
2530 struct lttng_thread *thread;
2531
2532 client_quit_pipe = lttng_pipe_open(FD_CLOEXEC);
2533 if (!client_quit_pipe) {
2534 goto error;
2535 }
2536
2537 thread = lttng_thread_create("Client management",
2538 thread_manage_clients,
2539 shutdown_client_thread,
2540 cleanup_client_thread,
2541 client_quit_pipe);
2542 if (!thread) {
2543 goto error;
2544 }
2545
2546 /*
2547 * This thread is part of the threads that need to be fully
2548 * initialized before the session daemon is marked as "ready".
2549 */
2550 wait_thread_state_running();
2551
2552 return thread;
2553 error:
2554 cleanup_client_thread(client_quit_pipe);
2555 return NULL;
2556 }