/*
 * Copyright (C) 2012 - David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <urcu/list.h>
#include <urcu/uatomic.h>

#include <common/defaults.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/compat/string.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/dynamic-buffer.h>
#include <common/buffer-view.h>
#include <lttng/trigger/trigger-internal.h>
#include <lttng/condition/condition.h>
#include <lttng/action/action.h>
#include <lttng/channel.h>
#include <lttng/channel-internal.h>
#include <lttng/rotate-internal.h>
#include <lttng/location-internal.h>
#include <lttng/userspace-probe-internal.h>
#include <common/string-utils/string-utils.h>

#include "health-sessiond.h"
#include "kernel-consumer.h"
#include "lttng-sessiond.h"
#include "lttng-syscall.h"
#include "buffer-registry.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "rotation-thread.h"
#include "agent-thread.h"
/* Sleep for 100ms between each check for the shm path's deletion. */
#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000

static enum lttng_error_code wait_on_path(void *path);

/*
 * Command completion handler that is used by the destroy command
 * when a session that has a non-default shm_path is being destroyed.
 *
 * See comment in cmd_destroy_session() for the rationale.
 */
static struct destroy_completion_handler {
	struct cmd_completion_handler handler;
	char shm_path[member_sizeof(struct ltt_session, shm_path)];
} destroy_completion_handler = {
	.handler = {
		.run = wait_on_path,
		.data = destroy_completion_handler.shm_path
	},
	.shm_path = { 0 },
};

static struct cmd_completion_handler *current_completion_handler;
/*
 * Used to keep a unique index for each relayd socket created where this value
 * is associated with streams on the consumer so it can match the right relayd
 * to send to. It must be accessed with the relayd_net_seq_idx_lock
 * held.
 */
static pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t relayd_net_seq_idx;
static int validate_ust_event_name(const char *);
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe);
/*
 * Create a session path used by list_lttng_sessions for the case that the
 * session consumer is on the network.
 */
static int build_network_session_path(char *dst, size_t size,
		struct ltt_session *session)
{
	int ret, kdata_port, udata_port;
	struct lttng_uri *kuri = NULL, *uuri = NULL, *uri = NULL;
	char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];

	assert(session);
	assert(dst);

	memset(tmp_urls, 0, sizeof(tmp_urls));
	memset(tmp_uurl, 0, sizeof(tmp_uurl));

	kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;

	if (session->kernel_session && session->kernel_session->consumer) {
		kuri = &session->kernel_session->consumer->dst.net.control;
		kdata_port = session->kernel_session->consumer->dst.net.data.port;
	}

	if (session->ust_session && session->ust_session->consumer) {
		uuri = &session->ust_session->consumer->dst.net.control;
		udata_port = session->ust_session->consumer->dst.net.data.port;
	}

	if (uuri == NULL && kuri == NULL) {
		uri = &session->consumer->dst.net.control;
		kdata_port = session->consumer->dst.net.data.port;
	} else if (kuri && uuri) {
		ret = uri_compare(kuri, uuri);
		if (ret) {
			/* Not equal. */
			uri = kuri;
			/* Build uuri URL string */
			ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
			if (ret < 0) {
				goto error;
			}
		} else {
			uri = kuri;
		}
	} else if (kuri && uuri == NULL) {
		uri = kuri;
	} else if (uuri && kuri == NULL) {
		uri = uuri;
	}

	ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));
	if (ret < 0) {
		goto error;
	}

	/*
	 * Do we have a UST url set. If yes, this means we have both kernel and UST
	 * to print.
	 */
	if (*tmp_uurl != '\0') {
		ret = snprintf(dst, size, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
				tmp_urls, kdata_port, tmp_uurl, udata_port);
	} else {
		int dport;

		if (kuri || (!kuri && !uuri)) {
			dport = kdata_port;
		} else {
			/* No kernel URI, use the UST port. */
			dport = udata_port;
		}
		ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, dport);
	}

error:
	return ret;
}
/*
 * Get run-time attributes if the session has been started (discarded events,
 * lost packets).
 */
static int get_kernel_runtime_stats(struct ltt_session *session,
		struct ltt_kernel_channel *kchan, uint64_t *discarded_events,
		uint64_t *lost_packets)
{
	int ret;

	if (!session->has_been_started) {
		ret = 0;
		*discarded_events = 0;
		*lost_packets = 0;
		goto end;
	}

	ret = consumer_get_discarded_events(session->id, kchan->key,
			session->kernel_session->consumer,
			discarded_events);
	if (ret < 0) {
		goto end;
	}

	ret = consumer_get_lost_packets(session->id, kchan->key,
			session->kernel_session->consumer,
			lost_packets);

end:
	return ret;
}
/*
 * Get run-time attributes if the session has been started (discarded events,
 * lost packets).
 */
static int get_ust_runtime_stats(struct ltt_session *session,
		struct ltt_ust_channel *uchan, uint64_t *discarded_events,
		uint64_t *lost_packets)
{
	int ret;
	struct ltt_ust_session *usess;

	if (!discarded_events || !lost_packets) {
		ret = -1;
		goto end;
	}

	usess = session->ust_session;
	assert(discarded_events);
	assert(lost_packets);

	if (!usess || !session->has_been_started) {
		*discarded_events = 0;
		*lost_packets = 0;
		ret = 0;
		goto end;
	}

	if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
		ret = ust_app_uid_get_channel_runtime_stats(usess->id,
				&usess->buffer_reg_uid_list,
				usess->consumer, uchan->id,
				uchan->attr.overwrite,
				discarded_events, lost_packets);
	} else if (usess->buffer_type == LTTNG_BUFFER_PER_PID) {
		ret = ust_app_pid_get_channel_runtime_stats(usess,
				uchan, usess->consumer,
				uchan->attr.overwrite,
				discarded_events, lost_packets);
		if (ret < 0) {
			goto end;
		}
		*discarded_events += uchan->per_pid_closed_app_discarded;
		*lost_packets += uchan->per_pid_closed_app_lost;
	} else {
		ERR("Unsupported buffer type");
		ret = -1;
		goto end;
	}

end:
	return ret;
}
/*
 * Fill lttng_channel array of all channels.
 */
static ssize_t list_lttng_channels(enum lttng_domain_type domain,
		struct ltt_session *session, struct lttng_channel *channels,
		struct lttng_channel_extended *chan_exts)
{
	int i = 0, ret = 0;
	struct ltt_kernel_channel *kchan;

	DBG("Listing channels for session %s", session->name);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		/* Kernel channels */
		if (session->kernel_session != NULL) {
			cds_list_for_each_entry(kchan,
					&session->kernel_session->channel_list.head, list) {
				uint64_t discarded_events, lost_packets;
				struct lttng_channel_extended *extended;

				extended = (struct lttng_channel_extended *)
						kchan->channel->attr.extended.ptr;

				ret = get_kernel_runtime_stats(session, kchan,
						&discarded_events, &lost_packets);
				if (ret < 0) {
					goto error;
				}
				/* Copy lttng_channel struct to array */
				memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
				channels[i].enabled = kchan->enabled;
				chan_exts[i].discarded_events =
						discarded_events;
				chan_exts[i].lost_packets = lost_packets;
				chan_exts[i].monitor_timer_interval =
						extended->monitor_timer_interval;
				chan_exts[i].blocking_timeout = 0;
				i++;
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_ht_iter iter;
		struct ltt_ust_channel *uchan;

		cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
				&iter.iter, uchan, node.node) {
			uint64_t discarded_events = 0, lost_packets = 0;

			if (lttng_strncpy(channels[i].name, uchan->name,
					LTTNG_SYMBOL_NAME_LEN)) {
				break;
			}
			channels[i].attr.overwrite = uchan->attr.overwrite;
			channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
			channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
			channels[i].attr.switch_timer_interval =
					uchan->attr.switch_timer_interval;
			channels[i].attr.read_timer_interval =
					uchan->attr.read_timer_interval;
			channels[i].enabled = uchan->enabled;
			channels[i].attr.tracefile_size = uchan->tracefile_size;
			channels[i].attr.tracefile_count = uchan->tracefile_count;

			/*
			 * Map enum lttng_ust_output to enum lttng_event_output.
			 */
			switch (uchan->attr.output) {
			case LTTNG_UST_MMAP:
				channels[i].attr.output = LTTNG_EVENT_MMAP;
				break;
			default:
				/*
				 * LTTNG_UST_MMAP is the only supported UST
				 * output mode.
				 */
				break;
			}

			chan_exts[i].monitor_timer_interval =
					uchan->monitor_timer_interval;
			chan_exts[i].blocking_timeout =
					uchan->attr.u.s.blocking_timeout;

			ret = get_ust_runtime_stats(session, uchan,
					&discarded_events, &lost_packets);
			if (ret < 0) {
				goto error;
			}
			chan_exts[i].discarded_events = discarded_events;
			chan_exts[i].lost_packets = lost_packets;
			i++;
		}
		break;
	}
	default:
		break;
	}

	return ret;

error:
	return -LTTNG_ERR_FATAL;
}
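
/*
 * Compute the size of the "extended" payload that follows the array of
 * lttng_event structures in an event listing reply: one
 * lttcomm_event_extended_header per event, plus the optional filter
 * expression string, exclusion names and serialized userspace probe location.
 */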
static int increment_extended_len(const char *filter_expression,
		struct lttng_event_exclusion *exclusion,
		const struct lttng_userspace_probe_location *probe_location,
		size_t *extended_len)
{
	int ret = 0;

	*extended_len += sizeof(struct lttcomm_event_extended_header);

	if (filter_expression) {
		*extended_len += strlen(filter_expression) + 1;
	}

	if (exclusion) {
		*extended_len += exclusion->count * LTTNG_SYMBOL_NAME_LEN;
	}

	if (probe_location) {
		ret = lttng_userspace_probe_location_serialize(probe_location,
				NULL, NULL);
		if (ret < 0) {
			goto end;
		}
		*extended_len += ret;
	}
	ret = 0;
end:
	return ret;
}
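
/*
 * Serialize the extended information of one event at *extended_at and advance
 * the write cursor. The layout written here must match the sizes accounted
 * for by increment_extended_len() above.
 */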
static int append_extended_info(const char *filter_expression,
		struct lttng_event_exclusion *exclusion,
		struct lttng_userspace_probe_location *probe_location,
		void **extended_at)
{
	int ret = 0;
	size_t filter_len = 0;
	size_t nb_exclusions = 0;
	size_t userspace_probe_location_len = 0;
	struct lttng_dynamic_buffer location_buffer;
	struct lttcomm_event_extended_header extended_header;

	if (filter_expression) {
		filter_len = strlen(filter_expression) + 1;
	}

	if (exclusion) {
		nb_exclusions = exclusion->count;
	}

	if (probe_location) {
		lttng_dynamic_buffer_init(&location_buffer);
		ret = lttng_userspace_probe_location_serialize(probe_location,
				&location_buffer, NULL);
		if (ret < 0) {
			goto end;
		}
		userspace_probe_location_len = location_buffer.size;
	}

	/* Set header fields */
	extended_header.filter_len = filter_len;
	extended_header.nb_exclusions = nb_exclusions;
	extended_header.userspace_probe_location_len = userspace_probe_location_len;

	/* Copy header */
	memcpy(*extended_at, &extended_header, sizeof(extended_header));
	*extended_at += sizeof(extended_header);

	/* Copy filter string */
	if (filter_expression) {
		memcpy(*extended_at, filter_expression, filter_len);
		*extended_at += filter_len;
	}

	/* Copy exclusion names */
	if (exclusion) {
		size_t len = nb_exclusions * LTTNG_SYMBOL_NAME_LEN;

		memcpy(*extended_at, &exclusion->names, len);
		*extended_at += len;
	}

	if (probe_location) {
		memcpy(*extended_at, location_buffer.data, location_buffer.size);
		*extended_at += location_buffer.size;
		lttng_dynamic_buffer_reset(&location_buffer);
	}

	ret = 0;
end:
	return ret;
}
/*
 * Create a list of agent domain events.
 *
 * Return number of events in list on success or else a negative value.
 */
static int list_lttng_agent_events(struct agent *agt,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct agent_event *event;
	struct lttng_event *tmp_events = NULL;
	struct lttng_ht_iter iter;
	size_t extended_len = 0;
	void *extended_at;

	assert(agt);
	assert(events);

	DBG3("Listing agent events");

	nb_event = lttng_ht_get_count(agt->events);

	/* Compute required extended infos size */
	extended_len = nb_event * sizeof(struct lttcomm_event_extended_header);

	/*
	 * This is only valid because the commands which add events are
	 * processed in the same thread as the listing.
	 */
	cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
		ret = increment_extended_len(event->filter_expression, NULL, NULL,
				&extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*total_size = nb_event * sizeof(*tmp_events) + extended_len;
	tmp_events = zmalloc(*total_size);
	if (!tmp_events) {
		PERROR("zmalloc agent events session");
		ret = -LTTNG_ERR_FATAL;
		goto error;
	}

	extended_at = ((uint8_t *) tmp_events) +
		nb_event * sizeof(struct lttng_event);

	cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
		strncpy(tmp_events[i].name, event->name, sizeof(tmp_events[i].name));
		tmp_events[i].name[sizeof(tmp_events[i].name) - 1] = '\0';
		tmp_events[i].enabled = event->enabled;
		tmp_events[i].loglevel = event->loglevel_value;
		tmp_events[i].loglevel_type = event->loglevel_type;
		i++;

		/* Append extended info */
		ret = append_extended_info(event->filter_expression, NULL, NULL,
				&extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*events = tmp_events;
	ret = nb_event;
	assert(nb_event == i);

error:
	return ret;
}
/*
 * Create a list of ust global domain events.
 */
static int list_lttng_ust_global_events(char *channel_name,
		struct ltt_ust_domain_global *ust_global,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ltt_ust_channel *uchan;
	struct ltt_ust_event *uevent;
	struct lttng_event *tmp;
	size_t extended_len = 0;
	void *extended_at;

	DBG("Listing UST global events for channel %s", channel_name);

	lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);

	nb_event = lttng_ht_get_count(uchan->events);

	DBG3("Listing UST global %d events", nb_event);

	/* Compute required extended infos size */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		if (uevent->internal) {
			nb_event--;
			continue;
		}

		ret = increment_extended_len(uevent->filter_expression,
			uevent->exclusion, NULL, &extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto end;
		}
	}
	if (nb_event == 0) {
		/* All events are internal, skip. */
		ret = 0;
		goto end;
	}

	*total_size = nb_event * sizeof(struct lttng_event) + extended_len;
	tmp = zmalloc(*total_size);
	if (tmp == NULL) {
		ret = -LTTNG_ERR_FATAL;
		goto end;
	}

	extended_at = ((uint8_t *) tmp) + nb_event * sizeof(struct lttng_event);

	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		if (uevent->internal) {
			/* This event should remain hidden from clients */
			continue;
		}
		strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
		tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		tmp[i].enabled = uevent->enabled;

		switch (uevent->attr.instrumentation) {
		case LTTNG_UST_TRACEPOINT:
			tmp[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_UST_PROBE:
			tmp[i].type = LTTNG_EVENT_PROBE;
			break;
		case LTTNG_UST_FUNCTION:
			tmp[i].type = LTTNG_EVENT_FUNCTION;
			break;
		}

		tmp[i].loglevel = uevent->attr.loglevel;
		switch (uevent->attr.loglevel_type) {
		case LTTNG_UST_LOGLEVEL_ALL:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
			break;
		case LTTNG_UST_LOGLEVEL_RANGE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
			break;
		case LTTNG_UST_LOGLEVEL_SINGLE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
			break;
		}

		if (uevent->filter) {
			tmp[i].filter = 1;
		}
		if (uevent->exclusion) {
			tmp[i].exclusion = 1;
		}
		i++;

		/* Append extended info */
		ret = append_extended_info(uevent->filter_expression,
			uevent->exclusion, NULL, &extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto end;
		}
	}

	*events = tmp;
	ret = nb_event;

end:
	return ret;
}
/*
 * Fill lttng_event array of all kernel events in the channel.
 */
static int list_lttng_kernel_events(char *channel_name,
		struct ltt_kernel_session *kernel_session,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret;
	unsigned int nb_event;
	struct ltt_kernel_event *event;
	struct ltt_kernel_channel *kchan;
	size_t extended_len = 0;
	void *extended_at;

	kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
	if (kchan == NULL) {
		ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
		goto error;
	}

	nb_event = kchan->event_count;

	DBG("Listing events for channel %s", kchan->channel->name);

	/* Compute required extended infos size */
	cds_list_for_each_entry(event, &kchan->events_list.head, list) {
		ret = increment_extended_len(event->filter_expression, NULL,
			event->userspace_probe_location,
			&extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*total_size = nb_event * sizeof(struct lttng_event) + extended_len;
	*events = zmalloc(*total_size);
	if (*events == NULL) {
		ret = -LTTNG_ERR_FATAL;
		goto error;
	}

	extended_at = ((void *) *events) +
		nb_event * sizeof(struct lttng_event);

	/* Kernel channels */
	cds_list_for_each_entry(event, &kchan->events_list.head, list) {
		strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
		(*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		(*events)[i].enabled = event->enabled;
		(*events)[i].filter =
				(unsigned char) !!event->filter_expression;

		switch (event->event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
			(*events)[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_KERNEL_KRETPROBE:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_KPROBE:
			(*events)[i].type = LTTNG_EVENT_PROBE;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_UPROBE:
			(*events)[i].type = LTTNG_EVENT_USERSPACE_PROBE;
			break;
		case LTTNG_KERNEL_FUNCTION:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
					sizeof(struct lttng_kernel_function));
			break;
		case LTTNG_KERNEL_NOOP:
			(*events)[i].type = LTTNG_EVENT_NOOP;
			break;
		case LTTNG_KERNEL_SYSCALL:
			(*events)[i].type = LTTNG_EVENT_SYSCALL;
			break;
		case LTTNG_KERNEL_ALL:
		default:
			break;
		}
		i++;

		/* Append extended info */
		ret = append_extended_info(event->filter_expression, NULL,
			event->userspace_probe_location, &extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	return nb_event;

error:
	/* Negate the error code to differentiate the size from an error */
	return -ret;
}
/*
 * Add URI to the consumer output object. Set the correct path depending on the
 * domain adding the default trace directory.
 */
static int add_uri_to_consumer(struct consumer_output *consumer,
		struct lttng_uri *uri, enum lttng_domain_type domain,
		const char *session_name)
{
	int ret = LTTNG_OK;
	const char *default_trace_dir;

	assert(uri);

	if (consumer == NULL) {
		DBG("No consumer detected. Don't add URI. Stopping.");
		ret = LTTNG_ERR_NO_CONSUMER;
		goto error;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		default_trace_dir = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		default_trace_dir = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		/*
		 * This case is possible if we try to add the URI to the global tracing
		 * session consumer object which in this case there is no subdir.
		 */
		default_trace_dir = "";
		break;
	}

	switch (uri->dtype) {
	case LTTNG_DST_IPV4:
	case LTTNG_DST_IPV6:
		DBG2("Setting network URI to consumer");

		if (consumer->type == CONSUMER_DST_NET) {
			if ((uri->stype == LTTNG_STREAM_CONTROL &&
				consumer->dst.net.control_isset) ||
				(uri->stype == LTTNG_STREAM_DATA &&
				consumer->dst.net.data_isset)) {
				ret = LTTNG_ERR_URL_EXIST;
				goto error;
			}
		} else {
			memset(&consumer->dst.net, 0, sizeof(consumer->dst.net));
		}

		consumer->type = CONSUMER_DST_NET;

		/* Set URI into consumer output object */
		ret = consumer_set_network_uri(consumer, uri);
		if (ret < 0) {
			goto error;
		} else if (ret == 1) {
			/*
			 * URI was the same in the consumer so we do not append the subdir
			 * again so to not duplicate output dir.
			 */
			ret = LTTNG_OK;
			goto error;
		}

		if (uri->stype == LTTNG_STREAM_CONTROL && strlen(uri->subdir) == 0) {
			ret = consumer_set_subdir(consumer, session_name);
			if (ret < 0) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
		}

		if (uri->stype == LTTNG_STREAM_CONTROL) {
			/* On a new subdir, reappend the default trace dir. */
			strncat(consumer->subdir, default_trace_dir,
					sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
			DBG3("Append domain trace name to subdir %s", consumer->subdir);
		}
		break;
	case LTTNG_DST_PATH:
		DBG2("Setting trace directory path from URI to %s", uri->dst.path);
		memset(consumer->dst.session_root_path, 0,
				sizeof(consumer->dst.session_root_path));
		/* Explicit length checks for strcpy and strcat. */
		if (strlen(uri->dst.path) + strlen(default_trace_dir)
				>= sizeof(consumer->dst.session_root_path)) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		strcpy(consumer->dst.session_root_path, uri->dst.path);
		/* Append default trace dir */
		strcat(consumer->dst.session_root_path, default_trace_dir);
		/* Flag consumer as local. */
		consumer->type = CONSUMER_DST_LOCAL;
		break;
	}

error:
	return ret;
}
/*
 * Init tracing by creating trace directory and sending fds to the kernel
 * consumer.
 */
static int init_kernel_tracing(struct ltt_kernel_session *session)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct consumer_socket *socket;

	assert(session);

	if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
		cds_lfht_for_each_entry(session->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = kernel_consumer_send_session(socket, session);
			pthread_mutex_unlock(socket->lock);
			if (ret < 0) {
				ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
				goto error;
			}
		}
	}

error:
	return ret;
}
/*
 * Create a socket to the relayd using the URI.
 *
 * On success, the relayd_sock pointer is set to the created socket.
 * Else, it remains untouched and an LTTng error code is returned.
 */
static enum lttng_error_code create_connect_relayd(struct lttng_uri *uri,
		struct lttcomm_relayd_sock **relayd_sock,
		struct consumer_output *consumer)
{
	int ret;
	enum lttng_error_code status = LTTNG_OK;
	struct lttcomm_relayd_sock *rsock;

	rsock = lttcomm_alloc_relayd_sock(uri, RELAYD_VERSION_COMM_MAJOR,
			RELAYD_VERSION_COMM_MINOR);
	if (!rsock) {
		status = LTTNG_ERR_FATAL;
		goto error;
	}

	/*
	 * Connect to relayd so we can proceed with a session creation. This call
	 * can possibly block for an arbitrary amount of time to set the health
	 * state to be in poll execution.
	 */
	health_poll_entry();
	ret = relayd_connect(rsock);
	health_poll_exit();
	if (ret < 0) {
		ERR("Unable to reach lttng-relayd");
		status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
		goto free_sock;
	}

	/* Create socket for control stream. */
	if (uri->stype == LTTNG_STREAM_CONTROL) {
		DBG3("Creating relayd stream socket from URI");

		/* Check relayd version */
		ret = relayd_version_check(rsock);
		if (ret == LTTNG_ERR_RELAYD_VERSION_FAIL) {
			status = LTTNG_ERR_RELAYD_VERSION_FAIL;
			goto close_sock;
		} else if (ret < 0) {
			ERR("Unable to reach lttng-relayd");
			status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
			goto close_sock;
		}
		consumer->relay_major_version = rsock->major;
		consumer->relay_minor_version = rsock->minor;
	} else if (uri->stype == LTTNG_STREAM_DATA) {
		DBG3("Creating relayd data socket from URI");
	} else {
		/* Command is not valid */
		ERR("Relayd invalid stream type: %d", uri->stype);
		status = LTTNG_ERR_INVALID;
		goto close_sock;
	}

	*relayd_sock = rsock;

	return status;

close_sock:
	/* The returned value is not useful since we are on an error path. */
	(void) relayd_close(rsock);
free_sock:
	free(rsock);
error:
	return status;
}
/*
 * Connect to the relayd using URI and send the socket to the right consumer.
 *
 * The consumer socket lock must be held by the caller.
 *
 * Returns LTTNG_OK on success or an LTTng error code on failure.
 */
static enum lttng_error_code send_consumer_relayd_socket(
		unsigned int session_id,
		struct lttng_uri *relayd_uri,
		struct consumer_output *consumer,
		struct consumer_socket *consumer_sock,
		char *session_name, char *hostname, int session_live_timer)
{
	int ret;
	struct lttcomm_relayd_sock *rsock = NULL;
	enum lttng_error_code status;

	/* Connect to relayd and make version check if uri is the control. */
	status = create_connect_relayd(relayd_uri, &rsock, consumer);
	if (status != LTTNG_OK) {
		goto relayd_comm_error;
	}
	assert(rsock);

	/* Set the network sequence index if not set. */
	if (consumer->net_seq_index == (uint64_t) -1ULL) {
		pthread_mutex_lock(&relayd_net_seq_idx_lock);
		/*
		 * Increment net_seq_idx because we are about to transfer the
		 * new relayd socket to the consumer.
		 * Assign unique key so the consumer can match streams.
		 */
		consumer->net_seq_index = ++relayd_net_seq_idx;
		pthread_mutex_unlock(&relayd_net_seq_idx_lock);
	}

	/* Send relayd socket to consumer. */
	ret = consumer_send_relayd_socket(consumer_sock, rsock, consumer,
			relayd_uri->stype, session_id,
			session_name, hostname, session_live_timer);
	if (ret < 0) {
		status = LTTNG_ERR_ENABLE_CONSUMER_FAIL;
		goto close_sock;
	}

	/* Flag that the corresponding socket was sent. */
	if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
		consumer_sock->control_sock_sent = 1;
	} else if (relayd_uri->stype == LTTNG_STREAM_DATA) {
		consumer_sock->data_sock_sent = 1;
	}

	/*
	 * Close socket which was dup on the consumer side. The session daemon does
	 * NOT keep track of the relayd socket(s) once transfer to the consumer.
	 */

close_sock:
	if (status != LTTNG_OK) {
		/*
		 * The consumer output for this session should not be used anymore
		 * since the relayd connection failed thus making any tracing or/and
		 * streaming not usable.
		 */
		consumer->enabled = 0;
	}
	(void) relayd_close(rsock);
	free(rsock);

relayd_comm_error:
	return status;
}
/*
 * Send both relayd sockets to a specific consumer and domain. This is a
 * helper function to facilitate sending the information to the consumer for a
 * session.
 *
 * The consumer socket lock must be held by the caller.
 *
 * Returns LTTNG_OK, or an LTTng error code on failure.
 */
static enum lttng_error_code send_consumer_relayd_sockets(
		enum lttng_domain_type domain,
		unsigned int session_id, struct consumer_output *consumer,
		struct consumer_socket *sock, char *session_name,
		char *hostname, int session_live_timer)
{
	enum lttng_error_code status = LTTNG_OK;

	assert(consumer);
	assert(sock);

	/* Sending control relayd socket. */
	if (!sock->control_sock_sent) {
		status = send_consumer_relayd_socket(session_id,
				&consumer->dst.net.control, consumer, sock,
				session_name, hostname, session_live_timer);
		if (status != LTTNG_OK) {
			goto error;
		}
	}

	/* Sending data relayd socket. */
	if (!sock->data_sock_sent) {
		status = send_consumer_relayd_socket(session_id,
				&consumer->dst.net.data, consumer, sock,
				session_name, hostname, session_live_timer);
		if (status != LTTNG_OK) {
			goto error;
		}
	}

error:
	return status;
}
/*
 * Setup relayd connections for a tracing session. First creates the socket to
 * the relayd and send them to the right domain consumer. Consumer type MUST be
 * network.
 */
int cmd_setup_relayd(struct ltt_session *session)
{
	int ret = LTTNG_OK;
	struct ltt_ust_session *usess;
	struct ltt_kernel_session *ksess;
	struct consumer_socket *socket;
	struct lttng_ht_iter iter;

	assert(session);

	usess = session->ust_session;
	ksess = session->kernel_session;

	DBG("Setting relayd for session %s", session->name);

	if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET
			&& usess->consumer->enabled) {
		/* For each consumer socket, send relayd sockets */
		cds_lfht_for_each_entry(usess->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_UST, session->id,
					usess->consumer, socket,
					session->name, session->hostname,
					session->live_timer);
			pthread_mutex_unlock(socket->lock);
			if (ret != LTTNG_OK) {
				goto error;
			}
			/* Session is now ready for network streaming. */
			session->net_handle = 1;
		}
		session->consumer->relay_major_version =
			usess->consumer->relay_major_version;
		session->consumer->relay_minor_version =
			usess->consumer->relay_minor_version;
	}

	if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET
			&& ksess->consumer->enabled) {
		cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_KERNEL, session->id,
					ksess->consumer, socket,
					session->name, session->hostname,
					session->live_timer);
			pthread_mutex_unlock(socket->lock);
			if (ret != LTTNG_OK) {
				goto error;
			}
			/* Session is now ready for network streaming. */
			session->net_handle = 1;
		}
		session->consumer->relay_major_version =
			ksess->consumer->relay_major_version;
		session->consumer->relay_minor_version =
			ksess->consumer->relay_minor_version;
	}

error:
	return ret;
}
/*
 * Start a kernel session by opening all necessary streams.
 */
static int start_kernel_session(struct ltt_kernel_session *ksess, int wpipe)
{
	int ret;
	struct ltt_kernel_channel *kchan;

	/* Open kernel metadata */
	if (ksess->metadata == NULL && ksess->output_traces) {
		ret = kernel_open_metadata(ksess);
		if (ret < 0) {
			ret = LTTNG_ERR_KERN_META_FAIL;
			goto error;
		}
	}

	/* Open kernel metadata stream */
	if (ksess->metadata && ksess->metadata_stream_fd < 0) {
		ret = kernel_open_metadata_stream(ksess);
		if (ret < 0) {
			ERR("Kernel create metadata stream failed");
			ret = LTTNG_ERR_KERN_STREAM_FAIL;
			goto error;
		}
	}

	/* For each channel */
	cds_list_for_each_entry(kchan, &ksess->channel_list.head, list) {
		if (kchan->stream_count == 0) {
			ret = kernel_open_channel_stream(kchan);
			if (ret < 0) {
				ret = LTTNG_ERR_KERN_STREAM_FAIL;
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;
		}
	}

	/* Setup kernel consumer socket and send fds to it */
	ret = init_kernel_tracing(ksess);
	if (ret != 0) {
		ret = LTTNG_ERR_KERN_START_FAIL;
		goto error;
	}

	/* This starts the kernel tracing. */
	ret = kernel_start_session(ksess);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_START_FAIL;
		goto error;
	}

	/* Quiescent wait after starting trace */
	kernel_wait_quiescent(wpipe);

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
 */
int cmd_disable_channel(struct ltt_session *session,
		enum lttng_domain_type domain, char *channel_name)
{
	int ret;
	struct ltt_ust_session *usess;

	usess = session->ust_session;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		ret = channel_kernel_disable(session->kernel_session,
				channel_name);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct lttng_ht *chan_ht;

		chan_ht = usess->domain_global.channels;

		uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
		if (uchan == NULL) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}

		ret = channel_ust_disable(usess, uchan);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_TRACK_PID processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_track_pid(struct ltt_session *session, enum lttng_domain_type domain,
		int pid)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		ret = kernel_track_pid(ksess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		ret = trace_ust_track_pid(usess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_UNTRACK_PID processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_untrack_pid(struct ltt_session *session, enum lttng_domain_type domain,
		int pid)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		ret = kernel_untrack_pid(ksess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		ret = trace_ust_untrack_pid(usess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
 *
 * The wpipe argument is used as a notifier for the kernel thread.
 */
int cmd_enable_channel(struct ltt_session *session,
		struct lttng_domain *domain, struct lttng_channel *attr, int wpipe)
{
	int ret;
	struct ltt_ust_session *usess = session->ust_session;
	struct lttng_ht *chan_ht;
	size_t len;

	len = lttng_strnlen(attr->name, sizeof(attr->name));

	/* Validate channel name */
	if (attr->name[0] == '.' ||
			memchr(attr->name, '/', len) != NULL) {
		ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
		goto error;
	}

	DBG("Enabling channel %s for session %s", attr->name, session->name);

	/*
	 * Don't try to enable a channel if the session has been started at
	 * some point in time before. The tracer does not allow it.
	 */
	if (session->has_been_started) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/*
	 * If the session is a live session, remove the switch timer, the
	 * live timer does the same thing but sends also synchronisation
	 * beacons for inactive streams.
	 */
	if (session->live_timer > 0) {
		attr->attr.live_timer_interval = session->live_timer;
		attr->attr.switch_timer_interval = 0;
	}

	/* Check for feature support */
	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		if (kernel_supports_ring_buffer_snapshot_sample_positions(kernel_tracer_fd) != 1) {
			/* Sampling position of buffer is not supported */
			WARN("Kernel tracer does not support buffer monitoring. "
					"Setting the monitor interval timer to 0 "
					"(disabled) for channel '%s' of session '%s'",
					attr->name, session->name);
			lttng_channel_set_monitor_timer_interval(attr, 0);
		}
		break;
	}
	case LTTNG_DOMAIN_UST:
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
		if (!agent_tracing_is_enabled()) {
			DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
			ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
			goto error;
		}
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		kchan = trace_kernel_get_channel_by_name(attr->name,
				session->kernel_session);
		if (kchan == NULL) {
			ret = channel_kernel_create(session->kernel_session, attr, wpipe);
			if (attr->name[0] != '\0') {
				session->kernel_session->has_non_default_channel = 1;
			}
		} else {
			ret = channel_kernel_enable(session->kernel_session, kchan);
		}

		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	{
		struct ltt_ust_channel *uchan;

		/*
		 * Current agent implementation limitations force us to allow
		 * only one channel at once in "agent" subdomains. Each
		 * subdomain has a default channel name which must be strictly
		 * enforced.
		 */
		if (domain->type == LTTNG_DOMAIN_JUL) {
			if (strncmp(attr->name, DEFAULT_JUL_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		} else if (domain->type == LTTNG_DOMAIN_LOG4J) {
			if (strncmp(attr->name, DEFAULT_LOG4J_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		} else if (domain->type == LTTNG_DOMAIN_PYTHON) {
			if (strncmp(attr->name, DEFAULT_PYTHON_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		}

		chan_ht = usess->domain_global.channels;

		uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
		if (uchan == NULL) {
			ret = channel_ust_create(usess, attr, domain->buf_type);
			if (attr->name[0] != '\0') {
				usess->has_non_default_channel = 1;
			}
		} else {
			ret = channel_ust_enable(usess, uchan);
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

error:
	return ret;
}
/*
 * Command LTTNG_DISABLE_EVENT processed by the client thread.
 */
int cmd_disable_event(struct ltt_session *session,
		enum lttng_domain_type domain, char *channel_name,
		struct lttng_event *event)
{
	int ret;
	char *event_name;

	DBG("Disable event command for event \'%s\'", event->name);

	event_name = event->name;

	/* Error out on unhandled search criteria */
	if (event->loglevel_type || event->loglevel != -1 || event->enabled
			|| event->pid || event->filter || event->exclusion) {
		ret = LTTNG_ERR_UNK;
		goto error;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (ksess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
		if (kchan == NULL) {
			ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
			goto error;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
		case LTTNG_EVENT_TRACEPOINT:
		case LTTNG_EVENT_SYSCALL:
		case LTTNG_EVENT_PROBE:
		case LTTNG_EVENT_FUNCTION:
		case LTTNG_EVENT_FUNCTION_ENTRY:	/* fall-through */
			if (event_name[0] == '\0') {
				ret = event_kernel_disable_event(kchan,
						NULL, event->type);
			} else {
				ret = event_kernel_disable_event(kchan,
						event_name, event->type);
			}
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		if (validate_ust_event_name(event_name)) {
			ret = LTTNG_ERR_INVALID_EVENT_NAME;
			goto error;
		}

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (usess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
			/*
			 * An empty event name means that everything
			 * should be disabled.
			 */
			if (event->name[0] == '\0') {
				ret = event_ust_disable_all_tracepoints(usess, uchan);
			} else {
				ret = event_ust_disable_tracepoint(usess, uchan,
						event_name);
			}
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		DBG3("Disable UST event %s in channel %s completed", event_name,
				channel_name);
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	{
		struct agent *agt;
		struct ltt_ust_session *usess = session->ust_session;

		assert(usess);

		switch (event->type) {
		case LTTNG_EVENT_ALL:
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		agt = trace_ust_find_agent(usess, domain);
		if (!agt) {
			ret = -LTTNG_ERR_UST_EVENT_NOT_FOUND;
			goto error;
		}
		/*
		 * An empty event name means that everything
		 * should be disabled.
		 */
		if (event->name[0] == '\0') {
			ret = event_agent_disable_all(usess, agt);
		} else {
			ret = event_agent_disable(usess, agt, event_name);
		}
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_ADD_CONTEXT processed by the client thread.
 */
int cmd_add_context(struct ltt_session *session, enum lttng_domain_type domain,
		char *channel_name, struct lttng_event_context *ctx, int kwpipe)
{
	int ret, chan_kern_created = 0, chan_ust_created = 0;
	char *app_ctx_provider_name = NULL, *app_ctx_name = NULL;

	/*
	 * Don't try to add a context if the session has been started at
	 * some point in time before. The tracer does not allow it and would
	 * result in a corrupted trace.
	 */
	if (session->has_been_started) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto end;
	}

	if (ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
		app_ctx_provider_name = ctx->u.app_ctx.provider_name;
		app_ctx_name = ctx->u.app_ctx.ctx_name;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		assert(session->kernel_session);

		if (session->kernel_session->channel_count == 0) {
			/* Create default channel */
			ret = channel_kernel_create(session->kernel_session, NULL, kwpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}
			chan_kern_created = 1;
		}
		/* Add kernel context to kernel tracer */
		ret = context_kernel_add(session->kernel_session, ctx, channel_name);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	{
		/*
		 * Validate channel name.
		 * If no channel name is given and the domain is JUL or LOG4J,
		 * set it to the appropriate domain-specific channel name. If
		 * a name is provided but does not match the expected channel
		 * name, return an error.
		 */
		if (domain == LTTNG_DOMAIN_JUL && *channel_name &&
				strcmp(channel_name,
				DEFAULT_JUL_CHANNEL_NAME)) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		} else if (domain == LTTNG_DOMAIN_LOG4J && *channel_name &&
				strcmp(channel_name,
				DEFAULT_LOG4J_CHANNEL_NAME)) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}
		/* break is _not_ missing here. */
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess = session->ust_session;
		unsigned int chan_count;

		assert(usess);

		chan_count = lttng_ht_get_count(usess->domain_global.channels);
		if (chan_count == 0) {
			struct lttng_channel *attr;
			/* Create default channel */
			attr = channel_new_default_attr(domain, usess->buffer_type);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			ret = channel_ust_create(usess, attr, usess->buffer_type);
			if (ret != LTTNG_OK) {
				channel_attr_destroy(attr);
				goto error;
			}
			channel_attr_destroy(attr);
			chan_ust_created = 1;
		}

		ret = context_ust_add(usess, domain, ctx, channel_name);
		free(app_ctx_provider_name);
		free(app_ctx_name);
		app_ctx_name = NULL;
		app_ctx_provider_name = NULL;
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;
	goto end;

error:
	if (chan_kern_created) {
		struct ltt_kernel_channel *kchan =
			trace_kernel_get_channel_by_name(DEFAULT_CHANNEL_NAME,
					session->kernel_session);
		/* Created previously, this should NOT fail. */
		assert(kchan);
		kernel_destroy_channel(kchan);
	}

	if (chan_ust_created) {
		struct ltt_ust_channel *uchan =
			trace_ust_find_channel_by_name(
					session->ust_session->domain_global.channels,
					DEFAULT_CHANNEL_NAME);
		/* Created previously, this should NOT fail. */
		assert(uchan);
		/* Remove from the channel list of the session. */
		trace_ust_delete_channel(session->ust_session->domain_global.channels,
				uchan);
		trace_ust_destroy_channel(uchan);
	}
end:
	free(app_ctx_provider_name);
	free(app_ctx_name);
	return ret;
}
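
/*
 * Check whether an event name begins with the given prefix, comparing at most
 * LTTNG_SYMBOL_NAME_LEN bytes so that names truncated to the symbol name
 * limit are still matched.
 */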
static inline bool name_starts_with(const char *name, const char *prefix)
{
	const size_t max_cmp_len = min(strlen(prefix), LTTNG_SYMBOL_NAME_LEN);

	return !strncmp(name, prefix, max_cmp_len);
}
/* Perform userspace-specific event name validation */
static int validate_ust_event_name(const char *name)
{
	int ret = 0;

	if (!name) {
		ret = -1;
		goto end;
	}

	/*
	 * Check name against all internal UST event component namespaces used
	 * by the agents.
	 */
	if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
			name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
			name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
		ret = -1;
	}

end:
	return ret;
}
/*
 * Internal version of cmd_enable_event() with a supplemental
 * "internal_event" flag which is used to enable internal events which should
 * be hidden from clients. Such events are used in the agent implementation to
 * enable the events through which all "agent" events are funneled.
 */
static int _cmd_enable_event(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe, bool internal_event)
{
	int ret = 0, channel_created = 0;
	struct lttng_channel *attr = NULL;

	assert(session);
	assert(event);
	assert(channel_name);

	/* If we have a filter, we must have its filter expression */
	assert(!(!!filter_expression ^ !!filter));

	/* Normalize event name as a globbing pattern */
	strutils_normalize_star_glob_pattern(event->name);

	/* Normalize exclusion names as globbing patterns */
	if (exclusion) {
		size_t i;

		for (i = 0; i < exclusion->count; i++) {
			char *name = LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);

			strutils_normalize_star_glob_pattern(name);
		}
	}

	DBG("Enable event command for event \'%s\'", event->name);

	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (session->kernel_session->has_non_default_channel
				&& channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL,
					LTTNG_BUFFER_GLOBAL);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
			if (lttng_strncpy(attr->name, channel_name,
					sizeof(attr->name))) {
				ret = LTTNG_ERR_INVALID;
				goto error;
			}

			ret = cmd_enable_channel(session, domain, attr, wpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}
			channel_created = 1;
		}

		/* Get the newly created kernel channel pointer */
		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			/* This should not happen... */
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
		{
			char *filter_expression_a = NULL;
			struct lttng_filter_bytecode *filter_a = NULL;

			/*
			 * We need to duplicate filter_expression and filter,
			 * because ownership is passed to first enable
			 * event.
			 */
			if (filter_expression) {
				filter_expression_a = strdup(filter_expression);
				if (!filter_expression_a) {
					ret = LTTNG_ERR_FATAL;
					goto error;
				}
			}
			if (filter) {
				filter_a = zmalloc(sizeof(*filter_a) + filter->len);
				if (!filter_a) {
					free(filter_expression_a);
					ret = LTTNG_ERR_FATAL;
					goto error;
				}
				memcpy(filter_a, filter, sizeof(*filter_a) + filter->len);
			}
			event->type = LTTNG_EVENT_TRACEPOINT;	/* Hack */
			ret = event_kernel_enable_event(kchan, event,
					filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				if (channel_created) {
					/* Let's not leak a useless channel. */
					kernel_destroy_channel(kchan);
				}
				free(filter_expression_a);
				free(filter_a);
				goto error;
			}
			event->type = LTTNG_EVENT_SYSCALL;	/* Hack */
			ret = event_kernel_enable_event(kchan, event,
					filter_expression_a, filter_a);
			/* We have passed ownership */
			filter_expression_a = NULL;
			filter_a = NULL;
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		}
		case LTTNG_EVENT_PROBE:
		case LTTNG_EVENT_USERSPACE_PROBE:
		case LTTNG_EVENT_FUNCTION:
		case LTTNG_EVENT_FUNCTION_ENTRY:
		case LTTNG_EVENT_TRACEPOINT:
			ret = event_kernel_enable_event(kchan, event,
					filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				if (channel_created) {
					/* Let's not leak a useless channel. */
					kernel_destroy_channel(kchan);
				}
				goto error;
			}
			break;
		case LTTNG_EVENT_SYSCALL:
			ret = event_kernel_enable_event(kchan, event,
					filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess = session->ust_session;

		assert(usess);

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (usess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(LTTNG_DOMAIN_UST,
					usess->buffer_type);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
			if (lttng_strncpy(attr->name, channel_name,
					sizeof(attr->name))) {
				ret = LTTNG_ERR_INVALID;
				goto error;
			}

			ret = cmd_enable_channel(session, domain, attr, wpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
			assert(uchan);
		}

		if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
			/*
			 * Don't allow users to add UST events to channels which
			 * are assigned to a userspace subdomain (JUL, Log4J,
			 * Python, etc.).
			 */
			ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
			goto error;
		}

		if (!internal_event) {
			/*
			 * Ensure the event name is not reserved for internal
			 * use.
			 */
			ret = validate_ust_event_name(event->name);
			if (ret) {
				WARN("Userspace event name %s failed validation.",
						event->name);
				ret = LTTNG_ERR_INVALID_EVENT_NAME;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */
		ret = event_ust_enable_tracepoint(usess, uchan, event,
				filter_expression, filter, exclusion,
				internal_event);
		/* We have passed ownership */
		filter_expression = NULL;
		filter = NULL;
		exclusion = NULL;
		if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
			goto already_enabled;
		} else if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	{
		const char *default_event_name, *default_chan_name;
		struct agent *agt;
		struct lttng_event uevent;
		struct lttng_domain tmp_dom;
		struct ltt_ust_session *usess = session->ust_session;

		assert(usess);

		if (!agent_tracing_is_enabled()) {
			DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
			ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
			goto error;
		}

		agt = trace_ust_find_agent(usess, domain->type);
		if (!agt) {
			agt = agent_create(domain->type);
			if (!agt) {
				ret = LTTNG_ERR_NOMEM;
				goto error;
			}
			agent_add(agt, usess->agents);
		}

		/* Create the default tracepoint. */
		memset(&uevent, 0, sizeof(uevent));
		uevent.type = LTTNG_EVENT_TRACEPOINT;
		uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
		default_event_name = event_get_default_agent_ust_name(
				domain->type);
		if (!default_event_name) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		strncpy(uevent.name, default_event_name, sizeof(uevent.name));
		uevent.name[sizeof(uevent.name) - 1] = '\0';

		/*
		 * The domain type is changed because we are about to enable the
		 * default channel and event for the JUL domain that are hardcoded.
		 * This happens in the UST domain.
		 */
		memcpy(&tmp_dom, domain, sizeof(tmp_dom));
		tmp_dom.type = LTTNG_DOMAIN_UST;

		switch (domain->type) {
		case LTTNG_DOMAIN_LOG4J:
			default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
			break;
		case LTTNG_DOMAIN_JUL:
			default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
			break;
		case LTTNG_DOMAIN_PYTHON:
			default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
			break;
		default:
			/* The switch/case we are in makes this impossible */
			assert(0);
		}

		{
			char *filter_expression_copy = NULL;
			struct lttng_filter_bytecode *filter_copy = NULL;

			if (filter) {
				const size_t filter_size = sizeof(
						struct lttng_filter_bytecode)
						+ filter->len;

				filter_copy = zmalloc(filter_size);
				if (!filter_copy) {
					ret = LTTNG_ERR_NOMEM;
					goto error;
				}
				memcpy(filter_copy, filter, filter_size);

				filter_expression_copy =
						strdup(filter_expression);
				if (!filter_expression_copy) {
					ret = LTTNG_ERR_NOMEM;
				}

				if (!filter_expression_copy || !filter_copy) {
					free(filter_expression_copy);
					free(filter_copy);
					goto error;
				}
			}

			ret = cmd_enable_event_internal(session, &tmp_dom,
					(char *) default_chan_name,
					&uevent, filter_expression_copy,
					filter_copy, NULL, wpipe);
		}

		if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
			goto already_enabled;
		} else if (ret != LTTNG_OK) {
			goto error;
		}

		/* The wild card * means that everything should be enabled. */
		if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
			ret = event_agent_enable_all(usess, agt, event, filter,
					filter_expression);
		} else {
			ret = event_agent_enable(usess, agt, event, filter,
					filter_expression);
		}
		filter = NULL;
		filter_expression = NULL;
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;

already_enabled:
error:
	free(filter_expression);
	free(filter);
	free(exclusion);
	channel_attr_destroy(attr);
	return ret;
}
/*
 * Command LTTNG_ENABLE_EVENT processed by the client thread.
 * We own filter, exclusion, and filter_expression.
 */
int cmd_enable_event(struct ltt_session *session, struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe)
{
	return _cmd_enable_event(session, domain, channel_name, event,
			filter_expression, filter, exclusion, wpipe, false);
}
/*
 * Enable an event which is internal to LTTng. An internal event should
 * never be made visible to clients and is immune to checks such as
 * reserved names.
 */
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe)
{
	return _cmd_enable_event(session, domain, channel_name, event,
			filter_expression, filter, exclusion, wpipe, true);
}
/*
 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
 */
ssize_t cmd_list_tracepoints(enum lttng_domain_type domain,
		struct lttng_event **events)
{
	int ret;
	ssize_t nb_events = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		nb_events = kernel_list_events(kernel_tracer_fd, events);
		if (nb_events < 0) {
			ret = LTTNG_ERR_KERN_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_UST:
		nb_events = ust_app_list_events(events);
		if (nb_events < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
		nb_events = agent_list_events(events, domain);
		if (nb_events < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_events;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
/*
 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
 */
ssize_t cmd_list_tracepoint_fields(enum lttng_domain_type domain,
		struct lttng_event_field **fields)
{
	int ret;
	ssize_t nb_fields = 0;

	switch (domain) {
	case LTTNG_DOMAIN_UST:
		nb_fields = ust_app_list_event_fields(fields);
		if (nb_fields < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_KERNEL:
	default:	/* fall-through */
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_fields;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
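
/*
 * Command LTTNG_LIST_SYSCALLS processed by the client thread; the reply is
 * built from the session daemon's syscall table.
 */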
ssize_t cmd_list_syscalls(struct lttng_event **events)
{
	return syscall_table_list(events);
}
/*
 * Command LTTNG_LIST_TRACKER_PIDS processed by the client thread.
 *
 * Called with session lock held.
 */
ssize_t cmd_list_tracker_pids(struct ltt_session *session,
		enum lttng_domain_type domain, int32_t **pids)
{
	int ret;
	ssize_t nr_pids = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;
		nr_pids = kernel_list_tracker_pids(ksess, pids);
		if (nr_pids < 0) {
			ret = LTTNG_ERR_KERN_LIST_FAIL;
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;
		nr_pids = trace_ust_list_tracker_pids(usess, pids);
		if (nr_pids < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nr_pids;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
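
/*
 * Ask a consumer daemon of the given output to create the domain directory
 * (session base path + chunk path + domain subdir) on behalf of the session,
 * using the provided credentials.
 */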
int domain_mkdir(const struct consumer_output *output,
		const struct ltt_session *session,
		uid_t uid, gid_t gid)
{
	struct consumer_socket *socket;
	struct lttng_ht_iter iter;
	int ret;
	char *path = NULL;

	if (!output || !output->socks) {
		ERR("No consumer output found");
		ret = -1;
		goto end;
	}

	path = zmalloc(LTTNG_PATH_MAX * sizeof(char));
	if (!path) {
		ERR("Cannot allocate mkdir path");
		ret = -1;
		goto end;
	}

	ret = snprintf(path, LTTNG_PATH_MAX, "%s%s%s",
			session_get_base_path(session),
			output->chunk_path, output->subdir);
	if (ret < 0 || ret >= LTTNG_PATH_MAX) {
		ret = -1;
		goto end;
	}

	DBG("Domain mkdir %s for session %" PRIu64, path, session->id);

	/*
	 * We have to iterate to find a socket, but we only need to send the
	 * mkdir command to one consumer, so we break after the first one.
	 */
	cds_lfht_for_each_entry(output->socks->ht, &iter.iter, socket, node.node) {
		pthread_mutex_lock(socket->lock);
		ret = consumer_mkdir(socket, session->id, output, path, uid, gid);
		pthread_mutex_unlock(socket->lock);
		if (ret) {
			ERR("Consumer mkdir");
			ret = -1;
			goto end;
		}
		break;
	}

	ret = 0;

end:
	free(path);
	return ret;
}
/*
 * Create the output directories of every active domain of the session.
 */
static int session_mkdir(const struct ltt_session *session)
{
	int ret;
	struct consumer_output *output;
	uid_t uid;
	gid_t gid;

	/*
	 * Unsupported feature in lttng-relayd before 2.11, not an error since it
	 * is only needed for session rotation and the user will get an error
	 * on rotate.
	 */
	if (session->consumer->type == CONSUMER_DST_NET &&
			session->consumer->relay_major_version == 2 &&
			session->consumer->relay_minor_version < 11) {
		ret = 0;
		goto end;
	}

	if (session->kernel_session) {
		output = session->kernel_session->consumer;
		uid = session->kernel_session->uid;
		gid = session->kernel_session->gid;
		ret = domain_mkdir(output, session, uid, gid);
		if (ret) {
			ERR("Mkdir kernel");
			goto end;
		}
	}

	if (session->ust_session) {
		output = session->ust_session->consumer;
		uid = session->ust_session->uid;
		gid = session->ust_session->gid;
		ret = domain_mkdir(output, session, uid, gid);
		if (ret) {
			ERR("Mkdir UST");
			goto end;
		}
	}

	ret = 0;

end:
	return ret;
}
/*
 * Command LTTNG_START_TRACE processed by the client thread.
 *
 * Called with session mutex held.
 */
int cmd_start_trace(struct ltt_session *session)
{
	int ret;
	unsigned long nb_chan = 0;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	assert(session);

	/* Ease our life a bit ;) */
	ksession = session->kernel_session;
	usess = session->ust_session;

	/* Is the session already started? */
	if (session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/*
	 * Starting a session without a channel is pointless: channels cannot be
	 * enabled once the session is started, so inform the client.
	 */
	if (usess && usess->domain_global.channels) {
		nb_chan += lttng_ht_get_count(usess->domain_global.channels);
	}
	if (ksession) {
		nb_chan += ksession->channel_count;
	}
	if (!nb_chan) {
		ret = LTTNG_ERR_NO_CHANNEL;
		goto error;
	}

	/*
	 * Record the timestamp of the first time the session is started for
	 * an eventual session rotation call.
	 */
	if (!session->has_been_started) {
		session->current_chunk_start_ts = time(NULL);
		if (session->current_chunk_start_ts == (time_t) -1) {
			PERROR("Failed to retrieve the \"%s\" session's start time",
					session->name);
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		if (!session->snapshot_mode && session->output_traces) {
			ret = session_mkdir(session);
			if (ret) {
				ERR("Failed to create the session directories");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
	}

	/* Kernel tracing */
	if (ksession != NULL) {
		DBG("Start kernel tracing session %s", session->name);
		ret = start_kernel_session(ksession, kernel_tracer_fd);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}

	/* Flag session that trace should start automatically */
	if (usess) {
		/*
		 * Even though the start trace might fail, flag this session active so
		 * other applications coming in are started by default.
		 */
		usess->active = 1;

		ret = ust_app_start_trace_all(usess);
		if (ret < 0) {
			ret = LTTNG_ERR_UST_START_FAIL;
			goto error;
		}
	}

	/* Flag this after a successful start. */
	session->has_been_started = 1;
	session->active = 1;

	/*
	 * Clear the flag that indicates that a rotation was done while the
	 * session was stopped.
	 */
	session->rotated_after_last_stop = false;

	if (session->rotate_timer_period) {
		ret = timer_session_rotation_schedule_timer_start(session,
				session->rotate_timer_period);
		if (ret < 0) {
			ERR("Failed to enable rotate timer");
			ret = LTTNG_ERR_UNK;
			goto error;
		}
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_STOP_TRACE processed by the client thread.
 */
int cmd_stop_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_channel *kchan;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;
	bool error_occurred = false;

	assert(session);

	DBG("Begin stop session %s (id %" PRIu64 ")", session->name, session->id);

	ksession = session->kernel_session;
	usess = session->ust_session;

	/* Session is not active. Skip everything and inform the client. */
	if (!session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
		goto error;
	}

	if (session->rotation_schedule_timer_enabled) {
		if (timer_session_rotation_schedule_timer_stop(
				session)) {
			ERR("Failed to stop the \"rotation schedule\" timer of session %s",
					session->name);
		}
	}

	/*
	 * If a rotation is still ongoing, the check timer will continue to wait
	 * for the rotation to complete. When the rotation finally completes,
	 * a check will be performed to rename the "active" chunk to the
	 * expected "timestamp_begin-timestamp_end" format.
	 */
	if (session->current_archive_id > 0 &&
			session->rotation_state != LTTNG_ROTATION_STATE_ONGOING) {
		ret = rename_active_chunk(session);
		if (ret) {
			/*
			 * This error should not prevent the user from stopping
			 * the session. However, it will be reported at the end.
			 */
			error_occurred = true;
		}
	}

	/* Kernel tracer */
	if (ksession && ksession->active) {
		DBG("Stop kernel tracing");

		ret = kernel_stop_session(ksession);
		if (ret < 0) {
			ret = LTTNG_ERR_KERN_STOP_FAIL;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);

		/* Flush metadata after stopping (if exists) */
		if (ksession->metadata_stream_fd >= 0) {
			ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
			if (ret < 0) {
				ERR("Kernel metadata flush failed");
			}
		}

		/* Flush all buffers after stopping */
		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			ret = kernel_flush_buffer(kchan);
			if (ret < 0) {
				ERR("Kernel flush buffer error");
			}
		}

		ksession->active = 0;
		DBG("Kernel session stopped %s (id %" PRIu64 ")", session->name,
				session->id);
	}

	if (usess && usess->active) {
		/*
		 * Even though the stop trace might fail, flag this session inactive so
		 * other applications coming in are not started by default.
		 */
		usess->active = 0;

		ret = ust_app_stop_trace_all(usess);
		if (ret < 0) {
			ret = LTTNG_ERR_UST_STOP_FAIL;
			goto error;
		}
	}

	/* Flag inactive after a successful stop. */
	session->active = 0;
	ret = !error_occurred ? LTTNG_OK : LTTNG_ERR_UNK;

error:
	return ret;
}
/*
 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
 */
int cmd_set_consumer_uri(struct ltt_session *session, size_t nb_uri,
		struct lttng_uri *uris)
{
	int ret, i;
	struct ltt_kernel_session *ksess = session->kernel_session;
	struct ltt_ust_session *usess = session->ust_session;

	assert(session);
	assert(uris);
	assert(nb_uri > 0);

	/* Can't set consumer URI if the session is active. */
	if (session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/* Set the "global" consumer URIs */
	for (i = 0; i < nb_uri; i++) {
		ret = add_uri_to_consumer(session->consumer,
				&uris[i], 0, session->name);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}

	/* Set UST session URIs */
	if (session->ust_session) {
		for (i = 0; i < nb_uri; i++) {
			ret = add_uri_to_consumer(
					session->ust_session->consumer,
					&uris[i], LTTNG_DOMAIN_UST,
					session->name);
			if (ret != LTTNG_OK) {
				goto error;
			}
		}
	}

	/* Set kernel session URIs */
	if (session->kernel_session) {
		for (i = 0; i < nb_uri; i++) {
			ret = add_uri_to_consumer(
					session->kernel_session->consumer,
					&uris[i], LTTNG_DOMAIN_KERNEL,
					session->name);
			if (ret != LTTNG_OK) {
				goto error;
			}
		}
	}

	/*
	 * Make sure to set the session in output mode after we set URI since a
	 * session can be created without URL (thus flagged in no output mode).
	 */
	session->output_traces = 1;
	if (ksess) {
		ksess->output_traces = 1;
	}

	if (usess) {
		usess->output_traces = 1;
	}

	/* All good! */
	ret = LTTNG_OK;

error:
	return ret;
}
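/*
 * Illustration only: this command is what liblttng-ctl's
 * lttng_set_consumer_url() ends up invoking. A hedged sketch; the host and
 * session names are illustrative and error handling is elided:
 *
 *	struct lttng_handle *h = lttng_create_handle("my-session", NULL);
 *	// Single "net://" control URL; the data URL is derived from it.
 *	lttng_set_consumer_url(h, "net://relayd-host", NULL);
 *	lttng_destroy_handle(h);
 *
 * As noted above, setting a URI also flips the session (and its kernel/UST
 * counterparts) back into output mode.
 */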
/*
 * Command LTTNG_CREATE_SESSION processed by the client thread.
 */
int cmd_create_session_uri(char *name, struct lttng_uri *uris,
		size_t nb_uri, lttng_sock_cred *creds, unsigned int live_timer)
{
	int ret;
	struct ltt_session *session = NULL;

	assert(name);
	assert(creds);

	/* Check if the session already exists. */
	session_lock_list();
	session = session_find_by_name(name);
	session_unlock_list();
	if (session != NULL) {
		ret = LTTNG_ERR_EXIST_SESS;
		goto end;
	}

	/* Create tracing session in the registry */
	ret = session_create(name, LTTNG_SOCK_GET_UID_CRED(creds),
			LTTNG_SOCK_GET_GID_CRED(creds));
	if (ret != LTTNG_OK) {
		goto end;
	}

	/* Get the newly created session pointer back. */
	session_lock_list();
	session = session_find_by_name(name);
	session_unlock_list();
	assert(session);

	session->live_timer = live_timer;
	/* Create default consumer output for the session not yet created. */
	session->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
	if (session->consumer == NULL) {
		ret = LTTNG_ERR_FATAL;
		goto end;
	}

	if (uris) {
		ret = cmd_set_consumer_uri(session, nb_uri, uris);
		if (ret != LTTNG_OK) {
			goto end;
		}
		session->output_traces = 1;
	} else {
		session->output_traces = 0;
		DBG2("Session %s created with no output", session->name);
	}

	session->consumer->enabled = 1;
	ret = LTTNG_OK;

end:
	if (session) {
		/* Release the reference acquired by session_find_by_name(). */
		session_lock_list();
		session_put(session);
		session_unlock_list();
	}
	return ret;
}
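/*
 * Illustration only: the usual entry points into this handler are
 * liblttng-ctl's lttng_create_session() and the "lttng create" command.
 * A minimal sketch; the URLs and session names are illustrative:
 *
 *	// Local output under an explicit directory:
 *	lttng_create_session("my-session", "file:///tmp/my-traces");
 *
 *	// Network output, streaming to a relay daemon:
 *	lttng_create_session("my-net-session", "net://relayd-host");
 *
 * Passing a NULL URL creates the session with no output, as handled by the
 * "else" branch above.
 */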
/*
 * Command LTTNG_CREATE_SESSION_SNAPSHOT processed by the client thread.
 */
int cmd_create_session_snapshot(char *name, struct lttng_uri *uris,
		size_t nb_uri, lttng_sock_cred *creds)
{
	int ret;
	struct ltt_session *session = NULL;
	struct snapshot_output *new_output = NULL;

	assert(name);
	assert(creds);

	/*
	 * Create session in no output mode with URIs set to NULL. The uris we've
	 * received are for a default snapshot output, if one was provided.
	 */
	ret = cmd_create_session_uri(name, NULL, 0, creds, 0);
	if (ret != LTTNG_OK) {
		goto end;
	}

	/* Get the newly created session pointer back. This should NEVER fail. */
	session_lock_list();
	session = session_find_by_name(name);
	session_unlock_list();
	assert(session);

	/* Flag session for snapshot mode. */
	session->snapshot_mode = 1;

	/* Skip snapshot output creation if no URI is given. */
	if (nb_uri == 0) {
		ret = LTTNG_OK;
		goto end;
	}

	new_output = snapshot_output_alloc();
	if (!new_output) {
		ret = LTTNG_ERR_NOMEM;
		goto error_snapshot_alloc;
	}

	ret = snapshot_output_init_with_uri(DEFAULT_SNAPSHOT_MAX_SIZE, NULL,
			uris, nb_uri, session->consumer, new_output, &session->snapshot);
	if (ret < 0) {
		if (ret == -ENOMEM) {
			ret = LTTNG_ERR_NOMEM;
		} else {
			ret = LTTNG_ERR_INVALID;
		}
		goto error_snapshot;
	}

	snapshot_add_output(&session->snapshot, new_output);
	ret = LTTNG_OK;
	goto end;

error_snapshot:
	snapshot_output_destroy(new_output);
error_snapshot_alloc:
end:
	if (session) {
		session_lock_list();
		session_put(session);
		session_unlock_list();
	}
	return ret;
}
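/*
 * Illustration only: snapshot-mode sessions are usually created through
 * liblttng-ctl's lttng_create_session_snapshot(). A hedged sketch; the URL
 * and session name are illustrative, and a NULL URL skips the default
 * output exactly like the nb_uri == 0 case above:
 *
 *	lttng_create_session_snapshot("my-snap-session",
 *			"file:///tmp/my-snapshots");
 *	// ... enable events, start tracing, then record snapshots on demand:
 *	lttng_snapshot_record("my-snap-session", NULL, 0);
 */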
/*
 * Command LTTNG_DESTROY_SESSION processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_destroy_session(struct ltt_session *session,
		struct notification_thread_handle *notification_thread_handle)
{
	int ret;

	/* Safety net */
	assert(session);

	DBG("Begin destroy session %s (id %" PRIu64 ")", session->name, session->id);

	if (session->rotation_pending_check_timer_enabled) {
		if (timer_session_rotation_pending_check_stop(session)) {
			ERR("Failed to stop the \"rotation pending check\" timer of session %s",
					session->name);
		}
	}

	if (session->rotation_schedule_timer_enabled) {
		if (timer_session_rotation_schedule_timer_stop(
				session)) {
			ERR("Failed to stop the \"rotation schedule\" timer of session %s",
					session->name);
		}
	}

	if (session->rotate_size) {
		unsubscribe_session_consumed_size_rotation(session, notification_thread_handle);
		session->rotate_size = 0;
	}

	/*
	 * The rename of the current chunk is performed at stop, but if we rotated
	 * the session after the previous stop command, we need to rename the
	 * new (and empty) chunk that was started in between.
	 */
	if (session->rotated_after_last_stop) {
		rename_active_chunk(session);
	}

	if (session->shm_path[0]) {
		/*
		 * When a session is created with an explicit shm_path,
		 * the consumer daemon will create its shared memory files
		 * at that location and will *not* unlink them. This is normal
		 * as the intention of that feature is to make it possible
		 * to retrieve the content of those files should a crash occur.
		 *
		 * To ensure the content of those files can be used, the
		 * sessiond daemon will replicate the content of the metadata
		 * cache in a metadata file.
		 *
		 * On clean-up, it is expected that the consumer daemon will
		 * unlink the shared memory files and that the session daemon
		 * will unlink the metadata file. Then, the session's directory
		 * in the shm path can be removed.
		 *
		 * Unfortunately, a flaw in the design of the sessiond's and
		 * consumerd's tear down of channels makes it impossible to
		 * determine when the sessiond _and_ the consumerd have both
		 * destroyed their representation of a channel. For one, the
		 * unlinking, close, and rmdir happen in deferred 'call_rcu'
		 * callbacks in both daemons.
		 *
		 * However, it is also impossible for the sessiond to know when
		 * the consumer daemon is done destroying its channel(s) since
		 * it occurs as a reaction to the closing of the channel's file
		 * descriptor. There is no resulting communication initiated
		 * from the consumerd to the sessiond to confirm that the
		 * operation is completed (and was successful).
		 *
		 * Until this is all fixed, the session daemon checks for the
		 * removal of the session's shm path which makes it possible
		 * to safely advertise a session as having been destroyed.
		 *
		 * Prior to this fix, it was not possible to reliably save
		 * a session making use of the --shm-path option, destroy it,
		 * and load it again. This is because the creation of the
		 * session would fail upon seeing the session's shm path
		 * already in existence.
		 *
		 * Note that none of the error paths in the check for the
		 * directory's existence return an error. This is normal
		 * as there isn't much that can be done. The session will
		 * be destroyed properly, except that we can't offer the
		 * guarantee that the same session can be re-created.
		 */
		current_completion_handler = &destroy_completion_handler.handler;
		ret = lttng_strncpy(destroy_completion_handler.shm_path,
				session->shm_path,
				sizeof(destroy_completion_handler.shm_path));
		assert(!ret);
	}

	/*
	 * The session is destroyed. However, note that the command context
	 * still holds a reference to the session, thus delaying its destruction
	 * _at least_ up to the point when that reference is released.
	 */
	session_destroy(session);

	return LTTNG_OK;
}
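/*
 * Illustration only: a client triggers this handler through
 * lttng_destroy_session(). When the session used --shm-path, the command
 * completion handler installed above is meant to delay the reply until the
 * shm directory has actually disappeared, so a sequence like the following
 * sketch can re-create the same session right away (names and paths are
 * illustrative):
 *
 *	lttng_destroy_session("my-session");
 *	lttng_create_session("my-session", "file:///tmp/my-traces");
 */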
/*
 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
 */
int cmd_register_consumer(struct ltt_session *session,
		enum lttng_domain_type domain, const char *sock_path,
		struct consumer_data *cdata)
{
	int ret, sock;
	struct consumer_socket *socket = NULL;

	assert(session);
	assert(cdata);
	assert(sock_path);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess = session->kernel_session;

		assert(ksess);

		/* Can't register a consumer if there is already one */
		if (ksess->consumer_fds_sent != 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			goto error;
		}

		sock = lttcomm_connect_unix_sock(sock_path);
		if (sock < 0) {
			ret = LTTNG_ERR_CONNECT_FAIL;
			goto error;
		}
		cdata->cmd_sock = sock;

		socket = consumer_allocate_socket(&cdata->cmd_sock);
		if (socket == NULL) {
			ret = close(sock);
			if (ret < 0) {
				PERROR("close register consumer");
			}
			cdata->cmd_sock = -1;
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		socket->lock = zmalloc(sizeof(pthread_mutex_t));
		if (socket->lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		pthread_mutex_init(socket->lock, NULL);
		socket->registered = 1;

		rcu_read_lock();
		consumer_add_socket(socket, ksess->consumer);
		rcu_read_unlock();

		pthread_mutex_lock(&cdata->pid_mutex);
		cdata->pid = -1;
		pthread_mutex_unlock(&cdata->pid_mutex);

		break;
	}
	default:
		/* TODO: Userspace tracing */
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return LTTNG_OK;

error:
	if (socket) {
		consumer_destroy_socket(socket);
	}
	return ret;
}
/*
 * Command LTTNG_LIST_DOMAINS processed by the client thread.
 */
ssize_t cmd_list_domains(struct ltt_session *session,
		struct lttng_domain **domains)
{
	int ret, index = 0;
	ssize_t nb_dom = 0;
	struct agent *agt;
	struct lttng_ht_iter iter;

	if (session->kernel_session != NULL) {
		DBG3("Listing domains found kernel domain");
		nb_dom++;
	}

	if (session->ust_session != NULL) {
		DBG3("Listing domains found UST global domain");
		nb_dom++;

		rcu_read_lock();
		cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
				agt, node.node) {
			if (agt->being_used) {
				nb_dom++;
			}
		}
		rcu_read_unlock();
	}

	*domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
	if (*domains == NULL) {
		ret = LTTNG_ERR_FATAL;
		goto error;
	}

	if (session->kernel_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_KERNEL;

		/* Kernel session buffer type is always GLOBAL */
		(*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;

		index++;
	}

	if (session->ust_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_UST;
		(*domains)[index].buf_type = session->ust_session->buffer_type;
		index++;

		rcu_read_lock();
		cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
				agt, node.node) {
			if (agt->being_used) {
				(*domains)[index].type = agt->domain;
				(*domains)[index].buf_type = session->ust_session->buffer_type;
				index++;
			}
		}
		rcu_read_unlock();
	}

	return nb_dom;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
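/*
 * Illustration only: the corresponding client call is lttng_list_domains().
 * A minimal sketch; the session name is illustrative and error handling is
 * elided:
 *
 *	struct lttng_domain *domains = NULL;
 *	int count = lttng_list_domains("my-session", &domains);
 *	for (int i = 0; i < count; i++)
 *		printf("domain type %d\n", domains[i].type);
 *	free(domains);
 */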
/*
 * Command LTTNG_LIST_CHANNELS processed by the client thread.
 */
ssize_t cmd_list_channels(enum lttng_domain_type domain,
		struct ltt_session *session, struct lttng_channel **channels)
{
	ssize_t nb_chan = 0, payload_size = 0, ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_chan = session->kernel_session->channel_count;
		}
		DBG3("Number of kernel channels %zd", nb_chan);
		if (nb_chan <= 0) {
			ret = -LTTNG_ERR_KERN_CHAN_NOT_FOUND;
			goto end;
		}
		break;
	case LTTNG_DOMAIN_UST:
		if (session->ust_session != NULL) {
			rcu_read_lock();
			nb_chan = lttng_ht_get_count(
					session->ust_session->domain_global.channels);
			rcu_read_unlock();
		}
		DBG3("Number of UST global channels %zd", nb_chan);
		if (nb_chan < 0) {
			ret = -LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto end;
		}
		break;
	default:
		ret = -LTTNG_ERR_UND;
		goto end;
	}

	if (nb_chan > 0) {
		const size_t channel_size = sizeof(struct lttng_channel) +
				sizeof(struct lttng_channel_extended);
		struct lttng_channel_extended *channel_exts;

		payload_size = nb_chan * channel_size;
		*channels = zmalloc(payload_size);
		if (*channels == NULL) {
			ret = -LTTNG_ERR_FATAL;
			goto end;
		}

		channel_exts = ((void *) *channels) +
				(nb_chan * sizeof(struct lttng_channel));
		ret = list_lttng_channels(domain, session, *channels, channel_exts);
		if (ret != LTTNG_OK) {
			free(*channels);
			*channels = NULL;
			goto end;
		}
	}

	ret = payload_size;
end:
	return ret;
}
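/*
 * Illustration only: clients reach this handler via lttng_list_channels().
 * Note that the returned array is followed in memory by one
 * lttng_channel_extended per channel, which is how the extended attributes
 * travel back to liblttng-ctl. A hedged sketch (session name illustrative,
 * error handling elided):
 *
 *	struct lttng_domain dom = { .type = LTTNG_DOMAIN_UST };
 *	struct lttng_handle *h = lttng_create_handle("my-session", &dom);
 *	struct lttng_channel *channels = NULL;
 *	int count = lttng_list_channels(h, &channels);
 *	for (int i = 0; i < count; i++)
 *		printf("%s\n", channels[i].name);
 *	free(channels);
 *	lttng_destroy_handle(h);
 */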
/*
 * Command LTTNG_LIST_EVENTS processed by the client thread.
 */
ssize_t cmd_list_events(enum lttng_domain_type domain,
		struct ltt_session *session, char *channel_name,
		struct lttng_event **events, size_t *total_size)
{
	int ret = 0;
	ssize_t nb_event = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_event = list_lttng_kernel_events(channel_name,
					session->kernel_session, events,
					total_size);
		}
		break;
	case LTTNG_DOMAIN_UST:
		if (session->ust_session != NULL) {
			nb_event = list_lttng_ust_global_events(channel_name,
					&session->ust_session->domain_global, events,
					total_size);
		}
		break;
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
		if (session->ust_session) {
			struct lttng_ht_iter iter;
			struct agent *agt;

			rcu_read_lock();
			cds_lfht_for_each_entry(session->ust_session->agents->ht,
					&iter.iter, agt, node.node) {
				if (agt->domain == domain) {
					nb_event = list_lttng_agent_events(
							agt, events,
							total_size);
					break;
				}
			}
			rcu_read_unlock();
		}
		break;
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_event;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
/*
 * Using the session list, fill an lttng_session array to send back to the
 * client for session listing.
 *
 * The session list lock MUST be acquired before calling this function. Use
 * session_lock_list() and session_unlock_list().
 */
void cmd_list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
		gid_t gid)
{
	int ret;
	unsigned int i = 0;
	struct ltt_session *session;
	struct ltt_session_list *list = session_get_list();

	DBG("Getting all available sessions for UID %d GID %d",
			uid, gid);
	/*
	 * Iterate over session list and append data after the control struct in
	 * the buffer.
	 */
	cds_list_for_each_entry(session, &list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid) ||
				session->destroyed) {
			session_put(session);
			continue;
		}

		struct ltt_kernel_session *ksess = session->kernel_session;
		struct ltt_ust_session *usess = session->ust_session;

		if (session->consumer->type == CONSUMER_DST_NET ||
				(ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
				(usess && usess->consumer->type == CONSUMER_DST_NET)) {
			ret = build_network_session_path(sessions[i].path,
					sizeof(sessions[i].path), session);
		} else {
			ret = snprintf(sessions[i].path, sizeof(sessions[i].path), "%s",
					session->consumer->dst.session_root_path);
		}
		if (ret < 0) {
			PERROR("snprintf session path");
			session_put(session);
			continue;
		}

		strncpy(sessions[i].name, session->name, NAME_MAX);
		sessions[i].name[NAME_MAX - 1] = '\0';
		sessions[i].enabled = session->active;
		sessions[i].snapshot_mode = session->snapshot_mode;
		sessions[i].live_timer_interval = session->live_timer;
		i++;
		session_put(session);
	}
}
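/*
 * Illustration only: the array filled here is what liblttng-ctl hands back
 * from lttng_list_sessions(). A minimal sketch of the consuming side (the
 * printed format is illustrative):
 *
 *	struct lttng_session *sessions = NULL;
 *	int count = lttng_list_sessions(&sessions);
 *	for (int i = 0; i < count; i++)
 *		printf("%s (%s) %s\n", sessions[i].name, sessions[i].path,
 *				sessions[i].enabled ? "started" : "stopped");
 *	free(sessions);
 */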
/*
 * Command LTTNG_DATA_PENDING. Returns 0 if the data is NOT pending, meaning
 * it is ready for trace analysis (or any kind of reader), or 1 if data is
 * still pending.
 */
int cmd_data_pending(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_session *ksess = session->kernel_session;
	struct ltt_ust_session *usess = session->ust_session;

	assert(session);

	DBG("Data pending for session %s", session->name);

	/* Session MUST be stopped to ask for data availability. */
	if (session->active) {
		ret = LTTNG_ERR_SESSION_STARTED;
		goto error;
	} else {
		/*
		 * If stopped, just make sure we've started before, else the check
		 * below would always report pending data.
		 *
		 * The consumer assumes that when the data pending command is received,
		 * the trace has been started before; otherwise no output data is
		 * written by the streams, which is a precondition for data pending.
		 * It is therefore *VERY* important that we don't ask the consumer
		 * before a start trace.
		 */
		if (!session->has_been_started) {
			ret = 0;
			goto error;
		}
	}

	/* A rotation is still pending, we have to wait. */
	if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
		DBG("Rotate still pending for session %s", session->name);
		ret = 1;
		goto error;
	}

	if (ksess && ksess->consumer) {
		ret = consumer_is_data_pending(ksess->id, ksess->consumer);
		if (ret == 1) {
			/* Data is still being extracted for the kernel. */
			goto error;
		}
	}

	if (usess && usess->consumer) {
		ret = consumer_is_data_pending(usess->id, usess->consumer);
		if (ret == 1) {
			/* Data is still being extracted for the UST tracer. */
			goto error;
		}
	}

	/* Data is ready to be read by a viewer */
	ret = 0;

error:
	return ret;
}
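/*
 * Illustration only: lttng_data_pending() exposes this check to clients,
 * and lttng_stop_tracing() polls it internally before returning. A hedged
 * sketch of an explicit polling loop (the session name and interval are
 * arbitrary; usleep() needs <unistd.h>):
 *
 *	lttng_stop_tracing_no_wait("my-session");
 *	while (lttng_data_pending("my-session") == 1)
 *		usleep(200000);		// 200 ms between checks
 *	// trace data is now complete and safe to read
 */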
/*
 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_add_output(struct ltt_session *session,
		struct lttng_snapshot_output *output, uint32_t *id)
{
	int ret;
	struct snapshot_output *new_output;

	assert(session);
	assert(output);

	DBG("Cmd snapshot add output for session %s", session->name);

	/*
	 * Can't create an output if the session is not set in no-output mode.
	 */
	if (session->output_traces) {
		ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	/* Only one output is allowed until we have the "tee" feature. */
	if (session->snapshot.nb_output == 1) {
		ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
		goto error;
	}

	new_output = snapshot_output_alloc();
	if (!new_output) {
		ret = LTTNG_ERR_NOMEM;
		goto error;
	}

	ret = snapshot_output_init(output->max_size, output->name,
			output->ctrl_url, output->data_url, session->consumer, new_output,
			&session->snapshot);
	if (ret < 0) {
		if (ret == -ENOMEM) {
			ret = LTTNG_ERR_NOMEM;
		} else {
			ret = LTTNG_ERR_INVALID;
		}
		goto free_error;
	}

	snapshot_add_output(&session->snapshot, new_output);
	if (id) {
		*id = new_output->id;
	}

	return LTTNG_OK;

free_error:
	snapshot_output_destroy(new_output);
error:
	return ret;
}
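/*
 * Illustration only: outputs are normally added through the
 * lttng_snapshot_output API of liblttng-ctl. A hedged sketch; the URL,
 * output name and session name are illustrative and error handling is
 * elided:
 *
 *	struct lttng_snapshot_output *out = lttng_snapshot_output_create();
 *	lttng_snapshot_output_set_ctrl_url("file:///tmp/my-snapshots", out);
 *	lttng_snapshot_output_set_name("hourly", out);
 *	lttng_snapshot_add_output("my-snap-session", out);
 *	lttng_snapshot_output_destroy(out);
 *
 * Only one output per session is accepted here until the "tee" feature
 * mentioned above exists.
 */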
/*
 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_del_output(struct ltt_session *session,
		struct lttng_snapshot_output *output)
{
	int ret;
	struct snapshot_output *sout = NULL;

	assert(session);
	assert(output);

	rcu_read_lock();

	/*
	 * Permission denied to delete an output if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	if (output->id) {
		DBG("Cmd snapshot del output id %" PRIu32 " for session %s", output->id,
				session->name);
		sout = snapshot_find_output_by_id(output->id, &session->snapshot);
	} else if (*output->name != '\0') {
		DBG("Cmd snapshot del output name %s for session %s", output->name,
				session->name);
		sout = snapshot_find_output_by_name(output->name, &session->snapshot);
	}
	if (!sout) {
		ret = LTTNG_ERR_INVALID;
		goto error;
	}

	snapshot_delete_output(&session->snapshot, sout);
	snapshot_output_destroy(sout);
	ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return ret;
}
/*
 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
 *
 * If no output is available, outputs is untouched and 0 is returned.
 *
 * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
 */
ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
		struct lttng_snapshot_output **outputs)
{
	int ret, idx = 0;
	struct lttng_snapshot_output *list = NULL;
	struct lttng_ht_iter iter;
	struct snapshot_output *output;

	assert(session);
	assert(outputs);

	DBG("Cmd snapshot list outputs for session %s", session->name);

	/*
	 * Permission denied to list outputs if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		ret = -LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto end;
	}

	if (session->snapshot.nb_output == 0) {
		ret = 0;
		goto end;
	}

	list = zmalloc(session->snapshot.nb_output * sizeof(*list));
	if (!list) {
		ret = -LTTNG_ERR_NOMEM;
		goto end;
	}

	/* Copy list from session to the new list object. */
	rcu_read_lock();
	cds_lfht_for_each_entry(session->snapshot.output_ht->ht, &iter.iter,
			output, node.node) {
		assert(output->consumer);
		list[idx].id = output->id;
		list[idx].max_size = output->max_size;
		if (lttng_strncpy(list[idx].name, output->name,
				sizeof(list[idx].name))) {
			ret = -LTTNG_ERR_INVALID;
			goto error;
		}
		if (output->consumer->type == CONSUMER_DST_LOCAL) {
			if (lttng_strncpy(list[idx].ctrl_url,
					output->consumer->dst.session_root_path,
					sizeof(list[idx].ctrl_url))) {
				ret = -LTTNG_ERR_INVALID;
				goto error;
			}
		} else {
			/* Control URI. */
			ret = uri_to_str_url(&output->consumer->dst.net.control,
					list[idx].ctrl_url, sizeof(list[idx].ctrl_url));
			if (ret < 0) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}

			/* Data URI. */
			ret = uri_to_str_url(&output->consumer->dst.net.data,
					list[idx].data_url, sizeof(list[idx].data_url));
			if (ret < 0) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}
		}
		idx++;
	}

	*outputs = list;
	list = NULL;
	ret = session->snapshot.nb_output;
error:
	rcu_read_unlock();
	free(list);
end:
	return ret;
}
/*
 * Check if we can regenerate the metadata for this session.
 * Only kernel, UST per-uid and non-live sessions are supported.
 *
 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
 */
static int check_regenerate_metadata_support(struct ltt_session *session)
{
	int ret;

	assert(session);

	if (session->live_timer != 0) {
		ret = LTTNG_ERR_LIVE_SESSION;
		goto end;
	}
	if (!session->active) {
		ret = LTTNG_ERR_SESSION_NOT_STARTED;
		goto end;
	}
	if (session->ust_session) {
		switch (session->ust_session->buffer_type) {
		case LTTNG_BUFFER_PER_UID:
			break;
		case LTTNG_BUFFER_PER_PID:
			ret = LTTNG_ERR_PER_PID_SESSION