/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <sys/types.h>
#include <urcu/compiler.h>

#include <common/compat/errno.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-internal.h>
#include <lttng/condition/event-rule.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "health-sessiond.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
/* Registered applications, indexed by PID, command socket and notify socket. */
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
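
/*
 * The two helpers below hand out process-wide unique channel keys and
 * session IDs. Both counters are pre-incremented under their respective
 * locks, so a value of 0 is never handed out and can be used as an
 * "unset" marker by callers.
 */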
/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}
/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy event attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
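
/*
 * Events are hashed by name only; the remaining attributes (filter
 * bytecode, loglevel and exclusions) are compared by the custom match
 * function below through a struct ust_app_ht_key.
 */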
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on three attributes which are the event
 * name, the filter bytecode and the loglevel.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}
/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * it will assert. RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	}

error:
	return registry;
}
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}
/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL)
		free(ua_event->exclusion);
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}
/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static
void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
	struct ust_app_event_notifier_rule *obj = caa_container_of(
			head, struct ust_app_event_notifier_rule, rcu_head);

	free(obj);
}
/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(int sock,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
		struct ust_app *app)
{
	int ret;

	assert(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != NULL) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
					app->name, (int) app->ppid, ret);
		}

		free(ua_event_notifier_rule->obj);
	}

	lttng_event_rule_put(ua_event_notifier_rule->event_rule);
	call_rcu(&ua_event_notifier_rule->rcu_head,
			free_ust_app_event_notifier_rule_rcu);
}
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		/* Each stream holds two file descriptors in the FD accounting. */
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}
int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		/* No application provided: release without a command socket. */
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * sends.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}

	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Metadata closed. Even on error this means that the consumer
		 * is not responding or not found so either way a second close
		 * should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Remove the event notifier rules associated with this app. */
	rcu_read_lock();
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&iter.iter, event_notifier_rule, node.node) {
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
		assert(!ret);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		const int event_notifier_read_fd = lttng_pipe_get_readfd(
				app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
				notification_thread_handle,
				event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		ustctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	lttng_pipe_destroy(app->event_notifier_group.event_pipe);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}
/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}
/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default value by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error:
	return NULL;
}
/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}
/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so flag it to -1. */
	stream->handle = -1;

	return stream;

error:
	return NULL;
}
/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default value by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}
/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_event_rule *event_rule, uint64_t token)
{
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = 1;
	ua_event_notifier_rule->token = token;
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node, token);

	/* Get reference of the event rule. */
	if (!lttng_event_rule_get(event_rule)) {
		abort();
	}

	ua_event_notifier_rule->event_rule = event_rule;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from event rule while allocating an event notifier rule");
		goto error_put_event_rule;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_event_rule:
	lttng_event_rule_put(event_rule);
error:
	free(ua_event_notifier_rule);
	return NULL;
}
/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;

error:
	free(ua_ctx);
	return NULL;
}
/*
 * Allocate a filter and copy the given original filter.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_filter_bytecode *copy_filter_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc filter bytecode");
		goto error;
	}

	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

error:
	return filter;
}
/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
		const struct lttng_filter_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust filter bytecode");
		goto error;
	}

	assert(sizeof(struct lttng_filter_bytecode) ==
			sizeof(struct lttng_ust_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

error:
	return filter;
}
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}
/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}
/*
 * Lookup for an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_filter_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}
/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
		struct lttng_ht *ht, uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

	assert(ht);

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG2("UST app event notifier rule token not found: token = %" PRIu64,
				token);
		goto end;
	}

	event_notifier_rule = caa_container_of(
			node, struct ust_app_event_notifier_rule, node);
end:
	return event_notifier_rule;
}
/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}
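
/*
 * The ustctl interactions below (filter, exclusions, enable/disable)
 * share a common error-handling idiom: -EPIPE and -LTTNG_UST_ERR_EXITING
 * mean the application exited between the command and its completion,
 * which is expected during teardown and logged at debug level instead of
 * being reported as an error.
 */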
/*
 * Set the filter on the tracer.
 */
static int set_ust_object_filter(struct ust_app *app,
		const struct lttng_filter_bytecode *bytecode,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object filter failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object filter. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter successfully set for object %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}
/*
 * Create a liblttng-ust event exclusion from the given exclusion list.
 */
static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
			LTTNG_UST_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}
/*
 * Set event exclusions on the tracer.
 */
static int set_ust_object_exclusions(struct ust_app *app,
		const struct lttng_event_exclusion *exclusions,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(
			exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object exclusions. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	free(ust_exclusions);
	return ret;
}
/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_object_data *object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
					object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to disable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app (pid: %d)",
			object, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_object(
		struct ust_app *app, struct lttng_ust_object_data *ust_object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to enable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app (pid: %d)",
			ust_object, app->pid);

error:
	health_code_update();
	return ret;
}
/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d object: %p",
			ua_event->attr.name, app->pid, ua_event->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* The event is created disabled on the tracer; enable it if requested. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_object(app, ua_event->obj);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				abort();
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
/*
 * Initialize a liblttng-ust event notifier from the given event rule.
 */
static int init_ust_event_notifier_from_event_rule(
		const struct lttng_event_rule *rule,
		struct lttng_ust_event_notifier *event_notifier)
{
	enum lttng_event_rule_status status;
	enum lttng_loglevel_type loglevel_type;
	enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
	int loglevel = -1, ret = 0;
	const char *pattern;

	/* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported. */
	assert(lttng_event_rule_get_type(rule) ==
			LTTNG_EVENT_RULE_TYPE_TRACEPOINT);

	memset(event_notifier, 0, sizeof(*event_notifier));

	status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
	if (status != LTTNG_EVENT_RULE_STATUS_OK) {
		/* At this point, this is a fatal error. */
		abort();
	}

	status = lttng_event_rule_tracepoint_get_log_level_type(
			rule, &loglevel_type);
	if (status != LTTNG_EVENT_RULE_STATUS_OK) {
		/* At this point, this is a fatal error. */
		abort();
	}

	switch (loglevel_type) {
	case LTTNG_EVENT_LOGLEVEL_ALL:
		ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
		break;
	case LTTNG_EVENT_LOGLEVEL_RANGE:
		ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
		break;
	case LTTNG_EVENT_LOGLEVEL_SINGLE:
		ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
		break;
	default:
		/* Unknown log level specification type. */
		abort();
	}

	if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
		status = lttng_event_rule_tracepoint_get_log_level(
				rule, &loglevel);
		assert(status == LTTNG_EVENT_RULE_STATUS_OK);
	}

	event_notifier->event.instrumentation = LTTNG_UST_TRACEPOINT;
	ret = lttng_strncpy(event_notifier->event.name, pattern,
			LTTNG_UST_SYM_NAME_LEN - 1);
	if (ret) {
		ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
				pattern);
		goto end;
	}

	event_notifier->event.loglevel_type = ust_loglevel_type;
	event_notifier->event.loglevel = loglevel;

end:
	return ret;
}
/*
 * Create the specified event notifier against the user space tracer of a
 * given application.
 */
static int create_ust_event_notifier(struct ust_app *app,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule)
{
	int ret = 0;
	struct lttng_ust_event_notifier event_notifier;

	health_code_update();
	assert(app->event_notifier_group.object);

	ret = init_ust_event_notifier_from_event_rule(
			ua_event_notifier_rule->event_rule, &event_notifier);
	if (ret) {
		ERR("Failed to initialize UST event notifier from event rule: app = '%s' (ppid: %d)",
				app->name, app->ppid);
		goto error;
	}

	event_notifier.event.token = ua_event_notifier_rule->token;

	/* Create UST event notifier against the tracer. */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event_notifier(app->sock, &event_notifier,
			app->event_notifier_group.object,
			&ua_event_notifier_rule->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
					event_notifier.event.name, app->name,
					app->ppid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die
			 * during the creation process. Don't report an error so
			 * the execution can continue normally.
			 */
			ret = 0;
			DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
					app->name, app->ppid);
		}
		goto error;
	}

	ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;

	DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
			event_notifier.event.name, app->name, app->ppid,
			ua_event_notifier_rule->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event_notifier_rule->filter) {
		ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event. */
	if (ua_event_notifier_rule->exclusion) {
		ret = set_ust_object_exclusions(app,
				ua_event_notifier_rule->exclusion,
				ua_event_notifier_rule->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * We now need to explicitly enable the event, since it
	 * is disabled at creation.
	 */
	ret = enable_ust_object(app, ua_event_notifier_rule->obj);
	if (ret < 0) {
		/*
		 * If we hit an EPERM, something is wrong with our enable call.
		 * If we get an EEXIST, there is a problem on the tracer side
		 * since we just created it.
		 */
		switch (ret) {
		case -LTTNG_UST_ERR_PERM:
			/* Code flow problem. */
			abort();
		case -LTTNG_UST_ERR_EXIST:
			/* It's OK for our use case. */
			ret = 0;
			break;
		default:
			break;
		}
		goto error;
	}

	ua_event_notifier_rule->enabled = true;

error:
	health_code_update();
	return ret;
}
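
/*
 * The shadow_copy_* helpers below duplicate the sessiond-side model
 * objects (ltt_ust_*) into their per-application counterparts
 * (ust_app_*) so that every registered application carries its own copy
 * of the tracing configuration.
 */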
/*
 * Copy data between an UST app event and a LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
/*
 * Copy data between an UST app channel and a LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;

	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
/*
 * Copy data between a UST app session and a regular LTT session.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH,
				lttng_credentials_get_uid(&ua_sess->real_credentials),
				app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
		sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
		sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}

		strncat(ua_sess->shm_path, tmp_shm_path,
			sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}
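
/*
 * Illustrative result of the path construction above (names and values
 * hypothetical): per-PID buffers yield a path such as
 * DEFAULT_UST_TRACE_PID_PATH "/myapp-1234-20210101-120000", built from the
 * application name, PID and registration time, while per-UID buffers
 * encode only the real uid and the application's bitness.
 */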
/*
 * Lookup session wrapper.
 */
static void __lookup_session_by_app(const struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
/*
 * Return ust app session from the app session hashtable using the UST session
 * id.
 */
static struct ust_app_session *lookup_session_by_app(
		const struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		goto error;
	}

	return caa_container_of(node, struct ust_app_session, node);

error:
	return NULL;
}
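
/*
 * Usage sketch (hypothetical caller): the session lives in an RCU hash
 * table, so the lookup and any use of the result must happen under the
 * RCU read side lock.
 *
 *	rcu_read_lock();
 *	ua_sess = lookup_session_by_app(usess, app);
 *	if (ua_sess) {
 *		... use ua_sess while the read side lock is held ...
 *	}
 *	rcu_read_unlock();
 */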
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
			ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
		app->bits_per_long, app->uint8_t_alignment,
		app->uint16_t_alignment, app->uint32_t_alignment,
		app->uint64_t_alignment, app->long_alignment,
		app->byte_order, app->version.major, app->version.minor,
		reg_pid->root_shm_path, reg_pid->shm_path,
		lttng_credentials_get_uid(&ua_sess->effective_credentials),
		lttng_credentials_get_gid(&ua_sess->effective_credentials),
		ua_sess->tracing_id,
		app->uid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * initialized.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
			LTTNG_DOMAIN_UST, &reg_uid,
			ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
		app->bits_per_long, app->uint8_t_alignment,
		app->uint16_t_alignment, app->uint32_t_alignment,
		app->uint64_t_alignment, app->long_alignment,
		app->byte_order, app->version.major,
		app->version.minor, reg_uid->root_shm_path,
		reg_uid->shm_path, usess->uid, usess->gid,
		ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * initialized.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}

	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
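
/*
 * Informal note derived from the two setup helpers above: a per-PID
 * registry is keyed by the ust app session id alone, while a per-UID
 * registry is shared and keyed by (session id, bitness, uid). Two 64-bit
 * applications of the same user tracing in the same session thus resolve
 * to the same buffer_reg_uid, but always to distinct buffer_reg_pid
 * objects.
 */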
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(ua_sess_ptr);

	health_code_update();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
			app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or is not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
			ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
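
/*
 * Usage sketch (hypothetical caller, error handling elided):
 *
 *	struct ust_app_session *ua_sess;
 *	int created;
 *
 *	ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
 *	if (ret < 0) {
 *		-ENOMEM or -ENOTCONN; a dead application is expected
 *		and is reported as -ENOTCONN rather than a hard error.
 *	}
 */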
/*
 * Match function for a hash table lookup of ust_app_ctx.
 *
 * It matches an ust app context based on the context type and, in the case
 * of perf counters, their name.
 */
static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_ctx *ctx;
	const struct lttng_ust_context_attr *key;

	assert(node);
	assert(_key);

	ctx = caa_container_of(node, struct ust_app_ctx, node.node);
	key = _key;

	/* Context type */
	if (ctx->ctx.ctx != key->ctx) {
		goto no_match;
	}

	switch (key->ctx) {
	case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
		if (strncmp(key->u.perf_counter.name,
				ctx->ctx.u.perf_counter.name,
				sizeof(key->u.perf_counter.name))) {
			goto no_match;
		}
		break;
	case LTTNG_UST_CONTEXT_APP_CONTEXT:
		if (strcmp(key->u.app_ctx.provider_name,
				ctx->ctx.u.app_ctx.provider_name) ||
				strcmp(key->u.app_ctx.ctx_name,
				ctx->ctx.u.app_ctx.ctx_name)) {
			goto no_match;
		}
		break;
	default:
		break;
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
/*
 * Lookup for an ust app context from an lttng_ust_context.
 *
 * Must be called while holding RCU read side lock.
 * Return an ust_app_ctx object or NULL on error.
 */
static struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
		struct lttng_ust_context_attr *uctx)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct ust_app_ctx *app_ctx = NULL;

	assert(uctx);
	assert(ht);

	/* Lookup using the lttng_ust_context_type and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
			ht_match_ust_app_ctx, uctx, &iter.iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (!node) {
		goto end;
	}

	app_ctx = caa_container_of(node, struct ust_app_ctx, node);

end:
	return app_ctx;
}
/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 */
static int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -ENOMEM;
		goto error;
	}

	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Enable on the tracer side a ust app event for the session and channel.
 *
 * Called with UST app session lock held.
 */
static int enable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = enable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 1;

error:
	return ret;
}
/*
 * Disable on the tracer side a ust app event for the session and channel.
 */
static int disable_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_event *ua_event, struct ust_app *app)
{
	int ret;

	ret = disable_ust_object(app, ua_event->obj);
	if (ret < 0) {
		goto error;
	}

	ua_event->enabled = 0;

error:
	return ret;
}
/*
 * Lookup ust app channel for session and disable it on the tracer side.
 */
static int disable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app *app)
{
	int ret;

	ret = disable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	ua_chan->enabled = 0;

error:
	return ret;
}
/*
 * Lookup ust app channel for session and enable it on the tracer side. This
 * MUST be called with a RCU read side lock acquired.
 */
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		DBG2("Unable to find channel %s in ust session id %" PRIu64,
				uchan->name, ua_sess->tracing_id);
		goto error;
	}

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	ret = enable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * streams we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * report.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
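
/*
 * Worked example of the fd budget above (numbers illustrative): with
 * DEFAULT_UST_STREAM_FD_NUM == 2 and a channel expecting one stream per
 * CPU on a 4-core system, nb_fd = 2 * 4 = 8 descriptors are reserved for
 * the streams, on top of the single descriptor taken earlier for the
 * channel itself.
 */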
/*
 * Duplicate the ust data object of the ust app stream and save it in the
 * buffer registry stream.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
		struct ust_app_stream *stream)
{
	int ret;

	assert(reg_stream);
	assert(stream);

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate stream");
		goto error;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&stream->obj,
			reg_stream->obj.ust);
	if (ret < 0) {
		ERR("Duplicate stream obj from %p to %p failed with ret %d",
				reg_stream->obj.ust, stream->obj, ret);
		lttng_fd_put(LTTNG_FD_APPS, 2);
		goto error;
	}
	stream->handle = stream->obj->handle;

error:
	return ret;
}
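
/*
 * The "2" passed to lttng_fd_get() above presumably mirrors
 * DEFAULT_UST_STREAM_FD_NUM: duplicating a stream's ust object is assumed
 * to cost the same two descriptors as receiving a fresh stream, keeping
 * the global LTTNG_FD_APPS accounting consistent either way.
 */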
/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
/*
 * Setup buffer registry channel for the given session registry and application
 * channel object. If regp pointer is valid, it's set with the created object.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
		struct ust_app *app)
{
	int ret;

	assert(reg_sess);
	assert(reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
	if (ret < 0) {
		goto error;
	}

	reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, reg_chan);
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
/*
 * Send buffer registry channel to the application.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *chan_reg;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (reg_chan) {
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, reg_chan);
		buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	assert(chan_reg);
	chan_reg->consumer_key = ua_chan->key;
	chan_reg = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			notification_thread_handle, session->name,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_chan->name,
			ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	if (session) {
		session_put(session);
	}
	return ret;
}
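
/*
 * Informal summary of the per-UID flow above: only the first application
 * with a given (uid, bitness) pair triggers consumer-side creation and
 * registry setup; later applications take the send_channel shortcut and
 * receive duplicated objects of the already shared buffers.
 */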
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session = NULL;
	uint64_t chan_reg_key;
	struct ust_registry_channel *chan_reg;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);
		goto error_remove_from_registry;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error_remove_from_registry;
	}

	chan_reg_key = ua_chan->key;
	pthread_mutex_lock(&registry->lock);
	chan_reg = ust_registry_channel_find(registry, chan_reg_key);
	assert(chan_reg);
	chan_reg->consumer_key = ua_chan->key;
	pthread_mutex_unlock(&registry->lock);

	cmd_ret = notification_thread_command_add_channel(
			notification_thread_handle, session->name,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_chan->name,
			ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = - (int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error_remove_from_registry;
	}

error_remove_from_registry:
	if (ret) {
		ust_registry_channel_del_free(registry, ua_chan->key, false);
	}
error:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
	return ret;
}
/*
 * From an already allocated ust app channel, create the channel buffers if
 * needed and send them to the application. This MUST be called with a RCU read
 * side lock acquired.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int ust_app_channel_send(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(app);
	assert(usess);
	assert(usess->active);
	assert(ua_sess);
	assert(ua_chan);

	/* Handle buffer type before sending the channel to the application. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	/* Initialize ust objd object using the received handle and add it. */
	lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
	lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);

	/* If channel is not enabled, disable it on the tracer */
	if (!ua_chan->enabled) {
		ret = disable_ust_channel(app, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Create UST app channel and return it through ua_chanp if not NULL.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	return ret;
}
/*
 * Create UST app event and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only failure mode of alloc_ust_app_event(). */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event being created already existed: "
					"event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
					uevent->attr.name,
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event completed: app = '%s' (ppid: %d)",
			app->name, app->ppid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	goto end;
}
/*
 * Create UST app event notifier rule and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 */
static int create_ust_app_event_notifier_rule(struct lttng_event_rule *rule,
		struct ust_app *app, uint64_t token)
{
	int ret = 0;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(rule, token);
	if (ua_event_notifier_rule == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	/* Create it on the tracer side. */
	ret = create_ust_event_notifier(app, ua_event_notifier_rule);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event notifier being created already exists: "
					"token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
					token,
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
			&ua_event_notifier_rule->node);

	DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
			app->name, app->ppid, token);

end:
	return ret;

error:
	/* The RCU read side lock is already being held by the caller. */
	delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
	goto end;
}
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;
	struct ltt_session *session = NULL;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session is held, registry shall not be null. */
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata while the ask_channel call on our side
	 * has not returned yet.
	 */
	registry->metadata_key = metadata->key;

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry, session->current_trace_chunk);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadata. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	if (session) {
		session_put(session);
	}
	return ret;
}
/*
 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
 * acquired before calling this function.
 */
struct ust_app *ust_app_find_by_pid(pid_t pid)
{
	struct ust_app *app = NULL;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app not found with pid %d", pid);
		goto error;
	}

	DBG2("Found UST app by pid %d", pid);

	app = caa_container_of(node, struct ust_app, pid_n);

error:
	return app;
}
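
/*
 * Usage sketch (hypothetical caller): the returned object is only
 * guaranteed to stay valid while the RCU read side lock is held.
 *
 *	rcu_read_lock();
 *	app = ust_app_find_by_pid(pid);
 *	if (app) {
 *		... use app under the read side lock ...
 *	}
 *	rcu_read_unlock();
 */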
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;
	struct lttng_pipe *event_notifier_event_source_pipe = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	if ((msg->bits_per_long == 64 &&
			(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
			(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!event_notifier_event_source_pipe) {
		PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
				msg->name, msg->ppid);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error_free_pipe;
	}

	lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;

	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;
	lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	pthread_mutex_init(&lta->sock_lock, NULL);
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);
	return lta;

error_free_pipe:
	lttng_pipe_destroy(event_notifier_event_source_pipe);
error:
	return NULL;
}
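
/*
 * Registration flow sketch (hypothetical ordering; the actual call sites
 * live in the registration threads): the object is created first, then
 * published to the hash tables once its notify socket is known.
 *
 *	app = ust_app_create(&msg, sock);
 *	if (app) {
 *		ret = ust_app_version(app);
 *		...
 *		ust_app_add(app);
 *	}
 */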
/*
 * For a given application object, add it to every hash table.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	app->registration_time = time(NULL);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid.
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, an add_unique
	 * for the ust_app_ht_by_sock is used which assert-fails if the entry is
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
/*
 * Set the application version into the object.
 *
 * Return 0 on success else a negative value either an errno code or a
 * LTTng-UST error code.
 */
int ust_app_version(struct ust_app *app)
{
	int ret;

	assert(app);

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_tracer_version(app->sock, &app->version);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("UST app %d version failed with ret %d", app->sock, ret);
		} else {
			DBG3("UST app %d version failed. Application is dead", app->sock);
		}
	}

	return ret;
}
/*
 * Setup the base event notifier group.
 *
 * Return 0 on success else a negative value either an errno code or a
 * LTTng-UST error code.
 */
int ust_app_setup_event_notifier_group(struct ust_app *app)
{
	int ret;
	int event_pipe_write_fd;
	struct lttng_ust_object_data *event_notifier_group = NULL;
	enum lttng_error_code lttng_ret;

	assert(app);

	/* Get the write side of the pipe. */
	event_pipe_write_fd = lttng_pipe_get_writefd(
			app->event_notifier_group.event_pipe);

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event_notifier_group(app->sock,
			event_pipe_write_fd, &event_notifier_group);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
					ret, app->sock, event_pipe_write_fd);
		} else {
			DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
					app->sock);
		}
		goto error;
	}

	ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
	if (ret) {
		ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
				app->name, app->ppid);
		goto error;
	}

	lttng_ret = notification_thread_command_add_tracer_event_source(
			notification_thread_handle,
			lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
			LTTNG_DOMAIN_UST);
	if (lttng_ret != LTTNG_OK) {
		ERR("Failed to add tracer event source to notification thread");
		ret = - (int) lttng_ret;
		goto error;
	}

	/* Assign handle only when the complete setup is valid. */
	app->event_notifier_group.object = event_notifier_group;
	return ret;

error:
	ustctl_release_object(app->sock, app->event_notifier_group.object);
	free(app->event_notifier_group.object);
	return ret;
}
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point, so there is no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flagged the metadata registry
			 * to close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the
	 * apps_notify_thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
/*
 * Fill events array with all events name of all registered apps.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}

		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
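
/*
 * Usage sketch (hypothetical caller): on success the return value is the
 * number of valid entries and ownership of the array is transferred.
 *
 *	struct lttng_event *events;
 *	int nb_events = ust_app_list_events(&events);
 *
 *	if (nb_events >= 0) {
 *		...
 *		free(events);
 *	}
 */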
/*
 * Fill fields array with all event fields of all registered apps.
 */
int ust_app_list_event_fields(struct lttng_event_field **fields)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event_field *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app event fields");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_field_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_field_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list field getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list field failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list field failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Reset list and count for next app.
					 */
					break;
				}
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				pthread_mutex_unlock(&app->sock_lock);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event_field *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event field list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event_field));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app event fields");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					pthread_mutex_unlock(&app->sock_lock);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event_field));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}

			memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
			/* Mapping between these enums matches 1 to 1. */
			tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
			tmp_event[count].nowrite = uiter.nowrite;

			memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].event.loglevel = uiter.loglevel;
			tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
			tmp_event[count].event.pid = app->pid;
			tmp_event[count].event.enabled = -1;
			count++;
		}

		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 &&
				ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*fields = tmp_event;

	DBG2("UST app list event fields done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
/*
 * Free and clean all traceable apps of the global list.
 *
 * Should _NOT_ be called with RCU read-side lock held.
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	rcu_read_lock();

	/* Cleanup notify socket hash table */
	if (ust_app_ht_by_notify_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
				notify_sock_n.node) {
			/*
			 * Assert that all notifiers are gone as all triggers
			 * are unregistered prior to this clean-up.
			 */
			assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);

			ust_app_notify_sock_unregister(app->notify_sock);
		}
	}

	if (ust_app_ht) {
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = lttng_ht_del(ust_app_ht, &iter);
			assert(!ret);
			call_rcu(&app->pid_n.head, delete_ust_app_rcu);
		}
	}

	/* Cleanup socket hash table */
	if (ust_app_ht_by_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
				sock_n.node) {
			ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
			assert(!ret);
		}
	}

	rcu_read_unlock();

	/* Destroy is done only when the ht is empty */
	if (ust_app_ht) {
		ht_cleanup_push(ust_app_ht);
	}
	if (ust_app_ht_by_sock) {
		ht_cleanup_push(ust_app_ht_by_sock);
	}
	if (ust_app_ht_by_notify_sock) {
		ht_cleanup_push(ust_app_ht_by_notify_sock);
	}
}
/*
 * Init UST app hash table.
 */
int ust_app_ht_alloc(void)
{
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht) {
		return -1;
	}
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_sock) {
		return -1;
	}
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!ust_app_ht_by_notify_sock) {
		return -1;
	}
	return 0;
}
/*
 * For a specific UST session, disable the channel for all registered apps.
 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	assert(usess->active);
	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered application */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		/* The channel must not be already disabled */
		assert(ua_chan->enabled == 1);

		/* Disable channel onto application */
		ret = disable_ust_app_channel(ua_sess, ua_chan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * For a specific UST session, enable the channel for all registered apps.
 */
int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess;

	assert(usess->active);
	DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered application */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Enable channel onto application */
		ret = enable_ust_app_channel(ua_sess, uchan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Disable an event in a channel and for a specific session.
 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	assert(usess->active);
	DBG("UST app disabling event %s for all apps in channel "
			"%s for session id %" PRIu64,
			uevent->attr.name, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Next app */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d. "
					"Skipping", uchan->name, usess->id, app->pid);
			continue;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel,
				uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("Event %s not found in channel %s for app pid %d. "
					"Skipping", uevent->attr.name, uchan->name, app->pid);
			continue;
		}

		ret = disable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* XXX: Report error someday... */
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/* The ua_sess lock must be held by the caller. */
static int ust_app_channel_create(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		struct ust_app_channel **_ua_chan)
{
	int ret = 0;
	struct ust_app_channel *ua_chan = NULL;

	assert(ua_sess);
	ASSERT_LOCKED(ua_sess->lock);

	if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
		copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
				&uchan->attr);
		ret = 0;
	} else {
		struct ltt_ust_context *uctx = NULL;

		/*
		 * Create channel onto application and synchronize its
		 * configuration.
		 */
		ret = ust_app_channel_allocate(ua_sess, uchan,
				LTTNG_UST_CHAN_PER_CPU, usess,
				&ua_chan);
		if (ret < 0) {
			goto error;
		}

		ret = ust_app_channel_send(app, usess,
				ua_sess, ua_chan);
		if (ret) {
			goto error;
		}

		/* Add contexts. */
		cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
			ret = create_ust_app_channel_context(ua_chan,
					&uctx->ctx, app);
			if (ret) {
				goto error;
			}
		}
	}

error:
	if (ret < 0) {
		switch (ret) {
		case -ENOTCONN:
			/*
			 * The application's socket is not valid. Either a bad socket
			 * or a timeout on it. We can't inform the caller that for a
			 * specific app, the session failed so let's continue here.
			 */
			ret = 0;	/* Not an error. */
			break;
		case -ENOMEM:
		default:
			break;
		}
	}

	if (ret == 0 && _ua_chan) {
		/*
		 * Only return the application's channel on success. Note
		 * that the channel can still be part of the application's
		 * channel hashtable on error.
		 */
		*_ua_chan = ua_chan;
	}
	return ret;
}
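
/*
 * Design note: the metadata channel is special-cased above because it is
 * not created through the regular channel path; only its attributes are
 * captured (into ua_sess->metadata_attr) for later use by
 * create_ust_app_metadata(). A dead application's socket (-ENOTCONN) is
 * deliberately not treated as an error so that a single exiting app does
 * not fail the whole session.
 */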
/*
 * Enable event for a specific session and channel on the tracer.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	assert(usess->active);
	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps and enabled on the
	 * tracer.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling it that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found if
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d. "
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	assert(usess->active);
	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling it that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			DBG2("UST app event %s already exists on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Start tracing for a specific UST session and app.
 *
 * Called with UST app session lock held.
 */
static int ust_app_start_trace(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	if (ua_sess->enabled) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	health_code_update();

skip_setup:
	/* This starts the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;
	ua_sess->enabled = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
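
/*
 * A minimal sketch of the intended start/stop pairing, for illustration
 * only (the caller and its error handling are assumed, not taken from
 * this file):
 *
 *	if (ust_app_start_trace(usess, app) < 0) {
 *		// tolerated: the app may have died mid-registration
 *	}
 *	...
 *	(void) ust_app_stop_trace(usess, app);
 *
 * The quiescent wait after ustctl_start_session() ensures the tracer has
 * reached a stable state before the caller observes the session as
 * started; -EPIPE and -LTTNG_UST_ERR_EXITING are expected when the
 * application dies concurrently and are not reported as errors.
 */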
/*
 * Stop tracing for a specific UST session and app.
 */
static int ust_app_stop_trace(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();
	ua_sess->enabled = 0;

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);

	/* The UST app session lock is held; the registry shall not be null. */
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
static
int ust_app_flush_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret, retval = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Flushing app session buffers for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_deleted;
	}

	health_code_update();

	/* Flushing buffers */
	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);

	/* Flush buffers and push metadata. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
				node.node) {
			health_code_update();
			ret = consumer_flush_channel(socket, ua_chan->key);
			if (ret) {
				ERR("Error flushing consumer channel");
				retval = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		assert(0);
		break;
	}

	health_code_update();

end_deleted:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return retval;
}
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)
{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
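
/*
 * Note on the two flush paths: with per-UID buffers the channels live in
 * the buffer registry and are flushed once per registry, while with
 * per-PID buffers each application owns its channels, so the flush is
 * delegated to ust_app_flush_app_session() per app. In both cases the
 * consumer does the actual work and errors are only logged, since every
 * channel must be attempted regardless of individual failures.
 */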
static
int ust_app_clear_quiescent_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket",
				app->bits_per_long);
		ret = -1;
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
				ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket,
					ua_chan->key);
			if (ret) {
				ERR("Error clearing quiescent state for consumer channel");
				ret = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		assert(0);
		ret = -1;
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
/*
 * Clear quiescent state in each stream for all applications for a
 * specific UST session.
 * Called with UST session lock held.
 */
static
int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
{
	int ret = 0;

	DBG("Clearing stream quiescent state for all ust apps");

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *reg_chan;

			/* Get associated consumer socket.*/
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
						reg_chan->consumer_key);
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_clear_quiescent_app_session(app,
					ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
/*
 * Destroy a specific UST session in apps.
 */
static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret;
	struct ust_app_session *ua_sess;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG("Destroy tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		/* Session is being or is deleted. */
		goto end;
	}
	ua_sess = caa_container_of(node, struct ust_app_session, node);

	health_code_update();
	destroy_app_session(app, ua_sess);

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}
end:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
/*
 * Start tracing for the UST session.
 */
int ust_app_start_trace_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Starting all UST traces");

	/*
	 * Even though the start trace might fail, flag this session active so
	 * other applications coming in are started by default.
	 */
	usess->active = 1;

	rcu_read_lock();

	/*
	 * In a start-stop-start use-case, we need to clear the quiescent state
	 * of each channel set by the prior stop command, thus ensuring that a
	 * following stop or destroy is sure to grab a timestamp_end near those
	 * operations, even if the packet is empty.
	 */
	(void) ust_app_clear_quiescent_session(usess);

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update(usess, app);
	}

	rcu_read_unlock();

	return 0;
}
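
/*
 * Why the quiescent state is cleared here: in a start-stop-start
 * sequence, the stop marked every stream quiescent. A sketch of the
 * sequence this protects (for illustration only):
 *
 *	ust_app_start_trace_all(usess);	// clears quiescent state first
 *	ust_app_stop_trace_all(usess);	// timestamp_end is meaningful
 *	ust_app_start_trace_all(usess);	// without the clear, an empty
 *					// packet could carry a stale
 *					// end timestamp
 */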
/*
 * Stop tracing for the UST session.
 * Called with UST session lock held.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	/*
	 * Even though the stop trace might fail, flag this session inactive so
	 * other applications coming in are not started by default.
	 */
	usess->active = 0;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	(void) ust_app_flush_session(usess);

	rcu_read_unlock();

	return 0;
}
/*
 * Destroy app UST session.
 */
int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Destroy all UST traces");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = destroy_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	rcu_read_unlock();

	return 0;
}
/* The ua_sess lock must be held by the caller. */
static
int find_or_create_ust_app_channel(
		struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app,
		struct ltt_ust_channel *uchan,
		struct ust_app_channel **ua_chan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;

	lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node) {
		*ua_chan = caa_container_of(ua_chan_node,
				struct ust_app_channel, node);
		goto end;
	}

	ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
	if (ret) {
		goto end;
	}
end:
	return ret;
}
static
int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
		struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event = NULL;

	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (!ua_event) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end;
		}
	} else {
		if (ua_event->enabled != uevent->enabled) {
			ret = uevent->enabled ?
					enable_ust_app_event(ua_sess, ua_event, app) :
					disable_ust_app_event(ua_sess, ua_event, app);
		}
	}

end:
	return ret;
}
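
/*
 * This helper performs a three-way synchronization: a missing event is
 * created, and an existing one is enabled or disabled so that the
 * application's state converges on the session's desired state
 * (uevent->enabled). It is the per-event building block used by
 * ust_app_synchronize() below.
 */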
/* Called with RCU read-side lock held. */
static
void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
{
	int ret = 0;
	enum lttng_error_code ret_code;
	enum lttng_trigger_status t_status;
	struct lttng_ht_iter app_trigger_iter;
	struct lttng_triggers *triggers = NULL;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	unsigned int count, i;

	/*
	 * Currently, registering or unregistering a trigger with an
	 * event rule condition causes a full synchronization of the event
	 * notifiers.
	 *
	 * The first step attempts to add an event notifier for all registered
	 * triggers that apply to the user space tracers. Then, the
	 * application's event notifiers rules are all checked against the list
	 * of registered triggers. Any event notifier that doesn't have a
	 * matching trigger can be assumed to have been disabled.
	 *
	 * All of this is inefficient, but is put in place to get the feature
	 * rolling as it is simpler at this moment. It will be optimized Soon™
	 * to allow the state of enabled event notifiers to be synchronized in
	 * a piece-wise way.
	 */

	/* Get all triggers using uid 0 (root) */
	ret_code = notification_thread_command_list_triggers(
			notification_thread_handle, 0, &triggers);
	if (ret_code != LTTNG_OK) {
		ret = -1;
		goto end;
	}

	assert(triggers);

	t_status = lttng_triggers_get_count(triggers, &count);
	if (t_status != LTTNG_TRIGGER_STATUS_OK) {
		ret = -1;
		goto end;
	}

	for (i = 0; i < count; i++) {
		struct lttng_condition *condition;
		struct lttng_event_rule *event_rule;
		struct lttng_trigger *trigger;
		const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
		enum lttng_condition_status condition_status;
		uint64_t token;

		trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
		assert(trigger);

		token = lttng_trigger_get_tracer_token(trigger);
		condition = lttng_trigger_get_condition(trigger);

		if (lttng_condition_get_type(condition) != LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
			/* Does not apply */
			continue;
		}

		condition_status = lttng_condition_event_rule_borrow_rule_mutable(condition, &event_rule);
		assert(condition_status == LTTNG_CONDITION_STATUS_OK);

		if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
			/* Skip kernel related triggers. */
			continue;
		}

		/*
		 * Find or create the associated token event rule. The caller
		 * holds the RCU read lock, so this is safe to call without
		 * explicitly acquiring it here.
		 */
		looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
				app->token_to_event_notifier_rule_ht, token);
		if (!looked_up_event_notifier_rule) {
			ret = create_ust_app_event_notifier_rule(event_rule, app, token);
			if (ret < 0) {
				goto end;
			}
		}
	}

	rcu_read_lock();
	/* Remove all unknown event sources from the app. */
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&app_trigger_iter.iter, event_notifier_rule,
			node.node) {
		const uint64_t app_token = event_notifier_rule->token;
		bool found = false;

		/*
		 * Check if the app event trigger still exists on the
		 * notification side.
		 */
		for (i = 0; i < count; i++) {
			uint64_t notification_thread_token;
			const struct lttng_trigger *trigger =
					lttng_triggers_get_at_index(
							triggers, i);

			assert(trigger);

			notification_thread_token =
					lttng_trigger_get_tracer_token(trigger);

			if (notification_thread_token == app_token) {
				found = true;
				break;
			}
		}

		if (found) {
			/* Still valid. */
			continue;
		}

		/*
		 * This trigger was unregistered, disable it on the tracer's
		 * side.
		 */
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
				&app_trigger_iter);
		assert(ret == 0);

		/* Callee logs errors. */
		(void) disable_ust_object(app, event_notifier_rule->obj);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

end:
	lttng_triggers_destroy(triggers);
	return;
}
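
/*
 * Complexity note: the synchronization above is O(n * m) in the number of
 * registered triggers (n) and of event notifier rules already known to
 * the app (m), since the unregistration pass rescans the trigger list for
 * every rule. This matches the "full synchronization" trade-off described
 * at the top of the function; a piece-wise protocol would remove the
 * rescan.
 */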
/*
 * The caller must ensure that the application is compatible and is tracked
 * by the process attribute trackers.
 */
static
void ust_app_synchronize(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct cds_lfht_iter uchan_iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_session *ua_sess = NULL;

	/*
	 * The application's configuration should only be synchronized for
	 * active sessions.
	 */
	assert(usess->active);

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);
	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
			uchan, node.node) {
		struct ust_app_channel *ua_chan;
		struct cds_lfht_iter uevent_iter;
		struct ltt_ust_event *uevent;

		/*
		 * Search for a matching ust_app_channel. If none is found,
		 * create it. Creating the channel will cause the ua_chan
		 * structure to be allocated, the channel buffers to be
		 * allocated (if necessary) and sent to the application, and
		 * all enabled contexts will be added to the channel.
		 */
		ret = find_or_create_ust_app_channel(usess, ua_sess,
				app, uchan, &ua_chan);
		if (ret) {
			/* Tracer is probably gone or ENOMEM. */
			goto error_unlock;
		}

		if (!ua_chan) {
			/* ua_chan will be NULL for the metadata channel */
			continue;
		}

		cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
				node.node) {
			ret = ust_app_channel_synchronize_event(ua_chan,
					uevent, ua_sess, app);
			if (ret) {
				goto error_unlock;
			}
		}

		if (ua_chan->enabled != uchan->enabled) {
			ret = uchan->enabled ?
					enable_ust_app_channel(ua_sess, uchan, app) :
					disable_ust_app_channel(ua_sess, ua_chan, app);
			if (ret) {
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 *
	 * The metadata channel must be created after the data channels as the
	 * consumer daemon assumes this ordering. When interacting with a relay
	 * daemon, the consumer will use this assumption to send the
	 * "STREAMS_SENT" message to the relay daemon.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	rcu_read_unlock();

end:
	pthread_mutex_unlock(&ua_sess->lock);
	/* Everything went well at this point. */
	return;

error_unlock:
	rcu_read_unlock();
	pthread_mutex_unlock(&ua_sess->lock);

error:
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	return;
}
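
/*
 * Ordering matters here: data channels (and their events and contexts)
 * are created before the metadata channel because the consumer daemon
 * relies on this ordering, notably to emit "STREAMS_SENT" towards a relay
 * daemon. Any failure tears down the whole per-app session rather than
 * leaving it half-synchronized.
 */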
static
void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		return;
	}
	destroy_app_session(app, ua_sess);
}
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
{
	assert(usess);
	assert(usess->active);

	DBG2("UST app global update for app sock %d for session id %" PRIu64,
			app->sock, usess->id);

	if (!app->compatible) {
		return;
	}
	if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
			    usess, app->pid) &&
			trace_ust_id_tracker_lookup(
					LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
					usess, app->uid) &&
			trace_ust_id_tracker_lookup(
					LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
					usess, app->gid)) {
		/*
		 * Synchronize the application's internal tracing configuration
		 * and start tracing.
		 */
		ust_app_synchronize(usess, app);
		ust_app_start_trace(usess, app);
	} else {
		ust_app_global_destroy(usess, app);
	}
}
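
/*
 * In short, the process attribute trackers act as a gate: an application
 * that matches the tracked vpid/vuid/vgid sets is (re)synchronized and
 * started, while one that does not is torn down. A sketch of the effect
 * (illustrative only):
 *
 *	track vpid 1234   -> ust_app_global_update(): synchronize + start
 *	untrack vpid 1234 -> ust_app_global_update(): destroy app session
 */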
/*
 * Add all event notifiers to an application.
 *
 * Called with session lock held.
 * Called with RCU read-side lock held.
 */
void ust_app_global_update_event_notifier_rules(struct ust_app *app)
{
	DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
			app->name, app->ppid);

	if (!app->compatible) {
		return;
	}

	if (app->event_notifier_group.object == NULL) {
		WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
				app->name, app->ppid);
		return;
	}

	ust_app_synchronize_event_notifier_rules(app);
}
/*
 * Called with session lock held.
 */
void ust_app_global_update_all(struct ltt_ust_session *usess)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update(usess, app);
	}
	rcu_read_unlock();
}
void ust_app_global_update_all_event_notifier_rules(void)
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ust_app_global_update_event_notifier_rules(app);
	}
	rcu_read_unlock();
}
/*
 * Add context to a specific channel for global UST domain.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	assert(usess->active);

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notify the caller of this error by
			 * telling it that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Receive registration and populate the given msg structure.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	assert(msg);

	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order,
			msg->name);
	if (ret < 0) {
		switch (-ret) {
		case LTTNG_UST_ERR_EXITING:
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}
/*
 * Return a ust app session object using the application object and the
 * session object descriptor as a key. If not found, NULL is returned.
 * A RCU read side lock MUST be acquired when calling this function.
 */
static struct ust_app_session *find_session_by_objd(struct ust_app *app,
		int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess = NULL;

	assert(app);

	lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app session find by objd %d not found", objd);
		goto error;
	}

	ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);

error:
	return ua_sess;
}
/*
 * Return a ust app channel object using the application object and the channel
 * object descriptor as a key. If not found, NULL is returned. A RCU read side
 * lock MUST be acquired before calling this function.
 */
static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
		int objd)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan = NULL;

	assert(app);

	lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app channel find by objd %d not found", objd);
		goto error;
	}

	ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);

error:
	return ua_chan;
}
/*
 * Reply to a register channel notification from an application on the notify
 * socket. The channel metadata is also created.
 *
 * The session UST registry lock is acquired in this function.
 *
 * On success 0 is returned else a negative value.
 */
static int reply_ust_register_channel(int sock, int cobjd,
		size_t nr_fields, struct ustctl_field *fields)
{
	int ret, ret_code = 0;
	uint32_t chan_id;
	uint64_t chan_reg_key;
	enum ustctl_channel_header type;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	struct ust_registry_channel *chan_reg;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	/* Get right session registry depending on the session buffer type. */
	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	chan_reg = ust_registry_channel_find(registry, chan_reg_key);
	assert(chan_reg);

	if (!chan_reg->register_done) {
		/*
		 * TODO: eventually use the registry event count for
		 * this channel to better guess header type for per-pid
		 * buffers.
		 */
		type = USTCTL_CHANNEL_HEADER_LARGE;
		chan_reg->nr_ctx_fields = nr_fields;
		chan_reg->ctx_fields = fields;
		fields = NULL;
		chan_reg->header_type = type;
	} else {
		/* Get current already assigned values. */
		type = chan_reg->header_type;
	}
	/* Channel id is set during the object creation. */
	chan_id = chan_reg->chan_id;

	/* Append to metadata */
	if (!chan_reg->metadata_dumped) {
		/* Metadata will be appended to the metadata cache. */
		ret_code = ust_metadata_channel_statedump(registry, chan_reg);
		if (ret_code) {
			ERR("Error appending channel metadata (errno = %d)", ret_code);
			goto reply;
		}
	}

reply:
	DBG3("UST app replying to register channel key %" PRIu64
			" with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
			ret_code);

	ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply channel failed with ret %d", ret);
		} else {
			DBG3("UST app reply channel failed. Application died");
		}
		goto error;
	}

	/* This channel registry's registration is completed. */
	chan_reg->register_done = 1;

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	free(fields);
	return ret;
}
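
/*
 * Header type note: the first registration of a channel fixes its event
 * header layout (currently always USTCTL_CHANNEL_HEADER_LARGE, see the
 * TODO above); later registrations against the same per-UID channel must
 * observe the already-assigned type so that all applications sharing the
 * buffers agree on the wire format.
 */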
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri, meaning any free is done inside it if needed. These
	 * three variables MUST NOT be read/written after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);
	sig = NULL;
	fields = NULL;
	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the created event since the application socket will
		 * get closed on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	free(sig);
	free(fields);
	free(model_emf_uri);
	return ret;
}
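
/*
 * Ownership sketch for this notify path (names as used above): once
 * ust_registry_create_event() is called, sig, fields and model_emf_uri
 * belong to the registry, which is why they are NULL-ed immediately and
 * only the (now NULL) local pointers are freed on exit. The reply
 * deliberately carries ret_code instead of a negative errno so that a
 * recoverable registry error does not cause the application to be torn
 * down.
 */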
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Report an error to the caller, although the app simply died. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Report an error to the caller, although the app simply died. */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		free(entries);
		ret = -1;
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		free(entries);
		ret = -1;
		goto error_rcu_unlock;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * this point.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply enum failed with ret %d", ret);
		} else {
			DBG3("UST app reply enum failed. Application died");
		}
		/*
		 * No need to wipe the created enum since the application socket will
		 * get closed on error hence cleaning up everything by itself.
		 */
		goto end;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

end:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
/*
 * Handle application notification through the given notify socket.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers it
		 * to the called function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The ownership of the fields is transferred to this function call,
		 * meaning that if needed it will be freed. After this, it's invalid
		 * to access fields or clean them up.
		 */
		ret = reply_ust_register_channel(sock, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_SYM_NAME_LEN];
		size_t nr_entries;
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv enum failed with ret %d", ret);
			} else {
				DBG3("UST app recv enum failed. Application died");
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here, either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
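
/*
 * Grace period sketch: close_notify_sock_rcu() runs only after all
 * pre-existing RCU read-side critical sections have completed, so no
 * reader can still be using the file descriptor when it is closed:
 *
 *	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
 *	call_rcu(&obj->head, close_notify_sock_rcu);
 *	// fd is closed after the grace period elapses
 */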
/*
 * Destroy a ust app data structure and free its memory.
 */
void ust_app_destroy(struct ust_app *app)
{
	if (!app) {
		return;
	}

	call_rcu(&app->pid_n.head, delete_ust_app_rcu);
}
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
enum lttng_error_code ust_app_snapshot_record(
		const struct ltt_ust_session *usess,
		const struct consumer_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char *trace_path = NULL;

	assert(usess);
	assert(output);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						reg_chan->consumer_key,
						output, 0, usess->uid,
						usess->gid, &trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				if (status != LTTNG_OK) {
					goto error;
				}
			}
			status = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, &trace_path[consumer_path_offset],
					wait, 0);
			if (status != LTTNG_OK) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				status = LTTNG_ERR_INVALID;
				PERROR("snprintf snapshot path");
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						ua_chan->key, output, 0,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						&trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				switch (status) {
				case LTTNG_OK:
					break;
				case LTTNG_ERR_CHAN_NOT_FOUND:
					continue;
				default:
					goto error;
				}
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}
			status = consumer_snapshot_channel(socket,
					registry->metadata_key, output, 1,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					&trace_path[consumer_path_offset], wait, 0);
			switch (status) {
			case LTTNG_OK:
				break;
			case LTTNG_ERR_CHAN_NOT_FOUND:
				continue;
			default:
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	free(trace_path);
	rcu_read_unlock();
	return status;
}
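
/*
 * Snapshot path layout, for reference (produced by the snprintf calls
 * above; the concrete directory names are illustrative):
 *
 *	per-UID: <DEFAULT_UST_TRACE_DIR>/<DEFAULT_UST_TRACE_UID_PATH with
 *	         the registry uid and bitness substituted>
 *	per-PID: <DEFAULT_UST_TRACE_DIR>/<ua_sess->path>
 *
 * LTTNG_ERR_CHAN_NOT_FOUND is tolerated in the per-PID case because an
 * application can exit between the channel walk and the consumer request.
 */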
/*
 * Return the size taken by one more packet per stream.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(
		const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	assert(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;

			rcu_read_lock();
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				if (cur_nr_packets >= reg_chan->num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grabbed all its packets.
					 */
					continue;
				}
				tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
			}
			rcu_read_unlock();
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		rcu_read_lock();
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grabbed all its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		rcu_read_unlock();
		break;
	}
	default:
		assert(0);
		break;
	}

	return tot_size;
}
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	int ret;
	uint64_t consumer_chan_key;

	*discarded = 0;
	*lost = 0;

	ret = buffer_reg_uid_consumer_channel_key(
			buffer_reg_uid_list, uchan_id, &consumer_chan_key);
	if (ret < 0) {
		/* Not found */
		ret = 0;
		goto end;
	}

	if (overwrite) {
		ret = consumer_get_lost_packets(ust_session_id,
				consumer_chan_key, consumer, lost);
	} else {
		ret = consumer_get_discarded_events(ust_session_id,
				consumer_chan_key, consumer, discarded);
	}

end:
	return ret;
}
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan,
		struct consumer_output *consumer, int overwrite,
		uint64_t *discarded, uint64_t *lost)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	*discarded = 0;
	*lost = 0;

	rcu_read_lock();
	/*
	 * Iterate over every registered application. Sum counters for
	 * all applications containing the requested session and channel.
	 */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		if (overwrite) {
			uint64_t _lost;

			ret = consumer_get_lost_packets(usess->id, ua_chan->key,
					consumer, &_lost);
			if (ret < 0) {
				break;
			}
			(*lost) += _lost;
		} else {
			uint64_t _discarded;

			ret = consumer_get_discarded_events(usess->id,
					ua_chan->key, consumer, &_discarded);
			if (ret < 0) {
				break;
			}
			(*discarded) += _discarded;
		}
	}

	rcu_read_unlock();
	return ret;
}
static
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
/*
 * Regenerate the statedump for each app in the session.
 */
int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Regenerating the metadata for all UST apps");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			continue;
		}

		ret = ust_app_regenerate_statedump(usess, app);
		if (ret < 0) {
			/* Continue to the next app even on error */
			continue;
		}
	}

	rcu_read_unlock();

	return 0;
}
/*
 * Rotate all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						reg_chan->consumer_key,
						usess->uid, usess->gid,
						usess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			ret = consumer_rotate_channel(socket,
					reg->registry->reg.ust->metadata_key,
					usess->uid, usess->gid,
					usess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						ua_chan->key,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						ua_sess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
						continue;
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/* Rotate the metadata channel. */
			(void) push_metadata(registry, usess->consumer);
			ret = consumer_rotate_channel(socket,
					registry->metadata_key,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					ua_sess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
					continue;
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return cmd_ret;
}
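
/*
 * Rotation ordering note: data channels are rotated before the metadata
 * channel, and the metadata cache is pushed (push_metadata) right before
 * rotating it, so the rotated chunk contains metadata describing every
 * event that made it into the rotated data streams. The
 * -LTTNG_ERR_CHAN_NOT_FOUND tolerance mirrors the snapshot path: per-PID
 * buffers can vanish with their application mid-rotation.
 */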
enum lttng_error_code ust_app_create_channel_subdirectories(
		const struct ltt_ust_session *usess)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	enum lttng_trace_chunk_status chunk_status;
	char *pathname_index;
	int fmt_ret;

	assert(usess->current_trace_chunk);
	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
					reg->uid, reg->bits_per_long);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/*
		 * Create the toplevel ust/ directory in case no apps are running.
		 */
		chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk,
				DEFAULT_UST_TRACE_DIR);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
					ua_sess->path);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	ret = LTTNG_OK;
error:
	rcu_read_unlock();
	return ret;
}
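
/*
 * Illustration only, not part of the original file: the per-UID index path
 * formatted above. Assuming the stock definitions of DEFAULT_UST_TRACE_DIR
 * ("ust"), DEFAULT_UST_TRACE_UID_PATH ("uid/%d/%u-bit") and
 * DEFAULT_INDEX_DIR ("index"), a 64-bit application tracing as uid 1000
 * results in the "ust/uid/1000/64-bit/index" subdirectory being created
 * inside the current trace chunk.
 */
#if 0
static void example_format_uid_index_path(void)
{
	char *path = NULL;
	const int fmt_ret = asprintf(&path,
			DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH
			"/" DEFAULT_INDEX_DIR,
			1000, 64u);

	if (fmt_ret >= 0) {
		/* path now reads "ust/uid/1000/64-bit/index". */
		DBG("Example index subdirectory: %s", path);
		free(path);
	}
}
#endif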
/*
 * Clear all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
{
	int ret = 0;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	if (usess->active) {
		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
		cmd_ret = LTTNG_ERR_FATAL;
		goto end;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				ret = consumer_clear_channel(socket,
						reg_chan->consumer_key);
				if (ret < 0) {
					goto error;
				}
			}

			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * The metadata channel is not cleared per se, but we still need
			 * to perform a rotation operation on it behind the scenes.
			 */
			ret = consumer_clear_channel(socket,
					reg->registry->reg.ust->metadata_key);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_clear_channel(socket, ua_chan->key);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}
					goto error;
				}
			}

			(void) push_metadata(registry, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * The metadata channel is not cleared per se, but we still need
			 * to perform a rotation operation on it behind the scenes.
			 */
			ret = consumer_clear_channel(socket, registry->metadata_key);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
					continue;
				}
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;
	goto end;

error:
	switch (-ret) {
	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
		break;
	default:
		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
		break;
	}

error_socket:
end:
	rcu_read_unlock();
	return cmd_ret;
}
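
/*
 * Usage sketch, not part of the original file: ust_app_clear_session() is
 * only valid on an inactive session, which the usess->active check above
 * enforces. A hypothetical caller therefore stops tracing before clearing;
 * the example_clear_ust_domain() wrapper below is an assumption for
 * illustration.
 */
#if 0
static enum lttng_error_code example_clear_ust_domain(
		struct ltt_session *session)
{
	enum lttng_error_code cmd_ret = LTTNG_OK;

	/* Assumed to run with the session lock held and tracing stopped. */
	if (session->ust_session && !session->ust_session->active) {
		cmd_ret = ust_app_clear_session(session);
	}

	return cmd_ret;
}
#endif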
/*
 * This function skips the metadata channel as the begin/end timestamps of a
 * metadata packet are useless.
 *
 * Moreover, opening a packet after a "clear" will cause problems for live
 * sessions as it will introduce padding that was not part of the first trace
 * chunk. The relay daemon expects the content of the metadata stream of
 * successive metadata trace chunks to be strict supersets of one another.
 *
 * For example, flushing a packet at the beginning of the metadata stream of
 * a trace chunk resulting from a "clear" session command will cause the
 * size of the metadata stream of the new trace chunk to not match the size of
 * the metadata stream of the original chunk. This will confuse the relay
 * daemon as the same "offset" in a metadata stream will no longer point
 * to the same content.
 */
enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry (
				reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, reg_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
								socket,
								reg_chan->consumer_key);

				if (open_ret < 0) {
					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		cds_lfht_for_each_entry (
				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(
					app->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht,
					&chan_iter.iter, ua_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
								socket,
								ua_chan->key);

				if (open_ret < 0) {
					/*
					 * Per-PID buffer and application going
					 * away.
					 */
					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}

					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	default:
		abort();
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}