sessiond: agent: enable events matching event notifiers
src/bin/lttng-sessiond/ust-app.c
/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <signal.h>

#include <common/compat/errno.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-internal.h>
#include <lttng/condition/event-rule.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "utils.h"
#include "session.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "rotate.h"
#include "event.h"

struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name,
 * the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL, so 0
			 * and -1 are accepted for this loglevel type since 0 is the
			 * value set by the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist; check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist; check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
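
/*
 * Informal example of the matching above: two enable-event commands for the
 * same event name that differ only in their filter bytecode hash to the same
 * bucket (the hash is computed from the name alone, see
 * add_unique_ust_app_event() below), so the bytecode memcmp() in
 * ht_match_ust_app_event() is what keeps them as two distinct entries.
 */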

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * NULL is returned. The RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	}

error:
	return registry;
}

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL) {
		free(ua_event->exclusion);
	}
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static
void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
	struct ust_app_event_notifier_rule *obj = caa_container_of(
			head, struct ust_app_event_notifier_rule, rcu_head);

	free(obj);
}

/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(int sock,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
		struct ust_app *app)
{
	int ret;

	assert(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != NULL) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
					app->name, (int) app->ppid, ret);
		}

		free(ua_event_notifier_rule->obj);
	}

	lttng_event_rule_put(ua_event_notifier_rule->event_rule);
	call_rcu(&ua_event_notifier_rule->rcu_head,
			free_ust_app_event_notifier_rule_rcu);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}
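
/*
 * Note on the overwrite/discard branch above (informal): a channel in
 * overwrite mode can only lose whole packets when the ring buffer wraps,
 * while a channel in discard mode can only drop individual events, so only
 * one of the two counters is ever meaningful for a given channel and the
 * other query is skipped.
 */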

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
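
/*
 * Sketch of the deadlock avoided by the unlock/relock dance above (informal,
 * derived from the comment in ust_app_push_metadata()):
 *
 *   sessiond thread:  registry->lock -> waits on consumerd metadata lock
 *   consumerd thread: metadata lock  -> metadata request -> registry->lock
 *
 * Dropping registry->lock for the duration of consumer_push_metadata()
 * breaks the cycle; metadata_len_sent is then reconciled with max_t() to
 * tolerate concurrent pushes.
 */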

/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required: if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Metadata closed. Even on error this means that the consumer
		 * is not responding or not found so either way a second close
		 * should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only when the trace session is destroyed in this case.
		 * Also, the previous push metadata could have flagged the metadata
		 * registry to close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	bool event_notifier_write_fd_is_open;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Remove the event notifier rules associated with this app. */
	rcu_read_lock();
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&iter.iter, event_notifier_rule, node.node) {
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
		assert(!ret);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		const int event_notifier_read_fd = lttng_pipe_get_readfd(
				app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
				notification_thread_handle,
				event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		ustctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
			app->event_notifier_group.event_pipe);
	lttng_pipe_destroy(app->event_notifier_group.event_pipe);
	/*
	 * Release the file descriptors reserved for the event notifier pipe.
	 * The app could be destroyed before the write end of the pipe could be
	 * passed to the application (and closed). In that case, both file
	 * descriptors must be released.
	 */
	lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default values by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default values by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so set it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default values by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_event_rule *event_rule, uint64_t token)
{
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = 1;
	ua_event_notifier_rule->token = token;
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node, token);

	/* Get reference of the event rule. */
	if (!lttng_event_rule_get(event_rule)) {
		abort();
	}

	ua_event_notifier_rule->event_rule = event_rule;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from event rule while allocating an event notifier rule");
		goto error_put_event_rule;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_event_rule:
	lttng_event_rule_put(event_rule);
error:
	free(ua_event_notifier_rule);
	return NULL;
}
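
/*
 * Ownership note (informal): the rule allocated above holds its own
 * reference on event_rule, taken with lttng_event_rule_get() and dropped in
 * delete_ust_app_event_notifier_rule(), so a caller may release its own
 * reference once the rule is registered without invalidating the rule.
 */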

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
		const struct lttng_filter_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust filter bytecode");
		goto error;
	}

	assert(sizeof(struct lttng_filter_bytecode) ==
			sizeof(struct lttng_ust_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}
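
/*
 * The flat memcpy() above is only valid because lttng_filter_bytecode and
 * lttng_ust_filter_bytecode are expected to share the same layout, which the
 * assert() checks at runtime. The returned buffer is owned by the caller;
 * see set_ust_object_filter() below, which frees it once it has been passed
 * to ustctl_set_filter().
 */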

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Look up an ust app event based on the event name, filter bytecode,
 * loglevel and exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_filter_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
		struct lttng_ht *ht, uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

	assert(ht);

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG2("UST app event notifier rule token not found: token = %" PRIu64,
				token);
		goto end;
	}

	event_notifier_rule = caa_container_of(
			node, struct ust_app_event_notifier_rule, node);
end:
	return event_notifier_rule;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set the filter on the tracer.
 */
static int set_ust_object_filter(struct ust_app *app,
		const struct lttng_filter_bytecode *bytecode,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object filter failed for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object filter. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter successfully set for object %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
			LTTNG_UST_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static int set_ust_object_exclusions(struct ust_app *app,
		const struct lttng_event_exclusion *exclusions,
		struct lttng_ust_object_data *ust_object)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(
			exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object exclusions. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	free(ust_exclusions);
	return ret;
}

/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_object_data *object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
					object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to disable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app (pid: %d)",
			object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_object(
		struct ust_app *app, struct lttng_ust_object_data *ust_object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to enable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app (pid: %d)",
			ust_object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN; /* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}

/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d object: %p",
			ua_event->attr.name, app->pid, ua_event->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Events are created disabled on the tracer; enable if requested. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_object(app, ua_event->obj);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1908
1909 static int init_ust_event_notifier_from_event_rule(
1910 const struct lttng_event_rule *rule,
1911 struct lttng_ust_event_notifier *event_notifier)
1912 {
1913 enum lttng_event_rule_status status;
1914 enum lttng_loglevel_type loglevel_type;
1915 enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1916 int loglevel = -1, ret = 0;
1917 const char *pattern;
1918
1919 /* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported. */
1920 assert(lttng_event_rule_get_type(rule) ==
1921 LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
1922
1923 memset(event_notifier, 0, sizeof(*event_notifier));
1924
1925 if (lttng_event_rule_targets_agent_domain(rule)) {
1926 /*
1927 * Special event for agents
1928 * The actual meat of the event is in the filter that will be
1929 * attached later on.
1930 * Set the default values for the agent event.
1931 */
1932 pattern = event_get_default_agent_ust_name(
1933 lttng_event_rule_get_domain_type(rule));
1934 loglevel = 0;
1935 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1936 } else {
1937 status = lttng_event_rule_tracepoint_get_pattern(
1938 rule, &pattern);
1939 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1940 /* At this point, this is a fatal error. */
1941 abort();
1942 }
1943
1944 status = lttng_event_rule_tracepoint_get_log_level_type(
1945 rule, &loglevel_type);
1946 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
1947 /* At this point, this is a fatal error. */
1948 abort();
1949 }
1950
1951 switch (loglevel_type) {
1952 case LTTNG_EVENT_LOGLEVEL_ALL:
1953 ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
1954 break;
1955 case LTTNG_EVENT_LOGLEVEL_RANGE:
1956 ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
1957 break;
1958 case LTTNG_EVENT_LOGLEVEL_SINGLE:
1959 ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
1960 break;
1961 default:
1962 /* Unknown log level specification type. */
1963 abort();
1964 }
1965
1966 if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
1967 status = lttng_event_rule_tracepoint_get_log_level(
1968 rule, &loglevel);
1969 assert(status == LTTNG_EVENT_RULE_STATUS_OK);
1970 }
1971 }
1972
1973 event_notifier->event.instrumentation = LTTNG_UST_TRACEPOINT;
1974 ret = lttng_strncpy(event_notifier->event.name, pattern,
1975 LTTNG_UST_SYM_NAME_LEN - 1);
1976 if (ret) {
1977 ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
1978 pattern);
1979 goto end;
1980 }
1981
1982 event_notifier->event.loglevel_type = ust_loglevel_type;
1983 event_notifier->event.loglevel = loglevel;
1984 end:
1985 return ret;
1986 }
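
/*
 * Worked example (hypothetical values, for illustration only): for a
 * tracepoint rule with pattern "my_provider:*" and log level type
 * LTTNG_EVENT_LOGLEVEL_ALL, the notifier initialized above ends up as:
 *
 *   event_notifier.event.instrumentation == LTTNG_UST_TRACEPOINT
 *   strcmp(event_notifier.event.name, "my_provider:*") == 0
 *   event_notifier.event.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
 *   event_notifier.event.loglevel == -1
 */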
1987
1988 /*
1989 * Create the specified event notifier against the user space tracer of a
1990 * given application.
1991 */
1992 static int create_ust_event_notifier(struct ust_app *app,
1993 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
1994 {
1995 int ret = 0;
1996 struct lttng_ust_event_notifier event_notifier;
1997
1998 health_code_update();
1999 assert(app->event_notifier_group.object);
2000
2001 ret = init_ust_event_notifier_from_event_rule(
2002 ua_event_notifier_rule->event_rule, &event_notifier);
2003 if (ret) {
2004 ERR("Failed to initialize UST event notifier from event rule: app = '%s' (ppid: %d)",
2005 app->name, app->ppid);
2006 goto error;
2007 }
2008
2009 event_notifier.event.token = ua_event_notifier_rule->token;
2010
2011 /* Create UST event notifier against the tracer. */
2012 pthread_mutex_lock(&app->sock_lock);
2013 ret = ustctl_create_event_notifier(app->sock, &event_notifier,
2014 app->event_notifier_group.object,
2015 &ua_event_notifier_rule->obj);
2016 pthread_mutex_unlock(&app->sock_lock);
2017 if (ret < 0) {
2018 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2019 ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
2020 event_notifier.event.name, app->name,
2021 app->ppid, ret);
2022 } else {
2023 /*
2024 * This is normal behavior, an application can die
2025 * during the creation process. Don't report an error so
2026 * the execution can continue normally.
2027 */
2028 ret = 0;
2029 DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
2030 app->name, app->ppid);
2031 }
2032
2033 goto error;
2034 }
2035
2036 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2037
2038 DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
2039 event_notifier.event.name, app->name, app->ppid,
2040 ua_event_notifier_rule->obj);
2041
2042 health_code_update();
2043
2044 /* Set filter if one is present. */
2045 if (ua_event_notifier_rule->filter) {
2046 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2047 ua_event_notifier_rule->obj);
2048 if (ret < 0) {
2049 goto error;
2050 }
2051 }
2052
2053 /* Set exclusions for the event. */
2054 if (ua_event_notifier_rule->exclusion) {
2055 ret = set_ust_object_exclusions(app,
2056 ua_event_notifier_rule->exclusion,
2057 ua_event_notifier_rule->obj);
2058 if (ret < 0) {
2059 goto error;
2060 }
2061 }
2062
2063 /*
2064 * We now need to explicitly enable the event, since it
2065 * is disabled at creation.
2066 */
2067 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2068 if (ret < 0) {
2069 /*
2070 * If we hit an EPERM, something is wrong with our enable call.
2071 * If we get an EEXIST, there is a problem on the tracer side
2072 * since we just created it.
2073 */
2074 switch (ret) {
2075 case -LTTNG_UST_ERR_PERM:
2076 /* Code flow problem. */
2077 abort();
2078 case -LTTNG_UST_ERR_EXIST:
2079 /* It's OK for our use case. */
2080 ret = 0;
2081 break;
2082 default:
2083 break;
2084 }
2085
2086 goto error;
2087 }
2088
2089 ua_event_notifier_rule->enabled = true;
2090
2091 error:
2092 health_code_update();
2093 return ret;
2094 }
2095
2096 /*
2097 * Copy data between an UST app event and a LTT event.
2098 */
2099 static void shadow_copy_event(struct ust_app_event *ua_event,
2100 struct ltt_ust_event *uevent)
2101 {
2102 size_t exclusion_alloc_size;
2103
2104 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2105 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2106
2107 ua_event->enabled = uevent->enabled;
2108
2109 /* Copy event attributes */
2110 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2111
2112 /* Copy filter bytecode */
2113 if (uevent->filter) {
2114 ua_event->filter = lttng_filter_bytecode_copy(uevent->filter);
2115 		/* Filter might be NULL here in case of ENOMEM. */
2116 }
2117
2118 /* Copy exclusion data */
2119 if (uevent->exclusion) {
2120 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2121 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
2122 ua_event->exclusion = zmalloc(exclusion_alloc_size);
2123 if (ua_event->exclusion == NULL) {
2124 PERROR("malloc");
2125 } else {
2126 memcpy(ua_event->exclusion, uevent->exclusion,
2127 exclusion_alloc_size);
2128 }
2129 }
2130 }
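
/*
 * Worked example of the exclusion sizing above (illustrative): the
 * exclusion block is a fixed header followed by 'count' symbol names,
 * so with count == 2 and LTTNG_UST_SYM_NAME_LEN == 256 (assumed, the
 * lttng-ust default), the copy spans
 * sizeof(struct lttng_event_exclusion) + 2 * 256 bytes.
 */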
2131
2132 /*
2133 * Copy data between an UST app channel and a LTT channel.
2134 */
2135 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2136 struct ltt_ust_channel *uchan)
2137 {
2138 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2139
2140 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2141 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2142
2143 ua_chan->tracefile_size = uchan->tracefile_size;
2144 ua_chan->tracefile_count = uchan->tracefile_count;
2145
2146 /* Copy event attributes since the layout is different. */
2147 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2148 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2149 ua_chan->attr.overwrite = uchan->attr.overwrite;
2150 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2151 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2152 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2153 ua_chan->attr.output = uchan->attr.output;
2154 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2155
2156 /*
2157 * Note that the attribute channel type is not set since the channel on the
2158 * tracing registry side does not have this information.
2159 */
2160
2161 ua_chan->enabled = uchan->enabled;
2162 ua_chan->tracing_channel_id = uchan->id;
2163
2164 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2165 }
2166
2167 /*
2168 * Copy data between a UST app session and a regular LTT session.
2169 */
2170 static void shadow_copy_session(struct ust_app_session *ua_sess,
2171 struct ltt_ust_session *usess, struct ust_app *app)
2172 {
2173 struct tm *timeinfo;
2174 char datetime[16];
2175 int ret;
2176 char tmp_shm_path[PATH_MAX];
2177
2178 timeinfo = localtime(&app->registration_time);
2179 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
2180
2181 DBG2("Shadow copy of session handle %d", ua_sess->handle);
2182
2183 ua_sess->tracing_id = usess->id;
2184 ua_sess->id = get_next_session_id();
2185 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2186 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2187 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2188 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
2189 ua_sess->buffer_type = usess->buffer_type;
2190 ua_sess->bits_per_long = app->bits_per_long;
2191
2192 	/* Only one consumer object per session is possible. */
2193 consumer_output_get(usess->consumer);
2194 ua_sess->consumer = usess->consumer;
2195
2196 ua_sess->output_traces = usess->output_traces;
2197 ua_sess->live_timer_interval = usess->live_timer_interval;
2198 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2199 &usess->metadata_attr);
2200
2201 switch (ua_sess->buffer_type) {
2202 case LTTNG_BUFFER_PER_PID:
2203 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2204 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
2205 datetime);
2206 break;
2207 case LTTNG_BUFFER_PER_UID:
2208 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2209 DEFAULT_UST_TRACE_UID_PATH,
2210 lttng_credentials_get_uid(&ua_sess->real_credentials),
2211 app->bits_per_long);
2212 break;
2213 default:
2214 assert(0);
2215 goto error;
2216 }
2217 if (ret < 0) {
2218 PERROR("asprintf UST shadow copy session");
2219 assert(0);
2220 goto error;
2221 }
2222
2223 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2224 sizeof(ua_sess->root_shm_path));
2225 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
2226 strncpy(ua_sess->shm_path, usess->shm_path,
2227 sizeof(ua_sess->shm_path));
2228 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2229 if (ua_sess->shm_path[0]) {
2230 switch (ua_sess->buffer_type) {
2231 case LTTNG_BUFFER_PER_PID:
2232 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2233 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2234 app->name, app->pid, datetime);
2235 break;
2236 case LTTNG_BUFFER_PER_UID:
2237 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2238 "/" DEFAULT_UST_TRACE_UID_PATH,
2239 app->uid, app->bits_per_long);
2240 break;
2241 default:
2242 assert(0);
2243 goto error;
2244 }
2245 if (ret < 0) {
2246 PERROR("sprintf UST shadow copy session");
2247 assert(0);
2248 goto error;
2249 }
2250 strncat(ua_sess->shm_path, tmp_shm_path,
2251 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2252 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2253 }
2254 return;
2255
2256 error:
2257 consumer_output_put(ua_sess->consumer);
2258 }
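
/*
 * Illustrative path example (assuming DEFAULT_UST_TRACE_PID_PATH and
 * DEFAULT_UST_TRACE_UID_PATH expand to "ust/pid" and
 * "ust/uid/%d/%u-bit" respectively): an application "myapp" with pid
 * 1234 registered on 2021-04-27 15:30:00 yields, per buffer type:
 *
 *   LTTNG_BUFFER_PER_PID: "ust/pid/myapp-1234-20210427-153000"
 *   LTTNG_BUFFER_PER_UID: "ust/uid/1000/64-bit" (uid 1000, 64-bit app)
 */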
2259
2260 /*
2261  * Lookup session wrapper.
2262 */
2263 static
2264 void __lookup_session_by_app(const struct ltt_ust_session *usess,
2265 struct ust_app *app, struct lttng_ht_iter *iter)
2266 {
2267 /* Get right UST app session from app */
2268 lttng_ht_lookup(app->sessions, &usess->id, iter);
2269 }
2270
2271 /*
2272 * Return ust app session from the app session hashtable using the UST session
2273 * id.
2274 */
2275 static struct ust_app_session *lookup_session_by_app(
2276 const struct ltt_ust_session *usess, struct ust_app *app)
2277 {
2278 struct lttng_ht_iter iter;
2279 struct lttng_ht_node_u64 *node;
2280
2281 __lookup_session_by_app(usess, app, &iter);
2282 node = lttng_ht_iter_get_node_u64(&iter);
2283 if (node == NULL) {
2284 goto error;
2285 }
2286
2287 return caa_container_of(node, struct ust_app_session, node);
2288
2289 error:
2290 return NULL;
2291 }
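
/*
 * Minimal usage sketch (illustrative; assumes the caller holds the RCU
 * read-side lock, as the hash table lookup requires):
 *
 *   rcu_read_lock();
 *   ua_sess = lookup_session_by_app(usess, app);
 *   if (ua_sess) {
 *           ...the session is only guaranteed valid under the lock...
 *   }
 *   rcu_read_unlock();
 */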
2292
2293 /*
2294 * Setup buffer registry per PID for the given session and application. If none
2295 * is found, a new one is created, added to the global registry and
2296 * initialized. If regp is valid, it's set with the newly created object.
2297 *
2298 * Return 0 on success or else a negative value.
2299 */
2300 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2301 struct ust_app *app, struct buffer_reg_pid **regp)
2302 {
2303 int ret = 0;
2304 struct buffer_reg_pid *reg_pid;
2305
2306 assert(ua_sess);
2307 assert(app);
2308
2309 rcu_read_lock();
2310
2311 reg_pid = buffer_reg_pid_find(ua_sess->id);
2312 if (!reg_pid) {
2313 /*
2314 * This is the create channel path meaning that if there is NO
2315 * registry available, we have to create one for this session.
2316 */
2317 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2318 ua_sess->root_shm_path, ua_sess->shm_path);
2319 if (ret < 0) {
2320 goto error;
2321 }
2322 } else {
2323 goto end;
2324 }
2325
2326 /* Initialize registry. */
2327 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2328 app->bits_per_long, app->uint8_t_alignment,
2329 app->uint16_t_alignment, app->uint32_t_alignment,
2330 app->uint64_t_alignment, app->long_alignment,
2331 app->byte_order, app->version.major, app->version.minor,
2332 reg_pid->root_shm_path, reg_pid->shm_path,
2333 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2334 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2335 ua_sess->tracing_id,
2336 app->uid);
2337 if (ret < 0) {
2338 /*
2339 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2340 * destroy the buffer registry, because it is always expected
2341 * that if the buffer registry can be found, its ust registry is
2342 * non-NULL.
2343 */
2344 buffer_reg_pid_destroy(reg_pid);
2345 goto error;
2346 }
2347
2348 buffer_reg_pid_add(reg_pid);
2349
2350 DBG3("UST app buffer registry per PID created successfully");
2351
2352 end:
2353 if (regp) {
2354 *regp = reg_pid;
2355 }
2356 error:
2357 rcu_read_unlock();
2358 return ret;
2359 }
2360
2361 /*
2362 * Setup buffer registry per UID for the given session and application. If none
2363 * is found, a new one is created, added to the global registry and
2364 * initialized. If regp is valid, it's set with the newly created object.
2365 *
2366 * Return 0 on success or else a negative value.
2367 */
2368 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2369 struct ust_app_session *ua_sess,
2370 struct ust_app *app, struct buffer_reg_uid **regp)
2371 {
2372 int ret = 0;
2373 struct buffer_reg_uid *reg_uid;
2374
2375 assert(usess);
2376 assert(app);
2377
2378 rcu_read_lock();
2379
2380 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2381 if (!reg_uid) {
2382 /*
2383 * This is the create channel path meaning that if there is NO
2384 * registry available, we have to create one for this session.
2385 */
2386 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2387 LTTNG_DOMAIN_UST, &reg_uid,
2388 ua_sess->root_shm_path, ua_sess->shm_path);
2389 if (ret < 0) {
2390 goto error;
2391 }
2392 } else {
2393 goto end;
2394 }
2395
2396 /* Initialize registry. */
2397 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2398 app->bits_per_long, app->uint8_t_alignment,
2399 app->uint16_t_alignment, app->uint32_t_alignment,
2400 app->uint64_t_alignment, app->long_alignment,
2401 app->byte_order, app->version.major,
2402 app->version.minor, reg_uid->root_shm_path,
2403 reg_uid->shm_path, usess->uid, usess->gid,
2404 ua_sess->tracing_id, app->uid);
2405 if (ret < 0) {
2406 /*
2407 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2408 * destroy the buffer registry, because it is always expected
2409 * that if the buffer registry can be found, its ust registry is
2410 * non-NULL.
2411 */
2412 buffer_reg_uid_destroy(reg_uid, NULL);
2413 goto error;
2414 }
2415 /* Add node to teardown list of the session. */
2416 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2417
2418 buffer_reg_uid_add(reg_uid);
2419
2420 DBG3("UST app buffer registry per UID created successfully");
2421 end:
2422 if (regp) {
2423 *regp = reg_uid;
2424 }
2425 error:
2426 rcu_read_unlock();
2427 return ret;
2428 }
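
/*
 * Illustrative note (sketch): the per-UID registry is keyed by the
 * (session id, bits_per_long, uid) triple, so 32-bit and 64-bit
 * applications running as the same user get distinct registries:
 *
 *   reg32 = buffer_reg_uid_find(usess->id, 32, uid);
 *   reg64 = buffer_reg_uid_find(usess->id, 64, uid);
 *
 * Each lookup returns its own registry (or NULL if none was created).
 */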
2429
2430 /*
2431 * Create a session on the tracer side for the given app.
2432 *
2433 * On success, ua_sess_ptr is populated with the session pointer or else left
2434 * untouched. If the session was created, is_created is set to 1. On error,
2435 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2436 * be NULL.
2437 *
2438  * Returns 0 on success or else a negative code: either -ENOMEM, or
2439  * -ENOTCONN, the default code when ustctl_create_session fails.
2440 */
2441 static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2442 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2443 int *is_created)
2444 {
2445 int ret, created = 0;
2446 struct ust_app_session *ua_sess;
2447
2448 assert(usess);
2449 assert(app);
2450 assert(ua_sess_ptr);
2451
2452 health_code_update();
2453
2454 ua_sess = lookup_session_by_app(usess, app);
2455 if (ua_sess == NULL) {
2456 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2457 app->pid, usess->id);
2458 ua_sess = alloc_ust_app_session();
2459 if (ua_sess == NULL) {
2460 			/* Only malloc can fail here, so something is really wrong. */
2461 ret = -ENOMEM;
2462 goto error;
2463 }
2464 shadow_copy_session(ua_sess, usess, app);
2465 created = 1;
2466 }
2467
2468 switch (usess->buffer_type) {
2469 case LTTNG_BUFFER_PER_PID:
2470 /* Init local registry. */
2471 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2472 if (ret < 0) {
2473 delete_ust_app_session(-1, ua_sess, app);
2474 goto error;
2475 }
2476 break;
2477 case LTTNG_BUFFER_PER_UID:
2478 /* Look for a global registry. If none exists, create one. */
2479 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2480 if (ret < 0) {
2481 delete_ust_app_session(-1, ua_sess, app);
2482 goto error;
2483 }
2484 break;
2485 default:
2486 assert(0);
2487 ret = -EINVAL;
2488 goto error;
2489 }
2490
2491 health_code_update();
2492
2493 if (ua_sess->handle == -1) {
2494 pthread_mutex_lock(&app->sock_lock);
2495 ret = ustctl_create_session(app->sock);
2496 pthread_mutex_unlock(&app->sock_lock);
2497 if (ret < 0) {
2498 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2499 ERR("Creating session for app pid %d with ret %d",
2500 app->pid, ret);
2501 } else {
2502 DBG("UST app creating session failed. Application is dead");
2503 /*
2504 * This is normal behavior, an application can die during the
2505 * creation process. Don't report an error so the execution can
2506 * continue normally. This will get flagged ENOTCONN and the
2507 * caller will handle it.
2508 */
2509 ret = 0;
2510 }
2511 delete_ust_app_session(-1, ua_sess, app);
2512 if (ret != -ENOMEM) {
2513 /*
2514 				 * The tracer is probably gone or hit an internal error, so
2515 				 * behave as if the app will soon unregister or is unusable.
2516 */
2517 ret = -ENOTCONN;
2518 }
2519 goto error;
2520 }
2521
2522 ua_sess->handle = ret;
2523
2524 /* Add ust app session to app's HT */
2525 lttng_ht_node_init_u64(&ua_sess->node,
2526 ua_sess->tracing_id);
2527 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
2528 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2529 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2530 &ua_sess->ust_objd_node);
2531
2532 DBG2("UST app session created successfully with handle %d", ret);
2533 }
2534
2535 *ua_sess_ptr = ua_sess;
2536 if (is_created) {
2537 *is_created = created;
2538 }
2539
2540 /* Everything went well. */
2541 ret = 0;
2542
2543 error:
2544 health_code_update();
2545 return ret;
2546 }
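
/*
 * Minimal caller sketch (illustrative; assumes the locks documented in
 * this file are held). A failure is either -ENOMEM or -ENOTCONN, per
 * the contract above:
 *
 *   int created;
 *   struct ust_app_session *ua_sess = NULL;
 *
 *   ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
 *   if (ret < 0) {
 *           goto error;
 *   }
 *   if (created) {
 *           ...first time this app joins this tracing session...
 *   }
 */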
2547
2548 /*
2549 * Match function for a hash table lookup of ust_app_ctx.
2550 *
2551 * It matches an ust app context based on the context type and, in the case
2552 * of perf counters, their name.
2553 */
2554 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2555 {
2556 struct ust_app_ctx *ctx;
2557 const struct lttng_ust_context_attr *key;
2558
2559 assert(node);
2560 assert(_key);
2561
2562 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2563 key = _key;
2564
2565 /* Context type */
2566 if (ctx->ctx.ctx != key->ctx) {
2567 goto no_match;
2568 }
2569
2570 switch(key->ctx) {
2571 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2572 if (strncmp(key->u.perf_counter.name,
2573 ctx->ctx.u.perf_counter.name,
2574 sizeof(key->u.perf_counter.name))) {
2575 goto no_match;
2576 }
2577 break;
2578 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2579 if (strcmp(key->u.app_ctx.provider_name,
2580 ctx->ctx.u.app_ctx.provider_name) ||
2581 strcmp(key->u.app_ctx.ctx_name,
2582 ctx->ctx.u.app_ctx.ctx_name)) {
2583 goto no_match;
2584 }
2585 break;
2586 default:
2587 break;
2588 }
2589
2590 /* Match. */
2591 return 1;
2592
2593 no_match:
2594 return 0;
2595 }
2596
2597 /*
2598  * Look up an ust app context from an lttng_ust_context.
2599 *
2600 * Must be called while holding RCU read side lock.
2601 * Return an ust_app_ctx object or NULL on error.
2602 */
2603 static
2604 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2605 struct lttng_ust_context_attr *uctx)
2606 {
2607 struct lttng_ht_iter iter;
2608 struct lttng_ht_node_ulong *node;
2609 struct ust_app_ctx *app_ctx = NULL;
2610
2611 assert(uctx);
2612 assert(ht);
2613
2614 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2615 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2616 ht_match_ust_app_ctx, uctx, &iter.iter);
2617 node = lttng_ht_iter_get_node_ulong(&iter);
2618 if (!node) {
2619 goto end;
2620 }
2621
2622 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2623
2624 end:
2625 return app_ctx;
2626 }
2627
2628 /*
2629 * Create a context for the channel on the tracer.
2630 *
2631 * Called with UST app session lock held and a RCU read side lock.
2632 */
2633 static
2634 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2635 struct lttng_ust_context_attr *uctx,
2636 struct ust_app *app)
2637 {
2638 int ret = 0;
2639 struct ust_app_ctx *ua_ctx;
2640
2641 DBG2("UST app adding context to channel %s", ua_chan->name);
2642
2643 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2644 if (ua_ctx) {
2645 ret = -EEXIST;
2646 goto error;
2647 }
2648
2649 ua_ctx = alloc_ust_app_ctx(uctx);
2650 if (ua_ctx == NULL) {
2651 /* malloc failed */
2652 ret = -ENOMEM;
2653 goto error;
2654 }
2655
2656 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2657 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2658 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2659
2660 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2661 if (ret < 0) {
2662 goto error;
2663 }
2664
2665 error:
2666 return ret;
2667 }
2668
2669 /*
2670 * Enable on the tracer side a ust app event for the session and channel.
2671 *
2672 * Called with UST app session lock held.
2673 */
2674 static
2675 int enable_ust_app_event(struct ust_app_session *ua_sess,
2676 struct ust_app_event *ua_event, struct ust_app *app)
2677 {
2678 int ret;
2679
2680 ret = enable_ust_object(app, ua_event->obj);
2681 if (ret < 0) {
2682 goto error;
2683 }
2684
2685 ua_event->enabled = 1;
2686
2687 error:
2688 return ret;
2689 }
2690
2691 /*
2692 * Disable on the tracer side a ust app event for the session and channel.
2693 */
2694 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2695 struct ust_app_event *ua_event, struct ust_app *app)
2696 {
2697 int ret;
2698
2699 ret = disable_ust_object(app, ua_event->obj);
2700 if (ret < 0) {
2701 goto error;
2702 }
2703
2704 ua_event->enabled = 0;
2705
2706 error:
2707 return ret;
2708 }
2709
2710 /*
2711 * Lookup ust app channel for session and disable it on the tracer side.
2712 */
2713 static
2714 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2715 struct ust_app_channel *ua_chan, struct ust_app *app)
2716 {
2717 int ret;
2718
2719 ret = disable_ust_channel(app, ua_sess, ua_chan);
2720 if (ret < 0) {
2721 goto error;
2722 }
2723
2724 ua_chan->enabled = 0;
2725
2726 error:
2727 return ret;
2728 }
2729
2730 /*
2731 * Lookup ust app channel for session and enable it on the tracer side. This
2732 * MUST be called with a RCU read side lock acquired.
2733 */
2734 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2735 struct ltt_ust_channel *uchan, struct ust_app *app)
2736 {
2737 int ret = 0;
2738 struct lttng_ht_iter iter;
2739 struct lttng_ht_node_str *ua_chan_node;
2740 struct ust_app_channel *ua_chan;
2741
2742 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2743 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2744 if (ua_chan_node == NULL) {
2745 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2746 uchan->name, ua_sess->tracing_id);
2747 goto error;
2748 }
2749
2750 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2751
2752 ret = enable_ust_channel(app, ua_sess, ua_chan);
2753 if (ret < 0) {
2754 goto error;
2755 }
2756
2757 error:
2758 return ret;
2759 }
2760
2761 /*
2762 * Ask the consumer to create a channel and get it if successful.
2763 *
2764 * Called with UST app session lock held.
2765 *
2766 * Return 0 on success or else a negative value.
2767 */
2768 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2769 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2770 int bitness, struct ust_registry_session *registry,
2771 uint64_t trace_archive_id)
2772 {
2773 int ret;
2774 unsigned int nb_fd = 0;
2775 struct consumer_socket *socket;
2776
2777 assert(usess);
2778 assert(ua_sess);
2779 assert(ua_chan);
2780 assert(registry);
2781
2782 rcu_read_lock();
2783 health_code_update();
2784
2785 /* Get the right consumer socket for the application. */
2786 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2787 if (!socket) {
2788 ret = -EINVAL;
2789 goto error;
2790 }
2791
2792 health_code_update();
2793
2794 /* Need one fd for the channel. */
2795 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2796 if (ret < 0) {
2797 ERR("Exhausted number of available FD upon create channel");
2798 goto error;
2799 }
2800
2801 /*
2802 	 * Ask the consumer to create the channel. The consumer will return the
2803 	 * number of streams we have to expect.
2804 */
2805 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2806 registry, usess->current_trace_chunk);
2807 if (ret < 0) {
2808 goto error_ask;
2809 }
2810
2811 /*
2812 	 * Compute the number of fds needed before receiving them. It must be 2 per
2813 * stream (2 being the default value here).
2814 */
2815 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2816
2817 /* Reserve the amount of file descriptor we need. */
2818 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2819 if (ret < 0) {
2820 ERR("Exhausted number of available FD upon create channel");
2821 goto error_fd_get_stream;
2822 }
2823
2824 health_code_update();
2825
2826 /*
2827 	 * Now get the channel from the consumer. This call will populate the stream
2828 * list of that channel and set the ust objects.
2829 */
2830 if (usess->consumer->enabled) {
2831 ret = ust_consumer_get_channel(socket, ua_chan);
2832 if (ret < 0) {
2833 goto error_destroy;
2834 }
2835 }
2836
2837 rcu_read_unlock();
2838 return 0;
2839
2840 error_destroy:
2841 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2842 error_fd_get_stream:
2843 /*
2844 * Initiate a destroy channel on the consumer since we had an error
2845 * handling it on our side. The return value is of no importance since we
2846 * already have a ret value set by the previous error that we need to
2847 * return.
2848 */
2849 (void) ust_consumer_destroy_channel(socket, ua_chan);
2850 error_ask:
2851 lttng_fd_put(LTTNG_FD_APPS, 1);
2852 error:
2853 health_code_update();
2854 rcu_read_unlock();
2855 return ret;
2856 }
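
/*
 * Worked example of the fd accounting above (illustrative): with
 * DEFAULT_UST_STREAM_FD_NUM == 2 and a channel expecting 4 streams,
 * the function reserves 1 fd for the channel plus 2 * 4 = 8 fds for
 * the streams, i.e. 9 fds against the LTTNG_FD_APPS budget; every
 * reservation is matched by an lttng_fd_put() on the error paths.
 */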
2857
2858 /*
2859 * Duplicate the ust data object of the ust app stream and save it in the
2860 * buffer registry stream.
2861 *
2862 * Return 0 on success or else a negative value.
2863 */
2864 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2865 struct ust_app_stream *stream)
2866 {
2867 int ret;
2868
2869 assert(reg_stream);
2870 assert(stream);
2871
2872 /* Reserve the amount of file descriptor we need. */
2873 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2874 if (ret < 0) {
2875 ERR("Exhausted number of available FD upon duplicate stream");
2876 goto error;
2877 }
2878
2879 /* Duplicate object for stream once the original is in the registry. */
2880 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2881 reg_stream->obj.ust);
2882 if (ret < 0) {
2883 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2884 reg_stream->obj.ust, stream->obj, ret);
2885 lttng_fd_put(LTTNG_FD_APPS, 2);
2886 goto error;
2887 }
2888 stream->handle = stream->obj->handle;
2889
2890 error:
2891 return ret;
2892 }
2893
2894 /*
2895  * Duplicate the ust data object of the ust app channel and save it in the
2896 * buffer registry channel.
2897 *
2898 * Return 0 on success or else a negative value.
2899 */
2900 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2901 struct ust_app_channel *ua_chan)
2902 {
2903 int ret;
2904
2905 assert(reg_chan);
2906 assert(ua_chan);
2907
2908 	/* Need one fd for the channel. */
2909 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2910 if (ret < 0) {
2911 ERR("Exhausted number of available FD upon duplicate channel");
2912 goto error_fd_get;
2913 }
2914
2915 /* Duplicate object for stream once the original is in the registry. */
2916 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2917 if (ret < 0) {
2918 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2919 reg_chan->obj.ust, ua_chan->obj, ret);
2920 goto error;
2921 }
2922 ua_chan->handle = ua_chan->obj->handle;
2923
2924 return 0;
2925
2926 error:
2927 lttng_fd_put(LTTNG_FD_APPS, 1);
2928 error_fd_get:
2929 return ret;
2930 }
2931
2932 /*
2933 * For a given channel buffer registry, setup all streams of the given ust
2934 * application channel.
2935 *
2936 * Return 0 on success or else a negative value.
2937 */
2938 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2939 struct ust_app_channel *ua_chan,
2940 struct ust_app *app)
2941 {
2942 int ret = 0;
2943 struct ust_app_stream *stream, *stmp;
2944
2945 assert(reg_chan);
2946 assert(ua_chan);
2947
2948 DBG2("UST app setup buffer registry stream");
2949
2950 /* Send all streams to application. */
2951 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2952 struct buffer_reg_stream *reg_stream;
2953
2954 ret = buffer_reg_stream_create(&reg_stream);
2955 if (ret < 0) {
2956 goto error;
2957 }
2958
2959 /*
2960 * Keep original pointer and nullify it in the stream so the delete
2961 * stream call does not release the object.
2962 */
2963 reg_stream->obj.ust = stream->obj;
2964 stream->obj = NULL;
2965 buffer_reg_stream_add(reg_stream, reg_chan);
2966
2967 /* We don't need the streams anymore. */
2968 cds_list_del(&stream->list);
2969 delete_ust_app_stream(-1, stream, app);
2970 }
2971
2972 error:
2973 return ret;
2974 }
2975
2976 /*
2977 * Create a buffer registry channel for the given session registry and
2978 * application channel object. If regp pointer is valid, it's set with the
2979 * created object. Important, the created object is NOT added to the session
2980 * registry hash table.
2981 *
2982 * Return 0 on success else a negative value.
2983 */
2984 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2985 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2986 {
2987 int ret;
2988 struct buffer_reg_channel *reg_chan = NULL;
2989
2990 assert(reg_sess);
2991 assert(ua_chan);
2992
2993 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2994
2995 /* Create buffer registry channel. */
2996 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2997 if (ret < 0) {
2998 goto error_create;
2999 }
3000 assert(reg_chan);
3001 reg_chan->consumer_key = ua_chan->key;
3002 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3003 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3004
3005 /* Create and add a channel registry to session. */
3006 ret = ust_registry_channel_add(reg_sess->reg.ust,
3007 ua_chan->tracing_channel_id);
3008 if (ret < 0) {
3009 goto error;
3010 }
3011 buffer_reg_channel_add(reg_sess, reg_chan);
3012
3013 if (regp) {
3014 *regp = reg_chan;
3015 }
3016
3017 return 0;
3018
3019 error:
3020 /* Safe because the registry channel object was not added to any HT. */
3021 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3022 error_create:
3023 return ret;
3024 }
3025
3026 /*
3027 * Setup buffer registry channel for the given session registry and application
3028 * channel object. If regp pointer is valid, it's set with the created object.
3029 *
3030 * Return 0 on success else a negative value.
3031 */
3032 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3033 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
3034 struct ust_app *app)
3035 {
3036 int ret;
3037
3038 assert(reg_sess);
3039 assert(reg_chan);
3040 assert(ua_chan);
3041 assert(ua_chan->obj);
3042
3043 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3044
3045 /* Setup all streams for the registry. */
3046 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
3047 if (ret < 0) {
3048 goto error;
3049 }
3050
3051 reg_chan->obj.ust = ua_chan->obj;
3052 ua_chan->obj = NULL;
3053
3054 return 0;
3055
3056 error:
3057 buffer_reg_channel_remove(reg_sess, reg_chan);
3058 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3059 return ret;
3060 }
3061
3062 /*
3063 * Send buffer registry channel to the application.
3064 *
3065 * Return 0 on success else a negative value.
3066 */
3067 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
3068 struct ust_app *app, struct ust_app_session *ua_sess,
3069 struct ust_app_channel *ua_chan)
3070 {
3071 int ret;
3072 struct buffer_reg_stream *reg_stream;
3073
3074 assert(reg_chan);
3075 assert(app);
3076 assert(ua_sess);
3077 assert(ua_chan);
3078
3079 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3080
3081 ret = duplicate_channel_object(reg_chan, ua_chan);
3082 if (ret < 0) {
3083 goto error;
3084 }
3085
3086 /* Send channel to the application. */
3087 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
3088 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3089 ret = -ENOTCONN; /* Caused by app exiting. */
3090 goto error;
3091 } else if (ret < 0) {
3092 goto error;
3093 }
3094
3095 health_code_update();
3096
3097 /* Send all streams to application. */
3098 pthread_mutex_lock(&reg_chan->stream_list_lock);
3099 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
3100 struct ust_app_stream stream;
3101
3102 ret = duplicate_stream_object(reg_stream, &stream);
3103 if (ret < 0) {
3104 goto error_stream_unlock;
3105 }
3106
3107 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3108 if (ret < 0) {
3109 (void) release_ust_app_stream(-1, &stream, app);
3110 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3111 ret = -ENOTCONN; /* Caused by app exiting. */
3112 }
3113 goto error_stream_unlock;
3114 }
3115
3116 /*
3117 * The return value is not important here. This function will output an
3118 * error if needed.
3119 */
3120 (void) release_ust_app_stream(-1, &stream, app);
3121 }
3122 ua_chan->is_sent = 1;
3123
3124 error_stream_unlock:
3125 pthread_mutex_unlock(&reg_chan->stream_list_lock);
3126 error:
3127 return ret;
3128 }
3129
3130 /*
3131  * Create per-UID buffers and send them to the application.
3132 *
3133 * This MUST be called with a RCU read side lock acquired.
3134 * The session list lock and the session's lock must be acquired.
3135 *
3136 * Return 0 on success else a negative value.
3137 */
3138 static int create_channel_per_uid(struct ust_app *app,
3139 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3140 struct ust_app_channel *ua_chan)
3141 {
3142 int ret;
3143 struct buffer_reg_uid *reg_uid;
3144 struct buffer_reg_channel *reg_chan;
3145 struct ltt_session *session = NULL;
3146 enum lttng_error_code notification_ret;
3147 struct ust_registry_channel *chan_reg;
3148
3149 assert(app);
3150 assert(usess);
3151 assert(ua_sess);
3152 assert(ua_chan);
3153
3154 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3155
3156 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3157 /*
3158 * The session creation handles the creation of this global registry
3159 	 * object. If none can be found, there is a code flow problem or a
3160 * teardown race.
3161 */
3162 assert(reg_uid);
3163
3164 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3165 reg_uid);
3166 if (reg_chan) {
3167 goto send_channel;
3168 }
3169
3170 /* Create the buffer registry channel object. */
3171 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
3172 if (ret < 0) {
3173 ERR("Error creating the UST channel \"%s\" registry instance",
3174 ua_chan->name);
3175 goto error;
3176 }
3177
3178 session = session_find_by_id(ua_sess->tracing_id);
3179 assert(session);
3180 assert(pthread_mutex_trylock(&session->lock));
3181 assert(session_trylock_list());
3182
3183 /*
3184 * Create the buffers on the consumer side. This call populates the
3185 * ust app channel object with all streams and data object.
3186 */
3187 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3188 app->bits_per_long, reg_uid->registry->reg.ust,
3189 session->most_recent_chunk_id.value);
3190 if (ret < 0) {
3191 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3192 ua_chan->name);
3193
3194 /*
3195 * Let's remove the previously created buffer registry channel so
3196 * it's not visible anymore in the session registry.
3197 */
3198 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3199 ua_chan->tracing_channel_id, false);
3200 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
3201 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
3202 goto error;
3203 }
3204
3205 /*
3206 * Setup the streams and add it to the session registry.
3207 */
3208 ret = setup_buffer_reg_channel(reg_uid->registry,
3209 ua_chan, reg_chan, app);
3210 if (ret < 0) {
3211 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3212 goto error;
3213 }
3214
3215 /* Notify the notification subsystem of the channel's creation. */
3216 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3217 chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
3218 ua_chan->tracing_channel_id);
3219 assert(chan_reg);
3220 chan_reg->consumer_key = ua_chan->key;
3221 chan_reg = NULL;
3222 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
3223
3224 notification_ret = notification_thread_command_add_channel(
3225 notification_thread_handle, session->name,
3226 lttng_credentials_get_uid(&ua_sess->effective_credentials),
3227 lttng_credentials_get_gid(&ua_sess->effective_credentials),
3228 ua_chan->name,
3229 ua_chan->key, LTTNG_DOMAIN_UST,
3230 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3231 if (notification_ret != LTTNG_OK) {
3232 ret = - (int) notification_ret;
3233 ERR("Failed to add channel to notification thread");
3234 goto error;
3235 }
3236
3237 send_channel:
3238 /* Send buffers to the application. */
3239 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
3240 if (ret < 0) {
3241 if (ret != -ENOTCONN) {
3242 ERR("Error sending channel to application");
3243 }
3244 goto error;
3245 }
3246
3247 error:
3248 if (session) {
3249 session_put(session);
3250 }
3251 return ret;
3252 }
3253
3254 /*
3255  * Create per-PID buffers and send them to the application.
3256 *
3257 * Called with UST app session lock held.
3258 * The session list lock and the session's lock must be acquired.
3259 *
3260 * Return 0 on success else a negative value.
3261 */
3262 static int create_channel_per_pid(struct ust_app *app,
3263 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3264 struct ust_app_channel *ua_chan)
3265 {
3266 int ret;
3267 struct ust_registry_session *registry;
3268 enum lttng_error_code cmd_ret;
3269 struct ltt_session *session = NULL;
3270 uint64_t chan_reg_key;
3271 struct ust_registry_channel *chan_reg;
3272
3273 assert(app);
3274 assert(usess);
3275 assert(ua_sess);
3276 assert(ua_chan);
3277
3278 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3279
3280 rcu_read_lock();
3281
3282 registry = get_session_registry(ua_sess);
3283 /* The UST app session lock is held, registry shall not be null. */
3284 assert(registry);
3285
3286 /* Create and add a new channel registry to session. */
3287 ret = ust_registry_channel_add(registry, ua_chan->key);
3288 if (ret < 0) {
3289 ERR("Error creating the UST channel \"%s\" registry instance",
3290 ua_chan->name);
3291 goto error;
3292 }
3293
3294 session = session_find_by_id(ua_sess->tracing_id);
3295 assert(session);
3296
3297 assert(pthread_mutex_trylock(&session->lock));
3298 assert(session_trylock_list());
3299
3300 /* Create and get channel on the consumer side. */
3301 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3302 app->bits_per_long, registry,
3303 session->most_recent_chunk_id.value);
3304 if (ret < 0) {
3305 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3306 ua_chan->name);
3307 goto error_remove_from_registry;
3308 }
3309
3310 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3311 if (ret < 0) {
3312 if (ret != -ENOTCONN) {
3313 ERR("Error sending channel to application");
3314 }
3315 goto error_remove_from_registry;
3316 }
3317
3318 chan_reg_key = ua_chan->key;
3319 pthread_mutex_lock(&registry->lock);
3320 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
3321 assert(chan_reg);
3322 chan_reg->consumer_key = ua_chan->key;
3323 pthread_mutex_unlock(&registry->lock);
3324
3325 cmd_ret = notification_thread_command_add_channel(
3326 notification_thread_handle, session->name,
3327 lttng_credentials_get_uid(&ua_sess->effective_credentials),
3328 lttng_credentials_get_gid(&ua_sess->effective_credentials),
3329 ua_chan->name,
3330 ua_chan->key, LTTNG_DOMAIN_UST,
3331 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3332 if (cmd_ret != LTTNG_OK) {
3333 ret = - (int) cmd_ret;
3334 ERR("Failed to add channel to notification thread");
3335 goto error_remove_from_registry;
3336 }
3337
3338 error_remove_from_registry:
3339 if (ret) {
3340 ust_registry_channel_del_free(registry, ua_chan->key, false);
3341 }
3342 error:
3343 rcu_read_unlock();
3344 if (session) {
3345 session_put(session);
3346 }
3347 return ret;
3348 }
3349
3350 /*
3351 * From an already allocated ust app channel, create the channel buffers if
3352 * needed and send them to the application. This MUST be called with a RCU read
3353 * side lock acquired.
3354 *
3355 * Called with UST app session lock held.
3356 *
3357 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3358 * the application exited concurrently.
3359 */
3360 static int ust_app_channel_send(struct ust_app *app,
3361 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3362 struct ust_app_channel *ua_chan)
3363 {
3364 int ret;
3365
3366 assert(app);
3367 assert(usess);
3368 assert(usess->active);
3369 assert(ua_sess);
3370 assert(ua_chan);
3371
3372 /* Handle buffer type before sending the channel to the application. */
3373 switch (usess->buffer_type) {
3374 case LTTNG_BUFFER_PER_UID:
3375 {
3376 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3377 if (ret < 0) {
3378 goto error;
3379 }
3380 break;
3381 }
3382 case LTTNG_BUFFER_PER_PID:
3383 {
3384 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3385 if (ret < 0) {
3386 goto error;
3387 }
3388 break;
3389 }
3390 default:
3391 assert(0);
3392 ret = -EINVAL;
3393 goto error;
3394 }
3395
3396 /* Initialize ust objd object using the received handle and add it. */
3397 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3398 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3399
3400 /* If channel is not enabled, disable it on the tracer */
3401 if (!ua_chan->enabled) {
3402 ret = disable_ust_channel(app, ua_sess, ua_chan);
3403 if (ret < 0) {
3404 goto error;
3405 }
3406 }
3407
3408 error:
3409 return ret;
3410 }
3411
3412 /*
3413 * Create UST app channel and return it through ua_chanp if not NULL.
3414 *
3415 * Called with UST app session lock and RCU read-side lock held.
3416 *
3417 * Return 0 on success or else a negative value.
3418 */
3419 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3420 struct ltt_ust_channel *uchan,
3421 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
3422 struct ust_app_channel **ua_chanp)
3423 {
3424 int ret = 0;
3425 struct lttng_ht_iter iter;
3426 struct lttng_ht_node_str *ua_chan_node;
3427 struct ust_app_channel *ua_chan;
3428
3429 /* Lookup channel in the ust app session */
3430 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3431 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3432 if (ua_chan_node != NULL) {
3433 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3434 goto end;
3435 }
3436
3437 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3438 if (ua_chan == NULL) {
3439 /* Only malloc can fail here */
3440 ret = -ENOMEM;
3441 goto error;
3442 }
3443 shadow_copy_channel(ua_chan, uchan);
3444
3445 /* Set channel type. */
3446 ua_chan->attr.type = type;
3447
3448 /* Only add the channel if successful on the tracer side. */
3449 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3450 end:
3451 if (ua_chanp) {
3452 *ua_chanp = ua_chan;
3453 }
3454
3455 /* Everything went well. */
3456 return 0;
3457
3458 error:
3459 return ret;
3460 }
3461
3462 /*
3463 * Create UST app event and create it on the tracer side.
3464 *
3465 * Must be called with the RCU read side lock held.
3466 * Called with ust app session mutex held.
3467 */
3468 static
3469 int create_ust_app_event(struct ust_app_session *ua_sess,
3470 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3471 struct ust_app *app)
3472 {
3473 int ret = 0;
3474 struct ust_app_event *ua_event;
3475
3476 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3477 if (ua_event == NULL) {
3478 /* Only failure mode of alloc_ust_app_event(). */
3479 ret = -ENOMEM;
3480 goto end;
3481 }
3482 shadow_copy_event(ua_event, uevent);
3483
3484 /* Create it on the tracer side */
3485 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3486 if (ret < 0) {
3487 /*
3488 * Not found previously means that it does not exist on the
3489 * tracer. If the application reports that the event existed,
3490 * it means there is a bug in the sessiond or lttng-ust
3491 * (or corruption, etc.)
3492 */
3493 if (ret == -LTTNG_UST_ERR_EXIST) {
3494 ERR("Tracer for application reported that an event being created already existed: "
3495 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3496 uevent->attr.name,
3497 app->pid, app->ppid, app->uid,
3498 app->gid);
3499 }
3500 goto error;
3501 }
3502
3503 add_unique_ust_app_event(ua_chan, ua_event);
3504
3505 DBG2("UST app create event completed: app = '%s' (ppid: %d)",
3506 app->name, app->ppid);
3507
3508 end:
3509 return ret;
3510
3511 error:
3512 /* Valid. Calling here is already in a read side lock */
3513 delete_ust_app_event(-1, ua_event, app);
3514 return ret;
3515 }
3516
3517 /*
3518 * Create UST app event notifier rule and create it on the tracer side.
3519 *
3520 * Must be called with the RCU read side lock held.
3521 * Called with ust app session mutex held.
3522 */
3523 static
3524 int create_ust_app_event_notifier_rule(struct lttng_event_rule *rule,
3525 struct ust_app *app, uint64_t token)
3526 {
3527 int ret = 0;
3528 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
3529
3530 ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(rule, token);
3531 if (ua_event_notifier_rule == NULL) {
3532 ret = -ENOMEM;
3533 goto end;
3534 }
3535
3536 /* Create it on the tracer side. */
3537 ret = create_ust_event_notifier(app, ua_event_notifier_rule);
3538 if (ret < 0) {
3539 /*
3540 * Not found previously means that it does not exist on the
3541 * tracer. If the application reports that the event existed,
3542 * it means there is a bug in the sessiond or lttng-ust
3543 * (or corruption, etc.)
3544 */
3545 if (ret == -LTTNG_UST_ERR_EXIST) {
3546 ERR("Tracer for application reported that an event notifier being created already exists: "
3547 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
3548 token,
3549 app->pid, app->ppid, app->uid,
3550 app->gid);
3551 }
3552 goto error;
3553 }
3554
3555 lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
3556 &ua_event_notifier_rule->node);
3557
3558 DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
3559 app->name, app->ppid, token);
3560
3561 end:
3562 return ret;
3563
3564 error:
3565 /* The RCU read side lock is already being held by the caller. */
3566 delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
3567 return ret;
3568 }
3569
3570 /*
3571 * Create UST metadata and open it on the tracer side.
3572 *
3573 * Called with UST app session lock held and RCU read side lock.
3574 */
3575 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3576 struct ust_app *app, struct consumer_output *consumer)
3577 {
3578 int ret = 0;
3579 struct ust_app_channel *metadata;
3580 struct consumer_socket *socket;
3581 struct ust_registry_session *registry;
3582 struct ltt_session *session = NULL;
3583
3584 assert(ua_sess);
3585 assert(app);
3586 assert(consumer);
3587
3588 registry = get_session_registry(ua_sess);
3589 	/* The UST app session lock is held, registry shall not be null. */
3590 assert(registry);
3591
3592 pthread_mutex_lock(&registry->lock);
3593
3594 /* Metadata already exists for this registry or it was closed previously */
3595 if (registry->metadata_key || registry->metadata_closed) {
3596 ret = 0;
3597 goto error;
3598 }
3599
3600 /* Allocate UST metadata */
3601 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3602 if (!metadata) {
3603 /* malloc() failed */
3604 ret = -ENOMEM;
3605 goto error;
3606 }
3607
3608 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3609
3610 /* Need one fd for the channel. */
3611 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3612 if (ret < 0) {
3613 ERR("Exhausted number of available FD upon create metadata");
3614 goto error;
3615 }
3616
3617 /* Get the right consumer socket for the application. */
3618 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3619 if (!socket) {
3620 ret = -EINVAL;
3621 goto error_consumer;
3622 }
3623
3624 /*
3625 * Keep metadata key so we can identify it on the consumer side. Assign it
3626 	 * to the registry *before* we ask the consumer so we avoid a race where
3627 	 * the consumer requests the metadata before the ask_channel call on our
3628 	 * side has returned.
3629 */
3630 registry->metadata_key = metadata->key;
3631
3632 session = session_find_by_id(ua_sess->tracing_id);
3633 assert(session);
3634
3635 assert(pthread_mutex_trylock(&session->lock));
3636 assert(session_trylock_list());
3637
3638 /*
3639 * Ask the metadata channel creation to the consumer. The metadata object
3640 	 * will be created by the consumer and kept there. However, the stream is
3641 * never added or monitored until we do a first push metadata to the
3642 * consumer.
3643 */
3644 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3645 registry, session->current_trace_chunk);
3646 if (ret < 0) {
3647 /* Nullify the metadata key so we don't try to close it later on. */
3648 registry->metadata_key = 0;
3649 goto error_consumer;
3650 }
3651
3652 /*
3653 * The setup command will make the metadata stream be sent to the relayd,
3654 	 * if applicable, and to the thread managing the metadata. This is important
3655 * because after this point, if an error occurs, the only way the stream
3656 * can be deleted is to be monitored in the consumer.
3657 */
3658 ret = consumer_setup_metadata(socket, metadata->key);
3659 if (ret < 0) {
3660 /* Nullify the metadata key so we don't try to close it later on. */
3661 registry->metadata_key = 0;
3662 goto error_consumer;
3663 }
3664
3665 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3666 metadata->key, app->pid);
3667
3668 error_consumer:
3669 lttng_fd_put(LTTNG_FD_APPS, 1);
3670 delete_ust_app_channel(-1, metadata, app);
3671 error:
3672 pthread_mutex_unlock(&registry->lock);
3673 if (session) {
3674 session_put(session);
3675 }
3676 return ret;
3677 }
3678
3679 /*
3680 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3681 * acquired before calling this function.
3682 */
3683 struct ust_app *ust_app_find_by_pid(pid_t pid)
3684 {
3685 struct ust_app *app = NULL;
3686 struct lttng_ht_node_ulong *node;
3687 struct lttng_ht_iter iter;
3688
3689 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3690 node = lttng_ht_iter_get_node_ulong(&iter);
3691 if (node == NULL) {
3692 DBG2("UST app no found with pid %d", pid);
3693 goto error;
3694 }
3695
3696 DBG2("Found UST app by pid %d", pid);
3697
3698 app = caa_container_of(node, struct ust_app, pid_n);
3699
3700 error:
3701 return app;
3702 }
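
/*
 * Minimal usage sketch (illustrative), per the locking contract above:
 *
 *   rcu_read_lock();
 *   app = ust_app_find_by_pid(pid);
 *   if (app) {
 *           ...app is only guaranteed to exist while the RCU read-side
 *           lock is held...
 *   }
 *   rcu_read_unlock();
 */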
3703
3704 /*
3705 * Allocate and init an UST app object using the registration information and
3706 * the command socket. This is called when the command socket connects to the
3707 * session daemon.
3708 *
3709 * The object is returned on success or else NULL.
3710 */
3711 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3712 {
3713 int ret;
3714 struct ust_app *lta = NULL;
3715 struct lttng_pipe *event_notifier_event_source_pipe = NULL;
3716
3717 assert(msg);
3718 assert(sock >= 0);
3719
3720 DBG3("UST app creating application for socket %d", sock);
3721
3722 if ((msg->bits_per_long == 64 &&
3723 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3724 || (msg->bits_per_long == 32 &&
3725 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3726 ERR("Registration failed: application \"%s\" (pid: %d) has "
3727 "%d-bit long, but no consumerd for this size is available.\n",
3728 msg->name, msg->pid, msg->bits_per_long);
3729 goto error;
3730 }
3731
3732 /*
3733 * Reserve the two file descriptors of the event source pipe. The write
3734 * end will be closed once it is passed to the application, at which
3735 * point a single 'put' will be performed.
3736 */
3737 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3738 if (ret) {
3739 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s' (ppid: %d)",
3740 msg->name, (int) msg->ppid);
3741 goto error;
3742 }
3743
3744 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3745 if (!event_notifier_event_source_pipe) {
3746 PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
3747 msg->name, msg->ppid);
3748 goto error;
3749 }
3750
3751 lta = zmalloc(sizeof(struct ust_app));
3752 if (lta == NULL) {
3753 PERROR("malloc");
3754 goto error_free_pipe;
3755 }
3756
3757 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
3758
3759 lta->ppid = msg->ppid;
3760 lta->uid = msg->uid;
3761 lta->gid = msg->gid;
3762
3763 lta->bits_per_long = msg->bits_per_long;
3764 lta->uint8_t_alignment = msg->uint8_t_alignment;
3765 lta->uint16_t_alignment = msg->uint16_t_alignment;
3766 lta->uint32_t_alignment = msg->uint32_t_alignment;
3767 lta->uint64_t_alignment = msg->uint64_t_alignment;
3768 lta->long_alignment = msg->long_alignment;
3769 lta->byte_order = msg->byte_order;
3770
3771 lta->v_major = msg->major;
3772 lta->v_minor = msg->minor;
3773 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3774 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3775 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3776 lta->notify_sock = -1;
3777 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3778
3779 /* Copy name and make sure it's NULL terminated. */
3780 strncpy(lta->name, msg->name, sizeof(lta->name));
3781 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3782
3783 /*
3784 	 * The application's compatibility is checked when the registration
3785 	 * information is received, before this function is called. At this
3786 	 * point, the application is known to work with this session daemon.
3787 */
3788 lta->compatible = 1;
3789
3790 lta->pid = msg->pid;
3791 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3792 lta->sock = sock;
3793 pthread_mutex_init(&lta->sock_lock, NULL);
3794 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3795
3796 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3797 return lta;
3798
3799 error_free_pipe:
3800 lttng_pipe_destroy(event_notifier_event_source_pipe);
3801 lttng_fd_put(LTTNG_FD_APPS, 2);
3802 error:
3803 return NULL;
3804 }
3805
3806 /*
3807 * For a given application object, add it to every hash table.
3808 */
3809 void ust_app_add(struct ust_app *app)
3810 {
3811 assert(app);
3812 assert(app->notify_sock >= 0);
3813
3814 app->registration_time = time(NULL);
3815
3816 rcu_read_lock();
3817
3818 /*
3819 * On a re-registration, we want to kick out the previous registration of
3820 	 * that PID.
3821 */
3822 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3823
3824 /*
3825 	 * The socket _should_ be unique until _we_ call close. So, an add_unique
3826 	 * is used for ust_app_ht_by_sock, which fails an assertion if the entry
3827 	 * was already in the table.
3828 */
3829 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3830
3831 /* Add application to the notify socket hash table. */
3832 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3833 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3834
3835 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3836 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3837 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3838 app->v_minor);
3839
3840 rcu_read_unlock();
3841 }
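
/*
 * Assumed registration flow (editorial sketch pieced together from this
 * file; the actual registration threads live elsewhere): the notify socket
 * must be set before publication since ust_app_add() asserts on it:
 *
 *	app = ust_app_create(&msg, sock);
 *	if (!app) {
 *		... reject the registration ...
 *	}
 *	app->notify_sock = notify_sock;
 *	ust_app_add(app);
 *	if (ust_app_version(app) < 0) {
 *		... application incompatible or already gone ...
 *	}
 */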
3842
3843 /*
3844 * Set the application version into the object.
3845 *
3846  * Return 0 on success, else a negative value: either an errno code or an
3847  * LTTng-UST error code.
3848 */
3849 int ust_app_version(struct ust_app *app)
3850 {
3851 int ret;
3852
3853 assert(app);
3854
3855 pthread_mutex_lock(&app->sock_lock);
3856 ret = ustctl_tracer_version(app->sock, &app->version);
3857 pthread_mutex_unlock(&app->sock_lock);
3858 if (ret < 0) {
3859 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3860 ERR("UST app %d version failed with ret %d", app->sock, ret);
3861 } else {
3862 DBG3("UST app %d version failed. Application is dead", app->sock);
3863 }
3864 }
3865
3866 return ret;
3867 }
3868
3869 /*
3870  * Set up the base event notifier group.
3871  *
3872  * Return 0 on success, else a negative value: either an errno code or an
3873  * LTTng-UST error code.
3874 */
3875 int ust_app_setup_event_notifier_group(struct ust_app *app)
3876 {
3877 int ret;
3878 int event_pipe_write_fd;
3879 struct lttng_ust_object_data *event_notifier_group = NULL;
3880 enum lttng_error_code lttng_ret;
3881
3882 assert(app);
3883
3884 /* Get the write side of the pipe. */
3885 event_pipe_write_fd = lttng_pipe_get_writefd(
3886 app->event_notifier_group.event_pipe);
3887
3888 pthread_mutex_lock(&app->sock_lock);
3889 ret = ustctl_create_event_notifier_group(app->sock,
3890 event_pipe_write_fd, &event_notifier_group);
3891 pthread_mutex_unlock(&app->sock_lock);
3892 if (ret < 0) {
3893 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3894 ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
3895 ret, app->sock, event_pipe_write_fd);
3896 } else {
3897 DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
3898 app->sock);
3899 }
3900
3901 goto error;
3902 }
3903
3904 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
3905 if (ret) {
3906 ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
3907 app->name, app->ppid);
3908 goto error;
3909 }
3910
3911 /*
3912 * Release the file descriptor that was reserved for the write-end of
3913 * the pipe.
3914 */
3915 lttng_fd_put(LTTNG_FD_APPS, 1);
3916
3917 lttng_ret = notification_thread_command_add_tracer_event_source(
3918 notification_thread_handle,
3919 lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
3920 LTTNG_DOMAIN_UST);
3921 if (lttng_ret != LTTNG_OK) {
3922 ERR("Failed to add tracer event source to notification thread");
3923 		ret = -1;
3924 goto error;
3925 }
3926
3927 /* Assign handle only when the complete setup is valid. */
3928 app->event_notifier_group.object = event_notifier_group;
3929 return ret;
3930
3931 error:
3932 	ustctl_release_object(app->sock, event_notifier_group);
3933 	free(event_notifier_group);
3934 return ret;
3935 }
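
/*
 * File-descriptor accounting sketch for the event source pipe (inferred
 * from ust_app_create() and the function above): two FDs are reserved at
 * creation; the write end is consumed here and the read end presumably
 * lives until the application object is destroyed:
 *
 *	lttng_fd_get(LTTNG_FD_APPS, 2);		// ust_app_create()
 *	ustctl_create_event_notifier_group();	// write end passed to the app
 *	lttng_pipe_write_close();		// local write end closed
 *	lttng_fd_put(LTTNG_FD_APPS, 1);		// its reservation released
 */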
3936
3937 /*
3938 * Unregister app by removing it from the global traceable app list and freeing
3939 * the data struct.
3940 *
3941  * The socket is already closed at this point, so there is no need to close it.
3942 */
3943 void ust_app_unregister(int sock)
3944 {
3945 struct ust_app *lta;
3946 struct lttng_ht_node_ulong *node;
3947 struct lttng_ht_iter ust_app_sock_iter;
3948 struct lttng_ht_iter iter;
3949 struct ust_app_session *ua_sess;
3950 int ret;
3951
3952 rcu_read_lock();
3953
3954 /* Get the node reference for a call_rcu */
3955 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3956 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
3957 assert(node);
3958
3959 lta = caa_container_of(node, struct ust_app, sock_n);
3960 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3961
3962 /*
3963 * For per-PID buffers, perform "push metadata" and flush all
3964 * application streams before removing app from hash tables,
3965 * ensuring proper behavior of data_pending check.
3966 * Remove sessions so they are not visible during deletion.
3967 */
3968 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3969 node.node) {
3970 struct ust_registry_session *registry;
3971
3972 ret = lttng_ht_del(lta->sessions, &iter);
3973 if (ret) {
3974 			/* The session was already removed and is scheduled for teardown. */
3975 continue;
3976 }
3977
3978 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3979 (void) ust_app_flush_app_session(lta, ua_sess);
3980 }
3981
3982 /*
3983 * Add session to list for teardown. This is safe since at this point we
3984 * are the only one using this list.
3985 */
3986 pthread_mutex_lock(&ua_sess->lock);
3987
3988 if (ua_sess->deleted) {
3989 pthread_mutex_unlock(&ua_sess->lock);
3990 continue;
3991 }
3992
3993 /*
3994 * Normally, this is done in the delete session process which is
3995 * executed in the call rcu below. However, upon registration we can't
3996 * afford to wait for the grace period before pushing data or else the
3997 * data pending feature can race between the unregistration and stop
3998 * command where the data pending command is sent *before* the grace
3999 * period ended.
4000 *
4001 * The close metadata below nullifies the metadata pointer in the
4002 * session so the delete session will NOT push/close a second time.
4003 */
4004 registry = get_session_registry(ua_sess);
4005 if (registry) {
4006 /* Push metadata for application before freeing the application. */
4007 (void) push_metadata(registry, ua_sess->consumer);
4008
4009 /*
4010 * Don't ask to close metadata for global per UID buffers. Close
4011 * metadata only on destroy trace session in this case. Also, the
4012 * previous push metadata could have flag the metadata registry to
4013 * close so don't send a close command if closed.
4014 */
4015 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
4016 /* And ask to close it for this session registry. */
4017 (void) close_metadata(registry, ua_sess->consumer);
4018 }
4019 }
4020 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
4021
4022 pthread_mutex_unlock(&ua_sess->lock);
4023 }
4024
4025 	/* Remove application from socket hash table */
4026 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4027 assert(!ret);
4028
4029 /*
4030 * Remove application from notify hash table. The thread handling the
4031 	 * notify socket could have deleted the node, so ignore any error because
4032 * either way it's valid. The close of that socket is handled by the
4033 * apps_notify_thread.
4034 */
4035 iter.iter.node = &lta->notify_sock_n.node;
4036 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4037
4038 /*
4039 * Ignore return value since the node might have been removed before by an
4040 * add replace during app registration because the PID can be reassigned by
4041 * the OS.
4042 */
4043 iter.iter.node = &lta->pid_n.node;
4044 ret = lttng_ht_del(ust_app_ht, &iter);
4045 if (ret) {
4046 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
4047 lta->pid);
4048 }
4049
4050 /* Free memory */
4051 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4052
4053 rcu_read_unlock();
4054 return;
4055 }
4056
4057 /*
4058 * Fill events array with all events name of all registered apps.
4059 */
4060 int ust_app_list_events(struct lttng_event **events)
4061 {
4062 int ret, handle;
4063 size_t nbmem, count = 0;
4064 struct lttng_ht_iter iter;
4065 struct ust_app *app;
4066 struct lttng_event *tmp_event;
4067
4068 nbmem = UST_APP_EVENT_LIST_SIZE;
4069 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
4070 if (tmp_event == NULL) {
4071 PERROR("zmalloc ust app events");
4072 ret = -ENOMEM;
4073 goto error;
4074 }
4075
4076 rcu_read_lock();
4077
4078 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4079 struct lttng_ust_tracepoint_iter uiter;
4080
4081 health_code_update();
4082
4083 if (!app->compatible) {
4084 /*
4085 			 * TODO: In time, we should notify the caller of this error by
4086 			 * telling them that this is a version error.
4087 */
4088 continue;
4089 }
4090 pthread_mutex_lock(&app->sock_lock);
4091 handle = ustctl_tracepoint_list(app->sock);
4092 if (handle < 0) {
4093 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4094 ERR("UST app list events getting handle failed for app pid %d",
4095 app->pid);
4096 }
4097 pthread_mutex_unlock(&app->sock_lock);
4098 continue;
4099 }
4100
4101 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
4102 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4103 /* Handle ustctl error. */
4104 if (ret < 0) {
4105 int release_ret;
4106
4107 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4108 ERR("UST app tp list get failed for app %d with ret %d",
4109 app->sock, ret);
4110 } else {
4111 DBG3("UST app tp list get failed. Application is dead");
4112 /*
4113 * This is normal behavior, an application can die during the
4114 * creation process. Don't report an error so the execution can
4115 					 * continue normally.
4116 */
4117 break;
4118 }
4119 free(tmp_event);
4120 release_ret = ustctl_release_handle(app->sock, handle);
4121 if (release_ret < 0 &&
4122 release_ret != -LTTNG_UST_ERR_EXITING &&
4123 release_ret != -EPIPE) {
4124 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4125 }
4126 pthread_mutex_unlock(&app->sock_lock);
4127 goto rcu_error;
4128 }
4129
4130 health_code_update();
4131 if (count >= nbmem) {
4132 /* In case the realloc fails, we free the memory */
4133 struct lttng_event *new_tmp_event;
4134 size_t new_nbmem;
4135
4136 new_nbmem = nbmem << 1;
4137 DBG2("Reallocating event list from %zu to %zu entries",
4138 nbmem, new_nbmem);
4139 new_tmp_event = realloc(tmp_event,
4140 new_nbmem * sizeof(struct lttng_event));
4141 if (new_tmp_event == NULL) {
4142 int release_ret;
4143
4144 PERROR("realloc ust app events");
4145 free(tmp_event);
4146 ret = -ENOMEM;
4147 release_ret = ustctl_release_handle(app->sock, handle);
4148 if (release_ret < 0 &&
4149 release_ret != -LTTNG_UST_ERR_EXITING &&
4150 release_ret != -EPIPE) {
4151 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4152 }
4153 pthread_mutex_unlock(&app->sock_lock);
4154 goto rcu_error;
4155 }
4156 /* Zero the new memory */
4157 memset(new_tmp_event + nbmem, 0,
4158 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4159 nbmem = new_nbmem;
4160 tmp_event = new_tmp_event;
4161 }
4162 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
4163 tmp_event[count].loglevel = uiter.loglevel;
4164 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
4165 tmp_event[count].pid = app->pid;
4166 tmp_event[count].enabled = -1;
4167 count++;
4168 }
4169 ret = ustctl_release_handle(app->sock, handle);
4170 pthread_mutex_unlock(&app->sock_lock);
4171 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4172 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4173 }
4174 }
4175
4176 ret = count;
4177 *events = tmp_event;
4178
4179 DBG2("UST app list events done (%zu events)", count);
4180
4181 rcu_error:
4182 rcu_read_unlock();
4183 error:
4184 health_code_update();
4185 return ret;
4186 }
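
/*
 * Hypothetical caller sketch: ust_app_list_events() returns the event
 * count (or a negative errno value) and transfers ownership of the array:
 *
 *	struct lttng_event *events;
 *	int nb_events = ust_app_list_events(&events);
 *
 *	if (nb_events >= 0) {
 *		for (int i = 0; i < nb_events; i++) {
 *			DBG("tracepoint %s (pid %d)", events[i].name,
 *					events[i].pid);
 *		}
 *		free(events);
 *	}
 */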
4187
4188 /*
4189  * Fill the fields array with the event field names of all registered apps.
4190 */
4191 int ust_app_list_event_fields(struct lttng_event_field **fields)
4192 {
4193 int ret, handle;
4194 size_t nbmem, count = 0;
4195 struct lttng_ht_iter iter;
4196 struct ust_app *app;
4197 struct lttng_event_field *tmp_event;
4198
4199 nbmem = UST_APP_EVENT_LIST_SIZE;
4200 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4201 if (tmp_event == NULL) {
4202 PERROR("zmalloc ust app event fields");
4203 ret = -ENOMEM;
4204 goto error;
4205 }
4206
4207 rcu_read_lock();
4208
4209 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4210 struct lttng_ust_field_iter uiter;
4211
4212 health_code_update();
4213
4214 if (!app->compatible) {
4215 /*
4216 			 * TODO: In time, we should notify the caller of this error by
4217 			 * telling them that this is a version error.
4218 */
4219 continue;
4220 }
4221 pthread_mutex_lock(&app->sock_lock);
4222 handle = ustctl_tracepoint_field_list(app->sock);
4223 if (handle < 0) {
4224 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4225 ERR("UST app list field getting handle failed for app pid %d",
4226 app->pid);
4227 }
4228 pthread_mutex_unlock(&app->sock_lock);
4229 continue;
4230 }
4231
4232 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
4233 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4234 /* Handle ustctl error. */
4235 if (ret < 0) {
4236 int release_ret;
4237
4238 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4239 ERR("UST app tp list field failed for app %d with ret %d",
4240 app->sock, ret);
4241 } else {
4242 DBG3("UST app tp list field failed. Application is dead");
4243 /*
4244 * This is normal behavior, an application can die during the
4245 * creation process. Don't report an error so the execution can
4246 					 * continue normally. Move on to the next app.
4247 */
4248 break;
4249 }
4250 free(tmp_event);
4251 release_ret = ustctl_release_handle(app->sock, handle);
4252 pthread_mutex_unlock(&app->sock_lock);
4253 if (release_ret < 0 &&
4254 release_ret != -LTTNG_UST_ERR_EXITING &&
4255 release_ret != -EPIPE) {
4256 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4257 }
4258 goto rcu_error;
4259 }
4260
4261 health_code_update();
4262 if (count >= nbmem) {
4263 /* In case the realloc fails, we free the memory */
4264 struct lttng_event_field *new_tmp_event;
4265 size_t new_nbmem;
4266
4267 new_nbmem = nbmem << 1;
4268 DBG2("Reallocating event field list from %zu to %zu entries",
4269 nbmem, new_nbmem);
4270 new_tmp_event = realloc(tmp_event,
4271 new_nbmem * sizeof(struct lttng_event_field));
4272 if (new_tmp_event == NULL) {
4273 int release_ret;
4274
4275 PERROR("realloc ust app event fields");
4276 free(tmp_event);
4277 ret = -ENOMEM;
4278 release_ret = ustctl_release_handle(app->sock, handle);
4279 pthread_mutex_unlock(&app->sock_lock);
4280 				if (release_ret < 0 &&
4281 release_ret != -LTTNG_UST_ERR_EXITING &&
4282 release_ret != -EPIPE) {
4283 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4284 }
4285 goto rcu_error;
4286 }
4287 /* Zero the new memory */
4288 memset(new_tmp_event + nbmem, 0,
4289 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4290 nbmem = new_nbmem;
4291 tmp_event = new_tmp_event;
4292 }
4293
4294 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
4295 /* Mapping between these enums matches 1 to 1. */
4296 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4297 tmp_event[count].nowrite = uiter.nowrite;
4298
4299 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
4300 tmp_event[count].event.loglevel = uiter.loglevel;
4301 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4302 tmp_event[count].event.pid = app->pid;
4303 tmp_event[count].event.enabled = -1;
4304 count++;
4305 }
4306 ret = ustctl_release_handle(app->sock, handle);
4307 pthread_mutex_unlock(&app->sock_lock);
4308 if (ret < 0 &&
4309 ret != -LTTNG_UST_ERR_EXITING &&
4310 ret != -EPIPE) {
4311 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4312 }
4313 }
4314
4315 ret = count;
4316 *fields = tmp_event;
4317
4318 DBG2("UST app list event fields done (%zu events)", count);
4319
4320 rcu_error:
4321 rcu_read_unlock();
4322 error:
4323 health_code_update();
4324 return ret;
4325 }
4326
4327 /*
4328 * Free and clean all traceable apps of the global list.
4329 *
4330 * Should _NOT_ be called with RCU read-side lock held.
4331 */
4332 void ust_app_clean_list(void)
4333 {
4334 int ret;
4335 struct ust_app *app;
4336 struct lttng_ht_iter iter;
4337
4338 DBG2("UST app cleaning registered apps hash table");
4339
4340 rcu_read_lock();
4341
4342 /* Cleanup notify socket hash table */
4343 if (ust_app_ht_by_notify_sock) {
4344 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4345 notify_sock_n.node) {
4346 /*
4347 * Assert that all notifiers are gone as all triggers
4348 * are unregistered prior to this clean-up.
4349 */
4350 assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
4351
4352 ust_app_notify_sock_unregister(app->notify_sock);
4353 }
4354 }
4355
4356 if (ust_app_ht) {
4357 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4358 ret = lttng_ht_del(ust_app_ht, &iter);
4359 assert(!ret);
4360 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4361 }
4362 }
4363
4364 /* Cleanup socket hash table */
4365 if (ust_app_ht_by_sock) {
4366 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4367 sock_n.node) {
4368 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4369 assert(!ret);
4370 }
4371 }
4372
4373 rcu_read_unlock();
4374
4375 /* Destroy is done only when the ht is empty */
4376 if (ust_app_ht) {
4377 ht_cleanup_push(ust_app_ht);
4378 }
4379 if (ust_app_ht_by_sock) {
4380 ht_cleanup_push(ust_app_ht_by_sock);
4381 }
4382 if (ust_app_ht_by_notify_sock) {
4383 ht_cleanup_push(ust_app_ht_by_notify_sock);
4384 }
4385 }
4386
4387 /*
4388 * Init UST app hash table.
4389 */
4390 int ust_app_ht_alloc(void)
4391 {
4392 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4393 if (!ust_app_ht) {
4394 return -1;
4395 }
4396 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4397 if (!ust_app_ht_by_sock) {
4398 return -1;
4399 }
4400 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4401 if (!ust_app_ht_by_notify_sock) {
4402 return -1;
4403 }
4404 return 0;
4405 }
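
/*
 * Hypothetical initialization sketch: on failure, any table that was
 * already created is assumed to be reclaimed by the regular teardown path,
 * since ust_app_clean_list() checks each pointer before using it:
 *
 *	if (ust_app_ht_alloc() < 0) {
 *		ERR("Failed to allocate UST app hash tables");
 *		goto init_error;	// hypothetical label
 *	}
 */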
4406
4407 /*
4408 * For a specific UST session, disable the channel for all registered apps.
4409 */
4410 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
4411 struct ltt_ust_channel *uchan)
4412 {
4413 int ret = 0;
4414 struct lttng_ht_iter iter;
4415 struct lttng_ht_node_str *ua_chan_node;
4416 struct ust_app *app;
4417 struct ust_app_session *ua_sess;
4418 struct ust_app_channel *ua_chan;
4419
4420 assert(usess->active);
4421 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4422 uchan->name, usess->id);
4423
4424 rcu_read_lock();
4425
4426 	/* For every registered application */
4427 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4428 struct lttng_ht_iter uiter;
4429 if (!app->compatible) {
4430 /*
4431 			 * TODO: In time, we should notify the caller of this error by
4432 			 * telling them that this is a version error.
4433 */
4434 continue;
4435 }
4436 ua_sess = lookup_session_by_app(usess, app);
4437 if (ua_sess == NULL) {
4438 continue;
4439 }
4440
4441 /* Get channel */
4442 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4443 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4444 		/* If the session is found for the app, the channel must be there */
4445 assert(ua_chan_node);
4446
4447 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4448 /* The channel must not be already disabled */
4449 assert(ua_chan->enabled == 1);
4450
4451 /* Disable channel onto application */
4452 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4453 if (ret < 0) {
4454 /* XXX: We might want to report this error at some point... */
4455 continue;
4456 }
4457 }
4458
4459 rcu_read_unlock();
4460 return ret;
4461 }
4462
4463 /*
4464 * For a specific UST session, enable the channel for all registered apps.
4465 */
4466 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4467 struct ltt_ust_channel *uchan)
4468 {
4469 int ret = 0;
4470 struct lttng_ht_iter iter;
4471 struct ust_app *app;
4472 struct ust_app_session *ua_sess;
4473
4474 assert(usess->active);
4475 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4476 uchan->name, usess->id);
4477
4478 rcu_read_lock();
4479
4480 	/* For every registered application */
4481 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4482 if (!app->compatible) {
4483 /*
4484 			 * TODO: In time, we should notify the caller of this error by
4485 			 * telling them that this is a version error.
4486 */
4487 continue;
4488 }
4489 ua_sess = lookup_session_by_app(usess, app);
4490 if (ua_sess == NULL) {
4491 continue;
4492 }
4493
4494 /* Enable channel onto application */
4495 ret = enable_ust_app_channel(ua_sess, uchan, app);
4496 if (ret < 0) {
4497 /* XXX: We might want to report this error at some point... */
4498 continue;
4499 }
4500 }
4501
4502 rcu_read_unlock();
4503 return ret;
4504 }
4505
4506 /*
4507 * Disable an event in a channel and for a specific session.
4508 */
4509 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4510 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4511 {
4512 int ret = 0;
4513 struct lttng_ht_iter iter, uiter;
4514 struct lttng_ht_node_str *ua_chan_node;
4515 struct ust_app *app;
4516 struct ust_app_session *ua_sess;
4517 struct ust_app_channel *ua_chan;
4518 struct ust_app_event *ua_event;
4519
4520 assert(usess->active);
4521 DBG("UST app disabling event %s for all apps in channel "
4522 "%s for session id %" PRIu64,
4523 uevent->attr.name, uchan->name, usess->id);
4524
4525 rcu_read_lock();
4526
4527 /* For all registered applications */
4528 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4529 if (!app->compatible) {
4530 /*
4531 			 * TODO: In time, we should notify the caller of this error by
4532 			 * telling them that this is a version error.
4533 */
4534 continue;
4535 }
4536 ua_sess = lookup_session_by_app(usess, app);
4537 if (ua_sess == NULL) {
4538 /* Next app */
4539 continue;
4540 }
4541
4542 /* Lookup channel in the ust app session */
4543 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4544 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4545 if (ua_chan_node == NULL) {
4546 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4547 "Skipping", uchan->name, usess->id, app->pid);
4548 continue;
4549 }
4550 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4551
4552 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4553 uevent->filter, uevent->attr.loglevel,
4554 uevent->exclusion);
4555 if (ua_event == NULL) {
4556 DBG2("Event %s not found in channel %s for app pid %d."
4557 "Skipping", uevent->attr.name, uchan->name, app->pid);
4558 continue;
4559 }
4560
4561 ret = disable_ust_app_event(ua_sess, ua_event, app);
4562 if (ret < 0) {
4563 /* XXX: Report error someday... */
4564 continue;
4565 }
4566 }
4567
4568 rcu_read_unlock();
4569 return ret;
4570 }
4571
4572 /* The ua_sess lock must be held by the caller. */
4573 static
4574 int ust_app_channel_create(struct ltt_ust_session *usess,
4575 struct ust_app_session *ua_sess,
4576 struct ltt_ust_channel *uchan, struct ust_app *app,
4577 struct ust_app_channel **_ua_chan)
4578 {
4579 int ret = 0;
4580 struct ust_app_channel *ua_chan = NULL;
4581
4582 assert(ua_sess);
4583 ASSERT_LOCKED(ua_sess->lock);
4584
4585 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4586 sizeof(uchan->name))) {
4587 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4588 &uchan->attr);
4589 ret = 0;
4590 } else {
4591 struct ltt_ust_context *uctx = NULL;
4592
4593 /*
4594 * Create channel onto application and synchronize its
4595 * configuration.
4596 */
4597 ret = ust_app_channel_allocate(ua_sess, uchan,
4598 LTTNG_UST_CHAN_PER_CPU, usess,
4599 &ua_chan);
4600 if (ret < 0) {
4601 goto error;
4602 }
4603
4604 ret = ust_app_channel_send(app, usess,
4605 ua_sess, ua_chan);
4606 if (ret) {
4607 goto error;
4608 }
4609
4610 /* Add contexts. */
4611 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4612 ret = create_ust_app_channel_context(ua_chan,
4613 &uctx->ctx, app);
4614 if (ret) {
4615 goto error;
4616 }
4617 }
4618 }
4619
4620 error:
4621 if (ret < 0) {
4622 switch (ret) {
4623 case -ENOTCONN:
4624 /*
4625 * The application's socket is not valid. Either a bad socket
4626 * or a timeout on it. We can't inform the caller that for a
4627 * specific app, the session failed so lets continue here.
4628 */
4629 ret = 0; /* Not an error. */
4630 break;
4631 case -ENOMEM:
4632 default:
4633 break;
4634 }
4635 }
4636
4637 if (ret == 0 && _ua_chan) {
4638 /*
4639 * Only return the application's channel on success. Note
4640 * that the channel can still be part of the application's
4641 * channel hashtable on error.
4642 */
4643 *_ua_chan = ua_chan;
4644 }
4645 return ret;
4646 }
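
/*
 * Behavioral note (editorial sketch): the metadata channel is special-cased
 * above, so a caller must not expect a channel object for it:
 *
 *	ret = ust_app_channel_create(usess, ua_sess, uchan, app, &ua_chan);
 *	// uchan named "metadata": attributes copied, ua_chan left NULL
 *	// any other channel: allocated, sent to the app, contexts attached
 */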
4647
4648 /*
4649 * Enable event for a specific session and channel on the tracer.
4650 */
4651 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
4652 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4653 {
4654 int ret = 0;
4655 struct lttng_ht_iter iter, uiter;
4656 struct lttng_ht_node_str *ua_chan_node;
4657 struct ust_app *app;
4658 struct ust_app_session *ua_sess;
4659 struct ust_app_channel *ua_chan;
4660 struct ust_app_event *ua_event;
4661
4662 assert(usess->active);
4663 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
4664 uevent->attr.name, usess->id);
4665
4666 /*
4667 * NOTE: At this point, this function is called only if the session and
4668  * channel passed are already created for all apps and are also enabled
4669  * on the tracer.
4670 */
4671
4672 rcu_read_lock();
4673
4674 /* For all registered applications */
4675 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4676 if (!app->compatible) {
4677 /*
4678 			 * TODO: In time, we should notify the caller of this error by
4679 			 * telling them that this is a version error.
4680 */
4681 continue;
4682 }
4683 ua_sess = lookup_session_by_app(usess, app);
4684 if (!ua_sess) {
4685 /* The application has problem or is probably dead. */
4686 continue;
4687 }
4688
4689 pthread_mutex_lock(&ua_sess->lock);
4690
4691 if (ua_sess->deleted) {
4692 pthread_mutex_unlock(&ua_sess->lock);
4693 continue;
4694 }
4695
4696 /* Lookup channel in the ust app session */
4697 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4698 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4699 /*
4700 		 * It is possible that the channel cannot be found if
4701 * the channel/event creation occurs concurrently with
4702 * an application exit.
4703 */
4704 if (!ua_chan_node) {
4705 pthread_mutex_unlock(&ua_sess->lock);
4706 continue;
4707 }
4708
4709 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4710
4711 /* Get event node */
4712 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4713 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4714 if (ua_event == NULL) {
4715 DBG3("UST app enable event %s not found for app PID %d."
4716 "Skipping app", uevent->attr.name, app->pid);
4717 goto next_app;
4718 }
4719
4720 ret = enable_ust_app_event(ua_sess, ua_event, app);
4721 if (ret < 0) {
4722 pthread_mutex_unlock(&ua_sess->lock);
4723 goto error;
4724 }
4725 next_app:
4726 pthread_mutex_unlock(&ua_sess->lock);
4727 }
4728
4729 error:
4730 rcu_read_unlock();
4731 return ret;
4732 }
4733
4734 /*
4735 * For a specific existing UST session and UST channel, creates the event for
4736 * all registered apps.
4737 */
4738 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4739 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4740 {
4741 int ret = 0;
4742 struct lttng_ht_iter iter, uiter;
4743 struct lttng_ht_node_str *ua_chan_node;
4744 struct ust_app *app;
4745 struct ust_app_session *ua_sess;
4746 struct ust_app_channel *ua_chan;
4747
4748 assert(usess->active);
4749 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4750 uevent->attr.name, usess->id);
4751
4752 rcu_read_lock();
4753
4754 /* For all registered applications */
4755 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4756 if (!app->compatible) {
4757 /*
4758 			 * TODO: In time, we should notify the caller of this error by
4759 			 * telling them that this is a version error.
4760 */
4761 continue;
4762 }
4763 ua_sess = lookup_session_by_app(usess, app);
4764 if (!ua_sess) {
4765 /* The application has problem or is probably dead. */
4766 continue;
4767 }
4768
4769 pthread_mutex_lock(&ua_sess->lock);
4770
4771 if (ua_sess->deleted) {
4772 pthread_mutex_unlock(&ua_sess->lock);
4773 continue;
4774 }
4775
4776 /* Lookup channel in the ust app session */
4777 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4778 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4779 /* If the channel is not found, there is a code flow error */
4780 assert(ua_chan_node);
4781
4782 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4783
4784 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4785 pthread_mutex_unlock(&ua_sess->lock);
4786 if (ret < 0) {
4787 if (ret != -LTTNG_UST_ERR_EXIST) {
4788 /* Possible value at this point: -ENOMEM. If so, we stop! */
4789 break;
4790 }
4791 DBG2("UST app event %s already exist on app PID %d",
4792 uevent->attr.name, app->pid);
4793 continue;
4794 }
4795 }
4796
4797 rcu_read_unlock();
4798 return ret;
4799 }
4800
4801 /*
4802 * Start tracing for a specific UST session and app.
4803 *
4804  * Called with UST session lock held.
4806 */
4807 static
4808 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
4809 {
4810 int ret = 0;
4811 struct ust_app_session *ua_sess;
4812
4813 DBG("Starting tracing for ust app pid %d", app->pid);
4814
4815 rcu_read_lock();
4816
4817 if (!app->compatible) {
4818 goto end;
4819 }
4820
4821 ua_sess = lookup_session_by_app(usess, app);
4822 if (ua_sess == NULL) {
4823 /* The session is in teardown process. Ignore and continue. */
4824 goto end;
4825 }
4826
4827 pthread_mutex_lock(&ua_sess->lock);
4828
4829 if (ua_sess->deleted) {
4830 pthread_mutex_unlock(&ua_sess->lock);
4831 goto end;
4832 }
4833
4834 if (ua_sess->enabled) {
4835 pthread_mutex_unlock(&ua_sess->lock);
4836 goto end;
4837 }
4838
4839 /* Upon restart, we skip the setup, already done */
4840 if (ua_sess->started) {
4841 goto skip_setup;
4842 }
4843
4844 health_code_update();
4845
4846 skip_setup:
4847 /* This starts the UST tracing */
4848 pthread_mutex_lock(&app->sock_lock);
4849 ret = ustctl_start_session(app->sock, ua_sess->handle);
4850 pthread_mutex_unlock(&app->sock_lock);
4851 if (ret < 0) {
4852 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4853 ERR("Error starting tracing for app pid: %d (ret: %d)",
4854 app->pid, ret);
4855 } else {
4856 DBG("UST app start session failed. Application is dead.");
4857 /*
4858 * This is normal behavior, an application can die during the
4859 * creation process. Don't report an error so the execution can
4860 * continue normally.
4861 */
4862 pthread_mutex_unlock(&ua_sess->lock);
4863 goto end;
4864 }
4865 goto error_unlock;
4866 }
4867
4868 /* Indicate that the session has been started once */
4869 ua_sess->started = 1;
4870 ua_sess->enabled = 1;
4871
4872 pthread_mutex_unlock(&ua_sess->lock);
4873
4874 health_code_update();
4875
4876 /* Quiescent wait after starting trace */
4877 pthread_mutex_lock(&app->sock_lock);
4878 ret = ustctl_wait_quiescent(app->sock);
4879 pthread_mutex_unlock(&app->sock_lock);
4880 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4881 ERR("UST app wait quiescent failed for app pid %d ret %d",
4882 app->pid, ret);
4883 }
4884
4885 end:
4886 rcu_read_unlock();
4887 health_code_update();
4888 return 0;
4889
4890 error_unlock:
4891 pthread_mutex_unlock(&ua_sess->lock);
4892 rcu_read_unlock();
4893 health_code_update();
4894 return -1;
4895 }
4896
4897 /*
4898 * Stop tracing for a specific UST session and app.
4899 */
4900 static
4901 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4902 {
4903 int ret = 0;
4904 struct ust_app_session *ua_sess;
4905 struct ust_registry_session *registry;
4906
4907 DBG("Stopping tracing for ust app pid %d", app->pid);
4908
4909 rcu_read_lock();
4910
4911 if (!app->compatible) {
4912 goto end_no_session;
4913 }
4914
4915 ua_sess = lookup_session_by_app(usess, app);
4916 if (ua_sess == NULL) {
4917 goto end_no_session;
4918 }
4919
4920 pthread_mutex_lock(&ua_sess->lock);
4921
4922 if (ua_sess->deleted) {
4923 pthread_mutex_unlock(&ua_sess->lock);
4924 goto end_no_session;
4925 }
4926
4927 /*
4928 * If started = 0, it means that stop trace has been called for a session
4929 	 * that was never started. It's possible since we can have a failed start
4930 * from either the application manager thread or the command thread. Simply
4931 * indicate that this is a stop error.
4932 */
4933 if (!ua_sess->started) {
4934 goto error_rcu_unlock;
4935 }
4936
4937 health_code_update();
4938
4939 /* This inhibits UST tracing */
4940 pthread_mutex_lock(&app->sock_lock);
4941 ret = ustctl_stop_session(app->sock, ua_sess->handle);
4942 pthread_mutex_unlock(&app->sock_lock);
4943 if (ret < 0) {
4944 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4945 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4946 app->pid, ret);
4947 } else {
4948 DBG("UST app stop session failed. Application is dead.");
4949 /*
4950 * This is normal behavior, an application can die during the
4951 * creation process. Don't report an error so the execution can
4952 * continue normally.
4953 */
4954 goto end_unlock;
4955 }
4956 goto error_rcu_unlock;
4957 }
4958
4959 health_code_update();
4960 ua_sess->enabled = 0;
4961
4962 /* Quiescent wait after stopping trace */
4963 pthread_mutex_lock(&app->sock_lock);
4964 ret = ustctl_wait_quiescent(app->sock);
4965 pthread_mutex_unlock(&app->sock_lock);
4966 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4967 ERR("UST app wait quiescent failed for app pid %d ret %d",
4968 app->pid, ret);
4969 }
4970
4971 health_code_update();
4972
4973 registry = get_session_registry(ua_sess);
4974
4975 	/* The UST app session lock is held; the registry shall not be null. */
4976 assert(registry);
4977
4978 /* Push metadata for application before freeing the application. */
4979 (void) push_metadata(registry, ua_sess->consumer);
4980
4981 end_unlock:
4982 pthread_mutex_unlock(&ua_sess->lock);
4983 end_no_session:
4984 rcu_read_unlock();
4985 health_code_update();
4986 return 0;
4987
4988 error_rcu_unlock:
4989 pthread_mutex_unlock(&ua_sess->lock);
4990 rcu_read_unlock();
4991 health_code_update();
4992 return -1;
4993 }
4994
4995 static
4996 int ust_app_flush_app_session(struct ust_app *app,
4997 struct ust_app_session *ua_sess)
4998 {
4999 int ret, retval = 0;
5000 struct lttng_ht_iter iter;
5001 struct ust_app_channel *ua_chan;
5002 struct consumer_socket *socket;
5003
5004 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5005
5006 rcu_read_lock();
5007
5008 if (!app->compatible) {
5009 goto end_not_compatible;
5010 }
5011
5012 pthread_mutex_lock(&ua_sess->lock);
5013
5014 if (ua_sess->deleted) {
5015 goto end_deleted;
5016 }
5017
5018 health_code_update();
5019
5020 	/* Get the consumer socket used to flush buffers. */
5021 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5022 ua_sess->consumer);
5023
5024 /* Flush buffers and push metadata. */
5025 switch (ua_sess->buffer_type) {
5026 case LTTNG_BUFFER_PER_PID:
5027 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5028 node.node) {
5029 health_code_update();
5030 ret = consumer_flush_channel(socket, ua_chan->key);
5031 if (ret) {
5032 ERR("Error flushing consumer channel");
5033 retval = -1;
5034 continue;
5035 }
5036 }
5037 break;
5038 case LTTNG_BUFFER_PER_UID:
5039 default:
5040 assert(0);
5041 break;
5042 }
5043
5044 health_code_update();
5045
5046 end_deleted:
5047 pthread_mutex_unlock(&ua_sess->lock);
5048
5049 end_not_compatible:
5050 rcu_read_unlock();
5051 health_code_update();
5052 return retval;
5053 }
5054
5055 /*
5056 * Flush buffers for all applications for a specific UST session.
5057 * Called with UST session lock held.
5058 */
5059 static
5060 int ust_app_flush_session(struct ltt_ust_session *usess)
5062 {
5063 int ret = 0;
5064
5065 DBG("Flushing session buffers for all ust apps");
5066
5067 rcu_read_lock();
5068
5069 /* Flush buffers and push metadata. */
5070 switch (usess->buffer_type) {
5071 case LTTNG_BUFFER_PER_UID:
5072 {
5073 struct buffer_reg_uid *reg;
5074 struct lttng_ht_iter iter;
5075
5076 		/* Flush all per UID buffers associated with that session. */
5077 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5078 struct ust_registry_session *ust_session_reg;
5079 struct buffer_reg_channel *reg_chan;
5080 struct consumer_socket *socket;
5081
5082 			/* Get consumer socket to use to push the metadata. */
5083 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5084 usess->consumer);
5085 if (!socket) {
5086 /* Ignore request if no consumer is found for the session. */
5087 continue;
5088 }
5089
5090 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5091 reg_chan, node.node) {
5092 /*
5093 * The following call will print error values so the return
5094 * code is of little importance because whatever happens, we
5095 * have to try them all.
5096 */
5097 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
5098 }
5099
5100 ust_session_reg = reg->registry->reg.ust;
5101 /* Push metadata. */
5102 (void) push_metadata(ust_session_reg, usess->consumer);
5103 }
5104 break;
5105 }
5106 case LTTNG_BUFFER_PER_PID:
5107 {
5108 struct ust_app_session *ua_sess;
5109 struct lttng_ht_iter iter;
5110 struct ust_app *app;
5111
5112 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5113 ua_sess = lookup_session_by_app(usess, app);
5114 if (ua_sess == NULL) {
5115 continue;
5116 }
5117 (void) ust_app_flush_app_session(app, ua_sess);
5118 }
5119 break;
5120 }
5121 default:
5122 ret = -1;
5123 assert(0);
5124 break;
5125 }
5126
5127 rcu_read_unlock();
5128 health_code_update();
5129 return ret;
5130 }
5131
5132 static
5133 int ust_app_clear_quiescent_app_session(struct ust_app *app,
5134 struct ust_app_session *ua_sess)
5135 {
5136 int ret = 0;
5137 struct lttng_ht_iter iter;
5138 struct ust_app_channel *ua_chan;
5139 struct consumer_socket *socket;
5140
5141 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5142
5143 rcu_read_lock();
5144
5145 if (!app->compatible) {
5146 goto end_not_compatible;
5147 }
5148
5149 pthread_mutex_lock(&ua_sess->lock);
5150
5151 if (ua_sess->deleted) {
5152 goto end_unlock;
5153 }
5154
5155 health_code_update();
5156
5157 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5158 ua_sess->consumer);
5159 if (!socket) {
5160 ERR("Failed to find consumer (%" PRIu32 ") socket",
5161 app->bits_per_long);
5162 ret = -1;
5163 goto end_unlock;
5164 }
5165
5166 /* Clear quiescent state. */
5167 switch (ua_sess->buffer_type) {
5168 case LTTNG_BUFFER_PER_PID:
5169 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
5170 ua_chan, node.node) {
5171 health_code_update();
5172 ret = consumer_clear_quiescent_channel(socket,
5173 ua_chan->key);
5174 if (ret) {
5175 ERR("Error clearing quiescent state for consumer channel");
5176 ret = -1;
5177 continue;
5178 }
5179 }
5180 break;
5181 case LTTNG_BUFFER_PER_UID:
5182 default:
5183 assert(0);
5184 ret = -1;
5185 break;
5186 }
5187
5188 health_code_update();
5189
5190 end_unlock:
5191 pthread_mutex_unlock(&ua_sess->lock);
5192
5193 end_not_compatible:
5194 rcu_read_unlock();
5195 health_code_update();
5196 return ret;
5197 }
5198
5199 /*
5200 * Clear quiescent state in each stream for all applications for a
5201 * specific UST session.
5202 * Called with UST session lock held.
5203 */
5204 static
5205 int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5207 {
5208 int ret = 0;
5209
5210 DBG("Clearing stream quiescent state for all ust apps");
5211
5212 rcu_read_lock();
5213
5214 switch (usess->buffer_type) {
5215 case LTTNG_BUFFER_PER_UID:
5216 {
5217 struct lttng_ht_iter iter;
5218 struct buffer_reg_uid *reg;
5219
5220 /*
5221 		 * Clear quiescent state for all per UID buffers associated with
5222 * that session.
5223 */
5224 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5225 struct consumer_socket *socket;
5226 struct buffer_reg_channel *reg_chan;
5227
5228 			/* Get associated consumer socket. */
5229 socket = consumer_find_socket_by_bitness(
5230 reg->bits_per_long, usess->consumer);
5231 if (!socket) {
5232 /*
5233 * Ignore request if no consumer is found for
5234 * the session.
5235 */
5236 continue;
5237 }
5238
5239 cds_lfht_for_each_entry(reg->registry->channels->ht,
5240 &iter.iter, reg_chan, node.node) {
5241 /*
5242 * The following call will print error values so
5243 * the return code is of little importance
5244 * because whatever happens, we have to try them
5245 * all.
5246 */
5247 (void) consumer_clear_quiescent_channel(socket,
5248 reg_chan->consumer_key);
5249 }
5250 }
5251 break;
5252 }
5253 case LTTNG_BUFFER_PER_PID:
5254 {
5255 struct ust_app_session *ua_sess;
5256 struct lttng_ht_iter iter;
5257 struct ust_app *app;
5258
5259 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
5260 pid_n.node) {
5261 ua_sess = lookup_session_by_app(usess, app);
5262 if (ua_sess == NULL) {
5263 continue;
5264 }
5265 (void) ust_app_clear_quiescent_app_session(app,
5266 ua_sess);
5267 }
5268 break;
5269 }
5270 default:
5271 ret = -1;
5272 assert(0);
5273 break;
5274 }
5275
5276 rcu_read_unlock();
5277 health_code_update();
5278 return ret;
5279 }
5280
5281 /*
5282 * Destroy a specific UST session in apps.
5283 */
5284 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5285 {
5286 int ret;
5287 struct ust_app_session *ua_sess;
5288 struct lttng_ht_iter iter;
5289 struct lttng_ht_node_u64 *node;
5290
5291 DBG("Destroy tracing for ust app pid %d", app->pid);
5292
5293 rcu_read_lock();
5294
5295 if (!app->compatible) {
5296 goto end;
5297 }
5298
5299 __lookup_session_by_app(usess, app, &iter);
5300 node = lttng_ht_iter_get_node_u64(&iter);
5301 if (node == NULL) {
5302 /* Session is being or is deleted. */
5303 goto end;
5304 }
5305 ua_sess = caa_container_of(node, struct ust_app_session, node);
5306
5307 health_code_update();
5308 destroy_app_session(app, ua_sess);
5309
5310 health_code_update();
5311
5312 /* Quiescent wait after stopping trace */
5313 pthread_mutex_lock(&app->sock_lock);
5314 ret = ustctl_wait_quiescent(app->sock);
5315 pthread_mutex_unlock(&app->sock_lock);
5316 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5317 ERR("UST app wait quiescent failed for app pid %d ret %d",
5318 app->pid, ret);
5319 }
5320 end:
5321 rcu_read_unlock();
5322 health_code_update();
5323 return 0;
5324 }
5325
5326 /*
5327 * Start tracing for the UST session.
5328 */
5329 int ust_app_start_trace_all(struct ltt_ust_session *usess)
5330 {
5331 struct lttng_ht_iter iter;
5332 struct ust_app *app;
5333
5334 DBG("Starting all UST traces");
5335
5336 /*
5337 * Even though the start trace might fail, flag this session active so
5338 	 * other applications coming in are started by default.
5339 */
5340 usess->active = 1;
5341
5342 rcu_read_lock();
5343
5344 /*
5345 * In a start-stop-start use-case, we need to clear the quiescent state
5346 * of each channel set by the prior stop command, thus ensuring that a
5347 * following stop or destroy is sure to grab a timestamp_end near those
5348 * operations, even if the packet is empty.
5349 */
5350 (void) ust_app_clear_quiescent_session(usess);
5351
5352 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5353 ust_app_global_update(usess, app);
5354 }
5355
5356 rcu_read_unlock();
5357
5358 return 0;
5359 }
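
/*
 * Illustrative start-stop-start timeline (editorial sketch) for the
 * quiescent-state clearing above:
 *
 *	start:	ust_app_clear_quiescent_session()	// nothing to clear yet
 *	stop:	streams flushed, quiescent state set
 *	start:	ust_app_clear_quiescent_session()	// clears the prior state
 *	stop:	sure to grab a timestamp_end, even if the packet is empty
 */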
5360
5361 /*
5362  * Stop tracing for the UST session.
5363 * Called with UST session lock held.
5364 */
5365 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5366 {
5367 int ret = 0;
5368 struct lttng_ht_iter iter;
5369 struct ust_app *app;
5370
5371 DBG("Stopping all UST traces");
5372
5373 /*
5374 * Even though the stop trace might fail, flag this session inactive so
5375 	 * other applications coming in are not started by default.
5376 */
5377 usess->active = 0;
5378
5379 rcu_read_lock();
5380
5381 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5382 ret = ust_app_stop_trace(usess, app);
5383 if (ret < 0) {
5384 /* Continue to next apps even on error */
5385 continue;
5386 }
5387 }
5388
5389 (void) ust_app_flush_session(usess);
5390
5391 rcu_read_unlock();
5392
5393 return 0;
5394 }
5395
5396 /*
5397 * Destroy app UST session.
5398 */
5399 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5400 {
5401 int ret = 0;
5402 struct lttng_ht_iter iter;
5403 struct ust_app *app;
5404
5405 DBG("Destroy all UST traces");
5406
5407 rcu_read_lock();
5408
5409 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5410 ret = destroy_trace(usess, app);
5411 if (ret < 0) {
5412 /* Continue to next apps even on error */
5413 continue;
5414 }
5415 }
5416
5417 rcu_read_unlock();
5418
5419 return 0;
5420 }
5421
5422 /* The ua_sess lock must be held by the caller. */
5423 static
5424 int find_or_create_ust_app_channel(
5425 struct ltt_ust_session *usess,
5426 struct ust_app_session *ua_sess,
5427 struct ust_app *app,
5428 struct ltt_ust_channel *uchan,
5429 struct ust_app_channel **ua_chan)
5430 {
5431 int ret = 0;
5432 struct lttng_ht_iter iter;
5433 struct lttng_ht_node_str *ua_chan_node;
5434
5435 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5436 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5437 if (ua_chan_node) {
5438 *ua_chan = caa_container_of(ua_chan_node,
5439 struct ust_app_channel, node);
5440 goto end;
5441 }
5442
5443 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5444 if (ret) {
5445 goto end;
5446 }
5447 end:
5448 return ret;
5449 }
5450
5451 static
5452 int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5453 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5454 struct ust_app *app)
5455 {
5456 int ret = 0;
5457 struct ust_app_event *ua_event = NULL;
5458
5459 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5460 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5461 if (!ua_event) {
5462 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5463 if (ret < 0) {
5464 goto end;
5465 }
5466 } else {
5467 if (ua_event->enabled != uevent->enabled) {
5468 ret = uevent->enabled ?
5469 enable_ust_app_event(ua_sess, ua_event, app) :
5470 disable_ust_app_event(ua_sess, ua_event, app);
5471 }
5472 }
5473
5474 end:
5475 return ret;
5476 }
5477
5478 /* Called with RCU read-side lock held. */
5479 static
5480 void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
5481 {
5482 int ret = 0;
5483 enum lttng_error_code ret_code;
5484 enum lttng_trigger_status t_status;
5485 struct lttng_ht_iter app_trigger_iter;
5486 struct lttng_triggers *triggers = NULL;
5487 struct ust_app_event_notifier_rule *event_notifier_rule;
5488 unsigned int count, i;
5489
5490 /*
5491 	 * Currently, registering or unregistering a trigger with an
5492 * event rule condition causes a full synchronization of the event
5493 * notifiers.
5494 *
5495 * The first step attempts to add an event notifier for all registered
5496 * triggers that apply to the user space tracers. Then, the
5497 * application's event notifiers rules are all checked against the list
5498 * of registered triggers. Any event notifier that doesn't have a
5499 * matching trigger can be assumed to have been disabled.
5500 *
5501 * All of this is inefficient, but is put in place to get the feature
5502 * rolling as it is simpler at this moment. It will be optimized Soon™
5503 	 * to allow the state of enabled event notifiers to be synchronized in
5504 	 * a piece-wise way.
5505 */
5506
5507 /* Get all triggers using uid 0 (root) */
5508 ret_code = notification_thread_command_list_triggers(
5509 notification_thread_handle, 0, &triggers);
5510 if (ret_code != LTTNG_OK) {
5511 ret = -1;
5512 goto end;
5513 }
5514
5515 assert(triggers);
5516
5517 t_status = lttng_triggers_get_count(triggers, &count);
5518 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
5519 ret = -1;
5520 goto end;
5521 }
5522
5523 for (i = 0; i < count; i++) {
5524 struct lttng_condition *condition;
5525 struct lttng_event_rule *event_rule;
5526 struct lttng_trigger *trigger;
5527 const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
5528 enum lttng_condition_status condition_status;
5529 uint64_t token;
5530
5531 trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
5532 assert(trigger);
5533
5534 token = lttng_trigger_get_tracer_token(trigger);
5535 condition = lttng_trigger_get_condition(trigger);
5536
5537 if (lttng_condition_get_type(condition) != LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
5538 /* Does not apply */
5539 continue;
5540 }
5541
5542 condition_status = lttng_condition_event_rule_borrow_rule_mutable(condition, &event_rule);
5543 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
5544
5545 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
5546 /* Skip kernel related triggers. */
5547 continue;
5548 }
5549
5550 /*
5551 * Find or create the associated token event rule. The caller
5552 * holds the RCU read lock, so this is safe to call without
5553 * explicitly acquiring it here.
5554 */
5555 looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
5556 app->token_to_event_notifier_rule_ht, token);
5557 if (!looked_up_event_notifier_rule) {
5558 ret = create_ust_app_event_notifier_rule(event_rule, app, token);
5559 if (ret < 0) {
5560 goto end;
5561 }
5562 }
5563 }
5564
5565 rcu_read_lock();
5566 /* Remove all unknown event sources from the app. */
5567 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
5568 &app_trigger_iter.iter, event_notifier_rule,
5569 node.node) {
5570 const uint64_t app_token = event_notifier_rule->token;
5571 bool found = false;
5572
5573 /*
5574 * Check if the app event trigger still exists on the
5575 * notification side.
5576 */
5577 for (i = 0; i < count; i++) {
5578 uint64_t notification_thread_token;
5579 const struct lttng_trigger *trigger =
5580 lttng_triggers_get_at_index(
5581 triggers, i);
5582
5583 assert(trigger);
5584
5585 notification_thread_token =
5586 lttng_trigger_get_tracer_token(trigger);
5587
5588 if (notification_thread_token == app_token) {
5589 found = true;
5590 break;
5591 }
5592 }
5593
5594 if (found) {
5595 /* Still valid. */
5596 continue;
5597 }
5598
5599 /*
5600 		 * This trigger was unregistered; disable it on the tracer's
5601 * side.
5602 */
5603 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
5604 &app_trigger_iter);
5605 assert(ret == 0);
5606
5607 /* Callee logs errors. */
5608 (void) disable_ust_object(app, event_notifier_rule->obj);
5609
5610 delete_ust_app_event_notifier_rule(
5611 app->sock, event_notifier_rule, app);
5612 }
5613
5614 rcu_read_unlock();
5615
5616 end:
5617 lttng_triggers_destroy(triggers);
5618 return;
5619 }
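
/*
 * Compact restatement (editorial sketch) of the synchronization above:
 *
 *	for each registered trigger T with an event-rule-hit condition
 *			outside the kernel domain:
 *		if the app has no rule for T's token:
 *			create_ust_app_event_notifier_rule(T)
 *	for each rule R attached to the app:
 *		if no registered trigger carries R's token:
 *			disable_ust_object(R), then delete R
 */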
5620
5621 /*
5622 * The caller must ensure that the application is compatible and is tracked
5623 * by the process attribute trackers.
5624 */
5625 static
5626 void ust_app_synchronize(struct ltt_ust_session *usess,
5627 struct ust_app *app)
5628 {
5629 int ret = 0;
5630 struct cds_lfht_iter uchan_iter;
5631 struct ltt_ust_channel *uchan;
5632 struct ust_app_session *ua_sess = NULL;
5633
5634 /*
5635 * The application's configuration should only be synchronized for
5636 * active sessions.
5637 */
5638 assert(usess->active);
5639
5640 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
5641 if (ret < 0) {
5642 /* Tracer is probably gone or ENOMEM. */
5643 goto error;
5644 }
5645 assert(ua_sess);
5646
5647 pthread_mutex_lock(&ua_sess->lock);
5648 if (ua_sess->deleted) {
5649 pthread_mutex_unlock(&ua_sess->lock);
5650 goto end;
5651 }
5652
5653 rcu_read_lock();
5654
5655 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5656 uchan, node.node) {
5657 struct ust_app_channel *ua_chan;
5658 struct cds_lfht_iter uevent_iter;
5659 struct ltt_ust_event *uevent;
5660
5661 /*
5662 * Search for a matching ust_app_channel. If none is found,
5663 * create it. Creating the channel will cause the ua_chan
5664 * structure to be allocated, the channel buffers to be
5665 * allocated (if necessary) and sent to the application, and
5666 * all enabled contexts will be added to the channel.
5667 */
5668 ret = find_or_create_ust_app_channel(usess, ua_sess,
5669 app, uchan, &ua_chan);
5670 if (ret) {
5671 /* Tracer is probably gone or ENOMEM. */
5672 goto error_unlock;
5673 }
5674
5675 if (!ua_chan) {
5676 /* ua_chan will be NULL for the metadata channel */
5677 continue;
5678 }
5679
5680 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
5681 node.node) {
5682 ret = ust_app_channel_synchronize_event(ua_chan,
5683 uevent, ua_sess, app);
5684 if (ret) {
5685 goto error_unlock;
5686 }
5687 }
5688
5689 if (ua_chan->enabled != uchan->enabled) {
5690 ret = uchan->enabled ?
5691 enable_ust_app_channel(ua_sess, uchan, app) :
5692 disable_ust_app_channel(ua_sess, ua_chan, app);
5693 if (ret) {
5694 goto error_unlock;
5695 }
5696 }
5697 }
5698
5699 /*
5700 * Create the metadata for the application. This returns gracefully if a
5701 * metadata was already set for the session.
5702 *
5703 * The metadata channel must be created after the data channels as the
5704 * consumer daemon assumes this ordering. When interacting with a relay
5705 * daemon, the consumer will use this assumption to send the
5706 * "STREAMS_SENT" message to the relay daemon.
5707 */
5708 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
5709 if (ret < 0) {
5710 goto error_unlock;
5711 }
5712
5713 rcu_read_unlock();
5714
5715 end:
5716 pthread_mutex_unlock(&ua_sess->lock);
5717 /* Everything went well at this point. */
5718 return;
5719
5720 error_unlock:
5721 rcu_read_unlock();
5722 pthread_mutex_unlock(&ua_sess->lock);
5723 error:
5724 if (ua_sess) {
5725 destroy_app_session(app, ua_sess);
5726 }
5727 return;
5728 }
5729
5730 static
5731 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5732 {
5733 struct ust_app_session *ua_sess;
5734
5735 ua_sess = lookup_session_by_app(usess, app);
5736 if (ua_sess == NULL) {
5737 return;
5738 }
5739 destroy_app_session(app, ua_sess);
5740 }
5741
5742 /*
5743 * Add channels/events from UST global domain to registered apps at sock.
5744 *
5745 * Called with session lock held.
5746 * Called with RCU read-side lock held.
5747 */
5748 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5749 {
5750 assert(usess);
5751 assert(usess->active);
5752
5753 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5754 app->sock, usess->id);
5755
5756 if (!app->compatible) {
5757 return;
5758 }
5759 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
5760 usess, app->pid) &&
5761 trace_ust_id_tracker_lookup(
5762 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
5763 usess, app->uid) &&
5764 trace_ust_id_tracker_lookup(
5765 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
5766 usess, app->gid)) {
5767 /*
5768 * Synchronize the application's internal tracing configuration
5769 * and start tracing.
5770 */
5771 ust_app_synchronize(usess, app);
5772 ust_app_start_trace(usess, app);
5773 } else {
5774 ust_app_global_destroy(usess, app);
5775 }
5776 }
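
/*
 * Illustrative sketch (not part of the original code): a hypothetical
 * caller of ust_app_global_update() is expected to hold both the session
 * lock and the RCU read-side lock, per the comment above:
 *
 *	session_lock(session);
 *	rcu_read_lock();
 *	ust_app_global_update(session->ust_session, app);
 *	rcu_read_unlock();
 *	session_unlock(session);
 *
 * An application filtered out by the VPID/VUID/VGID trackers has its
 * per-app session destroyed instead of being synchronized and started.
 */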
5777
5778 /*
5779 * Add all event notifiers to an application.
5780 *
5781 * Called with session lock held.
5782 * Called with RCU read-side lock held.
5783 */
5784 void ust_app_global_update_event_notifier_rules(struct ust_app *app)
5785 {
5786 DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
5787 app->name, app->ppid);
5788
5789 if (!app->compatible) {
5790 return;
5791 }
5792
5793 if (app->event_notifier_group.object == NULL) {
5794 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
5795 app->name, app->ppid);
5796 return;
5797 }
5798
5799 ust_app_synchronize_event_notifier_rules(app);
5800 }
5801
5802 /*
5803 * Called with session lock held.
5804 */
5805 void ust_app_global_update_all(struct ltt_ust_session *usess)
5806 {
5807 struct lttng_ht_iter iter;
5808 struct ust_app *app;
5809
5810 rcu_read_lock();
5811 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5812 ust_app_global_update(usess, app);
5813 }
5814 rcu_read_unlock();
5815 }
5816
5817 void ust_app_global_update_all_event_notifier_rules(void)
5818 {
5819 struct lttng_ht_iter iter;
5820 struct ust_app *app;
5821
5822 rcu_read_lock();
5823 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5824 ust_app_global_update_event_notifier_rules(app);
5825 }
5826
5827 rcu_read_unlock();
5828 }
5829
5830 /*
5831 * Add context to a specific channel for global UST domain.
5832 */
5833 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5834 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5835 {
5836 int ret = 0;
5837 struct lttng_ht_node_str *ua_chan_node;
5838 struct lttng_ht_iter iter, uiter;
5839 struct ust_app_channel *ua_chan = NULL;
5840 struct ust_app_session *ua_sess;
5841 struct ust_app *app;
5842
5843 assert(usess->active);
5844
5845 rcu_read_lock();
5846 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5847 if (!app->compatible) {
5848 /*
5849 * TODO: In time, we should notify the caller of this error by
5850 * telling them that this is a version error.
5851 */
5852 continue;
5853 }
5854 ua_sess = lookup_session_by_app(usess, app);
5855 if (ua_sess == NULL) {
5856 continue;
5857 }
5858
5859 pthread_mutex_lock(&ua_sess->lock);
5860
5861 if (ua_sess->deleted) {
5862 pthread_mutex_unlock(&ua_sess->lock);
5863 continue;
5864 }
5865
5866 /* Lookup channel in the ust app session */
5867 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5868 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5869 if (ua_chan_node == NULL) {
5870 goto next_app;
5871 }
5872 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5873 node);
5874 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
5875 if (ret < 0) {
5876 goto next_app;
5877 }
5878 next_app:
5879 pthread_mutex_unlock(&ua_sess->lock);
5880 }
5881
5882 rcu_read_unlock();
5883 return ret;
5884 }
5885
5886 /*
5887 * Receive registration and populate the given msg structure.
5888 *
5889 * On success, return 0; on failure, return the negative value from the ustctl call.
5890 */
5891 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5892 {
5893 int ret;
5894 uint32_t pid, ppid, uid, gid;
5895
5896 assert(msg);
5897
5898 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5899 &pid, &ppid, &uid, &gid,
5900 &msg->bits_per_long,
5901 &msg->uint8_t_alignment,
5902 &msg->uint16_t_alignment,
5903 &msg->uint32_t_alignment,
5904 &msg->uint64_t_alignment,
5905 &msg->long_alignment,
5906 &msg->byte_order,
5907 msg->name);
5908 if (ret < 0) {
5909 switch (-ret) {
5910 case EPIPE:
5911 case ECONNRESET:
5912 case LTTNG_UST_ERR_EXITING:
5913 DBG3("UST app recv reg message failed. Application died");
5914 break;
5915 case LTTNG_UST_ERR_UNSUP_MAJOR:
5916 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5917 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5918 LTTNG_UST_ABI_MINOR_VERSION);
5919 break;
5920 default:
5921 ERR("UST app recv reg message failed with ret %d", ret);
5922 break;
5923 }
5924 goto error;
5925 }
5926 msg->pid = (pid_t) pid;
5927 msg->ppid = (pid_t) ppid;
5928 msg->uid = (uid_t) uid;
5929 msg->gid = (gid_t) gid;
5930
5931 error:
5932 return ret;
5933 }
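
/*
 * Example (illustrative sketch, hypothetical caller): the registration
 * thread would typically call this right after accepting a new
 * application socket, then validate the advertised ABI version:
 *
 *	struct ust_register_msg msg;
 *
 *	if (ust_app_recv_registration(sock, &msg) < 0) {
 *		close(sock);
 *		return;
 *	}
 *	DBG("app '%s' registered (pid: %d, ABI %u.%u)", msg.name,
 *			(int) msg.pid, msg.major, msg.minor);
 */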
5934
5935 /*
5936 * Return a ust app session object using the application object and the
5937 * session object descriptor as a key. If not found, NULL is returned.
5938 * An RCU read-side lock MUST be acquired when calling this function.
5939 */
5940 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5941 int objd)
5942 {
5943 struct lttng_ht_node_ulong *node;
5944 struct lttng_ht_iter iter;
5945 struct ust_app_session *ua_sess = NULL;
5946
5947 assert(app);
5948
5949 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5950 node = lttng_ht_iter_get_node_ulong(&iter);
5951 if (node == NULL) {
5952 DBG2("UST app session find by objd %d not found", objd);
5953 goto error;
5954 }
5955
5956 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5957
5958 error:
5959 return ua_sess;
5960 }
5961
5962 /*
5963 * Return a ust app channel object using the application object and the channel
5964 * object descriptor as a key. If not found, NULL is returned. An RCU read-side
5965 * lock MUST be acquired before calling this function.
5966 */
5967 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5968 int objd)
5969 {
5970 struct lttng_ht_node_ulong *node;
5971 struct lttng_ht_iter iter;
5972 struct ust_app_channel *ua_chan = NULL;
5973
5974 assert(app);
5975
5976 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5977 node = lttng_ht_iter_get_node_ulong(&iter);
5978 if (node == NULL) {
5979 DBG2("UST app channel find by objd %d not found", objd);
5980 goto error;
5981 }
5982
5983 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5984
5985 error:
5986 return ua_chan;
5987 }
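
/*
 * Note on the objd-as-key pattern shared by the two lookup helpers above
 * (illustrative): the object descriptor received on the notify socket is
 * used directly as an unsigned long hash table key, so a typical lookup
 * is performed under the RCU read-side lock:
 *
 *	rcu_read_lock();
 *	ua_chan = find_channel_by_objd(app, cobjd);
 *	if (ua_chan) {
 *		... use ua_chan only while the read-side lock is held ...
 *	}
 *	rcu_read_unlock();
 */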
5988
5989 /*
5990 * Reply to a register channel notification from an application on the notify
5991 * socket. The channel metadata is also created.
5992 *
5993 * The session UST registry lock is acquired in this function.
5994 *
5995 * On success, 0 is returned; on failure, a negative value.
5996 */
5997 static int reply_ust_register_channel(int sock, int cobjd,
5998 size_t nr_fields, struct ustctl_field *fields)
5999 {
6000 int ret, ret_code = 0;
6001 uint32_t chan_id;
6002 uint64_t chan_reg_key;
6003 enum ustctl_channel_header type;
6004 struct ust_app *app;
6005 struct ust_app_channel *ua_chan;
6006 struct ust_app_session *ua_sess;
6007 struct ust_registry_session *registry;
6008 struct ust_registry_channel *chan_reg;
6009
6010 rcu_read_lock();
6011
6012 /* Lookup application. If not found, there is a code flow error. */
6013 app = find_app_by_notify_sock(sock);
6014 if (!app) {
6015 DBG("Application socket %d is being torn down. Abort event notify",
6016 sock);
6017 ret = 0;
6018 goto error_rcu_unlock;
6019 }
6020
6021 /* Lookup channel by UST object descriptor. */
6022 ua_chan = find_channel_by_objd(app, cobjd);
6023 if (!ua_chan) {
6024 DBG("Application channel is being torn down. Abort event notify");
6025 ret = 0;
6026 goto error_rcu_unlock;
6027 }
6028
6029 assert(ua_chan->session);
6030 ua_sess = ua_chan->session;
6031
6032 /* Get right session registry depending on the session buffer type. */
6033 registry = get_session_registry(ua_sess);
6034 if (!registry) {
6035 DBG("Application session is being torn down. Abort event notify");
6036 ret = 0;
6037 goto error_rcu_unlock;
6038 }
6039
6040 /* Depending on the buffer type, a different channel key is used. */
6041 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6042 chan_reg_key = ua_chan->tracing_channel_id;
6043 } else {
6044 chan_reg_key = ua_chan->key;
6045 }
6046
6047 pthread_mutex_lock(&registry->lock);
6048
6049 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
6050 assert(chan_reg);
6051
6052 if (!chan_reg->register_done) {
6053 /*
6054 * TODO: eventually use the registry event count for
6055 * this channel to better guess header type for per-pid
6056 * buffers.
6057 */
6058 type = USTCTL_CHANNEL_HEADER_LARGE;
6059 chan_reg->nr_ctx_fields = nr_fields;
6060 chan_reg->ctx_fields = fields;
6061 fields = NULL;
6062 chan_reg->header_type = type;
6063 } else {
6064 /* Get current already assigned values. */
6065 type = chan_reg->header_type;
6066 }
6067 /* Channel id is set during the object creation. */
6068 chan_id = chan_reg->chan_id;
6069
6070 /* Append to metadata */
6071 if (!chan_reg->metadata_dumped) {
6072 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
6073 if (ret_code) {
6074 ERR("Error appending channel metadata (errno = %d)", ret_code);
6075 goto reply;
6076 }
6077 }
6078
6079 reply:
6080 DBG3("UST app replying to register channel key %" PRIu64
6081 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
6082 ret_code);
6083
6084 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
6085 if (ret < 0) {
6086 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6087 ERR("UST app reply channel failed with ret %d", ret);
6088 } else {
6089 DBG3("UST app reply channel failed. Application died");
6090 }
6091 goto error;
6092 }
6093
6094 /* This channel registry registration is completed. */
6095 chan_reg->register_done = 1;
6096
6097 error:
6098 pthread_mutex_unlock(&registry->lock);
6099 error_rcu_unlock:
6100 rcu_read_unlock();
6101 free(fields);
6102 return ret;
6103 }
6104
6105 /*
6106 * Add event to the UST channel registry. When the event is added to the
6107 * registry, the metadata is also created. Once done, this replies to the
6108 * application with the appropriate error code.
6109 *
6110 * The session UST registry lock is acquired in the function.
6111 *
6112 * On success, 0 is returned; on failure, a negative value.
6113 */
6114 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
6115 char *sig, size_t nr_fields, struct ustctl_field *fields,
6116 int loglevel_value, char *model_emf_uri)
6117 {
6118 int ret, ret_code;
6119 uint32_t event_id = 0;
6120 uint64_t chan_reg_key;
6121 struct ust_app *app;
6122 struct ust_app_channel *ua_chan;
6123 struct ust_app_session *ua_sess;
6124 struct ust_registry_session *registry;
6125
6126 rcu_read_lock();
6127
6128 /* Lookup application. If not found, there is a code flow error. */
6129 app = find_app_by_notify_sock(sock);
6130 if (!app) {
6131 DBG("Application socket %d is being torn down. Abort event notify",
6132 sock);
6133 ret = 0;
6134 goto error_rcu_unlock;
6135 }
6136
6137 /* Lookup channel by UST object descriptor. */
6138 ua_chan = find_channel_by_objd(app, cobjd);
6139 if (!ua_chan) {
6140 DBG("Application channel is being torn down. Abort event notify");
6141 ret = 0;
6142 goto error_rcu_unlock;
6143 }
6144
6145 assert(ua_chan->session);
6146 ua_sess = ua_chan->session;
6147
6148 registry = get_session_registry(ua_sess);
6149 if (!registry) {
6150 DBG("Application session is being torn down. Abort event notify");
6151 ret = 0;
6152 goto error_rcu_unlock;
6153 }
6154
6155 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6156 chan_reg_key = ua_chan->tracing_channel_id;
6157 } else {
6158 chan_reg_key = ua_chan->key;
6159 }
6160
6161 pthread_mutex_lock(&registry->lock);
6162
6163 /*
6164 * From this point on, this call acquires ownership of sig, fields and
6165 * model_emf_uri, meaning any frees are done inside it if needed. These
6166 * three variables MUST NOT be read or written after this.
6167 */
6168 ret_code = ust_registry_create_event(registry, chan_reg_key,
6169 sobjd, cobjd, name, sig, nr_fields, fields,
6170 loglevel_value, model_emf_uri, ua_sess->buffer_type,
6171 &event_id, app);
6172 sig = NULL;
6173 fields = NULL;
6174 model_emf_uri = NULL;
6175
6176 /*
6177 * The return value is returned to ustctl so that, in case of an error,
6178 * the application can be notified. It is important not to return a
6179 * negative error code here, or else the application will be closed.
6180 */
6181 ret = ustctl_reply_register_event(sock, event_id, ret_code);
6182 if (ret < 0) {
6183 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6184 ERR("UST app reply event failed with ret %d", ret);
6185 } else {
6186 DBG3("UST app reply event failed. Application died");
6187 }
6188 /*
6189 * No need to wipe the created event since the application socket will
6190 * get closed on error, cleaning up everything by itself.
6191 */
6192 goto error;
6193 }
6194
6195 DBG3("UST registry event %s with id %" PRId32 " added successfully",
6196 name, event_id);
6197
6198 error:
6199 pthread_mutex_unlock(&registry->lock);
6200 error_rcu_unlock:
6201 rcu_read_unlock();
6202 free(sig);
6203 free(fields);
6204 free(model_emf_uri);
6205 return ret;
6206 }
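
/*
 * Ownership-transfer convention used above (illustrative recap): once a
 * pointer is handed to a callee that may free it, the local copy is
 * immediately set to NULL so that the unconditional free() calls on the
 * exit path remain safe:
 *
 *	ret_code = ust_registry_create_event(registry, chan_reg_key,
 *			sobjd, cobjd, name, sig, nr_fields, fields,
 *			loglevel_value, model_emf_uri, ua_sess->buffer_type,
 *			&event_id, app);
 *	sig = NULL;
 *	fields = NULL;
 *	model_emf_uri = NULL;
 *	...
 *	free(sig);
 *	free(fields);
 *	free(model_emf_uri);
 *
 * The trailing free() calls become no-ops whenever ownership was
 * transferred.
 */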
6207
6208 /*
6209 * Add enum to the UST session registry. Once done, this replies to the
6210 * application with the appropriate error code.
6211 *
6212 * The session UST registry lock is acquired within this function.
6213 *
6214 * On success, 0 is returned; on failure, a negative value.
6215 */
6216 static int add_enum_ust_registry(int sock, int sobjd, char *name,
6217 struct ustctl_enum_entry *entries, size_t nr_entries)
6218 {
6219 int ret = 0, ret_code;
6220 struct ust_app *app;
6221 struct ust_app_session *ua_sess;
6222 struct ust_registry_session *registry;
6223 uint64_t enum_id = -1ULL;
6224
6225 rcu_read_lock();
6226
6227 /* Lookup application. If not found, there is a code flow error. */
6228 app = find_app_by_notify_sock(sock);
6229 if (!app) {
6230 /* Return without error since this is not an error condition. */
6231 DBG("Application socket %d is being torn down. Aborting enum registration",
6232 sock);
6233 free(entries);
6234 goto error_rcu_unlock;
6235 }
6236
6237 /* Lookup session by UST object descriptor. */
6238 ua_sess = find_session_by_objd(app, sobjd);
6239 if (!ua_sess) {
6240 /* Return without error since this is not an error condition. */
6241 DBG("Application session is being torn down (session not found). Aborting enum registration.");
6242 free(entries);
6243 goto error_rcu_unlock;
6244 }
6245
6246 registry = get_session_registry(ua_sess);
6247 if (!registry) {
6248 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6249 free(entries);
6250 goto error_rcu_unlock;
6251 }
6252
6253 pthread_mutex_lock(&registry->lock);
6254
6255 /*
6256 * From this point on, the callee acquires ownership of
6257 * entries. The variable entries MUST NOT be read or written
6258 * after this call.
6259 */
6260 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
6261 entries, nr_entries, &enum_id);
6262 entries = NULL;
6263
6264 /*
6265 * The return value is returned to ustctl so that, in case of an error,
6266 * the application can be notified. It is important not to return a
6267 * negative error code here, or else the application will be closed.
6268 */
6269 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
6270 if (ret < 0) {
6271 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6272 ERR("UST app reply enum failed with ret %d", ret);
6273 } else {
6274 DBG3("UST app reply enum failed. Application died");
6275 }
6276 /*
6277 * No need to wipe the created enum since the application socket will
6278 * get closed on error, cleaning up everything by itself.
6279 */
6280 goto error;
6281 }
6282
6283 DBG3("UST registry enum %s added successfully or already found", name);
6284
6285 error:
6286 pthread_mutex_unlock(&registry->lock);
6287 error_rcu_unlock:
6288 rcu_read_unlock();
6289 return ret;
6290 }
6291
6292 /*
6293 * Handle application notification through the given notify socket.
6294 *
6295 * Return 0 on success or else a negative value.
6296 */
6297 int ust_app_recv_notify(int sock)
6298 {
6299 int ret;
6300 enum ustctl_notify_cmd cmd;
6301
6302 DBG3("UST app receiving notify from sock %d", sock);
6303
6304 ret = ustctl_recv_notify(sock, &cmd);
6305 if (ret < 0) {
6306 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6307 ERR("UST app recv notify failed with ret %d", ret);
6308 } else {
6309 DBG3("UST app recv notify failed. Application died");
6310 }
6311 goto error;
6312 }
6313
6314 switch (cmd) {
6315 case USTCTL_NOTIFY_CMD_EVENT:
6316 {
6317 int sobjd, cobjd, loglevel_value;
6318 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
6319 size_t nr_fields;
6320 struct ustctl_field *fields;
6321
6322 DBG2("UST app ustctl register event received");
6323
6324 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
6325 &loglevel_value, &sig, &nr_fields, &fields,
6326 &model_emf_uri);
6327 if (ret < 0) {
6328 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6329 ERR("UST app recv event failed with ret %d", ret);
6330 } else {
6331 DBG3("UST app recv event failed. Application died");
6332 }
6333 goto error;
6334 }
6335
6336 /*
6337 * Add the event to the UST registry coming from the notify socket.
6338 * This call will free sig, fields and model_emf_uri if needed. This
6339 * code path loses ownership of these variables and transfers them
6340 * to the callee.
6341 */
6342 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
6343 fields, loglevel_value, model_emf_uri);
6344 if (ret < 0) {
6345 goto error;
6346 }
6347
6348 break;
6349 }
6350 case USTCTL_NOTIFY_CMD_CHANNEL:
6351 {
6352 int sobjd, cobjd;
6353 size_t nr_fields;
6354 struct ustctl_field *fields;
6355
6356 DBG2("UST app ustctl register channel received");
6357
6358 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
6359 &fields);
6360 if (ret < 0) {
6361 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6362 ERR("UST app recv channel failed with ret %d", ret);
6363 } else {
6364 DBG3("UST app recv channel failed. Application died");
6365 }
6366 goto error;
6367 }
6368
6369 /*
6370 * Ownership of the fields is transferred to this function call,
6371 * meaning they will be freed if needed. After this, it is invalid
6372 * to access or free the fields.
6373 */
6374 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
6375 fields);
6376 if (ret < 0) {
6377 goto error;
6378 }
6379
6380 break;
6381 }
6382 case USTCTL_NOTIFY_CMD_ENUM:
6383 {
6384 int sobjd;
6385 char name[LTTNG_UST_SYM_NAME_LEN];
6386 size_t nr_entries;
6387 struct ustctl_enum_entry *entries;
6388
6389 DBG2("UST app ustctl register enum received");
6390
6391 ret = ustctl_recv_register_enum(sock, &sobjd, name,
6392 &entries, &nr_entries);
6393 if (ret < 0) {
6394 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6395 ERR("UST app recv enum failed with ret %d", ret);
6396 } else {
6397 DBG3("UST app recv enum failed. Application died");
6398 }
6399 goto error;
6400 }
6401
6402 /* Callee assumes ownership of entries */
6403 ret = add_enum_ust_registry(sock, sobjd, name,
6404 entries, nr_entries);
6405 if (ret < 0) {
6406 goto error;
6407 }
6408
6409 break;
6410 }
6411 default:
6412 /* Should NEVER happen. */
6413 assert(0);
6414 }
6415
6416 error:
6417 return ret;
6418 }
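
/*
 * Example (illustrative sketch, hypothetical caller): the notify thread
 * would typically invoke ust_app_recv_notify() whenever a notify socket
 * becomes readable, e.g. from its poll loop:
 *
 *	if (revents & LPOLLIN) {
 *		if (ust_app_recv_notify(pollfd) < 0) {
 *			ust_app_notify_sock_unregister(pollfd);
 *		}
 *	}
 *
 * On error, unregistering the socket schedules its deferred close (see
 * ust_app_notify_sock_unregister() below).
 */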
6419
6420 /*
6421 * Once the notify socket hangs up, this is called. First, it tries to find the
6422 * corresponding application. On failure, the call_rcu to close the socket is
6423 * executed. If an application is found, it tries to delete it from the notify
6424 * socket hash table. Whatever the result, it proceeds to the call_rcu.
6425 *
6426 * Note that an object needs to be allocated here so on ENOMEM failure, the
6427 * call RCU is not done but the rest of the cleanup is.
6428 */
6429 void ust_app_notify_sock_unregister(int sock)
6430 {
6431 int err_enomem = 0;
6432 struct lttng_ht_iter iter;
6433 struct ust_app *app;
6434 struct ust_app_notify_sock_obj *obj;
6435
6436 assert(sock >= 0);
6437
6438 rcu_read_lock();
6439
6440 obj = zmalloc(sizeof(*obj));
6441 if (!obj) {
6442 /*
6443 * An ENOMEM is kind of uncool. If this strikes we continue the
6444 * procedure but the call_rcu will not be called. In this case, we
6445 * accept the fd leak rather than possibly creating an unsynchronized
6446 * state between threads.
6447 *
6448 * registered and stored independently from the ust app object. The
6449 * registered and stored independantely from the ust app object. The
6450 * tricky part is to synchronize the teardown of the application and
6451 * this notify object. Let's keep that in mind so we can avoid this
6452 * kind of shenanigans with ENOMEM in the teardown path.
6453 */
6454 err_enomem = 1;
6455 } else {
6456 obj->fd = sock;
6457 }
6458
6459 DBG("UST app notify socket unregister %d", sock);
6460
6461 /*
6462 * Lookup application by notify socket. If this fails, this means that the
6463 * hash table delete has already been done by the application
6464 * unregistration process so we can safely close the notify socket in a
6465 * call RCU.
6466 */
6467 app = find_app_by_notify_sock(sock);
6468 if (!app) {
6469 goto close_socket;
6470 }
6471
6472 iter.iter.node = &app->notify_sock_n.node;
6473
6474 /*
6475 * Whether we fail or succeed here, in both cases we have to close the
6476 * socket after a grace period through the call RCU below. If the
6477 * deletion is successful, the application is no longer visible to
6478 * other threads; if it fails, it means it was already deleted from
6479 * the hash table, so either way we just have to close the
6480 * socket.
6481 */
6482 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6483
6484 close_socket:
6485 rcu_read_unlock();
6486
6487 /*
6488 * Close the socket after a grace period so that it is not reused
6489 * before the application object is freed, which could create a race
6490 * between threads trying to add a unique entry to the global hash table.
6491 */
6492 if (!err_enomem) {
6493 call_rcu(&obj->head, close_notify_sock_rcu);
6494 }
6495 }
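
/*
 * Shape of the deferred close used above (illustrative; the real
 * close_notify_sock_rcu() callback may also release fd-limit
 * accounting):
 *
 *	static void close_notify_sock_rcu(struct rcu_head *head)
 *	{
 *		struct ust_app_notify_sock_obj *obj = caa_container_of(
 *				head, struct ust_app_notify_sock_obj, head);
 *
 *		close(obj->fd);
 *		free(obj);
 *	}
 *
 * Deferring the close() past a grace period guarantees the fd number is
 * not reused while readers may still find the application in the notify
 * socket hash table.
 */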
6496
6497 /*
6498 * Destroy a ust app data structure and free its memory.
6499 */
6500 void ust_app_destroy(struct ust_app *app)
6501 {
6502 if (!app) {
6503 return;
6504 }
6505
6506 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6507 }
6508
6509 /*
6510 * Take a snapshot for a given UST session. The snapshot is sent to the given
6511 * output.
6512 *
6513 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6514 */
6515 enum lttng_error_code ust_app_snapshot_record(
6516 const struct ltt_ust_session *usess,
6517 const struct consumer_output *output, int wait,
6518 uint64_t nb_packets_per_stream)
6519 {
6520 int ret = 0;
6521 enum lttng_error_code status = LTTNG_OK;
6522 struct lttng_ht_iter iter;
6523 struct ust_app *app;
6524 char *trace_path = NULL;
6525
6526 assert(usess);
6527 assert(output);
6528
6529 rcu_read_lock();
6530
6531 switch (usess->buffer_type) {
6532 case LTTNG_BUFFER_PER_UID:
6533 {
6534 struct buffer_reg_uid *reg;
6535
6536 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6537 struct buffer_reg_channel *reg_chan;
6538 struct consumer_socket *socket;
6539 char pathname[PATH_MAX];
6540 size_t consumer_path_offset = 0;
6541
6542 if (!reg->registry->reg.ust->metadata_key) {
6543 /* Skip since no metadata is present */
6544 continue;
6545 }
6546
6547 /* Get consumer socket to use to push the metadata. */
6548 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6549 usess->consumer);
6550 if (!socket) {
6551 status = LTTNG_ERR_INVALID;
6552 goto error;
6553 }
6554
6555 memset(pathname, 0, sizeof(pathname));
6556 ret = snprintf(pathname, sizeof(pathname),
6557 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
6558 reg->uid, reg->bits_per_long);
6559 if (ret < 0) {
6560 PERROR("snprintf snapshot path");
6561 status = LTTNG_ERR_INVALID;
6562 goto error;
6563 }
6564 /* Free path allocated on previous iteration. */
6565 free(trace_path);
6566 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6567 &consumer_path_offset);
6568 if (!trace_path) {
6569 status = LTTNG_ERR_INVALID;
6570 goto error;
6571 }
6572 /* Add the UST default trace dir to path. */
6573 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6574 reg_chan, node.node) {
6575 status = consumer_snapshot_channel(socket,
6576 reg_chan->consumer_key,
6577 output, 0, usess->uid,
6578 usess->gid, &trace_path[consumer_path_offset], wait,
6579 nb_packets_per_stream);
6580 if (status != LTTNG_OK) {
6581 goto error;
6582 }
6583 }
6584 status = consumer_snapshot_channel(socket,
6585 reg->registry->reg.ust->metadata_key, output, 1,
6586 usess->uid, usess->gid, &trace_path[consumer_path_offset],
6587 wait, 0);
6588 if (status != LTTNG_OK) {
6589 goto error;
6590 }
6591 }
6592 break;
6593 }
6594 case LTTNG_BUFFER_PER_PID:
6595 {
6596 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6597 struct consumer_socket *socket;
6598 struct lttng_ht_iter chan_iter;
6599 struct ust_app_channel *ua_chan;
6600 struct ust_app_session *ua_sess;
6601 struct ust_registry_session *registry;
6602 char pathname[PATH_MAX];
6603 size_t consumer_path_offset = 0;
6604
6605 ua_sess = lookup_session_by_app(usess, app);
6606 if (!ua_sess) {
6607 /* Session not associated with this app. */
6608 continue;
6609 }
6610
6611 /* Get the right consumer socket for the application. */
6612 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6613 output);
6614 if (!socket) {
6615 status = LTTNG_ERR_INVALID;
6616 goto error;
6617 }
6618
6619 /* Add the UST default trace dir to path. */
6620 memset(pathname, 0, sizeof(pathname));
6621 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
6622 ua_sess->path);
6623 if (ret < 0) {
6624 status = LTTNG_ERR_INVALID;
6625 PERROR("snprintf snapshot path");
6626 goto error;
6627 }
6628 /* Free path allocated on previous iteration. */
6629 free(trace_path);
6630 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6631 &consumer_path_offset);
6632 if (!trace_path) {
6633 status = LTTNG_ERR_INVALID;
6634 goto error;
6635 }
6636 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6637 ua_chan, node.node) {
6638 status = consumer_snapshot_channel(socket,
6639 ua_chan->key, output, 0,
6640 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6641 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6642 &trace_path[consumer_path_offset], wait,
6643 nb_packets_per_stream);
6644 switch (status) {
6645 case LTTNG_OK:
6646 break;
6647 case LTTNG_ERR_CHAN_NOT_FOUND:
6648 continue;
6649 default:
6650 goto error;
6651 }
6652 }
6653
6654 registry = get_session_registry(ua_sess);
6655 if (!registry) {
6656 DBG("Application session is being torn down. Skip application.");
6657 continue;
6658 }
6659 status = consumer_snapshot_channel(socket,
6660 registry->metadata_key, output, 1,
6661 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6662 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6663 &trace_path[consumer_path_offset], wait, 0);
6664 switch (status) {
6665 case LTTNG_OK:
6666 break;
6667 case LTTNG_ERR_CHAN_NOT_FOUND:
6668 continue;
6669 default:
6670 goto error;
6671 }
6672 }
6673 break;
6674 }
6675 default:
6676 assert(0);
6677 break;
6678 }
6679
6680 error:
6681 free(trace_path);
6682 rcu_read_unlock();
6683 return status;
6684 }
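
/*
 * Illustrative example of the snapshot paths built above (assumed
 * values): with per-UID buffers, uid 1000 and 64-bit applications,
 * `pathname` expands to something like:
 *
 *	ust/uid/1000/64-bit
 *
 * whereas per-PID buffers use the per-application session path, e.g.:
 *
 *	ust/my_app-1234-20210324-104530
 *
 * setup_channel_trace_path() then prepends the consumer output
 * directory; `consumer_path_offset` marks where the relative channel
 * path begins within the resulting string.
 */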
6685
6686 /*
6687 * Return the size taken by one more packet per stream.
6688 */
6689 uint64_t ust_app_get_size_one_more_packet_per_stream(
6690 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
6691 {
6692 uint64_t tot_size = 0;
6693 struct ust_app *app;
6694 struct lttng_ht_iter iter;
6695
6696 assert(usess);
6697
6698 switch (usess->buffer_type) {
6699 case LTTNG_BUFFER_PER_UID:
6700 {
6701 struct buffer_reg_uid *reg;
6702
6703 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6704 struct buffer_reg_channel *reg_chan;
6705
6706 rcu_read_lock();
6707 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6708 reg_chan, node.node) {
6709 if (cur_nr_packets >= reg_chan->num_subbuf) {
6710 /*
6711 * Don't take the channel into account if we
6712 * already grabbed all its packets.
6713 */
6714 continue;
6715 }
6716 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
6717 }
6718 rcu_read_unlock();
6719 }
6720 break;
6721 }
6722 case LTTNG_BUFFER_PER_PID:
6723 {
6724 rcu_read_lock();
6725 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6726 struct ust_app_channel *ua_chan;
6727 struct ust_app_session *ua_sess;
6728 struct lttng_ht_iter chan_iter;
6729
6730 ua_sess = lookup_session_by_app(usess, app);
6731 if (!ua_sess) {
6732 /* Session not associated with this app. */
6733 continue;
6734 }
6735
6736 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6737 ua_chan, node.node) {
6738 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6739 /*
6740 * Don't take the channel into account if we
6741 * already grabbed all its packets.
6742 */
6743 continue;
6744 }
6745 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
6746 }
6747 }
6748 rcu_read_unlock();
6749 break;
6750 }
6751 default:
6752 assert(0);
6753 break;
6754 }
6755
6756 return tot_size;
6757 }
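
/*
 * Worked example for the computation above (assumed values): a channel
 * with num_subbuf = 4, subbuf_size = 262144 (256 kiB) and 8 streams
 * contributes 262144 * 8 = 2 MiB per additional packet as long as
 * cur_nr_packets < 4. Once cur_nr_packets reaches 4, every packet of
 * the channel is already accounted for and it no longer contributes.
 */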
6758
6759 int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6760 struct cds_list_head *buffer_reg_uid_list,
6761 struct consumer_output *consumer, uint64_t uchan_id,
6762 int overwrite, uint64_t *discarded, uint64_t *lost)
6763 {
6764 int ret;
6765 uint64_t consumer_chan_key;
6766
6767 *discarded = 0;
6768 *lost = 0;
6769
6770 ret = buffer_reg_uid_consumer_channel_key(
6771 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
6772 if (ret < 0) {
6773 /* Not found */
6774 ret = 0;
6775 goto end;
6776 }
6777
6778 if (overwrite) {
6779 ret = consumer_get_lost_packets(ust_session_id,
6780 consumer_chan_key, consumer, lost);
6781 } else {
6782 ret = consumer_get_discarded_events(ust_session_id,
6783 consumer_chan_key, consumer, discarded);
6784 }
6785
6786 end:
6787 return ret;
6788 }
6789
6790 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6791 struct ltt_ust_channel *uchan,
6792 struct consumer_output *consumer, int overwrite,
6793 uint64_t *discarded, uint64_t *lost)
6794 {
6795 int ret = 0;
6796 struct lttng_ht_iter iter;
6797 struct lttng_ht_node_str *ua_chan_node;
6798 struct ust_app *app;
6799 struct ust_app_session *ua_sess;
6800 struct ust_app_channel *ua_chan;
6801
6802 *discarded = 0;
6803 *lost = 0;
6804
6805 rcu_read_lock();
6806 /*
6807 * Iterate over all registered applications. Sum the counters of
6808 * every application containing the requested session and channel.
6809 */
6810 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6811 struct lttng_ht_iter uiter;
6812
6813 ua_sess = lookup_session_by_app(usess, app);
6814 if (ua_sess == NULL) {
6815 continue;
6816 }
6817
6818 /* Get channel */
6819 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
6820 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6821 /* If the session is found for the app, the channel must be there */
6822 assert(ua_chan_node);
6823
6824 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6825
6826 if (overwrite) {
6827 uint64_t _lost;
6828
6829 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
6830 consumer, &_lost);
6831 if (ret < 0) {
6832 break;
6833 }
6834 (*lost) += _lost;
6835 } else {
6836 uint64_t _discarded;
6837
6838 ret = consumer_get_discarded_events(usess->id,
6839 ua_chan->key, consumer, &_discarded);
6840 if (ret < 0) {
6841 break;
6842 }
6843 (*discarded) += _discarded;
6844 }
6845 }
6846
6847 rcu_read_unlock();
6848 return ret;
6849 }
6850
6851 static
6852 int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
6853 struct ust_app *app)
6854 {
6855 int ret = 0;
6856 struct ust_app_session *ua_sess;
6857
6858 DBG("Regenerating the metadata for ust app pid %d", app->pid);
6859
6860 rcu_read_lock();
6861
6862 ua_sess = lookup_session_by_app(usess, app);
6863 if (ua_sess == NULL) {
6864 /* The session is in teardown process. Ignore and continue. */
6865 goto end;
6866 }
6867
6868 pthread_mutex_lock(&ua_sess->lock);
6869
6870 if (ua_sess->deleted) {
6871 goto end_unlock;
6872 }
6873
6874 pthread_mutex_lock(&app->sock_lock);
6875 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
6876 pthread_mutex_unlock(&app->sock_lock);
6877
6878 end_unlock:
6879 pthread_mutex_unlock(&ua_sess->lock);
6880
6881 end:
6882 rcu_read_unlock();
6883 health_code_update();
6884 return ret;
6885 }
6886
6887 /*
6888 * Regenerate the statedump for each app in the session.
6889 */
6890 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6891 {
6892 int ret = 0;
6893 struct lttng_ht_iter iter;
6894 struct ust_app *app;
6895
6896 DBG("Regenerating the metadata for all UST apps");
6897
6898 rcu_read_lock();
6899
6900 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6901 if (!app->compatible) {
6902 continue;
6903 }
6904
6905 ret = ust_app_regenerate_statedump(usess, app);
6906 if (ret < 0) {
6907 /* Continue to the next app even on error */
6908 continue;
6909 }
6910 }
6911
6912 rcu_read_unlock();
6913
6914 return 0;
6915 }
6916
6917 /*
6918 * Rotate all the channels of a session.
6919 *
6920 * Return LTTNG_OK on success or else an LTTng error code.
6921 */
6922 enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
6923 {
6924 int ret;
6925 enum lttng_error_code cmd_ret = LTTNG_OK;
6926 struct lttng_ht_iter iter;
6927 struct ust_app *app;
6928 struct ltt_ust_session *usess = session->ust_session;
6929
6930 assert(usess);
6931
6932 rcu_read_lock();
6933
6934 switch (usess->buffer_type) {
6935 case LTTNG_BUFFER_PER_UID:
6936 {
6937 struct buffer_reg_uid *reg;
6938
6939 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6940 struct buffer_reg_channel *reg_chan;
6941 struct consumer_socket *socket;
6942
6943 if (!reg->registry->reg.ust->metadata_key) {
6944 /* Skip since no metadata is present */
6945 continue;
6946 }
6947
6948 /* Get consumer socket to use to push the metadata. */
6949 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6950 usess->consumer);
6951 if (!socket) {
6952 cmd_ret = LTTNG_ERR_INVALID;
6953 goto error;
6954 }
6955
6956 /* Rotate the data channels. */
6957 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6958 reg_chan, node.node) {
6959 ret = consumer_rotate_channel(socket,
6960 reg_chan->consumer_key,
6961 usess->uid, usess->gid,
6962 usess->consumer,
6963 /* is_metadata_channel */ false);
6964 if (ret < 0) {
6965 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
6966 goto error;
6967 }
6968 }
6969
6970 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
6971
6972 ret = consumer_rotate_channel(socket,
6973 reg->registry->reg.ust->metadata_key,
6974 usess->uid, usess->gid,
6975 usess->consumer,
6976 /* is_metadata_channel */ true);
6977 if (ret < 0) {
6978 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
6979 goto error;
6980 }
6981 }
6982 break;
6983 }
6984 case LTTNG_BUFFER_PER_PID:
6985 {
6986 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6987 struct consumer_socket *socket;
6988 struct lttng_ht_iter chan_iter;
6989 struct ust_app_channel *ua_chan;
6990 struct ust_app_session *ua_sess;
6991 struct ust_registry_session *registry;
6992
6993 ua_sess = lookup_session_by_app(usess, app);
6994 if (!ua_sess) {
6995 /* Session not associated with this app. */
6996 continue;
6997 }
6998
6999 /* Get the right consumer socket for the application. */
7000 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7001 usess->consumer);
7002 if (!socket) {
7003 cmd_ret = LTTNG_ERR_INVALID;
7004 goto error;
7005 }
7006
7007 registry = get_session_registry(ua_sess);
7008 if (!registry) {
7009 DBG("Application session is being torn down. Skip application.");
7010 continue;
7011 }
7012
7013 /* Rotate the data channels. */
7014 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7015 ua_chan, node.node) {
7016 ret = consumer_rotate_channel(socket,
7017 ua_chan->key,
7018 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7019 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7020 ua_sess->consumer,
7021 /* is_metadata_channel */ false);
7022 if (ret < 0) {
7023 /* Per-PID buffer and application going away. */
7024 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7025 continue;
7026 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7027 goto error;
7028 }
7029 }
7030
7031 /* Rotate the metadata channel. */
7032 (void) push_metadata(registry, usess->consumer);
7033 ret = consumer_rotate_channel(socket,
7034 registry->metadata_key,
7035 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7036 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7037 ua_sess->consumer,
7038 /* is_metadata_channel */ true);
7039 if (ret < 0) {
7040 /* Per-PID buffer and application going away. */
7041 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7042 continue;
7043 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7044 goto error;
7045 }
7046 }
7047 break;
7048 }
7049 default:
7050 assert(0);
7051 break;
7052 }
7053
7054 cmd_ret = LTTNG_OK;
7055
7056 error:
7057 rcu_read_unlock();
7058 return cmd_ret;
7059 }
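
/*
 * Illustrative recap of the per-registry rotation sequence performed
 * above for both buffer types:
 *
 *	cds_lfht_for_each_entry(registry_channels, ...) {
 *		consumer_rotate_channel(socket, data_channel_key, ...);
 *	}
 *	push_metadata(registry, consumer);
 *	consumer_rotate_channel(socket, metadata_key, ...);
 *
 * Rotating the metadata channel before its data channels could produce
 * a trace chunk whose metadata does not describe all of the events
 * archived with it.
 */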
7060
7061 enum lttng_error_code ust_app_create_channel_subdirectories(
7062 const struct ltt_ust_session *usess)
7063 {
7064 enum lttng_error_code ret = LTTNG_OK;
7065 struct lttng_ht_iter iter;
7066 enum lttng_trace_chunk_status chunk_status;
7067 char *pathname_index;
7068 int fmt_ret;
7069
7070 assert(usess->current_trace_chunk);
7071 rcu_read_lock();
7072
7073 switch (usess->buffer_type) {
7074 case LTTNG_BUFFER_PER_UID:
7075 {
7076 struct buffer_reg_uid *reg;
7077
7078 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7079 fmt_ret = asprintf(&pathname_index,
7080 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
7081 reg->uid, reg->bits_per_long);
7082 if (fmt_ret < 0) {
7083 ERR("Failed to format channel index directory");
7084 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7085 goto error;
7086 }
7087
7088 /*
7089 * Create the index subdirectory which will take care
7090 * of implicitly creating the channel's path.
7091 */
7092 chunk_status = lttng_trace_chunk_create_subdirectory(
7093 usess->current_trace_chunk,
7094 pathname_index);
7095 free(pathname_index);
7096 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7097 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7098 goto error;
7099 }
7100 }
7101 break;
7102 }
7103 case LTTNG_BUFFER_PER_PID:
7104 {
7105 struct ust_app *app;
7106
7107 /*
7108 * Create the top-level ust/ directory in case no apps are running.
7109 */
7110 chunk_status = lttng_trace_chunk_create_subdirectory(
7111 usess->current_trace_chunk,
7112 DEFAULT_UST_TRACE_DIR);
7113 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7114 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7115 goto error;
7116 }
7117
7118 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
7119 pid_n.node) {
7120 struct ust_app_session *ua_sess;
7121 struct ust_registry_session *registry;
7122
7123 ua_sess = lookup_session_by_app(usess, app);
7124 if (!ua_sess) {
7125 /* Session not associated with this app. */
7126 continue;
7127 }
7128
7129 registry = get_session_registry(ua_sess);
7130 if (!registry) {
7131 DBG("Application session is being torn down. Skip application.");
7132 continue;
7133 }
7134
7135 fmt_ret = asprintf(&pathname_index,
7136 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
7137 ua_sess->path);
7138 if (fmt_ret < 0) {
7139 ERR("Failed to format channel index directory");
7140 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7141 goto error;
7142 }
7143 /*
7144 * Create the index subdirectory which will take care
7145 * of implicitly creating the channel's path.
7146 */
7147 chunk_status = lttng_trace_chunk_create_subdirectory(
7148 usess->current_trace_chunk,
7149 pathname_index);
7150 free(pathname_index);
7151 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7152 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7153 goto error;
7154 }
7155 }
7156 break;
7157 }
7158 default:
7159 abort();
7160 }
7161
7162 ret = LTTNG_OK;
7163 error:
7164 rcu_read_unlock();
7165 return ret;
7166 }
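
/*
 * Example of the index subdirectories created above (assumed values):
 *
 *	per-UID: ust/uid/1000/64-bit/index
 *	per-PID: ust/my_app-1234-20210324-104530/index
 *
 * Creating the deepest "index" directory implicitly creates the whole
 * channel path within the current trace chunk, so the parent
 * directories need no separate creation step.
 */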
7167
7168 /*
7169 * Clear all the channels of a session.
7170 *
7171 * Return LTTNG_OK on success or else an LTTng error code.
7172 */
7173 enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7174 {
7175 int ret;
7176 enum lttng_error_code cmd_ret = LTTNG_OK;
7177 struct lttng_ht_iter iter;
7178 struct ust_app *app;
7179 struct ltt_ust_session *usess = session->ust_session;
7180
7181 assert(usess);
7182
7183 rcu_read_lock();
7184
7185 if (usess->active) {
7186 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7187 cmd_ret = LTTNG_ERR_FATAL;
7188 goto end;
7189 }
7190
7191 switch (usess->buffer_type) {
7192 case LTTNG_BUFFER_PER_UID:
7193 {
7194 struct buffer_reg_uid *reg;
7195
7196 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7197 struct buffer_reg_channel *reg_chan;
7198 struct consumer_socket *socket;
7199
7200 /* Get consumer socket to use to push the metadata. */
7201 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7202 usess->consumer);
7203 if (!socket) {
7204 cmd_ret = LTTNG_ERR_INVALID;
7205 goto error_socket;
7206 }
7207
7208 /* Clear the data channels. */
7209 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7210 reg_chan, node.node) {
7211 ret = consumer_clear_channel(socket,
7212 reg_chan->consumer_key);
7213 if (ret < 0) {
7214 goto error;
7215 }
7216 }
7217
7218 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7219
7220 /*
7221 * Clear the metadata channel.
7222 * The metadata channel is not cleared per se, but we still need to
7223 * perform a rotation operation on it behind the scenes.
7224 */
7225 ret = consumer_clear_channel(socket,
7226 reg->registry->reg.ust->metadata_key);
7227 if (ret < 0) {
7228 goto error;
7229 }
7230 }
7231 break;
7232 }
7233 case LTTNG_BUFFER_PER_PID:
7234 {
7235 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7236 struct consumer_socket *socket;
7237 struct lttng_ht_iter chan_iter;
7238 struct ust_app_channel *ua_chan;
7239 struct ust_app_session *ua_sess;
7240 struct ust_registry_session *registry;
7241
7242 ua_sess = lookup_session_by_app(usess, app);
7243 if (!ua_sess) {
7244 /* Session not associated with this app. */
7245 continue;
7246 }
7247
7248 /* Get the right consumer socket for the application. */
7249 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7250 usess->consumer);
7251 if (!socket) {
7252 cmd_ret = LTTNG_ERR_INVALID;
7253 goto error_socket;
7254 }
7255
7256 registry = get_session_registry(ua_sess);
7257 if (!registry) {
7258 DBG("Application session is being torn down. Skip application.");
7259 continue;
7260 }
7261
7262 /* Clear the data channels. */
7263 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7264 ua_chan, node.node) {
7265 ret = consumer_clear_channel(socket, ua_chan->key);
7266 if (ret < 0) {
7267 /* Per-PID buffer and application going away. */
7268 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7269 continue;
7270 }
7271 goto error;
7272 }
7273 }
7274
7275 (void) push_metadata(registry, usess->consumer);
7276
7277 /*
7278 * Clear the metadata channel.
7279 * The metadata channel is not cleared per se, but we still need to
7280 * perform a rotation operation on it behind the scenes.
7281 */
7282 ret = consumer_clear_channel(socket, registry->metadata_key);
7283 if (ret < 0) {
7284 /* Per-PID buffer and application going away. */
7285 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7286 continue;
7287 }
7288 goto error;
7289 }
7290 }
7291 break;
7292 }
7293 default:
7294 assert(0);
7295 break;
7296 }
7297
7298 cmd_ret = LTTNG_OK;
7299 goto end;
7300
7301 error:
7302 switch (-ret) {
7303 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7304 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7305 break;
7306 default:
7307 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7308 }
7309
7310 error_socket:
7311 end:
7312 rcu_read_unlock();
7313 return cmd_ret;
7314 }
7315
7316 /*
7317 * This function skips the metadata channel as the begin/end timestamps of a
7318 * metadata packet are useless.
7319 *
7320 * Moreover, opening a packet after a "clear" will cause problems for live
7321 * sessions as it will introduce padding that was not part of the first trace
7322 * chunk. The relay daemon expects the content of the metadata stream of
7323 * successive metadata trace chunks to be strict supersets of one another.
7324 *
7325 * For example, flushing a packet at the beginning of the metadata stream of
7326 * a trace chunk resulting from a "clear" session command will cause the
7327 * size of the metadata stream of the new trace chunk to not match the size of
7328 * the metadata stream of the original chunk. This will confuse the relay
7329 * daemon as the same "offset" in a metadata stream will no longer point
7330 * to the same content.
7331 */
7332 enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7333 {
7334 enum lttng_error_code ret = LTTNG_OK;
7335 struct lttng_ht_iter iter;
7336 struct ltt_ust_session *usess = session->ust_session;
7337
7338 assert(usess);
7339
7340 rcu_read_lock();
7341
7342 switch (usess->buffer_type) {
7343 case LTTNG_BUFFER_PER_UID:
7344 {
7345 struct buffer_reg_uid *reg;
7346
7347 cds_list_for_each_entry (
7348 reg, &usess->buffer_reg_uid_list, lnode) {
7349 struct buffer_reg_channel *reg_chan;
7350 struct consumer_socket *socket;
7351
7352 socket = consumer_find_socket_by_bitness(
7353 reg->bits_per_long, usess->consumer);
7354 if (!socket) {
7355 ret = LTTNG_ERR_FATAL;
7356 goto error;
7357 }
7358
7359 cds_lfht_for_each_entry(reg->registry->channels->ht,
7360 &iter.iter, reg_chan, node.node) {
7361 const int open_ret =
7362 consumer_open_channel_packets(
7363 socket,
7364 reg_chan->consumer_key);
7365
7366 if (open_ret < 0) {
7367 ret = LTTNG_ERR_UNK;
7368 goto error;
7369 }
7370 }
7371 }
7372 break;
7373 }
7374 case LTTNG_BUFFER_PER_PID:
7375 {
7376 struct ust_app *app;
7377
7378 cds_lfht_for_each_entry (
7379 ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7380 struct consumer_socket *socket;
7381 struct lttng_ht_iter chan_iter;
7382 struct ust_app_channel *ua_chan;
7383 struct ust_app_session *ua_sess;
7384 struct ust_registry_session *registry;
7385
7386 ua_sess = lookup_session_by_app(usess, app);
7387 if (!ua_sess) {
7388 /* Session not associated with this app. */
7389 continue;
7390 }
7391
7392 /* Get the right consumer socket for the application. */
7393 socket = consumer_find_socket_by_bitness(
7394 app->bits_per_long, usess->consumer);
7395 if (!socket) {
7396 ret = LTTNG_ERR_FATAL;
7397 goto error;
7398 }
7399
7400 registry = get_session_registry(ua_sess);
7401 if (!registry) {
7402 DBG("Application session is being torn down. Skip application.");
7403 continue;
7404 }
7405
7406 cds_lfht_for_each_entry(ua_sess->channels->ht,
7407 &chan_iter.iter, ua_chan, node.node) {
7408 const int open_ret =
7409 consumer_open_channel_packets(
7410 socket,
7411 ua_chan->key);
7412
7413 if (open_ret < 0) {
7414 /*
7415 * Per-PID buffer and application going
7416 * away.
7417 */
7418 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7419 continue;
7420 }
7421
7422 ret = LTTNG_ERR_UNK;
7423 goto error;
7424 }
7425 }
7426 }
7427 break;
7428 }
7429 default:
7430 abort();
7431 break;
7432 }
7433
7434 error:
7435 rcu_read_unlock();
7436 return ret;
7437 }