clang-tidy: add Chrome-inspired checks
[lttng-tools.git] / src/bin/lttng-sessiond/ust-app.cpp
1/*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9#define _LGPL_SOURCE
10
11#include "buffer-registry.hpp"
12#include "condition-internal.hpp"
13#include "event-notifier-error-accounting.hpp"
14#include "event.hpp"
15#include "fd-limit.hpp"
16#include "field.hpp"
17#include "health-sessiond.hpp"
18#include "lttng-sessiond.hpp"
19#include "lttng-ust-ctl.hpp"
20#include "lttng-ust-error.hpp"
21#include "notification-thread-commands.hpp"
22#include "rotate.hpp"
23#include "session.hpp"
24#include "ust-app.hpp"
25#include "ust-consumer.hpp"
26#include "ust-field-convert.hpp"
27#include "utils.hpp"
28
29#include <common/bytecode/bytecode.hpp>
30#include <common/common.hpp>
31#include <common/compat/errno.hpp>
32#include <common/exception.hpp>
33#include <common/format.hpp>
34#include <common/hashtable/utils.hpp>
35#include <common/make-unique.hpp>
36#include <common/sessiond-comm/sessiond-comm.hpp>
37#include <common/urcu.hpp>
38
39#include <lttng/condition/condition.h>
40#include <lttng/condition/event-rule-matches-internal.hpp>
41#include <lttng/condition/event-rule-matches.h>
42#include <lttng/event-rule/event-rule-internal.hpp>
43#include <lttng/event-rule/event-rule.h>
44#include <lttng/event-rule/user-tracepoint.h>
45#include <lttng/trigger/trigger-internal.hpp>
46
47#include <errno.h>
48#include <fcntl.h>
49#include <inttypes.h>
50#include <pthread.h>
51#include <signal.h>
52#include <stdio.h>
53#include <stdlib.h>
54#include <string.h>
55#include <sys/mman.h>
56#include <sys/stat.h>
57#include <sys/types.h>
58#include <unistd.h>
59#include <urcu/compiler.h>
60#include <vector>
61
62namespace lsu = lttng::sessiond::ust;
63namespace lst = lttng::sessiond::trace;
64
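/*
 * Global hash tables of traceable applications, indexed by PID, by command
 * socket and by notify socket, respectively (see the lookup helpers below).
 */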
65struct lttng_ht *ust_app_ht;
66struct lttng_ht *ust_app_ht_by_sock;
67struct lttng_ht *ust_app_ht_by_notify_sock;
68
69static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
70
71/* Next available channel key. Access under next_channel_key_lock. */
72static uint64_t _next_channel_key;
73static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
74
75/* Next available session ID. Access under next_session_id_lock. */
76static uint64_t _next_session_id;
77static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
78
79namespace {
80
81/*
82 * Return the session registry according to the buffer type of the given
83 * session.
84 *
85 * A registry per UID object MUST exist before calling this function; NULL
86 * is returned if it is not found. RCU read side lock must be acquired.
87 */
88static lsu::registry_session *get_session_registry(const struct ust_app_session *ua_sess)
89{
90 lsu::registry_session *registry = nullptr;
91
92 LTTNG_ASSERT(ua_sess);
93
94 switch (ua_sess->buffer_type) {
95 case LTTNG_BUFFER_PER_PID:
96 {
97 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
98 if (!reg_pid) {
99 goto error;
100 }
101 registry = reg_pid->registry->reg.ust;
102 break;
103 }
104 case LTTNG_BUFFER_PER_UID:
105 {
106 struct buffer_reg_uid *reg_uid =
107 buffer_reg_uid_find(ua_sess->tracing_id,
108 ua_sess->bits_per_long,
109 lttng_credentials_get_uid(&ua_sess->real_credentials));
110 if (!reg_uid) {
111 goto error;
112 }
113 registry = reg_uid->registry->reg.ust;
114 break;
115 }
116 default:
117 abort();
118 };
119
120error:
121 return registry;
122}
123
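/*
 * Look up the session registry of the given session and return it with its
 * lock held. A null locked_ptr is returned when no registry is found. The
 * returned locked_ptr is expected to release the lock when it goes out of
 * scope.
 */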
124lsu::registry_session::locked_ptr get_locked_session_registry(const struct ust_app_session *ua_sess)
125{
126 auto session = get_session_registry(ua_sess);
127 if (session) {
128 pthread_mutex_lock(&session->_lock);
129 }
130
131 return lsu::registry_session::locked_ptr{ session };
132}
133} /* namespace */
134
135/*
136 * Return the incremented value of next_channel_key.
137 */
138static uint64_t get_next_channel_key()
139{
140 uint64_t ret;
141
142 pthread_mutex_lock(&next_channel_key_lock);
143 ret = ++_next_channel_key;
144 pthread_mutex_unlock(&next_channel_key_lock);
145 return ret;
146}
147
148/*
149 * Return the atomically incremented value of next_session_id.
150 */
151static uint64_t get_next_session_id()
152{
153 uint64_t ret;
154
155 pthread_mutex_lock(&next_session_id_lock);
156 ret = ++_next_session_id;
157 pthread_mutex_unlock(&next_session_id_lock);
158 return ret;
159}
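/*
 * Note: both counters are pre-incremented, so the first key/id handed out is
 * 1; a value of 0 can thus safely mean "not assigned" (as is assumed for the
 * metadata key, see ust_app_push_metadata()).
 */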
160
161static void copy_channel_attr_to_ustctl(struct lttng_ust_ctl_consumer_channel_attr *attr,
162 struct lttng_ust_abi_channel_attr *uattr)
163{
164 /* Copy channel attributes since the layout is different. */
165 attr->subbuf_size = uattr->subbuf_size;
166 attr->num_subbuf = uattr->num_subbuf;
167 attr->overwrite = uattr->overwrite;
168 attr->switch_timer_interval = uattr->switch_timer_interval;
169 attr->read_timer_interval = uattr->read_timer_interval;
170 attr->output = (lttng_ust_abi_output) uattr->output;
171 attr->blocking_timeout = uattr->u.s.blocking_timeout;
172}
173
174/*
175 * Match function for the hash table lookup.
176 *
177 * It matches an ust app event based on four attributes: the event name, the
178 * filter bytecode, the loglevel and the exclusions.
179 */
180static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
181{
182 struct ust_app_event *event;
183 const struct ust_app_ht_key *key;
184 int ev_loglevel_value;
185
186 LTTNG_ASSERT(node);
187 LTTNG_ASSERT(_key);
188
189 event = caa_container_of(node, struct ust_app_event, node.node);
190 key = (ust_app_ht_key *) _key;
191 ev_loglevel_value = event->attr.loglevel;
192
193 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
194
195 /* Event name */
196 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
197 goto no_match;
198 }
199
200 /* Event loglevel. */
201 if (ev_loglevel_value != key->loglevel_type) {
202 if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL &&
203 key->loglevel_type == 0 && ev_loglevel_value == -1) {
204 /*
205 * Match is accepted. This is because on event creation, the
206 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
207 * -1 are accepted for this loglevel type since 0 is the one set by
208 * the API when receiving an enable event.
209 */
210 } else {
211 goto no_match;
212 }
213 }
214
215 /* One of the filters is NULL, fail. */
216 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
217 goto no_match;
218 }
219
220 if (key->filter && event->filter) {
221 /* Both filters exist, check length followed by the bytecode. */
222 if (event->filter->len != key->filter->len ||
223 memcmp(event->filter->data, key->filter->data, event->filter->len) != 0) {
224 goto no_match;
225 }
226 }
227
228 /* One of the exclusions is NULL, fail. */
229 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
230 goto no_match;
231 }
232
233 if (key->exclusion && event->exclusion) {
234 /* Both exclusions exist, check count followed by the names. */
235 if (event->exclusion->count != key->exclusion->count ||
236 memcmp(event->exclusion->names,
237 key->exclusion->names,
238 event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
239 goto no_match;
240 }
241 }
242
243 /* Match. */
244 return 1;
245
246no_match:
247 return 0;
248}
249
250/*
251 * Unique add of an ust app event in the given ht. This uses the custom
252 * ht_match_ust_app_event match function and the event name as hash.
253 */
254static void add_unique_ust_app_event(struct ust_app_channel *ua_chan, struct ust_app_event *event)
255{
256 struct cds_lfht_node *node_ptr;
257 struct ust_app_ht_key key;
258 struct lttng_ht *ht;
259
260 LTTNG_ASSERT(ua_chan);
261 LTTNG_ASSERT(ua_chan->events);
262 LTTNG_ASSERT(event);
263
264 ht = ua_chan->events;
265 key.name = event->attr.name;
266 key.filter = event->filter;
267 key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel;
268 key.exclusion = event->exclusion;
269
270 node_ptr = cds_lfht_add_unique(ht->ht,
271 ht->hash_fct(event->node.key, lttng_ht_seed),
272 ht_match_ust_app_event,
273 &key,
274 &event->node.node);
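/*
 * cds_lfht_add_unique() returns the existing node when a duplicate is found;
 * asserting that the returned pointer is our own node therefore enforces that
 * callers never add the same event twice.
 */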
275 LTTNG_ASSERT(node_ptr == &event->node.node);
276}
277
278/*
279 * Close the notify socket from the given RCU head object. This MUST be called
280 * through a call_rcu().
281 */
282static void close_notify_sock_rcu(struct rcu_head *head)
283{
284 int ret;
285 struct ust_app_notify_sock_obj *obj =
286 lttng::utils::container_of(head, &ust_app_notify_sock_obj::head);
287
288 /* Must have a valid fd here. */
289 LTTNG_ASSERT(obj->fd >= 0);
290
291 ret = close(obj->fd);
292 if (ret) {
293 ERR("close notify sock %d RCU", obj->fd);
294 }
295 lttng_fd_put(LTTNG_FD_APPS, 1);
296
297 free(obj);
298}
299
300/*
301 * Delete ust context safely. RCU read lock must be held before calling
302 * this function.
303 */
304static void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx, struct ust_app *app)
305{
306 int ret;
307
308 LTTNG_ASSERT(ua_ctx);
309 ASSERT_RCU_READ_LOCKED();
310
311 if (ua_ctx->obj) {
312 pthread_mutex_lock(&app->sock_lock);
313 ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
314 pthread_mutex_unlock(&app->sock_lock);
315 if (ret < 0) {
316 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
317 DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
318 app->pid,
319 app->sock);
320 } else if (ret == -EAGAIN) {
321 WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
322 app->pid,
323 app->sock);
324 } else {
325 ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
326 ua_ctx->obj->handle,
327 ret,
328 app->pid,
329 app->sock);
330 }
331 }
332 free(ua_ctx->obj);
333 }
334 free(ua_ctx);
335}
336
337/*
338 * Delete ust app event safely. RCU read lock must be held before calling
339 * this function.
340 */
341static void delete_ust_app_event(int sock, struct ust_app_event *ua_event, struct ust_app *app)
342{
343 int ret;
344
345 LTTNG_ASSERT(ua_event);
346 ASSERT_RCU_READ_LOCKED();
347
348 free(ua_event->filter);
349 if (ua_event->exclusion != nullptr)
350 free(ua_event->exclusion);
351 if (ua_event->obj != nullptr) {
352 pthread_mutex_lock(&app->sock_lock);
353 ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
354 pthread_mutex_unlock(&app->sock_lock);
355 if (ret < 0) {
356 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
357 DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
358 app->pid,
359 app->sock);
360 } else if (ret == -EAGAIN) {
361 WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
362 app->pid,
363 app->sock);
364 } else {
365 ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
366 ret,
367 app->pid,
368 app->sock);
369 }
370 }
371 free(ua_event->obj);
372 }
373 free(ua_event);
374}
375
376/*
377 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
378 * through a call_rcu().
379 */
380static void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
381{
382 struct ust_app_event_notifier_rule *obj =
383 lttng::utils::container_of(head, &ust_app_event_notifier_rule::rcu_head);
384
385 free(obj);
386}
387
388/*
389 * Delete ust app event notifier rule safely.
390 */
391static void delete_ust_app_event_notifier_rule(
392 int sock, struct ust_app_event_notifier_rule *ua_event_notifier_rule, struct ust_app *app)
393{
394 int ret;
395
396 LTTNG_ASSERT(ua_event_notifier_rule);
397
398 if (ua_event_notifier_rule->exclusion != nullptr) {
399 free(ua_event_notifier_rule->exclusion);
400 }
401
402 if (ua_event_notifier_rule->obj != nullptr) {
403 pthread_mutex_lock(&app->sock_lock);
404 ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
405 pthread_mutex_unlock(&app->sock_lock);
406 if (ret < 0) {
407 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
408 DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
409 app->pid,
410 app->sock);
411 } else if (ret == -EAGAIN) {
412 WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
413 app->pid,
414 app->sock);
415 } else {
416 ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
417 ret,
418 app->pid,
419 app->sock);
420 }
421 }
422
423 free(ua_event_notifier_rule->obj);
424 }
425
426 lttng_trigger_put(ua_event_notifier_rule->trigger);
427 call_rcu(&ua_event_notifier_rule->rcu_head, free_ust_app_event_notifier_rule_rcu);
428}
429
430/*
431 * Release ust data object of the given stream.
432 *
433 * Return 0 on success or else a negative value.
434 */
435static int release_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app)
436{
437 int ret = 0;
438
439 LTTNG_ASSERT(stream);
440
441 if (stream->obj) {
442 pthread_mutex_lock(&app->sock_lock);
443 ret = lttng_ust_ctl_release_object(sock, stream->obj);
444 pthread_mutex_unlock(&app->sock_lock);
445 if (ret < 0) {
446 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
447 DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
448 app->pid,
449 app->sock);
450 } else if (ret == -EAGAIN) {
451 WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
452 app->pid,
453 app->sock);
454 } else {
455 ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
456 ret,
457 app->pid,
458 app->sock);
459 }
460 }
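/*
 * A stream accounts for two file descriptors on the application side
 * (presumably the shared memory and wakeup fds), hence the put of 2.
 */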
461 lttng_fd_put(LTTNG_FD_APPS, 2);
462 free(stream->obj);
463 }
464
465 return ret;
466}
467
468/*
469 * Delete ust app stream safely. RCU read lock must be held before calling
470 * this function.
471 */
472static void delete_ust_app_stream(int sock, struct ust_app_stream *stream, struct ust_app *app)
473{
474 LTTNG_ASSERT(stream);
475 ASSERT_RCU_READ_LOCKED();
476
477 (void) release_ust_app_stream(sock, stream, app);
478 free(stream);
479}
480
481static void delete_ust_app_channel_rcu(struct rcu_head *head)
482{
483 struct ust_app_channel *ua_chan =
484 lttng::utils::container_of(head, &ust_app_channel::rcu_head);
485
486 lttng_ht_destroy(ua_chan->ctx);
487 lttng_ht_destroy(ua_chan->events);
488 free(ua_chan);
489}
490
491/*
492 * Extract the lost packet or discarded events counter when the channel is
493 * being deleted and store the value in the parent channel so we can
494 * access it from lttng list and at stop/destroy.
495 *
496 * The session list lock must be held by the caller.
497 */
498static void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
499{
500 uint64_t discarded = 0, lost = 0;
501 struct ltt_session *session;
502 struct ltt_ust_channel *uchan;
503
504 if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
505 return;
506 }
507
508 rcu_read_lock();
509 session = session_find_by_id(ua_chan->session->tracing_id);
510 if (!session || !session->ust_session) {
511 /*
512 * Not finding the session is not an error because there are
513 * multiple ways the channels can be torn down.
514 *
515 * 1) The session daemon can initiate the destruction of the
516 * ust app session after receiving a destroy command or
517 * during its shutdown/teardown.
518 * 2) The application, since we are in per-pid tracing, is
519 * unregistering and tearing down its ust app session.
520 *
521 * Both paths are protected by the session list lock which
522 * ensures that the accounting of lost packets and discarded
523 * events is done exactly once. The session is then unpublished
524 * from the session list, resulting in this condition.
525 */
526 goto end;
527 }
528
529 if (ua_chan->attr.overwrite) {
530 consumer_get_lost_packets(ua_chan->session->tracing_id,
531 ua_chan->key,
532 session->ust_session->consumer,
533 &lost);
534 } else {
535 consumer_get_discarded_events(ua_chan->session->tracing_id,
536 ua_chan->key,
537 session->ust_session->consumer,
538 &discarded);
539 }
540 uchan = trace_ust_find_channel_by_name(session->ust_session->domain_global.channels,
541 ua_chan->name);
542 if (!uchan) {
543 ERR("Missing UST channel to store discarded counters");
544 goto end;
545 }
546
547 uchan->per_pid_closed_app_discarded += discarded;
548 uchan->per_pid_closed_app_lost += lost;
549
550end:
551 rcu_read_unlock();
552 if (session) {
553 session_put(session);
554 }
555}
556
557/*
558 * Delete ust app channel safely. RCU read lock must be held before calling
559 * this function.
560 *
561 * The session list lock must be held by the caller.
562 */
563static void delete_ust_app_channel(int sock,
564 struct ust_app_channel *ua_chan,
565 struct ust_app *app,
566 const lsu::registry_session::locked_ptr& locked_registry)
567{
568 int ret;
569 struct lttng_ht_iter iter;
570 struct ust_app_event *ua_event;
571 struct ust_app_ctx *ua_ctx;
572 struct ust_app_stream *stream, *stmp;
573
574 LTTNG_ASSERT(ua_chan);
575 ASSERT_RCU_READ_LOCKED();
576
577 DBG3("UST app deleting channel %s", ua_chan->name);
578
579 /* Wipe stream */
580 cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
581 cds_list_del(&stream->list);
582 delete_ust_app_stream(sock, stream, app);
583 }
584
585 /* Wipe context */
586 cds_lfht_for_each_entry (ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
587 cds_list_del(&ua_ctx->list);
588 ret = lttng_ht_del(ua_chan->ctx, &iter);
589 LTTNG_ASSERT(!ret);
590 delete_ust_app_ctx(sock, ua_ctx, app);
591 }
592
593 /* Wipe events */
594 cds_lfht_for_each_entry (ua_chan->events->ht, &iter.iter, ua_event, node.node) {
595 ret = lttng_ht_del(ua_chan->events, &iter);
596 LTTNG_ASSERT(!ret);
597 delete_ust_app_event(sock, ua_event, app);
598 }
599
600 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
601 /* Wipe and free registry from session registry. */
602 if (locked_registry) {
603 try {
604 locked_registry->remove_channel(ua_chan->key, sock >= 0);
605 } catch (const std::exception& ex) {
606 DBG("Could not find channel for removal: %s", ex.what());
607 }
608 }
609
610 /*
611 * A negative socket can be used by the caller when
612 * cleaning-up a ua_chan in an error path. Skip the
613 * accounting in this case.
614 */
615 if (sock >= 0) {
616 save_per_pid_lost_discarded_counters(ua_chan);
617 }
618 }
619
620 if (ua_chan->obj != nullptr) {
621 /* Remove channel from application UST object descriptor. */
622 iter.iter.node = &ua_chan->ust_objd_node.node;
623 ret = lttng_ht_del(app->ust_objd, &iter);
624 LTTNG_ASSERT(!ret);
625 pthread_mutex_lock(&app->sock_lock);
626 ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
627 pthread_mutex_unlock(&app->sock_lock);
628 if (ret < 0) {
629 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
630 DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
631 ua_chan->name,
632 app->pid,
633 app->sock);
634 } else if (ret == -EAGAIN) {
635 WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
636 ua_chan->name,
637 app->pid,
638 app->sock);
639 } else {
640 ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
641 ua_chan->name,
642 ret,
643 app->pid,
644 app->sock);
645 }
646 }
647 lttng_fd_put(LTTNG_FD_APPS, 1);
648 free(ua_chan->obj);
649 }
650 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
651}
652
653int ust_app_register_done(struct ust_app *app)
654{
655 int ret;
656
657 pthread_mutex_lock(&app->sock_lock);
658 ret = lttng_ust_ctl_register_done(app->sock);
659 pthread_mutex_unlock(&app->sock_lock);
660 return ret;
661}
662
663int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
664{
665 int ret, sock;
666
667 if (app) {
668 pthread_mutex_lock(&app->sock_lock);
669 sock = app->sock;
670 } else {
671 sock = -1;
672 }
673 ret = lttng_ust_ctl_release_object(sock, data);
674 if (app) {
675 pthread_mutex_unlock(&app->sock_lock);
676 }
677 return ret;
678}
679
680/*
681 * Push metadata to consumer socket.
682 *
683 * RCU read-side lock must be held to guarantee existence of socket.
684 * Must be called with the ust app session lock held.
685 * Must be called with the registry lock held.
686 *
687 * On success, return the length of the metadata pushed or else a negative value.
688 * A -EPIPE return value means we could not send the metadata,
689 * but it can be caused by recoverable errors (e.g. the application has
690 * terminated concurrently).
691 */
692ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
693 struct consumer_socket *socket,
694 int send_zero_data)
695{
696 int ret;
697 char *metadata_str = nullptr;
698 size_t len, offset, new_metadata_len_sent;
699 ssize_t ret_val;
700 uint64_t metadata_key, metadata_version;
701
702 LTTNG_ASSERT(locked_registry);
703 LTTNG_ASSERT(socket);
704 ASSERT_RCU_READ_LOCKED();
705
706 metadata_key = locked_registry->_metadata_key;
707
708 /*
709 * This means that no metadata was assigned to the session, which can
710 * happen if no start has been done previously.
711 */
712 if (!metadata_key) {
713 return 0;
714 }
715
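/*
 * Snapshot the metadata length and sent offset while the registry lock is
 * held; the lock is released around the actual push to the consumer (see the
 * push_data label below).
 */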
716 offset = locked_registry->_metadata_len_sent;
717 len = locked_registry->_metadata_len - locked_registry->_metadata_len_sent;
718 new_metadata_len_sent = locked_registry->_metadata_len;
719 metadata_version = locked_registry->_metadata_version;
720 if (len == 0) {
721 DBG3("No metadata to push for metadata key %" PRIu64,
722 locked_registry->_metadata_key);
723 ret_val = len;
724 if (send_zero_data) {
725 DBG("No metadata to push");
726 goto push_data;
727 }
728 goto end;
729 }
730
731 /* Allocate only what we have to send. */
732 metadata_str = calloc<char>(len);
733 if (!metadata_str) {
734 PERROR("zmalloc ust app metadata string");
735 ret_val = -ENOMEM;
736 goto error;
737 }
738 /* Copy what we haven't sent out. */
739 memcpy(metadata_str, locked_registry->_metadata + offset, len);
740
741push_data:
742 pthread_mutex_unlock(&locked_registry->_lock);
743 /*
744 * We need to unlock the registry while we push metadata to
745 * break a circular dependency between the consumerd metadata
746 * lock and the sessiond registry lock. Indeed, pushing metadata
747 * to the consumerd awaits that it gets pushed all the way to
748 * relayd, but doing so requires grabbing the metadata lock. If
749 * a concurrent metadata request is being performed by
750 * consumerd, this can try to grab the registry lock on the
751 * sessiond while holding the metadata lock on the consumer
752 * daemon. Those push and pull schemes are performed on two
753 * different bidirectional communication sockets.
754 */
755 ret = consumer_push_metadata(
756 socket, metadata_key, metadata_str, len, offset, metadata_version);
757 pthread_mutex_lock(&locked_registry->_lock);
758 if (ret < 0) {
759 /*
760 * There is an acceptable race here between the registry
761 * metadata key assignment and the creation on the
762 * consumer. The session daemon can concurrently push
763 * metadata for this registry while being created on the
764 * consumer since the metadata key of the registry is
765 * assigned *before* it is setup to avoid the consumer
766 * to ask for metadata that could possibly be not found
767 * in the session daemon.
768 *
769 * The metadata will get pushed either by the session
770 * being stopped or the consumer requesting metadata if
771 * that race is triggered.
772 */
773 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
774 ret = 0;
775 } else {
776 ERR("Error pushing metadata to consumer");
777 }
778 ret_val = ret;
779 goto error_push;
780 } else {
781 /*
782 * Metadata may have been concurrently pushed, since
783 * we're not holding the registry lock while pushing to
784 * consumer. This is handled by the fact that we send
785 * the metadata content, size, and the offset at which
786 * that metadata belongs. This may arrive out of order
787 * on the consumer side, and the consumer is able to
788 * deal with overlapping fragments. The consumer
789 * supports overlapping fragments, which must be
790 * contiguous starting from offset 0. We keep the
791 * largest metadata_len_sent value of the concurrent
792 * send.
793 */
794 locked_registry->_metadata_len_sent =
795 std::max(locked_registry->_metadata_len_sent, new_metadata_len_sent);
796 }
797 free(metadata_str);
798 return len;
799
800end:
801error:
802 if (ret_val) {
803 /*
804 * On error, flag the registry that the metadata is
805 * closed. We were unable to push anything and this
806 * means that either the consumer is not responding or
807 * the metadata cache has been destroyed on the
808 * consumer.
809 */
810 locked_registry->_metadata_closed = true;
811 }
812error_push:
813 free(metadata_str);
814 return ret_val;
815}
816
817/*
818 * For a given application and session, push metadata to consumer.
819 * The consumer socket used to send the metadata is retrieved from the
820 * given consumer output, based on the bitness recorded in the session
821 * registry.
822 * RCU read-side lock must be held while calling this function,
823 * therefore ensuring existence of registry. It also ensures existence
824 * of socket throughout this function.
825 *
826 * Return 0 on success else a negative error.
827 * A -EPIPE return value means we could not send the metadata,
828 * but it can be caused by recoverable errors (e.g. the application has
829 * terminated concurrently).
830 */
831static int push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
832 struct consumer_output *consumer)
833{
834 int ret_val;
835 ssize_t ret;
836 struct consumer_socket *socket;
837
838 LTTNG_ASSERT(locked_registry);
839 LTTNG_ASSERT(consumer);
840 ASSERT_RCU_READ_LOCKED();
841
842 if (locked_registry->_metadata_closed) {
843 ret_val = -EPIPE;
844 goto error;
845 }
846
847 /* Get the consumer socket to use to push the metadata. */
848 socket = consumer_find_socket_by_bitness(locked_registry->abi.bits_per_long, consumer);
849 if (!socket) {
850 ret_val = -1;
851 goto error;
852 }
853
854 ret = ust_app_push_metadata(locked_registry, socket, 0);
855 if (ret < 0) {
856 ret_val = ret;
857 goto error;
858 }
859 return 0;
860
861error:
862 return ret_val;
863}
864
865/*
866 * Send to the consumer a close metadata command for the given session. Once
867 * done, the metadata channel is deleted and the session metadata pointer is
868 * nullified. The session lock MUST be held unless the application is
869 * in the destroy path.
870 *
871 * Do not hold the registry lock while communicating with the consumerd, because
872 * doing so causes inter-process deadlocks between consumerd and sessiond with
873 * the metadata request notification.
874 *
875 * Return 0 on success else a negative value.
876 */
877static int close_metadata(uint64_t metadata_key,
878 unsigned int consumer_bitness,
879 struct consumer_output *consumer)
880{
881 int ret;
882 struct consumer_socket *socket;
883 lttng::urcu::read_lock_guard read_lock_guard;
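/* The RCU read-side lock guarantees the existence of the consumer socket
 * looked up below for the duration of this function. */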
884
885 LTTNG_ASSERT(consumer);
886
887 /* Get the consumer socket to use to close the metadata. */
888 socket = consumer_find_socket_by_bitness(consumer_bitness, consumer);
889 if (!socket) {
890 ret = -1;
891 goto end;
892 }
893
894 ret = consumer_close_metadata(socket, metadata_key);
895 if (ret < 0) {
896 goto end;
897 }
898
899end:
900 return ret;
901}
902
903static void delete_ust_app_session_rcu(struct rcu_head *head)
904{
905 struct ust_app_session *ua_sess =
906 lttng::utils::container_of(head, &ust_app_session::rcu_head);
907
908 lttng_ht_destroy(ua_sess->channels);
909 free(ua_sess);
910}
911
912/*
913 * Delete ust app session safely. RCU read lock must be held before calling
914 * this function.
915 *
916 * The session list lock must be held by the caller.
917 */
918static void delete_ust_app_session(int sock, struct ust_app_session *ua_sess, struct ust_app *app)
919{
920 int ret;
921 struct lttng_ht_iter iter;
922 struct ust_app_channel *ua_chan;
923
924 LTTNG_ASSERT(ua_sess);
925 ASSERT_RCU_READ_LOCKED();
926
927 pthread_mutex_lock(&ua_sess->lock);
928
929 LTTNG_ASSERT(!ua_sess->deleted);
930 ua_sess->deleted = true;
931
932 auto locked_registry = get_locked_session_registry(ua_sess);
933 /* Registry can be null on error path during initialization. */
934 if (locked_registry) {
935 /* Push metadata for application before freeing the application. */
936 (void) push_metadata(locked_registry, ua_sess->consumer);
937 }
938
939 cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
940 ret = lttng_ht_del(ua_sess->channels, &iter);
941 LTTNG_ASSERT(!ret);
942 delete_ust_app_channel(sock, ua_chan, app, locked_registry);
943 }
944
945 if (locked_registry) {
946 /*
947 * Don't ask to close metadata for global per-UID buffers; in that case,
948 * metadata is only closed when the trace session is destroyed. Also, the
949 * previous metadata push could have flagged the registry as closed, so
950 * don't send a close command if it already is.
951 */
952 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
953 const auto metadata_key = locked_registry->_metadata_key;
954 const auto consumer_bitness = locked_registry->abi.bits_per_long;
955
956 if (!locked_registry->_metadata_closed && metadata_key != 0) {
957 locked_registry->_metadata_closed = true;
958 }
959
960 /* Release lock before communication, see comments in close_metadata(). */
961 locked_registry.reset();
962 (void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer);
963 }
964 }
965
966 /* In case of per PID, the registry is kept in the session. */
967 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
968 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
969 if (reg_pid) {
970 /*
971 * Registry can be null on error path during
972 * initialization.
973 */
974 buffer_reg_pid_remove(reg_pid);
975 buffer_reg_pid_destroy(reg_pid);
976 }
977 }
978
979 if (ua_sess->handle != -1) {
980 pthread_mutex_lock(&app->sock_lock);
981 ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
982 pthread_mutex_unlock(&app->sock_lock);
983 if (ret < 0) {
984 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
985 DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
986 app->pid,
987 app->sock);
988 } else if (ret == -EAGAIN) {
989 WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
990 app->pid,
991 app->sock);
992 } else {
993 ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
994 ret,
995 app->pid,
996 app->sock);
997 }
998 }
999
1000 /* Remove session from application UST object descriptor. */
1001 iter.iter.node = &ua_sess->ust_objd_node.node;
1002 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
1003 LTTNG_ASSERT(!ret);
1004 }
1005
1006 pthread_mutex_unlock(&ua_sess->lock);
1007
1008 consumer_output_put(ua_sess->consumer);
1009
1010 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
1011}
1012
1013/*
1014 * Delete a traceable application structure from the global list. Never call
1015 * this function outside of a call_rcu call.
1016 */
1017static void delete_ust_app(struct ust_app *app)
1018{
1019 int ret, sock;
1020 struct ust_app_session *ua_sess, *tmp_ua_sess;
1021 struct lttng_ht_iter iter;
1022 struct ust_app_event_notifier_rule *event_notifier_rule;
1023 bool event_notifier_write_fd_is_open;
1024
1025 /*
1026 * The session list lock must be held during this function to guarantee
1027 * the existence of ua_sess.
1028 */
1029 session_lock_list();
1030 /* Delete ust app sessions info */
1031 sock = app->sock;
1032 app->sock = -1;
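/*
 * The socket is copied locally and invalidated in the application so that no
 * further communication is attempted through it; the copied fd is only closed
 * at the end of the teardown, see the comment preceding the close() call
 * below.
 */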
1033
1034 /* Wipe sessions */
1035 cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
1036 /* Free every object in the session and the session. */
1037 rcu_read_lock();
1038 delete_ust_app_session(sock, ua_sess, app);
1039 rcu_read_unlock();
1040 }
1041
1042 /* Remove the event notifier rules associated with this app. */
1043 rcu_read_lock();
1044 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
1045 &iter.iter,
1046 event_notifier_rule,
1047 node.node) {
1048 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
1049 LTTNG_ASSERT(!ret);
1050
1051 delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
1052 }
1053
1054 rcu_read_unlock();
1055
1056 lttng_ht_destroy(app->sessions);
1057 lttng_ht_destroy(app->ust_sessions_objd);
1058 lttng_ht_destroy(app->ust_objd);
1059 lttng_ht_destroy(app->token_to_event_notifier_rule_ht);
1060
1061 /*
1062 * This could be NULL if the event notifier setup failed (e.g. the app
1063 * was killed or the tracer does not support this feature).
1064 */
1065 if (app->event_notifier_group.object) {
1066 enum lttng_error_code ret_code;
1067 enum event_notifier_error_accounting_status status;
1068
1069 const int event_notifier_read_fd =
1070 lttng_pipe_get_readfd(app->event_notifier_group.event_pipe);
1071
1072 ret_code = notification_thread_command_remove_tracer_event_source(
1073 the_notification_thread_handle, event_notifier_read_fd);
1074 if (ret_code != LTTNG_OK) {
1075 ERR("Failed to remove application tracer event source from notification thread");
1076 }
1077
1078 status = event_notifier_error_accounting_unregister_app(app);
1079 if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
1080 ERR("Error unregistering app from event notifier error accounting");
1081 }
1082
1083 lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
1084 free(app->event_notifier_group.object);
1085 }
1086
1087 event_notifier_write_fd_is_open =
1088 lttng_pipe_is_write_open(app->event_notifier_group.event_pipe);
1089 lttng_pipe_destroy(app->event_notifier_group.event_pipe);
1090 /*
1091 * Release the file descriptors reserved for the event notifier pipe.
1092 * The app could be destroyed before the write end of the pipe could be
1093 * passed to the application (and closed). In that case, both file
1094 * descriptors must be released.
1095 */
1096 lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);
1097
1098 /*
1099 * Wait until we have deleted the application from the sock hash table
1100 * before closing this socket, otherwise an application could re-use the
1101 * socket ID and race with the teardown, using the same hash table entry.
1102 *
1103 * It's OK to leave the close in call_rcu. We want it to stay unique for
1104 * all RCU readers that could run concurrently with unregister app,
1105 * therefore we _need_ to only close that socket after a grace period. So
1106 * it should stay in this RCU callback.
1107 *
1108 * This close() is a very important step of the synchronization model so
1109 * every modification to this function must be carefully reviewed.
1110 */
1111 ret = close(sock);
1112 if (ret) {
1113 PERROR("close");
1114 }
1115 lttng_fd_put(LTTNG_FD_APPS, 1);
1116
1117 DBG2("UST app pid %d deleted", app->pid);
1118 free(app);
1119 session_unlock_list();
1120}
1121
1122/*
1123 * URCU intermediate call to delete an UST app.
1124 */
1125static void delete_ust_app_rcu(struct rcu_head *head)
1126{
1127 struct lttng_ht_node_ulong *node =
1128 lttng::utils::container_of(head, &lttng_ht_node_ulong::head);
1129 struct ust_app *app = lttng::utils::container_of(node, &ust_app::pid_n);
1130
1131 DBG3("Call RCU deleting app PID %d", app->pid);
1132 delete_ust_app(app);
1133}
1134
1135/*
1136 * Delete the session from the application ht and delete the data structure by
1137 * freeing every object inside and releasing them.
1138 *
1139 * The session list lock must be held by the caller.
1140 */
1141static void destroy_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
1142{
1143 int ret;
1144 struct lttng_ht_iter iter;
1145
1146 LTTNG_ASSERT(app);
1147 LTTNG_ASSERT(ua_sess);
1148
1149 iter.iter.node = &ua_sess->node.node;
1150 ret = lttng_ht_del(app->sessions, &iter);
1151 if (ret) {
1152 /* Already scheduled for teardown. */
1153 goto end;
1154 }
1155
1156 /* Once deleted, free the data structure. */
1157 delete_ust_app_session(app->sock, ua_sess, app);
1158
1159end:
1160 return;
1161}
1162
1163/*
1164 * Alloc new UST app session.
1165 */
1166static struct ust_app_session *alloc_ust_app_session()
1167{
1168 struct ust_app_session *ua_sess;
1169
1170 /* Init most of the default values by allocating and zeroing. */
1171 ua_sess = zmalloc<ust_app_session>();
1172 if (ua_sess == nullptr) {
1173 PERROR("malloc");
1174 goto error_free;
1175 }
1176
1177 ua_sess->handle = -1;
1178 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1179 ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
1180 pthread_mutex_init(&ua_sess->lock, nullptr);
1181
1182 return ua_sess;
1183
1184error_free:
1185 return nullptr;
1186}
1187
1188/*
1189 * Alloc new UST app channel.
1190 */
1191static struct ust_app_channel *alloc_ust_app_channel(const char *name,
1192 struct ust_app_session *ua_sess,
1193 struct lttng_ust_abi_channel_attr *attr)
1194{
1195 struct ust_app_channel *ua_chan;
1196
1197 /* Init most of the default values by allocating and zeroing. */
1198 ua_chan = zmalloc<ust_app_channel>();
1199 if (ua_chan == nullptr) {
1200 PERROR("malloc");
1201 goto error;
1202 }
1203
1204 /* Setup channel name */
1205 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1206 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1207
1208 ua_chan->enabled = 1;
1209 ua_chan->handle = -1;
1210 ua_chan->session = ua_sess;
1211 ua_chan->key = get_next_channel_key();
1212 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1213 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1214 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1215
1216 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1217 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1218
1219 /* Copy attributes */
1220 if (attr) {
1221 /* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
1222 ua_chan->attr.subbuf_size = attr->subbuf_size;
1223 ua_chan->attr.num_subbuf = attr->num_subbuf;
1224 ua_chan->attr.overwrite = attr->overwrite;
1225 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1226 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1227 ua_chan->attr.output = (lttng_ust_abi_output) attr->output;
1228 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
1229 }
1230 /* By default, the channel is a per-CPU channel. */
1231 ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
1232
1233 DBG3("UST app channel %s allocated", ua_chan->name);
1234
1235 return ua_chan;
1236
1237error:
1238 return nullptr;
1239}
1240
1241/*
1242 * Allocate and initialize a UST app stream.
1243 *
1244 * Return newly allocated stream pointer or NULL on error.
1245 */
1246struct ust_app_stream *ust_app_alloc_stream()
1247{
1248 struct ust_app_stream *stream = nullptr;
1249
1250 stream = zmalloc<ust_app_stream>();
1251 if (stream == nullptr) {
1252 PERROR("zmalloc ust app stream");
1253 goto error;
1254 }
1255
1256 /* Zero could be a valid value for a handle, so initialize it to -1. */
1257 stream->handle = -1;
1258
1259error:
1260 return stream;
1261}
1262
1263/*
1264 * Alloc new UST app event.
1265 */
1266static struct ust_app_event *alloc_ust_app_event(char *name, struct lttng_ust_abi_event *attr)
1267{
1268 struct ust_app_event *ua_event;
1269
1270 /* Init most of the default values by allocating and zeroing. */
1271 ua_event = zmalloc<ust_app_event>();
1272 if (ua_event == nullptr) {
1273 PERROR("Failed to allocate ust_app_event structure");
1274 goto error;
1275 }
1276
1277 ua_event->enabled = 1;
1278 strncpy(ua_event->name, name, sizeof(ua_event->name));
1279 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1280 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1281
1282 /* Copy attributes */
1283 if (attr) {
1284 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1285 }
1286
1287 DBG3("UST app event %s allocated", ua_event->name);
1288
1289 return ua_event;
1290
1291error:
1292 return nullptr;
1293}
1294
1295/*
1296 * Allocate a new UST app event notifier rule.
1297 */
1298static struct ust_app_event_notifier_rule *
1299alloc_ust_app_event_notifier_rule(struct lttng_trigger *trigger)
1300{
1301 enum lttng_event_rule_generate_exclusions_status generate_exclusion_status;
1302 enum lttng_condition_status cond_status;
1303 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
1304 struct lttng_condition *condition = nullptr;
1305 const struct lttng_event_rule *event_rule = nullptr;
1306
1307 ua_event_notifier_rule = zmalloc<ust_app_event_notifier_rule>();
1308 if (ua_event_notifier_rule == nullptr) {
1309 PERROR("Failed to allocate ust_app_event_notifier_rule structure");
1310 goto error;
1311 }
1312
1313 ua_event_notifier_rule->enabled = 1;
1314 ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
1315 lttng_ht_node_init_u64(&ua_event_notifier_rule->node, ua_event_notifier_rule->token);
1316
1317 condition = lttng_trigger_get_condition(trigger);
1318 LTTNG_ASSERT(condition);
1319 LTTNG_ASSERT(lttng_condition_get_type(condition) ==
1320 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
1321
1322 cond_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
1323 LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
1324 LTTNG_ASSERT(event_rule);
1325
1326 ua_event_notifier_rule->error_counter_index =
1327 lttng_condition_event_rule_matches_get_error_counter_index(condition);
1328 /* Acquire the event notifier's reference to the trigger. */
1329 lttng_trigger_get(trigger);
1330
1331 ua_event_notifier_rule->trigger = trigger;
1332 ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1333 generate_exclusion_status = lttng_event_rule_generate_exclusions(
1334 event_rule, &ua_event_notifier_rule->exclusion);
1335 switch (generate_exclusion_status) {
1336 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
1337 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
1338 break;
1339 default:
1340 /* Error occurred. */
1341 ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
1342 goto error_put_trigger;
1343 }
1344
1345 DBG3("UST app event notifier rule allocated: token = %" PRIu64,
1346 ua_event_notifier_rule->token);
1347
1348 return ua_event_notifier_rule;
1349
1350error_put_trigger:
1351 lttng_trigger_put(trigger);
1352error:
1353 free(ua_event_notifier_rule);
1354 return nullptr;
1355}
1356
1357/*
1358 * Alloc new UST app context.
1359 */
1360static struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1361{
1362 struct ust_app_ctx *ua_ctx;
1363
1364 ua_ctx = zmalloc<ust_app_ctx>();
1365 if (ua_ctx == nullptr) {
1366 goto error;
1367 }
1368
1369 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1370
1371 if (uctx) {
1372 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1373 if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
1374 char *provider_name = nullptr, *ctx_name = nullptr;
1375
1376 provider_name = strdup(uctx->u.app_ctx.provider_name);
1377 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1378 if (!provider_name || !ctx_name) {
1379 free(provider_name);
1380 free(ctx_name);
1381 goto error;
1382 }
1383
1384 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1385 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1386 }
1387 }
1388
1389 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1390 return ua_ctx;
1391error:
1392 free(ua_ctx);
1393 return nullptr;
1394}
1395
1396/*
1397 * Create a liblttng-ust filter bytecode from given bytecode.
1398 *
1399 * Return allocated filter or NULL on error.
1400 */
1401static struct lttng_ust_abi_filter_bytecode *
1402create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1403{
1404 struct lttng_ust_abi_filter_bytecode *filter = nullptr;
1405
1406 /* Copy filter bytecode. */
1407 filter = zmalloc<lttng_ust_abi_filter_bytecode>(sizeof(*filter) + orig_f->len);
1408 if (!filter) {
1409 PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32
1410 " bytes",
1411 orig_f->len);
1412 goto error;
1413 }
1414
1415 LTTNG_ASSERT(sizeof(struct lttng_bytecode) == sizeof(struct lttng_ust_abi_filter_bytecode));
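/* The raw copy below assumes both structures share the same layout. */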
1416 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1417error:
1418 return filter;
1419}
1420
1421/*
1422 * Create a liblttng-ust capture bytecode from given bytecode.
1423 *
1424 * Return allocated filter or NULL on error.
1425 */
1426static struct lttng_ust_abi_capture_bytecode *
1427create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1428{
1429 struct lttng_ust_abi_capture_bytecode *capture = nullptr;
1430
1431 /* Copy capture bytecode. */
1432 capture = zmalloc<lttng_ust_abi_capture_bytecode>(sizeof(*capture) + orig_f->len);
1433 if (!capture) {
1434 PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32
1435 " bytes",
1436 orig_f->len);
1437 goto error;
1438 }
1439
1440 LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
1441 sizeof(struct lttng_ust_abi_capture_bytecode));
1442 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
1443error:
1444 return capture;
1445}
1446
1447/*
1448 * Find an ust_app using the sock and return it. RCU read side lock must be
1449 * held before calling this helper function.
1450 */
1451struct ust_app *ust_app_find_by_sock(int sock)
1452{
1453 struct lttng_ht_node_ulong *node;
1454 struct lttng_ht_iter iter;
1455
1456 ASSERT_RCU_READ_LOCKED();
1457
1458 lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &iter);
1459 node = lttng_ht_iter_get_node_ulong(&iter);
1460 if (node == nullptr) {
1461 DBG2("UST app find by sock %d not found", sock);
1462 goto error;
1463 }
1464
1465 return lttng::utils::container_of(node, &ust_app::sock_n);
1466
1467error:
1468 return nullptr;
1469}
1470
1471/*
1472 * Find an ust_app using the notify sock and return it. RCU read side lock must
1473 * be held before calling this helper function.
1474 */
1475static struct ust_app *find_app_by_notify_sock(int sock)
1476{
1477 struct lttng_ht_node_ulong *node;
1478 struct lttng_ht_iter iter;
1479
1480 ASSERT_RCU_READ_LOCKED();
1481
1482 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *) ((unsigned long) sock), &iter);
1483 node = lttng_ht_iter_get_node_ulong(&iter);
1484 if (node == nullptr) {
1485 DBG2("UST app find by notify sock %d not found", sock);
1486 goto error;
1487 }
1488
1489 return lttng::utils::container_of(node, &ust_app::notify_sock_n);
1490
1491error:
1492 return nullptr;
1493}
1494
1495/*
1496 * Look up an ust app event based on the event name, filter bytecode,
1497 * loglevel and exclusions.
1498 *
1499 * Return an ust_app_event object or NULL on error.
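 *
 * Note: the hash is computed from the event name only; full key equality
 * (filter, loglevel, exclusions) is enforced by ht_match_ust_app_event().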
1500 */
1501static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1502 const char *name,
1503 const struct lttng_bytecode *filter,
1504 int loglevel_value,
1505 const struct lttng_event_exclusion *exclusion)
1506{
1507 struct lttng_ht_iter iter;
1508 struct lttng_ht_node_str *node;
1509 struct ust_app_event *event = nullptr;
1510 struct ust_app_ht_key key;
1511
1512 LTTNG_ASSERT(name);
1513 LTTNG_ASSERT(ht);
1514
1515 /* Setup key for event lookup. */
1516 key.name = name;
1517 key.filter = filter;
1518 key.loglevel_type = (lttng_ust_abi_loglevel_type) loglevel_value;
1519 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1520 key.exclusion = exclusion;
1521
1522 /* Lookup using the event name as hash and a custom match fct. */
1523 cds_lfht_lookup(ht->ht,
1524 ht->hash_fct((void *) name, lttng_ht_seed),
1525 ht_match_ust_app_event,
1526 &key,
1527 &iter.iter);
1528 node = lttng_ht_iter_get_node_str(&iter);
1529 if (node == nullptr) {
1530 goto end;
1531 }
1532
1533 event = lttng::utils::container_of(node, &ust_app_event::node);
1534
1535end:
1536 return event;
1537}
1538
1539/*
1540 * Look-up an event notifier rule based on its token id.
1541 *
1542 * Must be called with the RCU read lock held.
1543 * Return an ust_app_event_notifier_rule object or NULL on error.
1544 */
1545static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(struct lttng_ht *ht,
1546 uint64_t token)
1547{
1548 struct lttng_ht_iter iter;
1549 struct lttng_ht_node_u64 *node;
1550 struct ust_app_event_notifier_rule *event_notifier_rule = nullptr;
1551
1552 LTTNG_ASSERT(ht);
1553 ASSERT_RCU_READ_LOCKED();
1554
1555 lttng_ht_lookup(ht, &token, &iter);
1556 node = lttng_ht_iter_get_node_u64(&iter);
1557 if (node == nullptr) {
1558 DBG2("UST app event notifier rule token not found: token = %" PRIu64, token);
1559 goto end;
1560 }
1561
1562 event_notifier_rule = lttng::utils::container_of(node, &ust_app_event_notifier_rule::node);
1563end:
1564 return event_notifier_rule;
1565}
1566
1567/*
1568 * Create the channel context on the tracer.
1569 *
1570 * Called with UST app session lock held.
1571 */
1572static int create_ust_channel_context(struct ust_app_channel *ua_chan,
1573 struct ust_app_ctx *ua_ctx,
1574 struct ust_app *app)
1575{
1576 int ret;
1577
1578 health_code_update();
1579
1580 pthread_mutex_lock(&app->sock_lock);
1581 ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx, ua_chan->obj, &ua_ctx->obj);
1582 pthread_mutex_unlock(&app->sock_lock);
1583 if (ret < 0) {
1584 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1585 ret = 0;
1586 DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
1587 app->pid,
1588 app->sock);
1589 } else if (ret == -EAGAIN) {
1590 ret = 0;
1591 WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
1592 app->pid,
1593 app->sock);
1594 } else {
1595 ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
1596 ret,
1597 app->pid,
1598 app->sock);
1599 }
1600 goto error;
1601 }
1602
1603 ua_ctx->handle = ua_ctx->obj->handle;
1604
1605 DBG2("UST app context handle %d created successfully for channel %s",
1606 ua_ctx->handle,
1607 ua_chan->name);
1608
1609error:
1610 health_code_update();
1611 return ret;
1612}
1613
1614/*
1615 * Set the filter on the tracer.
1616 */
1617static int set_ust_object_filter(struct ust_app *app,
1618 const struct lttng_bytecode *bytecode,
1619 struct lttng_ust_abi_object_data *ust_object)
1620{
1621 int ret;
1622 struct lttng_ust_abi_filter_bytecode *ust_bytecode = nullptr;
1623
1624 health_code_update();
1625
1626 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
1627 if (!ust_bytecode) {
1628 ret = -LTTNG_ERR_NOMEM;
1629 goto error;
1630 }
1631 pthread_mutex_lock(&app->sock_lock);
1632 ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode, ust_object);
1633 pthread_mutex_unlock(&app->sock_lock);
1634 if (ret < 0) {
1635 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1636 ret = 0;
1637 DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
1638 app->pid,
1639 app->sock);
1640 } else if (ret == -EAGAIN) {
1641 ret = 0;
1642 WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
1643 app->pid,
1644 app->sock);
1645 } else {
1646 ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
1647 ret,
1648 app->pid,
1649 app->sock,
1650 ust_object);
1651 }
1652 goto error;
1653 }
1654
1655 DBG2("UST filter successfully set: object = %p", ust_object);
1656
1657error:
1658 health_code_update();
1659 free(ust_bytecode);
1660 return ret;
1661}
1662
1663/*
1664 * Set a capture bytecode for the passed object.
1665 * The sequence number enforces the ordering at runtime and on reception of
1666 * the captured payloads.
1667 */
1668static int set_ust_capture(struct ust_app *app,
1669 const struct lttng_bytecode *bytecode,
1670 unsigned int capture_seqnum,
1671 struct lttng_ust_abi_object_data *ust_object)
1672{
1673 int ret;
1674 struct lttng_ust_abi_capture_bytecode *ust_bytecode = nullptr;
1675
1676 health_code_update();
1677
1678 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
1679 if (!ust_bytecode) {
1680 ret = -LTTNG_ERR_NOMEM;
1681 goto error;
1682 }
1683
1684 /*
1685 * Set the sequence number to ensure the capture of fields is ordered.
1686 */
1687 ust_bytecode->seqnum = capture_seqnum;
1688
1689 pthread_mutex_lock(&app->sock_lock);
1690 ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode, ust_object);
1691 pthread_mutex_unlock(&app->sock_lock);
1692 if (ret < 0) {
1693 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1694 ret = 0;
1695 DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
1696 app->pid,
1697 app->sock);
1698 } else if (ret == -EAGAIN) {
1699 ret = 0;
1700 DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
1701 app->pid,
1702 app->sock);
1703 } else {
1704 ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
1705 ret,
1706 app->pid,
1707 app->sock);
1708 }
1709
1710 goto error;
1711 }
1712
1713 DBG2("UST capture successfully set: object = %p", ust_object);
1714
1715error:
1716 health_code_update();
1717 free(ust_bytecode);
1718 return ret;
1719}
1720
1721static struct lttng_ust_abi_event_exclusion *
1722create_ust_exclusion_from_exclusion(const struct lttng_event_exclusion *exclusion)
1723{
1724 struct lttng_ust_abi_event_exclusion *ust_exclusion = nullptr;
1725 size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
1726 LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
1727
1728 ust_exclusion = zmalloc<lttng_ust_abi_event_exclusion>(exclusion_alloc_size);
1729 if (!ust_exclusion) {
1730 PERROR("malloc");
1731 goto end;
1732 }
1733
1734 LTTNG_ASSERT(sizeof(struct lttng_event_exclusion) ==
1735 sizeof(struct lttng_ust_abi_event_exclusion));
1736 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1737end:
1738 return ust_exclusion;
1739}
1740
1741/*
1742 * Set event exclusions on the tracer.
1743 */
1744static int set_ust_object_exclusions(struct ust_app *app,
1745 const struct lttng_event_exclusion *exclusions,
1746 struct lttng_ust_abi_object_data *ust_object)
1747{
1748 int ret;
1749 struct lttng_ust_abi_event_exclusion *ust_exclusions = nullptr;
1750
1751 LTTNG_ASSERT(exclusions && exclusions->count > 0);
1752
1753 health_code_update();
1754
1755 ust_exclusions = create_ust_exclusion_from_exclusion(exclusions);
1756 if (!ust_exclusions) {
1757 ret = -LTTNG_ERR_NOMEM;
1758 goto error;
1759 }
1760 pthread_mutex_lock(&app->sock_lock);
1761 ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
1762 pthread_mutex_unlock(&app->sock_lock);
1763 if (ret < 0) {
1764 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1765 ret = 0;
1766 DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
1767 app->pid,
1768 app->sock);
1769 } else if (ret == -EAGAIN) {
1770 ret = 0;
1771 WARN("UST app event exclusion failed. Communication time out: pid = %d, sock = %d",
1772 app->pid,
1773 app->sock);
1774 } else {
1775 ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
1776 ret,
1777 app->pid,
1778 app->sock,
1779 ust_object);
1780 }
1781 goto error;
1782 }
1783
1784 DBG2("UST exclusions set successfully for object %p", ust_object);
1785
1786error:
1787 health_code_update();
1788 free(ust_exclusions);
1789 return ret;
1790}
1791
1792/*
1793 * Disable the specified object on the UST tracer for the UST session.
1794 */
1795static int disable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *object)
1796{
1797 int ret;
1798
1799 health_code_update();
1800
1801 pthread_mutex_lock(&app->sock_lock);
1802 ret = lttng_ust_ctl_disable(app->sock, object);
1803 pthread_mutex_unlock(&app->sock_lock);
1804 if (ret < 0) {
1805 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1806 ret = 0;
1807 DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
1808 app->pid,
1809 app->sock);
1810 } else if (ret == -EAGAIN) {
1811 ret = 0;
1812 WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
1813 app->pid,
1814 app->sock);
1815 } else {
1816 ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
1817 ret,
1818 app->pid,
1819 app->sock,
1820 object);
1821 }
1822 goto error;
1823 }
1824
1825 DBG2("UST app object %p disabled successfully for app: pid = %d", object, app->pid);
1826
1827error:
1828 health_code_update();
1829 return ret;
1830}
1831
1832/*
1833 * Disable the specified channel on the UST tracer for the UST session.
1834 */
1835static int disable_ust_channel(struct ust_app *app,
1836 struct ust_app_session *ua_sess,
1837 struct ust_app_channel *ua_chan)
1838{
1839 int ret;
1840
1841 health_code_update();
1842
1843 pthread_mutex_lock(&app->sock_lock);
1844 ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
1845 pthread_mutex_unlock(&app->sock_lock);
1846 if (ret < 0) {
1847 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1848 ret = 0;
1849 DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
1850 app->pid,
1851 app->sock);
1852 } else if (ret == -EAGAIN) {
1853 ret = 0;
1854 WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
1855 app->pid,
1856 app->sock);
1857 } else {
1858 ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1859 ua_chan->name,
1860 ua_sess->handle,
1861 ret,
1862 app->pid,
1863 app->sock);
1864 }
1865 goto error;
1866 }
1867
1868 DBG2("UST app channel %s disabled successfully for app: pid = %d", ua_chan->name, app->pid);
1869
1870error:
1871 health_code_update();
1872 return ret;
1873}
1874
1875/*
1876 * Enable the specified channel on the UST tracer for the UST session.
1877 */
1878static int enable_ust_channel(struct ust_app *app,
1879 struct ust_app_session *ua_sess,
1880 struct ust_app_channel *ua_chan)
1881{
1882 int ret;
1883
1884 health_code_update();
1885
1886 pthread_mutex_lock(&app->sock_lock);
1887 ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
1888 pthread_mutex_unlock(&app->sock_lock);
1889 if (ret < 0) {
1890 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1891 ret = 0;
1892 DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
1893 ua_chan->name,
1894 app->pid,
1895 app->sock);
1896 } else if (ret == -EAGAIN) {
1897 ret = 0;
1898 WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
1899 ua_chan->name,
1900 app->pid,
1901 app->sock);
1902 } else {
1903 ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1904 ua_chan->name,
1905 ua_sess->handle,
1906 ret,
1907 app->pid,
1908 app->sock);
1909 }
1910 goto error;
1911 }
1912
1913 ua_chan->enabled = 1;
1914
1915 DBG2("UST app channel %s enabled successfully for app: pid = %d", ua_chan->name, app->pid);
1916
1917error:
1918 health_code_update();
1919 return ret;
1920}
1921
1922/*
1923 * Enable the specified event on the UST tracer for the UST session.
1924 */
1925static int enable_ust_object(struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
1926{
1927 int ret;
1928
1929 health_code_update();
1930
1931 pthread_mutex_lock(&app->sock_lock);
1932 ret = lttng_ust_ctl_enable(app->sock, ust_object);
1933 pthread_mutex_unlock(&app->sock_lock);
1934 if (ret < 0) {
1935 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1936 ret = 0;
1937 DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
1938 app->pid,
1939 app->sock);
1940 } else if (ret == -EAGAIN) {
1941 ret = 0;
1942 WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
1943 app->pid,
1944 app->sock);
1945 } else {
1946 ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
1947 ret,
1948 app->pid,
1949 app->sock,
1950 ust_object);
1951 }
1952 goto error;
1953 }
1954
1955 DBG2("UST app object %p enabled successfully for app: pid = %d", ust_object, app->pid);
1956
1957error:
1958 health_code_update();
1959 return ret;
1960}
1961
1962/*
1963 * Send the channel and stream buffers to the application.
1964 *
1965 * Return 0 on success. On error, a negative value is returned.
1966 */
1967static int send_channel_pid_to_ust(struct ust_app *app,
1968 struct ust_app_session *ua_sess,
1969 struct ust_app_channel *ua_chan)
1970{
1971 int ret;
1972 struct ust_app_stream *stream, *stmp;
1973
1974 LTTNG_ASSERT(app);
1975 LTTNG_ASSERT(ua_sess);
1976 LTTNG_ASSERT(ua_chan);
1977
1978 health_code_update();
1979
1980 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name, app->sock);
1981
1982 /* Send channel to the application. */
1983 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1984 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1985 ret = -ENOTCONN; /* Caused by app exiting. */
1986 goto error;
1987 } else if (ret == -EAGAIN) {
1988 /* Caused by timeout. */
1989 WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64
1990 "\".",
1991 app->pid,
1992 ua_chan->name,
1993 ua_sess->tracing_id);
1994 /* Treat this the same way as an application that is exiting. */
1995 ret = -ENOTCONN;
1996 goto error;
1997 } else if (ret < 0) {
1998 goto error;
1999 }
2000
2001 health_code_update();
2002
2003 /* Send all streams to application. */
2004 cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
2005 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
2006 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2007 ret = -ENOTCONN; /* Caused by app exiting. */
2008 goto error;
2009 } else if (ret == -EAGAIN) {
2010 /* Caused by timeout. */
2011 WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64
2012 "\".",
2013 app->pid,
2014 stream->name,
2015 ua_chan->name,
2016 ua_sess->tracing_id);
2017 /*
2018 * Treat this the same way as an application that is
2019 * exiting.
2020 */
2021 ret = -ENOTCONN;
2022 } else if (ret < 0) {
2023 goto error;
2024 }
2025 /* We don't need the stream anymore once sent to the tracer. */
2026 cds_list_del(&stream->list);
2027 delete_ust_app_stream(-1, stream, app);
2028 }
2029
2030error:
2031 health_code_update();
2032 return ret;
2033}
2034
2035/*
2036 * Create the specified event on the UST tracer for a UST session.
2037 *
2038 * Should be called with session mutex held.
2039 */
2040static int create_ust_event(struct ust_app *app,
2041 struct ust_app_channel *ua_chan,
2042 struct ust_app_event *ua_event)
2043{
2044 int ret = 0;
2045
2046 health_code_update();
2047
2048 /* Create UST event on tracer */
2049 pthread_mutex_lock(&app->sock_lock);
2050 ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj, &ua_event->obj);
2051 pthread_mutex_unlock(&app->sock_lock);
2052 if (ret < 0) {
2053 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2054 ret = 0;
2055 DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
2056 app->pid,
2057 app->sock);
2058 } else if (ret == -EAGAIN) {
2059 ret = 0;
2060 WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
2061 app->pid,
2062 app->sock);
2063 } else {
2064 ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
2065 ua_event->attr.name,
2066 ret,
2067 app->pid,
2068 app->sock);
2069 }
2070 goto error;
2071 }
2072
2073 ua_event->handle = ua_event->obj->handle;
2074
2075 DBG2("UST app event %s created successfully for pid:%d object = %p",
2076 ua_event->attr.name,
2077 app->pid,
2078 ua_event->obj);
2079
2080 health_code_update();
2081
2082 /* Set filter if one is present. */
2083 if (ua_event->filter) {
2084 ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
2085 if (ret < 0) {
2086 goto error;
2087 }
2088 }
2089
2090 /* Set exclusions for the event */
2091 if (ua_event->exclusion) {
2092 ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
2093 if (ret < 0) {
2094 goto error;
2095 }
2096 }
2097
2098 /* Events are created disabled on the tracer; enable it now if requested. */
2099 if (ua_event->enabled) {
2100 /*
2101 * We now need to explicitly enable the event, since it
2102 * is now disabled at creation.
2103 */
2104 ret = enable_ust_object(app, ua_event->obj);
2105 if (ret < 0) {
2106 /*
2107 * If we hit an EPERM, something is wrong with our enable call. If
2108 * we get an EEXIST, there is a problem on the tracer side since we
2109 * just created it.
2110 */
2111 switch (ret) {
2112 case -LTTNG_UST_ERR_PERM:
2113 /* Code flow problem */
2114 abort();
2115 case -LTTNG_UST_ERR_EXIST:
2116 /* It's OK for our use case. */
2117 ret = 0;
2118 break;
2119 default:
2120 break;
2121 }
2122 goto error;
2123 }
2124 }
2125
2126error:
2127 health_code_update();
2128 return ret;
2129}
2130
2131static int
2132init_ust_event_notifier_from_event_rule(const struct lttng_event_rule *rule,
2133 struct lttng_ust_abi_event_notifier *event_notifier)
2134{
2135 enum lttng_event_rule_status status;
2136 enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2137 int loglevel = -1, ret = 0;
2138 const char *pattern;
2139
2140 memset(event_notifier, 0, sizeof(*event_notifier));
2141
2142 if (lttng_event_rule_targets_agent_domain(rule)) {
2143 /*
2144 * Special event for agents. The actual meat of the event is in the
2145 * filter that will be attached later on.
2146 *
2147 * Set the default values for the agent event.
2148 */
2149 pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
2150 loglevel = 0;
2151 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2152 } else {
2153 const struct lttng_log_level_rule *log_level_rule;
2154
2155 LTTNG_ASSERT(lttng_event_rule_get_type(rule) ==
2156 LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);
2157
2158 status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
2159 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
2160 /* At this point, this is a fatal error. */
2161 abort();
2162 }
2163
2164 status = lttng_event_rule_user_tracepoint_get_log_level_rule(rule, &log_level_rule);
2165 if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
2166 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2167 } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
2168 enum lttng_log_level_rule_status llr_status;
2169
2170 switch (lttng_log_level_rule_get_type(log_level_rule)) {
2171 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
2172 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
2173 llr_status = lttng_log_level_rule_exactly_get_level(log_level_rule,
2174 &loglevel);
2175 break;
2176 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
2177 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
2178 llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
2179 log_level_rule, &loglevel);
2180 break;
2181 default:
2182 abort();
2183 }
2184
2185 LTTNG_ASSERT(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
2186 } else {
2187 /* At this point this is a fatal error. */
2188 abort();
2189 }
2190 }
2191
2192 event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
2193 ret = lttng_strncpy(
2194 event_notifier->event.name, pattern, sizeof(event_notifier->event.name));
2195 if (ret) {
2196 ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ", pattern);
2197 goto end;
2198 }
2199
2200 event_notifier->event.loglevel_type = ust_loglevel_type;
2201 event_notifier->event.loglevel = loglevel;
2202end:
2203 return ret;
2204}
2205
2206/*
2207 * Create the specified event notifier against the user space tracer of a
2208 * given application.
2209 */
2210static int create_ust_event_notifier(struct ust_app *app,
2211 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2212{
2213 int ret = 0;
2214 enum lttng_condition_status condition_status;
2215 const struct lttng_condition *condition = nullptr;
2216 struct lttng_ust_abi_event_notifier event_notifier;
2217 const struct lttng_event_rule *event_rule = nullptr;
2218 unsigned int capture_bytecode_count = 0, i;
2219 enum lttng_condition_status cond_status;
2220 enum lttng_event_rule_type event_rule_type;
2221
2222 health_code_update();
2223 LTTNG_ASSERT(app->event_notifier_group.object);
2224
2225 condition = lttng_trigger_get_const_condition(ua_event_notifier_rule->trigger);
2226 LTTNG_ASSERT(condition);
2227 LTTNG_ASSERT(lttng_condition_get_type(condition) ==
2228 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
2229
2230 condition_status = lttng_condition_event_rule_matches_get_rule(condition, &event_rule);
2231 LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
2232
2233 LTTNG_ASSERT(event_rule);
2234
2235 event_rule_type = lttng_event_rule_get_type(event_rule);
2236 LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
2237 event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
2238 event_rule_type == LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
2239 event_rule_type == LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
2240
2241 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
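	/*
	 * The trigger's tracer token lets notifications emitted by the
	 * application be matched back to this rule; the error counter index
	 * is used by the event notifier error accounting subsystem.
	 */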
2242 event_notifier.event.token = ua_event_notifier_rule->token;
2243 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
2244
2245 /* Create UST event notifier against the tracer. */
2246 pthread_mutex_lock(&app->sock_lock);
2247 ret = lttng_ust_ctl_create_event_notifier(app->sock,
2248 &event_notifier,
2249 app->event_notifier_group.object,
2250 &ua_event_notifier_rule->obj);
2251 pthread_mutex_unlock(&app->sock_lock);
2252 if (ret < 0) {
2253 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2254 ret = 0;
2255 DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
2256 app->pid,
2257 app->sock);
2258 } else if (ret == -EAGAIN) {
2259 ret = 0;
2260 WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
2261 app->pid,
2262 app->sock);
2263 } else {
2264 ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
2265 event_notifier.event.name,
2266 ret,
2267 app->pid,
2268 app->sock);
2269 }
2270 goto error;
2271 }
2272
2273 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2274
2275 DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d, object = %p",
2276 event_notifier.event.name,
2277 app->name,
2278 app->pid,
2279 ua_event_notifier_rule->obj);
2280
2281 health_code_update();
2282
2283 /* Set filter if one is present. */
2284 if (ua_event_notifier_rule->filter) {
2285 ret = set_ust_object_filter(
2286 app, ua_event_notifier_rule->filter, ua_event_notifier_rule->obj);
2287 if (ret < 0) {
2288 goto error;
2289 }
2290 }
2291
2292 /* Set exclusions for the event. */
2293 if (ua_event_notifier_rule->exclusion) {
2294 ret = set_ust_object_exclusions(
2295 app, ua_event_notifier_rule->exclusion, ua_event_notifier_rule->obj);
2296 if (ret < 0) {
2297 goto error;
2298 }
2299 }
2300
2301 /* Set the capture bytecodes. */
2302 cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
2303 condition, &capture_bytecode_count);
2304 LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
2305
2306 for (i = 0; i < capture_bytecode_count; i++) {
2307 const struct lttng_bytecode *capture_bytecode =
2308 lttng_condition_event_rule_matches_get_capture_bytecode_at_index(condition,
2309 i);
2310
2311 ret = set_ust_capture(app, capture_bytecode, i, ua_event_notifier_rule->obj);
2312 if (ret < 0) {
2313 goto error;
2314 }
2315 }
2316
2317 /*
2318 * We now need to explicitly enable the event, since it
2319 * is disabled at creation.
2320 */
2321 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2322 if (ret < 0) {
2323 /*
2324 * If we hit an EPERM, something is wrong with our enable call.
2325 * If we get an EEXIST, there is a problem on the tracer side
2326 * since we just created it.
2327 */
2328 switch (ret) {
2329 case -LTTNG_UST_ERR_PERM:
2330 /* Code flow problem. */
2331 abort();
2332 case -LTTNG_UST_ERR_EXIST:
2333 /* It's OK for our use case. */
2334 ret = 0;
2335 break;
2336 default:
2337 break;
2338 }
2339
2340 goto error;
2341 }
2342
2343 ua_event_notifier_rule->enabled = true;
2344
2345error:
2346 health_code_update();
2347 return ret;
2348}
2349
2350/*
2351 * Copy data between a UST app event and an LTT event.
2352 */
2353static void shadow_copy_event(struct ust_app_event *ua_event, struct ltt_ust_event *uevent)
2354{
2355 size_t exclusion_alloc_size;
2356
2357 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2358 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2359
2360 ua_event->enabled = uevent->enabled;
2361
2362 /* Copy event attributes */
2363 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2364
2365 /* Copy filter bytecode */
2366 if (uevent->filter) {
2367 ua_event->filter = lttng_bytecode_copy(uevent->filter);
2368 /* Filter might be NULL here in case of ENOMEM. */
2369 }
2370
2371 /* Copy exclusion data */
2372 if (uevent->exclusion) {
2373 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2374 LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
2375 ua_event->exclusion = zmalloc<lttng_event_exclusion>(exclusion_alloc_size);
2376 if (ua_event->exclusion == nullptr) {
2377 PERROR("malloc");
2378 } else {
2379 memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size);
2380 }
2381 }
2382}
2383
2384/*
2385 * Copy data between a UST app channel and an LTT channel.
2386 */
2387static void shadow_copy_channel(struct ust_app_channel *ua_chan, struct ltt_ust_channel *uchan)
2388{
2389 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2390
2391 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2392 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2393
2394 ua_chan->tracefile_size = uchan->tracefile_size;
2395 ua_chan->tracefile_count = uchan->tracefile_count;
2396
2397 /* Copy event attributes since the layout is different. */
2398 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2399 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2400 ua_chan->attr.overwrite = uchan->attr.overwrite;
2401 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2402 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2403 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2404 ua_chan->attr.output = (lttng_ust_abi_output) uchan->attr.output;
2405 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2406
2407 /*
2408 * Note that the attribute channel type is not set since the channel on the
2409 * tracing registry side does not have this information.
2410 */
2411
2412 ua_chan->enabled = uchan->enabled;
2413 ua_chan->tracing_channel_id = uchan->id;
2414
2415 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2416}
2417
2418/*
2419 * Copy data between a UST app session and a regular LTT session.
2420 */
2421static void shadow_copy_session(struct ust_app_session *ua_sess,
2422 struct ltt_ust_session *usess,
2423 struct ust_app *app)
2424{
2425 struct tm *timeinfo;
2426 char datetime[16];
2427 int ret;
2428 char tmp_shm_path[PATH_MAX];
2429
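	/*
	 * The application's registration time is used below to build a unique
	 * per-PID trace directory name.
	 */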
2430 timeinfo = localtime(&app->registration_time);
2431 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
2432
2433 DBG2("Shadow copy of session handle %d", ua_sess->handle);
2434
2435 ua_sess->tracing_id = usess->id;
2436 ua_sess->id = get_next_session_id();
2437 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2438 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2439 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2440 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
2441 ua_sess->buffer_type = usess->buffer_type;
2442 ua_sess->bits_per_long = app->abi.bits_per_long;
2443
2444 /* Only one consumer object is possible per session. */
2445 consumer_output_get(usess->consumer);
2446 ua_sess->consumer = usess->consumer;
2447
2448 ua_sess->output_traces = usess->output_traces;
2449 ua_sess->live_timer_interval = usess->live_timer_interval;
2450 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &usess->metadata_attr);
2451
2452 switch (ua_sess->buffer_type) {
2453 case LTTNG_BUFFER_PER_PID:
2454 ret = snprintf(ua_sess->path,
2455 sizeof(ua_sess->path),
2456 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2457 app->name,
2458 app->pid,
2459 datetime);
2460 break;
2461 case LTTNG_BUFFER_PER_UID:
2462 ret = snprintf(ua_sess->path,
2463 sizeof(ua_sess->path),
2464 DEFAULT_UST_TRACE_UID_PATH,
2465 lttng_credentials_get_uid(&ua_sess->real_credentials),
2466 app->abi.bits_per_long);
2467 break;
2468 default:
2469 abort();
2470 goto error;
2471 }
2472 if (ret < 0) {
2473 PERROR("asprintf UST shadow copy session");
2474 abort();
2475 goto error;
2476 }
2477
2478 strncpy(ua_sess->root_shm_path, usess->root_shm_path, sizeof(ua_sess->root_shm_path));
2479 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
2480 strncpy(ua_sess->shm_path, usess->shm_path, sizeof(ua_sess->shm_path));
2481 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2482 if (ua_sess->shm_path[0]) {
2483 switch (ua_sess->buffer_type) {
2484 case LTTNG_BUFFER_PER_PID:
2485 ret = snprintf(tmp_shm_path,
2486 sizeof(tmp_shm_path),
2487 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2488 app->name,
2489 app->pid,
2490 datetime);
2491 break;
2492 case LTTNG_BUFFER_PER_UID:
2493 ret = snprintf(tmp_shm_path,
2494 sizeof(tmp_shm_path),
2495 "/" DEFAULT_UST_TRACE_UID_PATH,
2496 app->uid,
2497 app->abi.bits_per_long);
2498 break;
2499 default:
2500 abort();
2501 goto error;
2502 }
2503 if (ret < 0) {
2504 PERROR("sprintf UST shadow copy session");
2505 abort();
2506 goto error;
2507 }
2508 strncat(ua_sess->shm_path,
2509 tmp_shm_path,
2510 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2511 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2512 }
2513 return;
2514
2515error:
2516 consumer_output_put(ua_sess->consumer);
2517}
2518
2519/*
2520 * Session lookup wrapper.
2521 */
2522static void __lookup_session_by_app(const struct ltt_ust_session *usess,
2523 struct ust_app *app,
2524 struct lttng_ht_iter *iter)
2525{
2526 /* Get right UST app session from app */
2527 lttng_ht_lookup(app->sessions, &usess->id, iter);
2528}
2529
2530/*
2531 * Return ust app session from the app session hashtable using the UST session
2532 * id.
2533 */
2534static struct ust_app_session *lookup_session_by_app(const struct ltt_ust_session *usess,
2535 struct ust_app *app)
2536{
2537 struct lttng_ht_iter iter;
2538 struct lttng_ht_node_u64 *node;
2539
2540 __lookup_session_by_app(usess, app, &iter);
2541 node = lttng_ht_iter_get_node_u64(&iter);
2542 if (node == nullptr) {
2543 goto error;
2544 }
2545
2546 return lttng::utils::container_of(node, &ust_app_session::node);
2547
2548error:
2549 return nullptr;
2550}
2551
2552/*
2553 * Setup buffer registry per PID for the given session and application. If none
2554 * is found, a new one is created, added to the global registry and
2555 * initialized. If regp is valid, it's set with the newly created object.
2556 *
2557 * Return 0 on success or else a negative value.
2558 */
2559static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2560 struct ust_app *app,
2561 struct buffer_reg_pid **regp)
2562{
2563 int ret = 0;
2564 struct buffer_reg_pid *reg_pid;
2565
2566 LTTNG_ASSERT(ua_sess);
2567 LTTNG_ASSERT(app);
2568
2569 rcu_read_lock();
2570
2571 reg_pid = buffer_reg_pid_find(ua_sess->id);
2572 if (!reg_pid) {
2573 /*
2574 * This is the create channel path, meaning that if there is NO
2575 * registry available, we have to create one for this session.
2576 */
2577 ret = buffer_reg_pid_create(
2578 ua_sess->id, &reg_pid, ua_sess->root_shm_path, ua_sess->shm_path);
2579 if (ret < 0) {
2580 goto error;
2581 }
2582 } else {
2583 goto end;
2584 }
2585
2586 /* Initialize registry. */
2587 reg_pid->registry->reg.ust = ust_registry_session_per_pid_create(
2588 app,
2589 app->abi,
2590 app->version.major,
2591 app->version.minor,
2592 reg_pid->root_shm_path,
2593 reg_pid->shm_path,
2594 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2595 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2596 ua_sess->tracing_id);
2597 if (!reg_pid->registry->reg.ust) {
2598 /*
2599 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2600 * destroy the buffer registry, because it is always expected
2601 * that if the buffer registry can be found, its ust registry is
2602 * non-NULL.
2603 */
2604 buffer_reg_pid_destroy(reg_pid);
2605 goto error;
2606 }
2607
2608 buffer_reg_pid_add(reg_pid);
2609
2610 DBG3("UST app buffer registry per PID created successfully");
2611
2612end:
2613 if (regp) {
2614 *regp = reg_pid;
2615 }
2616error:
2617 rcu_read_unlock();
2618 return ret;
2619}
2620
2621/*
2622 * Setup buffer registry per UID for the given session and application. If none
2623 * is found, a new one is created, added to the global registry and
2624 * initialized. If regp is valid, it's set with the newly created object.
2625 *
2626 * Return 0 on success or else a negative value.
2627 */
2628static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2629 struct ust_app_session *ua_sess,
2630 struct ust_app *app,
2631 struct buffer_reg_uid **regp)
2632{
2633 int ret = 0;
2634 struct buffer_reg_uid *reg_uid;
2635
2636 LTTNG_ASSERT(usess);
2637 LTTNG_ASSERT(app);
2638
2639 rcu_read_lock();
2640
2641 reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
2642 if (!reg_uid) {
2643 /*
2644 * This is the create channel path, meaning that if there is NO
2645 * registry available, we have to create one for this session.
2646 */
2647 ret = buffer_reg_uid_create(usess->id,
2648 app->abi.bits_per_long,
2649 app->uid,
2650 LTTNG_DOMAIN_UST,
2651 &reg_uid,
2652 ua_sess->root_shm_path,
2653 ua_sess->shm_path);
2654 if (ret < 0) {
2655 goto error;
2656 }
2657 } else {
2658 goto end;
2659 }
2660
2661 /* Initialize registry. */
2662 reg_uid->registry->reg.ust = ust_registry_session_per_uid_create(app->abi,
2663 app->version.major,
2664 app->version.minor,
2665 reg_uid->root_shm_path,
2666 reg_uid->shm_path,
2667 usess->uid,
2668 usess->gid,
2669 ua_sess->tracing_id,
2670 app->uid);
2671 if (!reg_uid->registry->reg.ust) {
2672 /*
2673 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2674 * destroy the buffer registry, because it is always expected
2675 * that if the buffer registry can be found, its ust registry is
2676 * non-NULL.
2677 */
2678 buffer_reg_uid_destroy(reg_uid, nullptr);
2679 goto error;
2680 }
2681
2682 /* Add node to teardown list of the session. */
2683 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2684
2685 buffer_reg_uid_add(reg_uid);
2686
2687 DBG3("UST app buffer registry per UID created successfully");
2688end:
2689 if (regp) {
2690 *regp = reg_uid;
2691 }
2692error:
2693 rcu_read_unlock();
2694 return ret;
2695}
2696
2697/*
2698 * Create a session on the tracer side for the given app.
2699 *
2700 * On success, ua_sess_ptr is populated with the session pointer or else left
2701 * untouched. If the session was created, is_created is set to 1. On error,
2702 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2703 * be NULL.
2704 *
2705 * Returns 0 on success or else a negative code which is either -ENOMEM or
2706 * -ENOTCONN, which is the default code if lttng_ust_ctl_create_session fails.
2707 */
2708static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2709 struct ust_app *app,
2710 struct ust_app_session **ua_sess_ptr,
2711 int *is_created)
2712{
2713 int ret, created = 0;
2714 struct ust_app_session *ua_sess;
2715
2716 LTTNG_ASSERT(usess);
2717 LTTNG_ASSERT(app);
2718 LTTNG_ASSERT(ua_sess_ptr);
2719
2720 health_code_update();
2721
2722 ua_sess = lookup_session_by_app(usess, app);
2723 if (ua_sess == nullptr) {
2724 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2725 app->pid,
2726 usess->id);
2727 ua_sess = alloc_ust_app_session();
2728 if (ua_sess == nullptr) {
2729 /* Only malloc can fail, so something is really wrong. */
2730 ret = -ENOMEM;
2731 goto error;
2732 }
2733 shadow_copy_session(ua_sess, usess, app);
2734 created = 1;
2735 }
2736
2737 switch (usess->buffer_type) {
2738 case LTTNG_BUFFER_PER_PID:
2739 /* Init local registry. */
2740 ret = setup_buffer_reg_pid(ua_sess, app, nullptr);
2741 if (ret < 0) {
2742 delete_ust_app_session(-1, ua_sess, app);
2743 goto error;
2744 }
2745 break;
2746 case LTTNG_BUFFER_PER_UID:
2747 /* Look for a global registry. If none exists, create one. */
2748 ret = setup_buffer_reg_uid(usess, ua_sess, app, nullptr);
2749 if (ret < 0) {
2750 delete_ust_app_session(-1, ua_sess, app);
2751 goto error;
2752 }
2753 break;
2754 default:
2755 abort();
2756 ret = -EINVAL;
2757 goto error;
2758 }
2759
2760 health_code_update();
2761
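	/*
	 * A handle of -1 means that no session has been created on the tracer
	 * side for this application yet; create it now.
	 */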
2762 if (ua_sess->handle == -1) {
2763 pthread_mutex_lock(&app->sock_lock);
2764 ret = lttng_ust_ctl_create_session(app->sock);
2765 pthread_mutex_unlock(&app->sock_lock);
2766 if (ret < 0) {
2767 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2768 DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
2769 app->pid,
2770 app->sock);
2771 ret = 0;
2772 } else if (ret == -EAGAIN) {
2773 DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
2774 app->pid,
2775 app->sock);
2776 ret = 0;
2777 } else {
2778 ERR("UST app creating session failed with ret %d: pid = %d, sock =%d",
2779 ret,
2780 app->pid,
2781 app->sock);
2782 }
2783 delete_ust_app_session(-1, ua_sess, app);
2784 if (ret != -ENOMEM) {
2785 /*
2786 * The tracer is probably gone or hit an internal error, so behave
2787 * as if it will soon unregister or is no longer usable.
2788 */
2789 ret = -ENOTCONN;
2790 }
2791 goto error;
2792 }
2793
2794 ua_sess->handle = ret;
2795
2796 /* Add ust app session to app's HT */
2797 lttng_ht_node_init_u64(&ua_sess->node, ua_sess->tracing_id);
2798 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
2799 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2800 lttng_ht_add_unique_ulong(app->ust_sessions_objd, &ua_sess->ust_objd_node);
2801
2802 DBG2("UST app session created successfully with handle %d", ret);
2803 }
2804
2805 *ua_sess_ptr = ua_sess;
2806 if (is_created) {
2807 *is_created = created;
2808 }
2809
2810 /* Everything went well. */
2811 ret = 0;
2812
2813error:
2814 health_code_update();
2815 return ret;
2816}
2817
2818/*
2819 * Match function for a hash table lookup of ust_app_ctx.
2820 *
2821 * It matches a ust app context based on the context type and, in the case
2822 * of perf counters, their name.
2823 */
2824static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2825{
2826 struct ust_app_ctx *ctx;
2827 const struct lttng_ust_context_attr *key;
2828
2829 LTTNG_ASSERT(node);
2830 LTTNG_ASSERT(_key);
2831
2832 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2833 key = (lttng_ust_context_attr *) _key;
2834
2835 /* Context type */
2836 if (ctx->ctx.ctx != key->ctx) {
2837 goto no_match;
2838 }
2839
2840 switch (key->ctx) {
2841 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
2842 if (strncmp(key->u.perf_counter.name,
2843 ctx->ctx.u.perf_counter.name,
2844 sizeof(key->u.perf_counter.name))) {
2845 goto no_match;
2846 }
2847 break;
2848 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
2849 if (strcmp(key->u.app_ctx.provider_name, ctx->ctx.u.app_ctx.provider_name) ||
2850 strcmp(key->u.app_ctx.ctx_name, ctx->ctx.u.app_ctx.ctx_name)) {
2851 goto no_match;
2852 }
2853 break;
2854 default:
2855 break;
2856 }
2857
2858 /* Match. */
2859 return 1;
2860
2861no_match:
2862 return 0;
2863}
2864
2865/*
2866 * Look up a ust app context from an lttng_ust_context.
2867 *
2868 * Must be called while holding the RCU read side lock.
2869 * Return a ust_app_ctx object or NULL on error.
2870 */
2871static struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2872 struct lttng_ust_context_attr *uctx)
2873{
2874 struct lttng_ht_iter iter;
2875 struct lttng_ht_node_ulong *node;
2876 struct ust_app_ctx *app_ctx = nullptr;
2877
2878 LTTNG_ASSERT(uctx);
2879 LTTNG_ASSERT(ht);
2880 ASSERT_RCU_READ_LOCKED();
2881
2882 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2883 cds_lfht_lookup(ht->ht,
2884 ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2885 ht_match_ust_app_ctx,
2886 uctx,
2887 &iter.iter);
2888 node = lttng_ht_iter_get_node_ulong(&iter);
2889 if (!node) {
2890 goto end;
2891 }
2892
2893 app_ctx = lttng::utils::container_of(node, &ust_app_ctx::node);
2894
2895end:
2896 return app_ctx;
2897}
2898
2899/*
2900 * Create a context for the channel on the tracer.
2901 *
2902 * Called with UST app session lock held and a RCU read side lock.
2903 */
2904static int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2905 struct lttng_ust_context_attr *uctx,
2906 struct ust_app *app)
2907{
2908 int ret = 0;
2909 struct ust_app_ctx *ua_ctx;
2910
2911 ASSERT_RCU_READ_LOCKED();
2912
2913 DBG2("UST app adding context to channel %s", ua_chan->name);
2914
2915 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2916 if (ua_ctx) {
2917 ret = -EEXIST;
2918 goto error;
2919 }
2920
2921 ua_ctx = alloc_ust_app_ctx(uctx);
2922 if (ua_ctx == nullptr) {
2923 /* malloc failed */
2924 ret = -ENOMEM;
2925 goto error;
2926 }
2927
2928 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2929 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2930 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2931
2932 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2933 if (ret < 0) {
2934 goto error;
2935 }
2936
2937error:
2938 return ret;
2939}
2940
2941/*
2942 * Enable a ust app event on the tracer side for the session and channel.
2943 *
2944 * Called with UST app session lock held.
2945 */
2946static int enable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app)
2947{
2948 int ret;
2949
2950 ret = enable_ust_object(app, ua_event->obj);
2951 if (ret < 0) {
2952 goto error;
2953 }
2954
2955 ua_event->enabled = 1;
2956
2957error:
2958 return ret;
2959}
2960
2961/*
2962 * Disable a ust app event on the tracer side for the session and channel.
2963 */
2964static int disable_ust_app_event(struct ust_app_event *ua_event, struct ust_app *app)
2965{
2966 int ret;
2967
2968 ret = disable_ust_object(app, ua_event->obj);
2969 if (ret < 0) {
2970 goto error;
2971 }
2972
2973 ua_event->enabled = 0;
2974
2975error:
2976 return ret;
2977}
2978
2979/*
2980 * Lookup ust app channel for session and disable it on the tracer side.
2981 */
2982static int disable_ust_app_channel(struct ust_app_session *ua_sess,
2983 struct ust_app_channel *ua_chan,
2984 struct ust_app *app)
2985{
2986 int ret;
2987
2988 ret = disable_ust_channel(app, ua_sess, ua_chan);
2989 if (ret < 0) {
2990 goto error;
2991 }
2992
2993 ua_chan->enabled = 0;
2994
2995error:
2996 return ret;
2997}
2998
2999/*
3000 * Lookup ust app channel for session and enable it on the tracer side. This
3001 * MUST be called with a RCU read side lock acquired.
3002 */
3003static int enable_ust_app_channel(struct ust_app_session *ua_sess,
3004 struct ltt_ust_channel *uchan,
3005 struct ust_app *app)
3006{
3007 int ret = 0;
3008 struct lttng_ht_iter iter;
3009 struct lttng_ht_node_str *ua_chan_node;
3010 struct ust_app_channel *ua_chan;
3011
3012 ASSERT_RCU_READ_LOCKED();
3013
3014 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
3015 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3016 if (ua_chan_node == nullptr) {
3017 DBG2("Unable to find channel %s in ust session id %" PRIu64,
3018 uchan->name,
3019 ua_sess->tracing_id);
3020 goto error;
3021 }
3022
3023 ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
3024
3025 ret = enable_ust_channel(app, ua_sess, ua_chan);
3026 if (ret < 0) {
3027 goto error;
3028 }
3029
3030error:
3031 return ret;
3032}
3033
3034/*
3035 * Ask the consumer to create a channel and get it if successful.
3036 *
3037 * Called with UST app session lock held.
3038 *
3039 * Return 0 on success or else a negative value.
3040 */
3041static int do_consumer_create_channel(struct ltt_ust_session *usess,
3042 struct ust_app_session *ua_sess,
3043 struct ust_app_channel *ua_chan,
3044 int bitness,
3045 lsu::registry_session *registry)
3046{
3047 int ret;
3048 unsigned int nb_fd = 0;
3049 struct consumer_socket *socket;
3050
3051 LTTNG_ASSERT(usess);
3052 LTTNG_ASSERT(ua_sess);
3053 LTTNG_ASSERT(ua_chan);
3054 LTTNG_ASSERT(registry);
3055
3056 rcu_read_lock();
3057 health_code_update();
3058
3059 /* Get the right consumer socket for the application. */
3060 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
3061 if (!socket) {
3062 ret = -EINVAL;
3063 goto error;
3064 }
3065
3066 health_code_update();
3067
3068 /* Need one fd for the channel. */
3069 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3070 if (ret < 0) {
3071 ERR("Exhausted number of available FD upon create channel");
3072 goto error;
3073 }
3074
3075 /*
3076 * Ask the consumer to create the channel. The consumer will return the
3077 * number of streams we have to expect.
3078 */
3079 ret = ust_consumer_ask_channel(
3080 ua_sess, ua_chan, usess->consumer, socket, registry, usess->current_trace_chunk);
3081 if (ret < 0) {
3082 goto error_ask;
3083 }
3084
3085 /*
3086 * Compute the number of fds needed before receiving them. It must be 2 per
3087 * stream (2 being the default value here).
3088 */
3089 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
3090
3091 /* Reserve the amount of file descriptor we need. */
3092 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
3093 if (ret < 0) {
3094 ERR("Exhausted number of available FD upon create channel");
3095 goto error_fd_get_stream;
3096 }
3097
3098 health_code_update();
3099
3100 /*
3101 * Now get the channel from the consumer. This call will populate the stream
3102 * list of that channel and set the ust objects.
3103 */
3104 if (usess->consumer->enabled) {
3105 ret = ust_consumer_get_channel(socket, ua_chan);
3106 if (ret < 0) {
3107 goto error_destroy;
3108 }
3109 }
3110
3111 rcu_read_unlock();
3112 return 0;
3113
3114error_destroy:
3115 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
3116error_fd_get_stream:
3117 /*
3118 * Initiate a destroy channel on the consumer since we had an error
3119 * handling it on our side. The return value is of no importance since we
3120 * already have a ret value set by the previous error that we need to
3121 * return.
3122 */
3123 (void) ust_consumer_destroy_channel(socket, ua_chan);
3124error_ask:
3125 lttng_fd_put(LTTNG_FD_APPS, 1);
3126error:
3127 health_code_update();
3128 rcu_read_unlock();
3129 return ret;
3130}
3131
3132/*
3133 * Duplicate the ust data object of the ust app stream and save it in the
3134 * buffer registry stream.
3135 *
3136 * Return 0 on success or else a negative value.
3137 */
3138static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
3139 struct ust_app_stream *stream)
3140{
3141 int ret;
3142
3143 LTTNG_ASSERT(reg_stream);
3144 LTTNG_ASSERT(stream);
3145
3146 /* Duplicating a stream requires 2 new fds. Reserve them. */
3147 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3148 if (ret < 0) {
3149 ERR("Exhausted number of available FD upon duplicate stream");
3150 goto error;
3151 }
3152
3153 /* Duplicate object for stream once the original is in the registry. */
3154 ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj, reg_stream->obj.ust);
3155 if (ret < 0) {
3156 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3157 reg_stream->obj.ust,
3158 stream->obj,
3159 ret);
3160 lttng_fd_put(LTTNG_FD_APPS, 2);
3161 goto error;
3162 }
3163 stream->handle = stream->obj->handle;
3164
3165error:
3166 return ret;
3167}
3168
3169/*
3170 * Duplicate the ust data object of the ust app channel and save it in the
3171 * buffer registry channel.
3172 *
3173 * Return 0 on success or else a negative value.
3174 */
3175static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
3176 struct ust_app_channel *ua_chan)
3177{
3178 int ret;
3179
3180 LTTNG_ASSERT(buf_reg_chan);
3181 LTTNG_ASSERT(ua_chan);
3182
3183 /* Duplicating a channel requires 1 new fd. Reserve it. */
3184 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3185 if (ret < 0) {
3186 ERR("Exhausted number of available FD upon duplicate channel");
3187 goto error_fd_get;
3188 }
3189
3190 /* Duplicate object for stream once the original is in the registry. */
3191 ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
3192 if (ret < 0) {
3193 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3194 buf_reg_chan->obj.ust,
3195 ua_chan->obj,
3196 ret);
3197 goto error;
3198 }
3199 ua_chan->handle = ua_chan->obj->handle;
3200
3201 return 0;
3202
3203error:
3204 lttng_fd_put(LTTNG_FD_APPS, 1);
3205error_fd_get:
3206 return ret;
3207}
3208
3209/*
3210 * For a given channel buffer registry, setup all streams of the given ust
3211 * application channel.
3212 *
3213 * Return 0 on success or else a negative value.
3214 */
3215static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
3216 struct ust_app_channel *ua_chan,
3217 struct ust_app *app)
3218{
3219 int ret = 0;
3220 struct ust_app_stream *stream, *stmp;
3221
3222 LTTNG_ASSERT(buf_reg_chan);
3223 LTTNG_ASSERT(ua_chan);
3224
3225 DBG2("UST app setup buffer registry stream");
3226
3227 /* Send all streams to application. */
3228 cds_list_for_each_entry_safe (stream, stmp, &ua_chan->streams.head, list) {
3229 struct buffer_reg_stream *reg_stream;
3230
3231 ret = buffer_reg_stream_create(&reg_stream);
3232 if (ret < 0) {
3233 goto error;
3234 }
3235
3236 /*
3237 * Keep original pointer and nullify it in the stream so the delete
3238 * stream call does not release the object.
3239 */
3240 reg_stream->obj.ust = stream->obj;
3241 stream->obj = nullptr;
3242 buffer_reg_stream_add(reg_stream, buf_reg_chan);
3243
3244 /* We don't need the streams anymore. */
3245 cds_list_del(&stream->list);
3246 delete_ust_app_stream(-1, stream, app);
3247 }
3248
3249error:
3250 return ret;
3251}
3252
3253/*
3254 * Create a buffer registry channel for the given session registry and
3255 * application channel object. If regp pointer is valid, it's set with the
3256 * created object. Importantly, the created object is NOT added to the session
3257 * registry hash table.
3258 *
3259 * Return 0 on success else a negative value.
3260 */
3261static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3262 struct ust_app_channel *ua_chan,
3263 struct buffer_reg_channel **regp)
3264{
3265 int ret;
3266 struct buffer_reg_channel *buf_reg_chan = nullptr;
3267
3268 LTTNG_ASSERT(reg_sess);
3269 LTTNG_ASSERT(ua_chan);
3270
3271 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3272
3273 /* Create buffer registry channel. */
3274 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
3275 if (ret < 0) {
3276 goto error_create;
3277 }
3278 LTTNG_ASSERT(buf_reg_chan);
3279 buf_reg_chan->consumer_key = ua_chan->key;
3280 buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3281 buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3282
3283 /* Create and add a channel registry to session. */
3284 try {
3285 reg_sess->reg.ust->add_channel(ua_chan->tracing_channel_id);
3286 } catch (const std::exception& ex) {
3287 ERR("Failed to add a channel registry to userspace registry session: %s",
3288 ex.what());
3289 ret = -1;
3290 goto error;
3291 }
3292
3293 buffer_reg_channel_add(reg_sess, buf_reg_chan);
3294
3295 if (regp) {
3296 *regp = buf_reg_chan;
3297 }
3298
3299 return 0;
3300
3301error:
3302 /* Safe because the registry channel object was not added to any HT. */
3303 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3304error_create:
3305 return ret;
3306}
3307
3308/*
3309 * Setup buffer registry channel for the given session registry and application
3310 * channel object. If regp pointer is valid, it's set with the created object.
3311 *
3312 * Return 0 on success else a negative value.
3313 */
3314static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3315 struct ust_app_channel *ua_chan,
3316 struct buffer_reg_channel *buf_reg_chan,
3317 struct ust_app *app)
3318{
3319 int ret;
3320
3321 LTTNG_ASSERT(reg_sess);
3322 LTTNG_ASSERT(buf_reg_chan);
3323 LTTNG_ASSERT(ua_chan);
3324 LTTNG_ASSERT(ua_chan->obj);
3325
3326 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3327
3328 /* Setup all streams for the registry. */
3329 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
3330 if (ret < 0) {
3331 goto error;
3332 }
3333
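	/*
	 * Transfer ownership of the channel's ust object to the buffer
	 * registry channel; the app channel no longer references it.
	 */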
3334 buf_reg_chan->obj.ust = ua_chan->obj;
3335 ua_chan->obj = nullptr;
3336
3337 return 0;
3338
3339error:
3340 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3341 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3342 return ret;
3343}
3344
3345/*
3346 * Send buffer registry channel to the application.
3347 *
3348 * Return 0 on success else a negative value.
3349 */
3350static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
3351 struct ust_app *app,
3352 struct ust_app_session *ua_sess,
3353 struct ust_app_channel *ua_chan)
3354{
3355 int ret;
3356 struct buffer_reg_stream *reg_stream;
3357
3358 LTTNG_ASSERT(buf_reg_chan);
3359 LTTNG_ASSERT(app);
3360 LTTNG_ASSERT(ua_sess);
3361 LTTNG_ASSERT(ua_chan);
3362
3363 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3364
3365 ret = duplicate_channel_object(buf_reg_chan, ua_chan);
3366 if (ret < 0) {
3367 goto error;
3368 }
3369
3370 /* Send channel to the application. */
3371 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
3372 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3373 ret = -ENOTCONN; /* Caused by app exiting. */
3374 goto error;
3375 } else if (ret == -EAGAIN) {
3376 /* Caused by timeout. */
3377 WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64
3378 "\".",
3379 app->pid,
3380 ua_chan->name,
3381 ua_sess->tracing_id);
3382 /* Treat this the same way as an application that is exiting. */
3383 ret = -ENOTCONN;
3384 goto error;
3385 } else if (ret < 0) {
3386 goto error;
3387 }
3388
3389 health_code_update();
3390
3391 /* Send all streams to application. */
3392 pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
3393 cds_list_for_each_entry (reg_stream, &buf_reg_chan->streams, lnode) {
3394 struct ust_app_stream stream = {};
3395
3396 ret = duplicate_stream_object(reg_stream, &stream);
3397 if (ret < 0) {
3398 goto error_stream_unlock;
3399 }
3400
3401 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3402 if (ret < 0) {
3403 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3404 ret = -ENOTCONN; /* Caused by app exiting. */
3405 } else if (ret == -EAGAIN) {
3406 /*
3407 * Caused by timeout.
3408 * Treat this the same way as an application
3409 * that is exiting.
3410 */
3411 WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64
3412 "\".",
3413 app->pid,
3414 ua_chan->name,
3415 ua_sess->tracing_id);
3416 ret = -ENOTCONN;
3417 }
3418 (void) release_ust_app_stream(-1, &stream, app);
3419 goto error_stream_unlock;
3420 }
3421
3422 /*
3423 * The return value is not important here. This function will output an
3424 * error if needed.
3425 */
3426 (void) release_ust_app_stream(-1, &stream, app);
3427 }
3428
3429error_stream_unlock:
3430 pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
3431error:
3432 return ret;
3433}
3434
3435/*
3436 * Create the per-UID channel buffers and send them to the application.
3437 *
3438 * This MUST be called with a RCU read side lock acquired.
3439 * The session list lock and the session's lock must be acquired.
3440 *
3441 * Return 0 on success else a negative value.
3442 */
3443static int create_channel_per_uid(struct ust_app *app,
3444 struct ltt_ust_session *usess,
3445 struct ust_app_session *ua_sess,
3446 struct ust_app_channel *ua_chan)
3447{
3448 int ret;
3449 struct buffer_reg_uid *reg_uid;
3450 struct buffer_reg_channel *buf_reg_chan;
3451 struct ltt_session *session = nullptr;
3452 enum lttng_error_code notification_ret;
3453
3454 LTTNG_ASSERT(app);
3455 LTTNG_ASSERT(usess);
3456 LTTNG_ASSERT(ua_sess);
3457 LTTNG_ASSERT(ua_chan);
3458 ASSERT_RCU_READ_LOCKED();
3459
3460 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3461
3462 reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
3463 /*
3464 * The session creation handles the creation of this global registry
3465 * object. If none can be found, there is a code flow problem or a
3466 * teardown race.
3467 */
3468 LTTNG_ASSERT(reg_uid);
3469
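	/*
	 * With per-UID buffers, the registry channel (and its buffers) may
	 * already exist, typically created through another application sharing
	 * the same UID and bitness. In that case, skip the creation steps and
	 * only send the buffers to this application.
	 */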
3470 buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id, reg_uid);
3471 if (buf_reg_chan) {
3472 goto send_channel;
3473 }
3474
3475 /* Create the buffer registry channel object. */
3476 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
3477 if (ret < 0) {
3478 ERR("Error creating the UST channel \"%s\" registry instance", ua_chan->name);
3479 goto error;
3480 }
3481
3482 session = session_find_by_id(ua_sess->tracing_id);
3483 LTTNG_ASSERT(session);
3484 ASSERT_LOCKED(session->lock);
3485 ASSERT_SESSION_LIST_LOCKED();
3486
3487 /*
3488 * Create the buffers on the consumer side. This call populates the
3489 * ust app channel object with all streams and data object.
3490 */
3491 ret = do_consumer_create_channel(
3492 usess, ua_sess, ua_chan, app->abi.bits_per_long, reg_uid->registry->reg.ust);
3493 if (ret < 0) {
3494 ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name);
3495
3496 /*
3497 * Let's remove the previously created buffer registry channel so
3498 * it's not visible anymore in the session registry.
3499 */
3500 auto locked_registry = reg_uid->registry->reg.ust->lock();
3501 try {
3502 locked_registry->remove_channel(ua_chan->tracing_channel_id, false);
3503 } catch (const std::exception& ex) {
3504 DBG("Could not find channel for removal: %s", ex.what());
3505 }
3506 buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
3507 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3508 goto error;
3509 }
3510
3511 /*
3512 * Setup the streams and add it to the session registry.
3513 */
3514 ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, buf_reg_chan, app);
3515 if (ret < 0) {
3516 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3517 goto error;
3518 }
3519
3520 {
3521 auto locked_registry = reg_uid->registry->reg.ust->lock();
3522 auto& ust_reg_chan = locked_registry->channel(ua_chan->tracing_channel_id);
3523
3524 ust_reg_chan._consumer_key = ua_chan->key;
3525 }
3526
3527 /* Notify the notification subsystem of the channel's creation. */
3528 notification_ret = notification_thread_command_add_channel(
3529 the_notification_thread_handle,
3530 session->id,
3531 ua_chan->name,
3532 ua_chan->key,
3533 LTTNG_DOMAIN_UST,
3534 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3535 if (notification_ret != LTTNG_OK) {
3536 ret = -(int) notification_ret;
3537 ERR("Failed to add channel to notification thread");
3538 goto error;
3539 }
3540
3541send_channel:
3542 /* Send buffers to the application. */
3543 ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
3544 if (ret < 0) {
3545 if (ret != -ENOTCONN) {
3546 ERR("Error sending channel to application");
3547 }
3548 goto error;
3549 }
3550
3551error:
3552 if (session) {
3553 session_put(session);
3554 }
3555 return ret;
3556}
3557
3558/*
3559 * Create the per-PID channel buffers and send them to the application.
3560 *
3561 * Called with UST app session lock held.
3562 * The session list lock and the session's lock must be acquired.
3563 *
3564 * Return 0 on success else a negative value.
3565 */
3566static int create_channel_per_pid(struct ust_app *app,
3567 struct ltt_ust_session *usess,
3568 struct ust_app_session *ua_sess,
3569 struct ust_app_channel *ua_chan)
3570{
3571 int ret;
3572 lsu::registry_session *registry;
3573 enum lttng_error_code cmd_ret;
3574 struct ltt_session *session = nullptr;
3575 uint64_t chan_reg_key;
3576
3577 LTTNG_ASSERT(app);
3578 LTTNG_ASSERT(usess);
3579 LTTNG_ASSERT(ua_sess);
3580 LTTNG_ASSERT(ua_chan);
3581
3582 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3583
3584 rcu_read_lock();
3585
3586 registry = get_session_registry(ua_sess);
3587 /* The UST app session lock is held, registry shall not be null. */
3588 LTTNG_ASSERT(registry);
3589
3590 /* Create and add a new channel registry to session. */
3591 try {
3592 registry->add_channel(ua_chan->key);
3593 } catch (const std::exception& ex) {
3594 ERR("Error creating the UST channel \"%s\" registry instance: %s",
3595 ua_chan->name,
3596 ex.what());
3597 ret = -1;
3598 goto error;
3599 }
3600
3601 session = session_find_by_id(ua_sess->tracing_id);
3602 LTTNG_ASSERT(session);
3603 ASSERT_LOCKED(session->lock);
3604 ASSERT_SESSION_LIST_LOCKED();
3605
3606 /* Create and get channel on the consumer side. */
3607 ret = do_consumer_create_channel(usess, ua_sess, ua_chan, app->abi.bits_per_long, registry);
3608 if (ret < 0) {
3609 ERR("Error creating UST channel \"%s\" on the consumer daemon", ua_chan->name);
3610 goto error_remove_from_registry;
3611 }
3612
3613 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3614 if (ret < 0) {
3615 if (ret != -ENOTCONN) {
3616 ERR("Error sending channel to application");
3617 }
3618 goto error_remove_from_registry;
3619 }
3620
3621 chan_reg_key = ua_chan->key;
3622 {
3623 auto locked_registry = registry->lock();
3624
3625 auto& ust_reg_chan = locked_registry->channel(chan_reg_key);
3626 ust_reg_chan._consumer_key = ua_chan->key;
3627 }
3628
3629 cmd_ret = notification_thread_command_add_channel(the_notification_thread_handle,
3630 session->id,
3631 ua_chan->name,
3632 ua_chan->key,
3633 LTTNG_DOMAIN_UST,
3634 ua_chan->attr.subbuf_size *
3635 ua_chan->attr.num_subbuf);
3636 if (cmd_ret != LTTNG_OK) {
3637 ret = -(int) cmd_ret;
3638 ERR("Failed to add channel to notification thread");
3639 goto error_remove_from_registry;
3640 }
3641
3642error_remove_from_registry:
3643 if (ret) {
3644 try {
3645 auto locked_registry = registry->lock();
3646 locked_registry->remove_channel(ua_chan->key, false);
3647 } catch (const std::exception& ex) {
3648 DBG("Could not find channel for removal: %s", ex.what());
3649 }
3650 }
3651error:
3652 rcu_read_unlock();
3653 if (session) {
3654 session_put(session);
3655 }
3656 return ret;
3657}
3658
3659/*
3660 * From an already allocated ust app channel, create the channel buffers if
3661 * needed and send them to the application. This MUST be called with a RCU read
3662 * side lock acquired.
3663 *
3664 * Called with UST app session lock held.
3665 *
3666 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3667 * the application exited concurrently.
3668 */
3669static int ust_app_channel_send(struct ust_app *app,
3670 struct ltt_ust_session *usess,
3671 struct ust_app_session *ua_sess,
3672 struct ust_app_channel *ua_chan)
3673{
3674 int ret;
3675
3676 LTTNG_ASSERT(app);
3677 LTTNG_ASSERT(usess);
3678 LTTNG_ASSERT(usess->active);
3679 LTTNG_ASSERT(ua_sess);
3680 LTTNG_ASSERT(ua_chan);
3681 ASSERT_RCU_READ_LOCKED();
3682
3683 /* Handle buffer type before sending the channel to the application. */
3684 switch (usess->buffer_type) {
3685 case LTTNG_BUFFER_PER_UID:
3686 {
3687 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3688 if (ret < 0) {
3689 goto error;
3690 }
3691 break;
3692 }
3693 case LTTNG_BUFFER_PER_PID:
3694 {
3695 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3696 if (ret < 0) {
3697 goto error;
3698 }
3699 break;
3700 }
3701 default:
3702 abort();
3703 ret = -EINVAL;
3704 goto error;
3705 }
3706
3707 /* Initialize ust objd object using the received handle and add it. */
3708 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3709 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3710
3711 /* If channel is not enabled, disable it on the tracer */
3712 if (!ua_chan->enabled) {
3713 ret = disable_ust_channel(app, ua_sess, ua_chan);
3714 if (ret < 0) {
3715 goto error;
3716 }
3717 }
3718
3719error:
3720 return ret;
3721}
3722
3723/*
3724 * Create UST app channel and return it through ua_chanp if not NULL.
3725 *
3726 * Called with UST app session lock and RCU read-side lock held.
3727 *
3728 * Return 0 on success or else a negative value.
3729 */
3730static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3731 struct ltt_ust_channel *uchan,
3732 enum lttng_ust_abi_chan_type type,
3733 struct ltt_ust_session *usess __attribute__((unused)),
3734 struct ust_app_channel **ua_chanp)
3735{
3736 int ret = 0;
3737 struct lttng_ht_iter iter;
3738 struct lttng_ht_node_str *ua_chan_node;
3739 struct ust_app_channel *ua_chan;
3740
3741 ASSERT_RCU_READ_LOCKED();
3742
3743 /* Lookup channel in the ust app session */
3744 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
3745 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3746 if (ua_chan_node != nullptr) {
3747 ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
3748 goto end;
3749 }
3750
3751 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3752 if (ua_chan == nullptr) {
3753 /* Only malloc can fail here */
3754 ret = -ENOMEM;
3755 goto error;
3756 }
3757 shadow_copy_channel(ua_chan, uchan);
3758
3759 /* Set channel type. */
3760 ua_chan->attr.type = type;
3761
3762 /* Add the channel to the session's channel hash table. */
3763 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3764end:
3765 if (ua_chanp) {
3766 *ua_chanp = ua_chan;
3767 }
3768
3769 /* Everything went well. */
3770 return 0;
3771
3772error:
3773 return ret;
3774}
3775
3776/*
3777 * Create UST app event and create it on the tracer side.
3778 *
3779 * Must be called with the RCU read side lock held.
3780 * Called with ust app session mutex held.
3781 */
3782static int create_ust_app_event(struct ust_app_channel *ua_chan,
3783 struct ltt_ust_event *uevent,
3784 struct ust_app *app)
3785{
3786 int ret = 0;
3787 struct ust_app_event *ua_event;
3788
3789 ASSERT_RCU_READ_LOCKED();
3790
3791 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3792 if (ua_event == nullptr) {
3793 /* Only failure mode of alloc_ust_app_event(). */
3794 ret = -ENOMEM;
3795 goto end;
3796 }
3797 shadow_copy_event(ua_event, uevent);
3798
3799 /* Create it on the tracer side */
3800 ret = create_ust_event(app, ua_chan, ua_event);
3801 if (ret < 0) {
3802 /*
3803 * Not found previously means that it does not exist on the
3804 * tracer. If the application reports that the event existed,
3805 * it means there is a bug in the sessiond or lttng-ust
3806 * (or corruption, etc.)
3807 */
3808 if (ret == -LTTNG_UST_ERR_EXIST) {
3809 ERR("Tracer for application reported that an event being created already existed: "
3810 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3811 uevent->attr.name,
3812 app->pid,
3813 app->ppid,
3814 app->uid,
3815 app->gid);
3816 }
3817 goto error;
3818 }
3819
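	/* Creation on the tracer side succeeded; index the event in the channel's event hash table. */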
3820 add_unique_ust_app_event(ua_chan, ua_event);
3821
3822 DBG2("UST app create event completed: app = '%s' pid = %d", app->name, app->pid);
3823
3824end:
3825 return ret;
3826
3827error:
3828 /* Valid: the caller already holds the RCU read-side lock. */
3829 delete_ust_app_event(-1, ua_event, app);
3830 return ret;
3831}
3832
3833/*
3834 * Create UST app event notifier rule and create it on the tracer side.
3835 *
3836 * Must be called with the RCU read side lock held.
3837 * Called with ust app session mutex held.
3838 */
3839static int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger, struct ust_app *app)
3840{
3841 int ret = 0;
3842 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
3843
3844 ASSERT_RCU_READ_LOCKED();
3845
3846 ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
3847 if (ua_event_notifier_rule == nullptr) {
3848 ret = -ENOMEM;
3849 goto end;
3850 }
3851
3852 /* Create it on the tracer side. */
3853 ret = create_ust_event_notifier(app, ua_event_notifier_rule);
3854 if (ret < 0) {
3855 /*
3856 * Not found previously means that it does not exist on the
3857 * tracer. If the application reports that the event existed,
3858 * it means there is a bug in the sessiond or lttng-ust
3859 * (or corruption, etc.)
3860 */
3861 if (ret == -LTTNG_UST_ERR_EXIST) {
3862 ERR("Tracer for application reported that an event notifier being created already exists: "
3863 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
3864 lttng_trigger_get_tracer_token(trigger),
3865 app->pid,
3866 app->ppid,
3867 app->uid,
3868 app->gid);
3869 }
3870 goto error;
3871 }
3872
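	/* Index the rule by its trigger's tracer token so incoming notifications can be matched to it. */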
3873 lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
3874 &ua_event_notifier_rule->node);
3875
3876 DBG2("UST app create token event rule completed: app = '%s', pid = %d, token = %" PRIu64,
3877 app->name,
3878 app->pid,
3879 lttng_trigger_get_tracer_token(trigger));
3880
3881 goto end;
3882
3883error:
3884 /* The RCU read side lock is already being held by the caller. */
3885 delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
3886end:
3887 return ret;
3888}
3889
3890/*
3891 * Create UST metadata and open it on the tracer side.
3892 *
3893 * Called with UST app session lock held and RCU read side lock.
3894 */
3895static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3896 struct ust_app *app,
3897 struct consumer_output *consumer)
3898{
3899 int ret = 0;
3900 struct ust_app_channel *metadata;
3901 struct consumer_socket *socket;
3902 struct ltt_session *session = nullptr;
3903
3904 LTTNG_ASSERT(ua_sess);
3905 LTTNG_ASSERT(app);
3906 LTTNG_ASSERT(consumer);
3907 ASSERT_RCU_READ_LOCKED();
3908
3909 auto locked_registry = get_locked_session_registry(ua_sess);
3910 /* The UST app session is held; the registry shall not be null. */
3911 LTTNG_ASSERT(locked_registry);
3912
3913 /* Metadata already exists for this registry or it was closed previously */
3914 if (locked_registry->_metadata_key || locked_registry->_metadata_closed) {
3915 ret = 0;
3916 goto error;
3917 }
3918
3919 /* Allocate UST metadata */
3920 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, nullptr);
3921 if (!metadata) {
3922 /* malloc() failed */
3923 ret = -ENOMEM;
3924 goto error;
3925 }
3926
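	/* Use the metadata channel attributes prepared for this application session. */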
3927 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3928
3929 /* Need one fd for the channel. */
3930 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3931 if (ret < 0) {
3932 ERR("Exhausted the number of available file descriptors while creating the metadata channel");
3933 goto error;
3934 }
3935
3936 /* Get the right consumer socket for the application. */
3937 socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, consumer);
3938 if (!socket) {
3939 ret = -EINVAL;
3940 goto error_consumer;
3941 }
3942
3943 /*
3944 * Keep the metadata key so we can identify it on the consumer side. Assign it
3945 * to the registry *before* we ask the consumer so we avoid the race where the
3946 * consumer requests the metadata before the ask_channel call on our side
3947 * has returned.
3948 */
3949 locked_registry->_metadata_key = metadata->key;
3950
3951 session = session_find_by_id(ua_sess->tracing_id);
3952 LTTNG_ASSERT(session);
3953 ASSERT_LOCKED(session->lock);
3954 ASSERT_SESSION_LIST_LOCKED();
3955
3956 /*
3957 * Ask the consumer to create the metadata channel. The metadata object
3958 * will be created by the consumer and kept there. However, the stream is
3959 * never added or monitored until we do a first metadata push to the
3960 * consumer.
3961 */
3962 ret = ust_consumer_ask_channel(ua_sess,
3963 metadata,
3964 consumer,
3965 socket,
3966 locked_registry.get(),
3967 session->current_trace_chunk);
3968 if (ret < 0) {
3969 /* Nullify the metadata key so we don't try to close it later on. */
3970 locked_registry->_metadata_key = 0;
3971 goto error_consumer;
3972 }
3973
3974 /*
3975 * The setup command will make the metadata stream be sent to the relayd,
3976 * if applicable, and to the thread managing the metadata. This is important
3977 * because after this point, if an error occurs, the only way the stream
3978 * can be deleted is for it to be monitored by the consumer.
3979 */
3980 ret = consumer_setup_metadata(socket, metadata->key);
3981 if (ret < 0) {
3982 /* Nullify the metadata key so we don't try to close it later on. */
3983 locked_registry->_metadata_key = 0;
3984 goto error_consumer;
3985 }
3986
3987 DBG2("UST metadata with key %" PRIu64 " created for app pid %d", metadata->key, app->pid);
3988
3989error_consumer:
3990 lttng_fd_put(LTTNG_FD_APPS, 1);
3991 delete_ust_app_channel(-1, metadata, app, locked_registry);
3992error:
3993 if (session) {
3994 session_put(session);
3995 }
3996 return ret;
3997}
3998
3999/*
4000 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
4001 * acquired before calling this function.
4002 */
4003struct ust_app *ust_app_find_by_pid(pid_t pid)
4004{
4005 struct ust_app *app = nullptr;
4006 struct lttng_ht_node_ulong *node;
4007 struct lttng_ht_iter iter;
4008
4009 lttng_ht_lookup(ust_app_ht, (void *) ((unsigned long) pid), &iter);
4010 node = lttng_ht_iter_get_node_ulong(&iter);
4011 if (node == nullptr) {
4012 DBG2("UST app not found with pid %d", pid);
4013 goto error;
4014 }
4015
4016 DBG2("Found UST app by pid %d", pid);
4017
4018 app = lttng::utils::container_of(node, &ust_app::pid_n);
4019
4020error:
4021 return app;
4022}
4023
4024/*
4025 * Allocate and initialize a UST app object using the registration information and
4026 * the command socket. This is called when the command socket connects to the
4027 * session daemon.
4028 *
4029 * The object is returned on success or else NULL.
4030 */
4031struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
4032{
4033 int ret;
4034 struct ust_app *lta = nullptr;
4035 struct lttng_pipe *event_notifier_event_source_pipe = nullptr;
4036
4037 LTTNG_ASSERT(msg);
4038 LTTNG_ASSERT(sock >= 0);
4039
4040 DBG3("UST app creating application for socket %d", sock);
4041
4042 if ((msg->bits_per_long == 64 && (uatomic_read(&the_ust_consumerd64_fd) == -EINVAL)) ||
4043 (msg->bits_per_long == 32 && (uatomic_read(&the_ust_consumerd32_fd) == -EINVAL))) {
4044 ERR("Registration failed: application \"%s\" (pid: %d) uses "
4045 "%d-bit longs, but no consumerd for this size is available.",
4046 msg->name,
4047 msg->pid,
4048 msg->bits_per_long);
4049 goto error;
4050 }
4051
4052 /*
4053 * Reserve the two file descriptors of the event source pipe. The write
4054 * end will be closed once it is passed to the application, at which
4055 * point a single 'put' will be performed.
4056 */
4057 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
4058 if (ret) {
4059 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
4060 msg->name,
4061 (int) msg->pid);
4062 goto error;
4063 }
4064
4065 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
4066 if (!event_notifier_event_source_pipe) {
4067 PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
4068 msg->name,
4069 msg->pid);
4070 goto error;
4071 }
4072
4073 lta = zmalloc<ust_app>();
4074 if (lta == nullptr) {
4075 PERROR("malloc");
4076 goto error_free_pipe;
4077 }
4078
4079 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
4080
4081 lta->ppid = msg->ppid;
4082 lta->uid = msg->uid;
4083 lta->gid = msg->gid;
4084
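	/* Record the application's ABI as reported in its registration message. */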
4085 lta->abi = {
4086 .bits_per_long = msg->bits_per_long,
4087 .long_alignment = msg->long_alignment,
4088 .uint8_t_alignment = msg->uint8_t_alignment,
4089 .uint16_t_alignment = msg->uint16_t_alignment,
4090 .uint32_t_alignment = msg->uint32_t_alignment,
4091 .uint64_t_alignment = msg->uint64_t_alignment,
4092 .byte_order = msg->byte_order == LITTLE_ENDIAN ?
4093 lttng::sessiond::trace::byte_order::LITTLE_ENDIAN_ :
4094 lttng::sessiond::trace::byte_order::BIG_ENDIAN_,
4095 };
4096
4097 lta->v_major = msg->major;
4098 lta->v_minor = msg->minor;
4099 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4100 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4101 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4102 lta->notify_sock = -1;
4103 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4104
4105 /* Copy name and make sure it's NULL terminated. */
4106 strncpy(lta->name, msg->name, sizeof(lta->name));
4107 lta->name[UST_APP_PROCNAME_LEN] = '\0';
4108
4109 /*
4110 * Before this can be called, the application's compatibility is checked
4111 * when the registration information is received. So, at this point, the
4112 * application can work with this session daemon.
4113 */
4114 lta->compatible = 1;
4115
4116 lta->pid = msg->pid;
4117 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
4118 lta->sock = sock;
4119 pthread_mutex_init(&lta->sock_lock, nullptr);
4120 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
4121
4122 CDS_INIT_LIST_HEAD(&lta->teardown_head);
4123 return lta;
4124
4125error_free_pipe:
4126 lttng_pipe_destroy(event_notifier_event_source_pipe);
4127 lttng_fd_put(LTTNG_FD_APPS, 2);
4128error:
4129 return nullptr;
4130}
4131
4132/*
4133 * For a given application object, add it to every hash table.
4134 */
4135void ust_app_add(struct ust_app *app)
4136{
4137 LTTNG_ASSERT(app);
4138 LTTNG_ASSERT(app->notify_sock >= 0);
4139
4140 app->registration_time = time(nullptr);
4141
4142 rcu_read_lock();
4143
4144 /*
4145 * On a re-registration, we want to kick out the previous registration of
4146 * that pid
4147 */
4148 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
4149
4150 /*
4151 * The socket _should_ be unique until _we_ call close. So, an add_unique
4152 * on ust_app_ht_by_sock is used, which fails an assertion if the entry was
4153 * already in the table.
4154 */
4155 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
4156
4157 /* Add application to the notify socket hash table. */
4158 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
4159 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
4160
4161 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
4162 "notify_sock:%d (version %d.%d)",
4163 app->pid,
4164 app->ppid,
4165 app->uid,
4166 app->gid,
4167 app->sock,
4168 app->name,
4169 app->notify_sock,
4170 app->v_major,
4171 app->v_minor);
4172
4173 rcu_read_unlock();
4174}
4175
4176/*
4177 * Set the application version into the object.
4178 *
4179 * Return 0 on success, else a negative value (either an errno code or an
4180 * LTTng-UST error code).
4181 */
4182int ust_app_version(struct ust_app *app)
4183{
4184 int ret;
4185
4186 LTTNG_ASSERT(app);
4187
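	/* Query the tracer version over the app's command socket; sock_lock serializes access to it. */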
4188 pthread_mutex_lock(&app->sock_lock);
4189 ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
4190 pthread_mutex_unlock(&app->sock_lock);
4191 if (ret < 0) {
4192 if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
4193 DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
4194 app->pid,
4195 app->sock);
4196 } else if (ret == -EAGAIN) {
4197 WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
4198 app->pid,
4199 app->sock);
4200 } else {
4201 ERR("UST app version failed with ret %d: pid = %d, sock = %d",
4202 ret,
4203 app->pid,
4204 app->sock);
4205 }
4206 }
4207
4208 return ret;
4209}
4210
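/* Event notifier and counter support require an application (UST ABI) major version of at least 9. */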
4211bool ust_app_supports_notifiers(const struct ust_app *app)
4212{
4213 return app->v_major >= 9;
4214}
4215
4216bool ust_app_supports_counters(const struct ust_app *app)
4217{
4218 return app->v_major >= 9;
4219}
4220
4221/*
4222 * Setup the base event notifier group.
4223 *
4224 * Return 0 on success, else a negative value (either an errno code or an
4225 * LTTng-UST error code).
4226 */
4227int ust_app_setup_event_notifier_group(struct ust_app *app)
4228{
4229 int ret;
4230 int event_pipe_write_fd;
4231 struct lttng_ust_abi_object_data *event_notifier_group = nullptr;
4232 enum lttng_error_code lttng_ret;
4233 enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
4234
4235 LTTNG_ASSERT(app);
4236
4237 if (!ust_app_supports_notifiers(app)) {
4238 ret = -ENOSYS;
4239 goto error;
4240 }
4241
4242 /* Get the write side of the pipe. */
4243 event_pipe_write_fd = lttng_pipe_get_writefd(app->event_notifier_group.event_pipe);
4244
4245 pthread_mutex_lock(&app->sock_lock);
4246 ret = lttng_ust_ctl_create_event_notifier_group(
4247 app->sock, event_pipe_write_fd, &event_notifier_group);
4248 pthread_mutex_unlock(&app->sock_lock);
4249 if (ret < 0) {
4250 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
4251 ret = 0;
4252 DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
4253 app->pid,
4254 app->sock);
4255 } else if (ret == -EAGAIN) {
4256 ret = 0;
4257 WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
4258 app->pid,
4259 app->sock);
4260 } else {
4261 ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
4262 ret,
4263 app->pid,
4264 app->sock,
4265 event_pipe_write_fd);
4266 }
4267 goto error;
4268 }
4269
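	/* The write end of the event pipe was passed to the application; close our copy. */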
4270 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
4271 if (ret) {
4272 ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
4273 app->name,
4274 app->pid);
4275 goto error;
4276 }
4277
4278 /*
4279 * Release the file descriptor that was reserved for the write-end of
4280 * the pipe.
4281 */
4282 lttng_fd_put(LTTNG_FD_APPS, 1);
4283
4284 lttng_ret = notification_thread_command_add_tracer_event_source(
4285 the_notification_thread_handle,
4286 lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
4287 LTTNG_DOMAIN_UST);
4288 if (lttng_ret != LTTNG_OK) {
4289 ERR("Failed to add tracer event source to notification thread");
4290 ret = -1;
4291 goto error;
4292 }
4293
4294 /* Assign handle only when the complete setup is valid. */
4295 app->event_notifier_group.object = event_notifier_group;
4296
4297 event_notifier_error_accounting_status = event_notifier_error_accounting_register_app(app);
4298 switch (event_notifier_error_accounting_status) {
4299 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
4300 break;
4301 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
4302 DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
4303 app->sock,
4304 app->name,
4305 (int) app->pid);
4306 ret = 0;
4307 goto error_accounting;
4308 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
4309 DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
4310 app->sock,
4311 app->name,
4312 (int) app->pid);
4313 ret = 0;
4314 goto error_accounting;
4315 default:
4316 ERR("Failed to setup event notifier error accounting for app");
4317 ret = -1;
4318 goto error_accounting;
4319 }
4320
4321 return ret;
4322
4323error_accounting:
4324 lttng_ret = notification_thread_command_remove_tracer_event_source(
4325 the_notification_thread_handle,
4326 lttng_pipe_get_readfd(app->event_notifier_group.event_pipe));
4327 if (lttng_ret != LTTNG_OK) {
4328 ERR("Failed to remove application tracer event source from notification thread");
4329 }
4330
4331error:
4332 lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
4333 free(app->event_notifier_group.object);
4334 app->event_notifier_group.object = nullptr;
4335 return ret;
4336}
4337
4338/*
4339 * Unregister app by removing it from the global traceable app list and freeing
4340 * the data struct.
4341 *
4342 * The socket is already closed at this point, so there is no need to close it.
4343 */
4344void ust_app_unregister(int sock)
4345{
4346 struct ust_app *lta;
4347 struct lttng_ht_node_ulong *node;
4348 struct lttng_ht_iter ust_app_sock_iter;
4349 struct lttng_ht_iter iter;
4350 struct ust_app_session *ua_sess;
4351 int ret;
4352
4353 rcu_read_lock();
4354
4355 /* Get the node reference for a call_rcu */
4356 lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &ust_app_sock_iter);
4357 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
4358 LTTNG_ASSERT(node);
4359
4360 lta = lttng::utils::container_of(node, &ust_app::sock_n);
4361 DBG("PID %d unregistering with sock %d", lta->pid, sock);
4362
4363 /*
4364 * For per-PID buffers, perform "push metadata" and flush all
4365 * application streams before removing app from hash tables,
4366 * ensuring proper behavior of data_pending check.
4367 * Remove sessions so they are not visible during deletion.
4368 */
4369 cds_lfht_for_each_entry (lta->sessions->ht, &iter.iter, ua_sess, node.node) {
4370 ret = lttng_ht_del(lta->sessions, &iter);
4371 if (ret) {
4372 /* The session was already removed and is scheduled for teardown. */
4373 continue;
4374 }
4375
4376 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
4377 (void) ust_app_flush_app_session(lta, ua_sess);
4378 }
4379
4380 /*
4381 * Add session to list for teardown. This is safe since at this point we
4382 * are the only one using this list.
4383 */
4384 pthread_mutex_lock(&ua_sess->lock);
4385
4386 if (ua_sess->deleted) {
4387 pthread_mutex_unlock(&ua_sess->lock);
4388 continue;
4389 }
4390
4391 /*
4392 * Normally, this is done in the delete session process which is
4393 * executed in the call_rcu below. However, upon unregistration we can't
4394 * afford to wait for the grace period before pushing data or else the
4395 * data pending feature can race between the unregistration and stop
4396 * command where the data pending command is sent *before* the grace
4397 * period has ended.
4398 *
4399 * The close metadata below nullifies the metadata pointer in the
4400 * session so the delete session will NOT push/close a second time.
4401 */
4402 auto locked_registry = get_locked_session_registry(ua_sess);
4403 if (locked_registry) {
4404 /* Push metadata for application before freeing the application. */
4405 (void) push_metadata(locked_registry, ua_sess->consumer);
4406
4407 /*
4408 * Don't ask to close metadata for global per-UID buffers. In that case,
4409 * close the metadata only when the trace session is destroyed. Also, the
4410 * previous metadata push could have flagged the metadata registry for
4411 * closure, so don't send a close command if it is already closed.
4412 */
4413 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
4414 const auto metadata_key = locked_registry->_metadata_key;
4415 const auto consumer_bitness = locked_registry->abi.bits_per_long;
4416
4417 if (!locked_registry->_metadata_closed && metadata_key != 0) {
4418 locked_registry->_metadata_closed = true;
4419 }
4420
4421 /* Release lock before communication, see comments in
4422 * close_metadata(). */
4423 locked_registry.reset();
4424 (void) close_metadata(
4425 metadata_key, consumer_bitness, ua_sess->consumer);
4426 } else {
4427 locked_registry.reset();
4428 }
4429 }
4430 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
4431
4432 pthread_mutex_unlock(&ua_sess->lock);
4433 }
4434
4435 /* Remove application from PID hash table */
4436 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4437 LTTNG_ASSERT(!ret);
4438
4439 /*
4440 * Remove application from the notify hash table. The thread handling the
4441 * notify socket could have already deleted the node, so ignore errors;
4442 * either way the result is valid. The close of that socket is handled by the
4443 * apps_notify_thread.
4444 */
4445 iter.iter.node = &lta->notify_sock_n.node;
4446 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4447
4448 /*
4449 * Ignore return value since the node might have been removed before by an
4450 * add replace during app registration because the PID can be reassigned by
4451 * the OS.
4452 */
4453 iter.iter.node = &lta->pid_n.node;
4454 ret = lttng_ht_del(ust_app_ht, &iter);
4455 if (ret) {
4456 DBG3("Unregister app by PID %d failed. This can happen on pid reuse", lta->pid);
4457 }
4458
4459 /* Free memory */
4460 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4461
4462 rcu_read_unlock();
4463 return;
4464}
4465
4466/*
4467 * Fill the events array with the event names of all registered apps.
4468 */
4469int ust_app_list_events(struct lttng_event **events)
4470{
4471 int ret, handle;
4472 size_t nbmem, count = 0;
4473 struct lttng_ht_iter iter;
4474 struct ust_app *app;
4475 struct lttng_event *tmp_event;
4476
4477 nbmem = UST_APP_EVENT_LIST_SIZE;
4478 tmp_event = calloc<lttng_event>(nbmem);
4479 if (tmp_event == nullptr) {
4480 PERROR("calloc ust app events");
4481 ret = -ENOMEM;
4482 goto error;
4483 }
4484
4485 rcu_read_lock();
4486
4487 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4488 struct lttng_ust_abi_tracepoint_iter uiter;
4489
4490 health_code_update();
4491
4492 if (!app->compatible) {
4493 /*
4494 * TODO: In time, we should notify the caller of this error by
4495 * telling them that this is a version error.
4496 */
4497 continue;
4498 }
4499 pthread_mutex_lock(&app->sock_lock);
4500 handle = lttng_ust_ctl_tracepoint_list(app->sock);
4501 if (handle < 0) {
4502 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4503 ERR("UST app list events getting handle failed for app pid %d",
4504 app->pid);
4505 }
4506 pthread_mutex_unlock(&app->sock_lock);
4507 continue;
4508 }
4509
4510 while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle, &uiter)) !=
4511 -LTTNG_UST_ERR_NOENT) {
4512 /* Handle ustctl error. */
4513 if (ret < 0) {
4514 int release_ret;
4515
4516 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4517 ERR("UST app tp list get failed for app %d with ret %d",
4518 app->sock,
4519 ret);
4520 } else {
4521 DBG3("UST app tp list get failed. Application is dead");
4522 break;
4523 }
4524 free(tmp_event);
4525 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4526 if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
4527 release_ret != -EPIPE) {
4528 ERR("Error releasing app handle for app %d with ret %d",
4529 app->sock,
4530 release_ret);
4531 }
4532 pthread_mutex_unlock(&app->sock_lock);
4533 goto rcu_error;
4534 }
4535
4536 health_code_update();
4537 if (count >= nbmem) {
4538 /* In case the realloc fails, we free the memory */
4539 struct lttng_event *new_tmp_event;
4540 size_t new_nbmem;
4541
4542 new_nbmem = nbmem << 1;
4543 DBG2("Reallocating event list from %zu to %zu entries",
4544 nbmem,
4545 new_nbmem);
4546 new_tmp_event = (lttng_event *) realloc(
4547 tmp_event, new_nbmem * sizeof(struct lttng_event));
4548 if (new_tmp_event == nullptr) {
4549 int release_ret;
4550
4551 PERROR("realloc ust app events");
4552 free(tmp_event);
4553 ret = -ENOMEM;
4554 release_ret =
4555 lttng_ust_ctl_release_handle(app->sock, handle);
4556 if (release_ret < 0 &&
4557 release_ret != -LTTNG_UST_ERR_EXITING &&
4558 release_ret != -EPIPE) {
4559 ERR("Error releasing app handle for app %d with ret %d",
4560 app->sock,
4561 release_ret);
4562 }
4563 pthread_mutex_unlock(&app->sock_lock);
4564 goto rcu_error;
4565 }
4566 /* Zero the new memory */
4567 memset(new_tmp_event + nbmem,
4568 0,
4569 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4570 nbmem = new_nbmem;
4571 tmp_event = new_tmp_event;
4572 }
4573 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
4574 tmp_event[count].loglevel = uiter.loglevel;
4575 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
4576 tmp_event[count].pid = app->pid;
4577 tmp_event[count].enabled = -1;
4578 count++;
4579 }
4580 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4581 pthread_mutex_unlock(&app->sock_lock);
4582 if (ret < 0) {
4583 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
4584 DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
4585 app->pid,
4586 app->sock);
4587 } else if (ret == -EAGAIN) {
4588 WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
4589 app->pid,
4590 app->sock);
4591 } else {
4592 ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
4593 ret,
4594 app->pid,
4595 app->sock);
4596 }
4597 }
4598 }
4599
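	/* Transfer ownership of the event array to the caller and return the number of events. */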
4600 ret = count;
4601 *events = tmp_event;
4602
4603 DBG2("UST app list events done (%zu events)", count);
4604
4605rcu_error:
4606 rcu_read_unlock();
4607error:
4608 health_code_update();
4609 return ret;
4610}
4611
4612/*
4613 * Fill the fields array with the event fields of all registered apps.
4614 */
4615int ust_app_list_event_fields(struct lttng_event_field **fields)
4616{
4617 int ret, handle;
4618 size_t nbmem, count = 0;
4619 struct lttng_ht_iter iter;
4620 struct ust_app *app;
4621 struct lttng_event_field *tmp_event;
4622
4623 nbmem = UST_APP_EVENT_LIST_SIZE;
4624 tmp_event = calloc<lttng_event_field>(nbmem);
4625 if (tmp_event == nullptr) {
4626 PERROR("calloc ust app event fields");
4627 ret = -ENOMEM;
4628 goto error;
4629 }
4630
4631 rcu_read_lock();
4632
4633 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4634 struct lttng_ust_abi_field_iter uiter;
4635
4636 health_code_update();
4637
4638 if (!app->compatible) {
4639 /*
4640 * TODO: In time, we should notify the caller of this error by
4641 * telling them that this is a version error.
4642 */
4643 continue;
4644 }
4645 pthread_mutex_lock(&app->sock_lock);
4646 handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
4647 if (handle < 0) {
4648 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4649 ERR("UST app list field getting handle failed for app pid %d",
4650 app->pid);
4651 }
4652 pthread_mutex_unlock(&app->sock_lock);
4653 continue;
4654 }
4655
4656 while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle, &uiter)) !=
4657 -LTTNG_UST_ERR_NOENT) {
4658 /* Handle ustctl error. */
4659 if (ret < 0) {
4660 int release_ret;
4661
4662 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4663 ERR("UST app tp list field failed for app %d with ret %d",
4664 app->sock,
4665 ret);
4666 } else {
4667 DBG3("UST app tp list field failed. Application is dead");
4668 break;
4669 }
4670 free(tmp_event);
4671 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4672 pthread_mutex_unlock(&app->sock_lock);
4673 if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
4674 release_ret != -EPIPE) {
4675 ERR("Error releasing app handle for app %d with ret %d",
4676 app->sock,
4677 release_ret);
4678 }
4679 goto rcu_error;
4680 }
4681
4682 health_code_update();
4683 if (count >= nbmem) {
4684 /* In case the realloc fails, we free the memory */
4685 struct lttng_event_field *new_tmp_event;
4686 size_t new_nbmem;
4687
4688 new_nbmem = nbmem << 1;
4689 DBG2("Reallocating event field list from %zu to %zu entries",
4690 nbmem,
4691 new_nbmem);
4692 new_tmp_event = (lttng_event_field *) realloc(
4693 tmp_event, new_nbmem * sizeof(struct lttng_event_field));
4694 if (new_tmp_event == nullptr) {
4695 int release_ret;
4696
4697 PERROR("realloc ust app event fields");
4698 free(tmp_event);
4699 ret = -ENOMEM;
4700 release_ret =
4701 lttng_ust_ctl_release_handle(app->sock, handle);
4702 pthread_mutex_unlock(&app->sock_lock);
4703 if (release_ret && release_ret != -LTTNG_UST_ERR_EXITING &&
4704 release_ret != -EPIPE) {
4705 ERR("Error releasing app handle for app %d with ret %d",
4706 app->sock,
4707 release_ret);
4708 }
4709 goto rcu_error;
4710 }
4711 /* Zero the new memory */
4712 memset(new_tmp_event + nbmem,
4713 0,
4714 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4715 nbmem = new_nbmem;
4716 tmp_event = new_tmp_event;
4717 }
4718
4719 memcpy(tmp_event[count].field_name,
4720 uiter.field_name,
4721 LTTNG_UST_ABI_SYM_NAME_LEN);
4722 /* Mapping between these enums matches 1 to 1. */
4723 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4724 tmp_event[count].nowrite = uiter.nowrite;
4725
4726 memcpy(tmp_event[count].event.name,
4727 uiter.event_name,
4728 LTTNG_UST_ABI_SYM_NAME_LEN);
4729 tmp_event[count].event.loglevel = uiter.loglevel;
4730 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4731 tmp_event[count].event.pid = app->pid;
4732 tmp_event[count].event.enabled = -1;
4733 count++;
4734 }
4735 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4736 pthread_mutex_unlock(&app->sock_lock);
4737 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4738 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4739 }
4740 }
4741
4742 ret = count;
4743 *fields = tmp_event;
4744
4745 DBG2("UST app list event fields done (%zu events)", count);
4746
4747rcu_error:
4748 rcu_read_unlock();
4749error:
4750 health_code_update();
4751 return ret;
4752}
4753
4754/*
4755 * Free and clean all traceable apps of the global list.
4756 */
4757void ust_app_clean_list()
4758{
4759 int ret;
4760 struct ust_app *app;
4761 struct lttng_ht_iter iter;
4762
4763 DBG2("UST app cleaning registered apps hash table");
4764
4765 rcu_read_lock();
4766
4767 /* Cleanup notify socket hash table */
4768 if (ust_app_ht_by_notify_sock) {
4769 cds_lfht_for_each_entry (
4770 ust_app_ht_by_notify_sock->ht, &iter.iter, app, notify_sock_n.node) {
4771 /*
4772 * Assert that all notifiers are gone as all triggers
4773 * are unregistered prior to this clean-up.
4774 */
4775 LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
4776
4777 ust_app_notify_sock_unregister(app->notify_sock);
4778 }
4779 }
4780
4781 if (ust_app_ht) {
4782 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4783 ret = lttng_ht_del(ust_app_ht, &iter);
4784 LTTNG_ASSERT(!ret);
4785 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4786 }
4787 }
4788
4789 /* Cleanup socket hash table */
4790 if (ust_app_ht_by_sock) {
4791 cds_lfht_for_each_entry (ust_app_ht_by_sock->ht, &iter.iter, app, sock_n.node) {
4792 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4793 LTTNG_ASSERT(!ret);
4794 }
4795 }
4796
4797 rcu_read_unlock();
4798
4799 /* Destroy is done only when the ht is empty */
4800 if (ust_app_ht) {
4801 lttng_ht_destroy(ust_app_ht);
4802 }
4803 if (ust_app_ht_by_sock) {
4804 lttng_ht_destroy(ust_app_ht_by_sock);
4805 }
4806 if (ust_app_ht_by_notify_sock) {
4807 lttng_ht_destroy(ust_app_ht_by_notify_sock);
4808 }
4809}
4810
4811/*
4812 * Init UST app hash table.
4813 */
4814int ust_app_ht_alloc()
4815{
4816 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4817 if (!ust_app_ht) {
4818 return -1;
4819 }
4820 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4821 if (!ust_app_ht_by_sock) {
4822 return -1;
4823 }
4824 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4825 if (!ust_app_ht_by_notify_sock) {
4826 return -1;
4827 }
4828 return 0;
4829}
4830
4831/*
4832 * For a specific UST session, disable the channel for all registered apps.
4833 */
4834int ust_app_disable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
4835{
4836 int ret = 0;
4837 struct lttng_ht_iter iter;
4838 struct lttng_ht_node_str *ua_chan_node;
4839 struct ust_app *app;
4840 struct ust_app_session *ua_sess;
4841 struct ust_app_channel *ua_chan;
4842
4843 LTTNG_ASSERT(usess->active);
4844 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4845 uchan->name,
4846 usess->id);
4847
4848 rcu_read_lock();
4849
4850 /* For every registered applications */
4851 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4852 struct lttng_ht_iter uiter;
4853 if (!app->compatible) {
4854 /*
4855 * TODO: In time, we should notify the caller of this error by
4856 * telling them that this is a version error.
4857 */
4858 continue;
4859 }
4860 ua_sess = lookup_session_by_app(usess, app);
4861 if (ua_sess == nullptr) {
4862 continue;
4863 }
4864
4865 /* Get channel */
4866 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
4867 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4868 /* If the session is found for the app, the channel must be there. */
4869 LTTNG_ASSERT(ua_chan_node);
4870
4871 ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
4872 /* The channel must not already be disabled. */
4873 LTTNG_ASSERT(ua_chan->enabled == 1);
4874
4875 /* Disable channel onto application */
4876 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4877 if (ret < 0) {
4878 /* XXX: We might want to report this error at some point... */
4879 continue;
4880 }
4881 }
4882
4883 rcu_read_unlock();
4884 return ret;
4885}
4886
4887/*
4888 * For a specific UST session, enable the channel for all registered apps.
4889 */
4890int ust_app_enable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_channel *uchan)
4891{
4892 int ret = 0;
4893 struct lttng_ht_iter iter;
4894 struct ust_app *app;
4895 struct ust_app_session *ua_sess;
4896
4897 LTTNG_ASSERT(usess->active);
4898 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4899 uchan->name,
4900 usess->id);
4901
4902 rcu_read_lock();
4903
4904 /* For every registered applications */
4905 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4906 if (!app->compatible) {
4907 /*
4908 * TODO: In time, we should notify the caller of this error by
4909 * telling them that this is a version error.
4910 */
4911 continue;
4912 }
4913 ua_sess = lookup_session_by_app(usess, app);
4914 if (ua_sess == nullptr) {
4915 continue;
4916 }
4917
4918 /* Enable channel onto application */
4919 ret = enable_ust_app_channel(ua_sess, uchan, app);
4920 if (ret < 0) {
4921 /* XXX: We might want to report this error at some point... */
4922 continue;
4923 }
4924 }
4925
4926 rcu_read_unlock();
4927 return ret;
4928}
4929
4930/*
4931 * Disable an event in a channel and for a specific session.
4932 */
4933int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4934 struct ltt_ust_channel *uchan,
4935 struct ltt_ust_event *uevent)
4936{
4937 int ret = 0;
4938 struct lttng_ht_iter iter, uiter;
4939 struct lttng_ht_node_str *ua_chan_node;
4940 struct ust_app *app;
4941 struct ust_app_session *ua_sess;
4942 struct ust_app_channel *ua_chan;
4943 struct ust_app_event *ua_event;
4944
4945 LTTNG_ASSERT(usess->active);
4946 DBG("UST app disabling event %s for all apps in channel "
4947 "%s for session id %" PRIu64,
4948 uevent->attr.name,
4949 uchan->name,
4950 usess->id);
4951
4952 rcu_read_lock();
4953
4954 /* For all registered applications */
4955 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4956 if (!app->compatible) {
4957 /*
4958 * TODO: In time, we should notify the caller of this error by
4959 * telling them that this is a version error.
4960 */
4961 continue;
4962 }
4963 ua_sess = lookup_session_by_app(usess, app);
4964 if (ua_sess == nullptr) {
4965 /* Next app */
4966 continue;
4967 }
4968
4969 /* Lookup channel in the ust app session */
4970 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
4971 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4972 if (ua_chan_node == nullptr) {
4973 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d. "
4974 "Skipping",
4975 uchan->name,
4976 usess->id,
4977 app->pid);
4978 continue;
4979 }
4980 ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
4981
4982 ua_event = find_ust_app_event(ua_chan->events,
4983 uevent->attr.name,
4984 uevent->filter,
4985 uevent->attr.loglevel,
4986 uevent->exclusion);
4987 if (ua_event == nullptr) {
4988 DBG2("Event %s not found in channel %s for app pid %d. "
4989 "Skipping",
4990 uevent->attr.name,
4991 uchan->name,
4992 app->pid);
4993 continue;
4994 }
4995
4996 ret = disable_ust_app_event(ua_event, app);
4997 if (ret < 0) {
4998 /* XXX: Report error someday... */
4999 continue;
5000 }
5001 }
5002
5003 rcu_read_unlock();
5004 return ret;
5005}
5006
5007/* The ua_sess lock must be held by the caller. */
5008static int ust_app_channel_create(struct ltt_ust_session *usess,
5009 struct ust_app_session *ua_sess,
5010 struct ltt_ust_channel *uchan,
5011 struct ust_app *app,
5012 struct ust_app_channel **_ua_chan)
5013{
5014 int ret = 0;
5015 struct ust_app_channel *ua_chan = nullptr;
5016
5017 LTTNG_ASSERT(ua_sess);
5018 ASSERT_LOCKED(ua_sess->lock);
5019
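	/* The metadata channel is not sent to the application here; only record its attributes in the session. */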
5020 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME, sizeof(uchan->name))) {
5021 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
5022 ret = 0;
5023 } else {
5024 struct ltt_ust_context *uctx = nullptr;
5025
5026 /*
5027 * Create channel onto application and synchronize its
5028 * configuration.
5029 */
5030 ret = ust_app_channel_allocate(
5031 ua_sess, uchan, LTTNG_UST_ABI_CHAN_PER_CPU, usess, &ua_chan);
5032 if (ret < 0) {
5033 goto error;
5034 }
5035
5036 ret = ust_app_channel_send(app, usess, ua_sess, ua_chan);
5037 if (ret) {
5038 goto error;
5039 }
5040
5041 /* Add contexts. */
5042 cds_list_for_each_entry (uctx, &uchan->ctx_list, list) {
5043 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
5044 if (ret) {
5045 goto error;
5046 }
5047 }
5048 }
5049
5050error:
5051 if (ret < 0) {
5052 switch (ret) {
5053 case -ENOTCONN:
5054 /*
5055 * The application's socket is not valid. Either a bad socket
5056 * or a timeout on it. We can't inform the caller that the session
5057 * failed for a specific app, so let's continue here.
5058 */
5059 ret = 0; /* Not an error. */
5060 break;
5061 case -ENOMEM:
5062 default:
5063 break;
5064 }
5065 }
5066
5067 if (ret == 0 && _ua_chan) {
5068 /*
5069 * Only return the application's channel on success. Note
5070 * that the channel can still be part of the application's
5071 * channel hashtable on error.
5072 */
5073 *_ua_chan = ua_chan;
5074 }
5075 return ret;
5076}
5077
5078/*
5079 * Enable event for a specific session and channel on the tracer.
5080 */
5081int ust_app_enable_event_glb(struct ltt_ust_session *usess,
5082 struct ltt_ust_channel *uchan,
5083 struct ltt_ust_event *uevent)
5084{
5085 int ret = 0;
5086 struct lttng_ht_iter iter, uiter;
5087 struct lttng_ht_node_str *ua_chan_node;
5088 struct ust_app *app;
5089 struct ust_app_session *ua_sess;
5090 struct ust_app_channel *ua_chan;
5091 struct ust_app_event *ua_event;
5092
5093 LTTNG_ASSERT(usess->active);
5094 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
5095 uevent->attr.name,
5096 usess->id);
5097
5098 /*
5099 * NOTE: At this point, this function is called only if the session and
5100 * channel passed are already created for all apps and enabled on the
5101 * tracer as well.
5102 */
5103
5104 rcu_read_lock();
5105
5106 /* For all registered applications */
5107 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5108 if (!app->compatible) {
5109 /*
5110 * TODO: In time, we should notify the caller of this error by
5111 * telling them that this is a version error.
5112 */
5113 continue;
5114 }
5115 ua_sess = lookup_session_by_app(usess, app);
5116 if (!ua_sess) {
5117 /* The application has a problem or is probably dead. */
5118 continue;
5119 }
5120
5121 pthread_mutex_lock(&ua_sess->lock);
5122
5123 if (ua_sess->deleted) {
5124 pthread_mutex_unlock(&ua_sess->lock);
5125 continue;
5126 }
5127
5128 /* Lookup channel in the ust app session */
5129 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
5130 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5131 /*
5132 * It is possible that the channel cannot be found if
5133 * the channel/event creation occurs concurrently with
5134 * an application exit.
5135 */
5136 if (!ua_chan_node) {
5137 pthread_mutex_unlock(&ua_sess->lock);
5138 continue;
5139 }
5140
5141 ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
5142
5143 /* Get event node */
5144 ua_event = find_ust_app_event(ua_chan->events,
5145 uevent->attr.name,
5146 uevent->filter,
5147 uevent->attr.loglevel,
5148 uevent->exclusion);
5149 if (ua_event == nullptr) {
5150 DBG3("UST app enable event %s not found for app PID %d. "
5151 "Skipping app",
5152 uevent->attr.name,
5153 app->pid);
5154 goto next_app;
5155 }
5156
5157 ret = enable_ust_app_event(ua_event, app);
5158 if (ret < 0) {
5159 pthread_mutex_unlock(&ua_sess->lock);
5160 goto error;
5161 }
5162 next_app:
5163 pthread_mutex_unlock(&ua_sess->lock);
5164 }
5165
5166error:
5167 rcu_read_unlock();
5168 return ret;
5169}
5170
5171/*
5172 * For a specific existing UST session and UST channel, creates the event for
5173 * all registered apps.
5174 */
5175int ust_app_create_event_glb(struct ltt_ust_session *usess,
5176 struct ltt_ust_channel *uchan,
5177 struct ltt_ust_event *uevent)
5178{
5179 int ret = 0;
5180 struct lttng_ht_iter iter, uiter;
5181 struct lttng_ht_node_str *ua_chan_node;
5182 struct ust_app *app;
5183 struct ust_app_session *ua_sess;
5184 struct ust_app_channel *ua_chan;
5185
5186 LTTNG_ASSERT(usess->active);
5187 DBG("UST app creating event %s for all apps for session id %" PRIu64,
5188 uevent->attr.name,
5189 usess->id);
5190
5191 rcu_read_lock();
5192
5193 /* For all registered applications */
5194 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5195 if (!app->compatible) {
5196 /*
5197 * TODO: In time, we should notify the caller of this error by
5198 * telling them that this is a version error.
5199 */
5200 continue;
5201 }
5202 ua_sess = lookup_session_by_app(usess, app);
5203 if (!ua_sess) {
5204 /* The application has a problem or is probably dead. */
5205 continue;
5206 }
5207
5208 pthread_mutex_lock(&ua_sess->lock);
5209
5210 if (ua_sess->deleted) {
5211 pthread_mutex_unlock(&ua_sess->lock);
5212 continue;
5213 }
5214
5215 /* Lookup channel in the ust app session */
5216 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
5217 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5218 /* If the channel is not found, there is a code flow error */
5219 LTTNG_ASSERT(ua_chan_node);
5220
5221 ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
5222
5223 ret = create_ust_app_event(ua_chan, uevent, app);
5224 pthread_mutex_unlock(&ua_sess->lock);
5225 if (ret < 0) {
5226 if (ret != -LTTNG_UST_ERR_EXIST) {
5227 /* Possible value at this point: -ENOMEM. If so, we stop! */
5228 break;
5229 }
5230 DBG2("UST app event %s already exists on app PID %d",
5231 uevent->attr.name,
5232 app->pid);
5233 continue;
5234 }
5235 }
5236
5237 rcu_read_unlock();
5238 return ret;
5239}
5240
5241/*
5242 * Start tracing for a specific UST session and app.
5243 *
5244 * Called with UST app session lock held.
5245 *
5246 */
5247static int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
5248{
5249 int ret = 0;
5250 struct ust_app_session *ua_sess;
5251
5252 DBG("Starting tracing for ust app pid %d", app->pid);
5253
5254 rcu_read_lock();
5255
5256 if (!app->compatible) {
5257 goto end;
5258 }
5259
5260 ua_sess = lookup_session_by_app(usess, app);
5261 if (ua_sess == nullptr) {
5262 /* The session is in teardown process. Ignore and continue. */
5263 goto end;
5264 }
5265
5266 pthread_mutex_lock(&ua_sess->lock);
5267
5268 if (ua_sess->deleted) {
5269 pthread_mutex_unlock(&ua_sess->lock);
5270 goto end;
5271 }
5272
5273 if (ua_sess->enabled) {
5274 pthread_mutex_unlock(&ua_sess->lock);
5275 goto end;
5276 }
5277
5278 /* Upon restart, we skip the setup, already done */
5279 if (ua_sess->started) {
5280 goto skip_setup;
5281 }
5282
5283 health_code_update();
5284
5285skip_setup:
5286 /* This starts the UST tracing */
5287 pthread_mutex_lock(&app->sock_lock);
5288 ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
5289 pthread_mutex_unlock(&app->sock_lock);
5290 if (ret < 0) {
5291 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5292 DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
5293 app->pid,
5294 app->sock);
5295 pthread_mutex_unlock(&ua_sess->lock);
5296 goto end;
5297 } else if (ret == -EAGAIN) {
5298 WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
5299 app->pid,
5300 app->sock);
5301 pthread_mutex_unlock(&ua_sess->lock);
5302 goto end;
5303
5304 } else {
5305 ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
5306 ret,
5307 app->pid,
5308 app->sock);
5309 }
5310 goto error_unlock;
5311 }
5312
5313 /* Indicate that the session has been started once */
5314 ua_sess->started = 1;
5315 ua_sess->enabled = 1;
5316
5317 pthread_mutex_unlock(&ua_sess->lock);
5318
5319 health_code_update();
5320
5321 /* Quiescent wait after starting trace */
5322 pthread_mutex_lock(&app->sock_lock);
5323 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5324 pthread_mutex_unlock(&app->sock_lock);
5325 if (ret < 0) {
5326 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5327 DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5328 app->pid,
5329 app->sock);
5330 } else if (ret == -EAGAIN) {
5331 WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5332 app->pid,
5333 app->sock);
5334 } else {
5335 ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
5336 ret,
5337 app->pid,
5338 app->sock);
5339 }
5340 }
5341
5342end:
5343 rcu_read_unlock();
5344 health_code_update();
5345 return 0;
5346
5347error_unlock:
5348 pthread_mutex_unlock(&ua_sess->lock);
5349 rcu_read_unlock();
5350 health_code_update();
5351 return -1;
5352}
5353
5354/*
5355 * Stop tracing for a specific UST session and app.
5356 */
5357static int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
5358{
5359 int ret = 0;
5360 struct ust_app_session *ua_sess;
5361
5362 DBG("Stopping tracing for ust app pid %d", app->pid);
5363
5364 rcu_read_lock();
5365
5366 if (!app->compatible) {
5367 goto end_no_session;
5368 }
5369
5370 ua_sess = lookup_session_by_app(usess, app);
5371 if (ua_sess == nullptr) {
5372 goto end_no_session;
5373 }
5374
5375 pthread_mutex_lock(&ua_sess->lock);
5376
5377 if (ua_sess->deleted) {
5378 pthread_mutex_unlock(&ua_sess->lock);
5379 goto end_no_session;
5380 }
5381
5382 /*
5383 * If started = 0, it means that stop trace has been called for a session
5384 * that was never started. It's possible since we can have a failed start
5385 * from either the application manager thread or the command thread. Simply
5386 * indicate that this is a stop error.
5387 */
5388 if (!ua_sess->started) {
5389 goto error_rcu_unlock;
5390 }
5391
5392 health_code_update();
5393
5394 /* This inhibits UST tracing */
5395 pthread_mutex_lock(&app->sock_lock);
5396 ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
5397 pthread_mutex_unlock(&app->sock_lock);
5398 if (ret < 0) {
5399 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5400 DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
5401 app->pid,
5402 app->sock);
5403 goto end_unlock;
5404 } else if (ret == -EAGAIN) {
5405 WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
5406 app->pid,
5407 app->sock);
5408 goto end_unlock;
5409
5410 } else {
5411 ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
5412 ret,
5413 app->pid,
5414 app->sock);
5415 }
5416 goto error_rcu_unlock;
5417 }
5418
5419 health_code_update();
5420 ua_sess->enabled = 0;
5421
5422 /* Quiescent wait after stopping trace */
5423 pthread_mutex_lock(&app->sock_lock);
5424 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5425 pthread_mutex_unlock(&app->sock_lock);
5426 if (ret < 0) {
5427 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5428 DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5429 app->pid,
5430 app->sock);
5431 } else if (ret == -EAGAIN) {
5432 WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5433 app->pid,
5434 app->sock);
5435 } else {
5436 ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
5437 ret,
5438 app->pid,
5439 app->sock);
5440 }
5441 }
5442
5443 health_code_update();
5444
5445 {
5446 auto locked_registry = get_locked_session_registry(ua_sess);
5447
5448 /* The UST app session is held; the registry shall not be null. */
5449 LTTNG_ASSERT(locked_registry);
5450
5451 /* Push metadata for application before freeing the application. */
5452 (void) push_metadata(locked_registry, ua_sess->consumer);
5453 }
5454
5455end_unlock:
5456 pthread_mutex_unlock(&ua_sess->lock);
5457end_no_session:
5458 rcu_read_unlock();
5459 health_code_update();
5460 return 0;
5461
5462error_rcu_unlock:
5463 pthread_mutex_unlock(&ua_sess->lock);
5464 rcu_read_unlock();
5465 health_code_update();
5466 return -1;
5467}
5468
5469static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
5470{
5471 int ret, retval = 0;
5472 struct lttng_ht_iter iter;
5473 struct ust_app_channel *ua_chan;
5474 struct consumer_socket *socket;
5475
5476 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5477
5478 rcu_read_lock();
5479
5480 if (!app->compatible) {
5481 goto end_not_compatible;
5482 }
5483
5484 pthread_mutex_lock(&ua_sess->lock);
5485
5486 if (ua_sess->deleted) {
5487 goto end_deleted;
5488 }
5489
5490 health_code_update();
5491
5492 /* Flushing buffers */
5493 socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
5494
5495 /* Flush buffers and push metadata. */
5496 switch (ua_sess->buffer_type) {
5497 case LTTNG_BUFFER_PER_PID:
5498 cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
5499 health_code_update();
5500 ret = consumer_flush_channel(socket, ua_chan->key);
5501 if (ret) {
5502 ERR("Error flushing consumer channel");
5503 retval = -1;
5504 continue;
5505 }
5506 }
5507 break;
5508 case LTTNG_BUFFER_PER_UID:
5509 default:
5510 abort();
5511 break;
5512 }
5513
5514 health_code_update();
5515
5516end_deleted:
5517 pthread_mutex_unlock(&ua_sess->lock);
5518
5519end_not_compatible:
5520 rcu_read_unlock();
5521 health_code_update();
5522 return retval;
5523}
5524
5525/*
5526 * Flush buffers for all applications for a specific UST session.
5527 * Called with UST session lock held.
5528 */
5529static int ust_app_flush_session(struct ltt_ust_session *usess)
5530
5531{
5532 int ret = 0;
5533
5534 DBG("Flushing session buffers for all ust apps");
5535
5536 rcu_read_lock();
5537
5538 /* Flush buffers and push metadata. */
5539 switch (usess->buffer_type) {
5540 case LTTNG_BUFFER_PER_UID:
5541 {
5542 struct buffer_reg_uid *reg;
5543 struct lttng_ht_iter iter;
5544
5545 /* Flush all per UID buffers associated to that session. */
5546 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
5547 lsu::registry_session *ust_session_reg;
5548 struct buffer_reg_channel *buf_reg_chan;
5549 struct consumer_socket *socket;
5550
5551 /* Get the consumer socket to use to push the metadata. */
5552 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5553 usess->consumer);
5554 if (!socket) {
5555 /* Ignore request if no consumer is found for the session. */
5556 continue;
5557 }
5558
5559 cds_lfht_for_each_entry (
5560 reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
5561 /*
5562 * The following call will print error values so the return
5563 * code is of little importance because whatever happens, we
5564 * have to try them all.
5565 */
5566 (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
5567 }
5568
5569 ust_session_reg = reg->registry->reg.ust;
5570 /* Push metadata. */
5571 auto locked_registry = ust_session_reg->lock();
5572 (void) push_metadata(locked_registry, usess->consumer);
5573 }
5574 break;
5575 }
5576 case LTTNG_BUFFER_PER_PID:
5577 {
5578 struct ust_app_session *ua_sess;
5579 struct lttng_ht_iter iter;
5580 struct ust_app *app;
5581
5582 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5583 ua_sess = lookup_session_by_app(usess, app);
5584 if (ua_sess == nullptr) {
5585 continue;
5586 }
5587 (void) ust_app_flush_app_session(app, ua_sess);
5588 }
5589 break;
5590 }
5591 default:
5592 ret = -1;
5593 abort();
5594 break;
5595 }
5596
5597 rcu_read_unlock();
5598 health_code_update();
5599 return ret;
5600}
5601
5602static int ust_app_clear_quiescent_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
5603{
5604 int ret = 0;
5605 struct lttng_ht_iter iter;
5606 struct ust_app_channel *ua_chan;
5607 struct consumer_socket *socket;
5608
5609 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5610
5611 rcu_read_lock();
5612
5613 if (!app->compatible) {
5614 goto end_not_compatible;
5615 }
5616
5617 pthread_mutex_lock(&ua_sess->lock);
5618
5619 if (ua_sess->deleted) {
5620 goto end_unlock;
5621 }
5622
5623 health_code_update();
5624
5625 socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
5626 if (!socket) {
5627 ERR("Failed to find consumer (%" PRIu32 ") socket", app->abi.bits_per_long);
5628 ret = -1;
5629 goto end_unlock;
5630 }
5631
5632 /* Clear quiescent state. */
5633 switch (ua_sess->buffer_type) {
5634 case LTTNG_BUFFER_PER_PID:
5635 cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
5636 health_code_update();
5637 ret = consumer_clear_quiescent_channel(socket, ua_chan->key);
5638 if (ret) {
5639 ERR("Error clearing quiescent state for consumer channel");
5640 ret = -1;
5641 continue;
5642 }
5643 }
5644 break;
5645 case LTTNG_BUFFER_PER_UID:
5646 default:
5647 abort();
5648 ret = -1;
5649 break;
5650 }
5651
5652 health_code_update();
5653
5654end_unlock:
5655 pthread_mutex_unlock(&ua_sess->lock);
5656
5657end_not_compatible:
5658 rcu_read_unlock();
5659 health_code_update();
5660 return ret;
5661}
5662
5663/*
5664 * Clear quiescent state in each stream for all applications for a
5665 * specific UST session.
5666 * Called with UST session lock held.
5667 */
5668static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5669
5670{
5671 int ret = 0;
5672
5673 DBG("Clearing stream quiescent state for all ust apps");
5674
5675 rcu_read_lock();
5676
5677 switch (usess->buffer_type) {
5678 case LTTNG_BUFFER_PER_UID:
5679 {
5680 struct lttng_ht_iter iter;
5681 struct buffer_reg_uid *reg;
5682
5683 /*
5684 * Clear quiescent for all per UID buffers associated to
5685 * that session.
5686 */
5687 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
5688 struct consumer_socket *socket;
5689 struct buffer_reg_channel *buf_reg_chan;
5690
5691 /* Get the associated consumer socket. */
5692 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5693 usess->consumer);
5694 if (!socket) {
5695 /*
5696 * Ignore request if no consumer is found for
5697 * the session.
5698 */
5699 continue;
5700 }
5701
5702 cds_lfht_for_each_entry (
5703 reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
5704 /*
5705 * The following call will print error values so
5706 * the return code is of little importance
5707 * because whatever happens, we have to try them
5708 * all.
5709 */
5710 (void) consumer_clear_quiescent_channel(socket,
5711 buf_reg_chan->consumer_key);
5712 }
5713 }
5714 break;
5715 }
5716 case LTTNG_BUFFER_PER_PID:
5717 {
5718 struct ust_app_session *ua_sess;
5719 struct lttng_ht_iter iter;
5720 struct ust_app *app;
5721
5722 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5723 ua_sess = lookup_session_by_app(usess, app);
5724 if (ua_sess == nullptr) {
5725 continue;
5726 }
5727 (void) ust_app_clear_quiescent_app_session(app, ua_sess);
5728 }
5729 break;
5730 }
5731 default:
5732 ret = -1;
5733 abort();
5734 break;
5735 }
5736
5737 rcu_read_unlock();
5738 health_code_update();
5739 return ret;
5740}
5741
5742/*
5743 * Destroy a specific UST session in apps.
5744 */
5745static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5746{
5747 int ret;
5748 struct ust_app_session *ua_sess;
5749 struct lttng_ht_iter iter;
5750 struct lttng_ht_node_u64 *node;
5751
5752 DBG("Destroy tracing for ust app pid %d", app->pid);
5753
5754 rcu_read_lock();
5755
5756 if (!app->compatible) {
5757 goto end;
5758 }
5759
5760 __lookup_session_by_app(usess, app, &iter);
5761 node = lttng_ht_iter_get_node_u64(&iter);
5762 if (node == nullptr) {
5763		/* Session is being deleted or has already been deleted. */
5764 goto end;
5765 }
5766 ua_sess = lttng::utils::container_of(node, &ust_app_session::node);
5767
5768 health_code_update();
5769 destroy_app_session(app, ua_sess);
5770
5771 health_code_update();
5772
5773 /* Quiescent wait after stopping trace */
5774 pthread_mutex_lock(&app->sock_lock);
5775 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5776 pthread_mutex_unlock(&app->sock_lock);
5777 if (ret < 0) {
5778 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5779			DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5780 app->pid,
5781 app->sock);
5782 } else if (ret == -EAGAIN) {
5783			WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5784 app->pid,
5785 app->sock);
5786 } else {
5787			ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
5788 ret,
5789 app->pid,
5790 app->sock);
5791 }
5792 }
5793end:
5794 rcu_read_unlock();
5795 health_code_update();
5796 return 0;
5797}
5798
5799/*
5800 * Start tracing for the UST session.
5801 */
5802int ust_app_start_trace_all(struct ltt_ust_session *usess)
5803{
5804 struct lttng_ht_iter iter;
5805 struct ust_app *app;
5806
5807 DBG("Starting all UST traces");
5808
5809 /*
5810 * Even though the start trace might fail, flag this session active so
5811	 * other applications coming in are started by default.
5812 */
5813 usess->active = 1;
5814
5815 rcu_read_lock();
5816
5817 /*
5818 * In a start-stop-start use-case, we need to clear the quiescent state
5819 * of each channel set by the prior stop command, thus ensuring that a
5820 * following stop or destroy is sure to grab a timestamp_end near those
5821 * operations, even if the packet is empty.
5822 */
5823 (void) ust_app_clear_quiescent_session(usess);
5824
5825 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5826 ust_app_global_update(usess, app);
5827 }
5828
5829 rcu_read_unlock();
5830
5831 return 0;
5832}
5833
5834/*
5835 * Stop tracing for the UST session.
5836 * Called with UST session lock held.
5837 */
5838int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5839{
5840 int ret = 0;
5841 struct lttng_ht_iter iter;
5842 struct ust_app *app;
5843
5844 DBG("Stopping all UST traces");
5845
5846 /*
5847 * Even though the stop trace might fail, flag this session inactive so
5848	 * other applications coming in are not started by default.
5849 */
5850 usess->active = 0;
5851
5852 rcu_read_lock();
5853
5854 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5855 ret = ust_app_stop_trace(usess, app);
5856 if (ret < 0) {
5857 /* Continue to next apps even on error */
5858 continue;
5859 }
5860 }
5861
5862 (void) ust_app_flush_session(usess);
5863
5864 rcu_read_unlock();
5865
5866 return 0;
5867}
5868
5869/*
5870 * Destroy app UST session.
5871 */
5872int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5873{
5874 int ret = 0;
5875 struct lttng_ht_iter iter;
5876 struct ust_app *app;
5877
5878 DBG("Destroy all UST traces");
5879
5880 rcu_read_lock();
5881
5882 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5883 ret = destroy_trace(usess, app);
5884 if (ret < 0) {
5885 /* Continue to next apps even on error */
5886 continue;
5887 }
5888 }
5889
5890 rcu_read_unlock();
5891
5892 return 0;
5893}
5894
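/*
 * Look up a ust_app_channel matching 'uchan' in the application session's
 * channel hash table; if it does not exist yet, create it and set it up for
 * the application.
 */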
5895/* The ua_sess lock must be held by the caller. */
5896static int find_or_create_ust_app_channel(struct ltt_ust_session *usess,
5897 struct ust_app_session *ua_sess,
5898 struct ust_app *app,
5899 struct ltt_ust_channel *uchan,
5900 struct ust_app_channel **ua_chan)
5901{
5902 int ret = 0;
5903 struct lttng_ht_iter iter;
5904 struct lttng_ht_node_str *ua_chan_node;
5905
5906 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5907 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5908 if (ua_chan_node) {
5909 *ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
5910 goto end;
5911 }
5912
5913 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5914 if (ret) {
5915 goto end;
5916 }
5917end:
5918 return ret;
5919}
5920
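/*
 * Synchronize a single event rule with an application channel: create the
 * event on the application side if it is missing, otherwise reconcile its
 * enabled state with the session's configuration.
 */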
5921static int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5922 struct ltt_ust_event *uevent,
5923 struct ust_app *app)
5924{
5925 int ret = 0;
5926 struct ust_app_event *ua_event = nullptr;
5927
5928 ua_event = find_ust_app_event(ua_chan->events,
5929 uevent->attr.name,
5930 uevent->filter,
5931 uevent->attr.loglevel,
5932 uevent->exclusion);
5933 if (!ua_event) {
5934 ret = create_ust_app_event(ua_chan, uevent, app);
5935 if (ret < 0) {
5936 goto end;
5937 }
5938 } else {
5939 if (ua_event->enabled != uevent->enabled) {
5940 ret = uevent->enabled ? enable_ust_app_event(ua_event, app) :
5941 disable_ust_app_event(ua_event, app);
5942 }
5943 }
5944
5945end:
5946 return ret;
5947}
5948
5949/* Called with RCU read-side lock held. */
5950static void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
5951{
5952 int ret = 0;
5953 enum lttng_error_code ret_code;
5954 enum lttng_trigger_status t_status;
5955 struct lttng_ht_iter app_trigger_iter;
5956 struct lttng_triggers *triggers = nullptr;
5957 struct ust_app_event_notifier_rule *event_notifier_rule;
5958 unsigned int count, i;
5959
5960 ASSERT_RCU_READ_LOCKED();
5961
5962 if (!ust_app_supports_notifiers(app)) {
5963 goto end;
5964 }
5965
5966 /*
5967	 * Currently, registering or unregistering a trigger with an
5968 * event rule condition causes a full synchronization of the event
5969 * notifiers.
5970 *
5971 * The first step attempts to add an event notifier for all registered
5972 * triggers that apply to the user space tracers. Then, the
5973 * application's event notifiers rules are all checked against the list
5974 * of registered triggers. Any event notifier that doesn't have a
5975 * matching trigger can be assumed to have been disabled.
5976 *
5977 * All of this is inefficient, but is put in place to get the feature
5978	 * rolling as it is simpler at this moment. It will be optimized Soon™
5979	 * to allow the state of enabled event notifiers to be synchronized in
5980	 * a piece-wise way.
5981 */
5982
5983 /* Get all triggers using uid 0 (root) */
5984 ret_code = notification_thread_command_list_triggers(
5985 the_notification_thread_handle, 0, &triggers);
5986 if (ret_code != LTTNG_OK) {
5987 goto end;
5988 }
5989
5990 LTTNG_ASSERT(triggers);
5991
5992 t_status = lttng_triggers_get_count(triggers, &count);
5993 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
5994 goto end;
5995 }
5996
5997 for (i = 0; i < count; i++) {
5998 struct lttng_condition *condition;
5999 struct lttng_event_rule *event_rule;
6000 struct lttng_trigger *trigger;
6001 const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
6002 enum lttng_condition_status condition_status;
6003 uint64_t token;
6004
6005 trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
6006 LTTNG_ASSERT(trigger);
6007
6008 token = lttng_trigger_get_tracer_token(trigger);
6009 condition = lttng_trigger_get_condition(trigger);
6010
6011 if (lttng_condition_get_type(condition) !=
6012 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
6013 /* Does not apply */
6014 continue;
6015 }
6016
6017 condition_status = lttng_condition_event_rule_matches_borrow_rule_mutable(
6018 condition, &event_rule);
6019 LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
6020
6021 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
6022 /* Skip kernel related triggers. */
6023 continue;
6024 }
6025
6026 /*
6027 * Find or create the associated token event rule. The caller
6028 * holds the RCU read lock, so this is safe to call without
6029 * explicitly acquiring it here.
6030 */
6031 looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
6032 app->token_to_event_notifier_rule_ht, token);
6033 if (!looked_up_event_notifier_rule) {
6034 ret = create_ust_app_event_notifier_rule(trigger, app);
6035 if (ret < 0) {
6036 goto end;
6037 }
6038 }
6039 }
6040
6041 rcu_read_lock();
6042 /* Remove all unknown event sources from the app. */
6043 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
6044 &app_trigger_iter.iter,
6045 event_notifier_rule,
6046 node.node) {
6047 const uint64_t app_token = event_notifier_rule->token;
6048 bool found = false;
6049
6050 /*
6051 * Check if the app event trigger still exists on the
6052 * notification side.
6053 */
6054 for (i = 0; i < count; i++) {
6055 uint64_t notification_thread_token;
6056 const struct lttng_trigger *trigger =
6057 lttng_triggers_get_at_index(triggers, i);
6058
6059 LTTNG_ASSERT(trigger);
6060
6061 notification_thread_token = lttng_trigger_get_tracer_token(trigger);
6062
6063 if (notification_thread_token == app_token) {
6064 found = true;
6065 break;
6066 }
6067 }
6068
6069 if (found) {
6070 /* Still valid. */
6071 continue;
6072 }
6073
6074 /*
6075 * This trigger was unregistered, disable it on the tracer's
6076 * side.
6077 */
6078 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
6079 LTTNG_ASSERT(ret == 0);
6080
6081 /* Callee logs errors. */
6082 (void) disable_ust_object(app, event_notifier_rule->obj);
6083
6084 delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
6085 }
6086
6087 rcu_read_unlock();
6088
6089end:
6090 lttng_triggers_destroy(triggers);
6091 return;
6092}
6093
6094/*
6095 * RCU read lock must be held by the caller.
6096 */
6097static void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
6098 struct ust_app_session *ua_sess,
6099 struct ust_app *app)
6100{
6101 int ret = 0;
6102 struct cds_lfht_iter uchan_iter;
6103 struct ltt_ust_channel *uchan;
6104
6105 LTTNG_ASSERT(usess);
6106 LTTNG_ASSERT(ua_sess);
6107 LTTNG_ASSERT(app);
6108 ASSERT_RCU_READ_LOCKED();
6109
6110 cds_lfht_for_each_entry (usess->domain_global.channels->ht, &uchan_iter, uchan, node.node) {
6111 struct ust_app_channel *ua_chan;
6112 struct cds_lfht_iter uevent_iter;
6113 struct ltt_ust_event *uevent;
6114
6115 /*
6116 * Search for a matching ust_app_channel. If none is found,
6117 * create it. Creating the channel will cause the ua_chan
6118 * structure to be allocated, the channel buffers to be
6119 * allocated (if necessary) and sent to the application, and
6120 * all enabled contexts will be added to the channel.
6121 */
6122 ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan, &ua_chan);
6123 if (ret) {
6124 /* Tracer is probably gone or ENOMEM. */
6125 goto end;
6126 }
6127
6128 if (!ua_chan) {
6129 /* ua_chan will be NULL for the metadata channel */
6130 continue;
6131 }
6132
6133 cds_lfht_for_each_entry (uchan->events->ht, &uevent_iter, uevent, node.node) {
6134 ret = ust_app_channel_synchronize_event(ua_chan, uevent, app);
6135 if (ret) {
6136 goto end;
6137 }
6138 }
6139
6140 if (ua_chan->enabled != uchan->enabled) {
6141 ret = uchan->enabled ? enable_ust_app_channel(ua_sess, uchan, app) :
6142 disable_ust_app_channel(ua_sess, ua_chan, app);
6143 if (ret) {
6144 goto end;
6145 }
6146 }
6147 }
6148end:
6149 return;
6150}
6151
6152/*
6153 * The caller must ensure that the application is compatible and is tracked
6154 * by the process attribute trackers.
6155 */
6156static void ust_app_synchronize(struct ltt_ust_session *usess, struct ust_app *app)
6157{
6158 int ret = 0;
6159 struct ust_app_session *ua_sess = nullptr;
6160
6161 /*
6162 * The application's configuration should only be synchronized for
6163 * active sessions.
6164 */
6165 LTTNG_ASSERT(usess->active);
6166
6167 ret = find_or_create_ust_app_session(usess, app, &ua_sess, nullptr);
6168 if (ret < 0) {
6169 /* Tracer is probably gone or ENOMEM. */
6170 if (ua_sess) {
6171 destroy_app_session(app, ua_sess);
6172 }
6173 goto end;
6174 }
6175 LTTNG_ASSERT(ua_sess);
6176
6177 pthread_mutex_lock(&ua_sess->lock);
6178 if (ua_sess->deleted) {
6179 goto deleted_session;
6180 }
6181
6182 rcu_read_lock();
6183
6184 ust_app_synchronize_all_channels(usess, ua_sess, app);
6185
6186 /*
6187	 * Create the metadata for the application. This returns gracefully if
6188	 * metadata was already set for the session.
6189 *
6190 * The metadata channel must be created after the data channels as the
6191 * consumer daemon assumes this ordering. When interacting with a relay
6192 * daemon, the consumer will use this assumption to send the
6193 * "STREAMS_SENT" message to the relay daemon.
6194 */
6195 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
6196 if (ret < 0) {
6197 ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
6198 app->sock,
6199 usess->id);
6200 }
6201
6202 rcu_read_unlock();
6203
6204deleted_session:
6205 pthread_mutex_unlock(&ua_sess->lock);
6206end:
6207 return;
6208}
6209
6210static void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
6211{
6212 struct ust_app_session *ua_sess;
6213
6214 ua_sess = lookup_session_by_app(usess, app);
6215 if (ua_sess == nullptr) {
6216 return;
6217 }
6218 destroy_app_session(app, ua_sess);
6219}
6220
6221/*
6222 * Add channels/events from UST global domain to registered apps at sock.
6223 *
6224 * Called with session lock held.
6225 * Called with RCU read-side lock held.
6226 */
6227void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
6228{
6229 LTTNG_ASSERT(usess);
6230 LTTNG_ASSERT(usess->active);
6231 ASSERT_RCU_READ_LOCKED();
6232
6233 DBG2("UST app global update for app sock %d for session id %" PRIu64, app->sock, usess->id);
6234
6235 if (!app->compatible) {
6236 return;
6237 }
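	/*
	 * An application is traced only if it is allowed by all three process
	 * attribute trackers (virtual PID, UID and GID); otherwise, any
	 * existing app session is torn down below.
	 */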
6238 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID, usess, app->pid) &&
6239 trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID, usess, app->uid) &&
6240 trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID, usess, app->gid)) {
6241 /*
6242 * Synchronize the application's internal tracing configuration
6243 * and start tracing.
6244 */
6245 ust_app_synchronize(usess, app);
6246 ust_app_start_trace(usess, app);
6247 } else {
6248 ust_app_global_destroy(usess, app);
6249 }
6250}
6251
6252/*
6253 * Add all event notifiers to an application.
6254 *
6255 * Called with session lock held.
6256 * Called with RCU read-side lock held.
6257 */
6258void ust_app_global_update_event_notifier_rules(struct ust_app *app)
6259{
6260 ASSERT_RCU_READ_LOCKED();
6261
6262 DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
6263 app->name,
6264 app->pid);
6265
6266 if (!app->compatible || !ust_app_supports_notifiers(app)) {
6267 return;
6268 }
6269
6270 if (app->event_notifier_group.object == nullptr) {
6271 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
6272 app->name,
6273 app->pid);
6274 return;
6275 }
6276
6277 ust_app_synchronize_event_notifier_rules(app);
6278}
6279
6280/*
6281 * Called with session lock held.
6282 */
6283void ust_app_global_update_all(struct ltt_ust_session *usess)
6284{
6285 struct lttng_ht_iter iter;
6286 struct ust_app *app;
6287
6288 rcu_read_lock();
6289 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6290 ust_app_global_update(usess, app);
6291 }
6292 rcu_read_unlock();
6293}
6294
6295void ust_app_global_update_all_event_notifier_rules()
6296{
6297 struct lttng_ht_iter iter;
6298 struct ust_app *app;
6299
6300 rcu_read_lock();
6301 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6302 ust_app_global_update_event_notifier_rules(app);
6303 }
6304
6305 rcu_read_unlock();
6306}
6307
6308/*
6309 * Add context to a specific channel for global UST domain.
6310 */
6311int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
6312 struct ltt_ust_channel *uchan,
6313 struct ltt_ust_context *uctx)
6314{
6315 int ret = 0;
6316 struct lttng_ht_node_str *ua_chan_node;
6317 struct lttng_ht_iter iter, uiter;
6318 struct ust_app_channel *ua_chan = nullptr;
6319 struct ust_app_session *ua_sess;
6320 struct ust_app *app;
6321
6322 LTTNG_ASSERT(usess->active);
6323
6324 rcu_read_lock();
6325 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6326 if (!app->compatible) {
6327 /*
6328			 * TODO: In time, we should notify the caller of this error,
6329			 * indicating that this is a version mismatch.
6330 */
6331 continue;
6332 }
6333 ua_sess = lookup_session_by_app(usess, app);
6334 if (ua_sess == nullptr) {
6335 continue;
6336 }
6337
6338 pthread_mutex_lock(&ua_sess->lock);
6339
6340 if (ua_sess->deleted) {
6341 pthread_mutex_unlock(&ua_sess->lock);
6342 continue;
6343 }
6344
6345 /* Lookup channel in the ust app session */
6346 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
6347 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6348 if (ua_chan_node == nullptr) {
6349 goto next_app;
6350 }
6351 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6352 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
6353 if (ret < 0) {
6354 goto next_app;
6355 }
6356 next_app:
6357 pthread_mutex_unlock(&ua_sess->lock);
6358 }
6359
6360 rcu_read_unlock();
6361 return ret;
6362}
6363
6364/*
6365 * Receive registration and populate the given msg structure.
6366 *
6367 * On success return 0 else a negative value returned by the ustctl call.
6368 */
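/*
 * Note: besides the process identity (pid, ppid, uid, gid), the registration
 * message carries the application's ABI description (bitness, type
 * alignments, byte order), which the session daemon later uses to pick the
 * matching consumer and to check compatibility.
 */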
6369int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
6370{
6371 int ret;
6372 uint32_t pid, ppid, uid, gid;
6373
6374 LTTNG_ASSERT(msg);
6375
6376 ret = lttng_ust_ctl_recv_reg_msg(sock,
6377 &msg->type,
6378 &msg->major,
6379 &msg->minor,
6380 &pid,
6381 &ppid,
6382 &uid,
6383 &gid,
6384 &msg->bits_per_long,
6385 &msg->uint8_t_alignment,
6386 &msg->uint16_t_alignment,
6387 &msg->uint32_t_alignment,
6388 &msg->uint64_t_alignment,
6389 &msg->long_alignment,
6390 &msg->byte_order,
6391 msg->name);
6392 if (ret < 0) {
6393 switch (-ret) {
6394 case EPIPE:
6395 case ECONNRESET:
6396 case LTTNG_UST_ERR_EXITING:
6397 DBG3("UST app recv reg message failed. Application died");
6398 break;
6399 case LTTNG_UST_ERR_UNSUP_MAJOR:
6400 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
6401 msg->major,
6402 msg->minor,
6403 LTTNG_UST_ABI_MAJOR_VERSION,
6404 LTTNG_UST_ABI_MINOR_VERSION);
6405 break;
6406 default:
6407 ERR("UST app recv reg message failed with ret %d", ret);
6408 break;
6409 }
6410 goto error;
6411 }
6412 msg->pid = (pid_t) pid;
6413 msg->ppid = (pid_t) ppid;
6414 msg->uid = (uid_t) uid;
6415 msg->gid = (gid_t) gid;
6416
6417error:
6418 return ret;
6419}
6420
6421/*
6422 * Return a ust app session object using the application object and the
6423 * session object descriptor as a key. If not found, NULL is returned.
6424 * An RCU read side lock MUST be acquired when calling this function.
6425 */
6426static struct ust_app_session *find_session_by_objd(struct ust_app *app, int objd)
6427{
6428 struct lttng_ht_node_ulong *node;
6429 struct lttng_ht_iter iter;
6430 struct ust_app_session *ua_sess = nullptr;
6431
6432 LTTNG_ASSERT(app);
6433 ASSERT_RCU_READ_LOCKED();
6434
6435 lttng_ht_lookup(app->ust_sessions_objd, (void *) ((unsigned long) objd), &iter);
6436 node = lttng_ht_iter_get_node_ulong(&iter);
6437 if (node == nullptr) {
6438 DBG2("UST app session find by objd %d not found", objd);
6439 goto error;
6440 }
6441
6442 ua_sess = lttng::utils::container_of(node, &ust_app_session::ust_objd_node);
6443
6444error:
6445 return ua_sess;
6446}
6447
6448/*
6449 * Return a ust app channel object using the application object and the channel
6450 * object descriptor as a key. If not found, NULL is returned. An RCU read side
6451 * lock MUST be acquired before calling this function.
6452 */
6453static struct ust_app_channel *find_channel_by_objd(struct ust_app *app, int objd)
6454{
6455 struct lttng_ht_node_ulong *node;
6456 struct lttng_ht_iter iter;
6457 struct ust_app_channel *ua_chan = nullptr;
6458
6459 LTTNG_ASSERT(app);
6460 ASSERT_RCU_READ_LOCKED();
6461
6462 lttng_ht_lookup(app->ust_objd, (void *) ((unsigned long) objd), &iter);
6463 node = lttng_ht_iter_get_node_ulong(&iter);
6464 if (node == nullptr) {
6465 DBG2("UST app channel find by objd %d not found", objd);
6466 goto error;
6467 }
6468
6469 ua_chan = lttng::utils::container_of(node, &ust_app_channel::ust_objd_node);
6470
6471error:
6472 return ua_chan;
6473}
6474
6475/*
6476 * Reply to a register channel notification from an application on the notify
6477 * socket. The channel metadata is also created.
6478 *
6479 * The session UST registry lock is acquired in this function.
6480 *
6481 * On success 0 is returned else a negative value.
6482 */
6483static int handle_app_register_channel_notification(int sock,
6484 int cobjd,
6485 struct lttng_ust_ctl_field *raw_context_fields,
6486 size_t context_field_count)
6487{
6488 int ret, ret_code = 0;
6489 uint32_t chan_id;
6490 uint64_t chan_reg_key;
6491 struct ust_app *app;
6492 struct ust_app_channel *ua_chan;
6493 struct ust_app_session *ua_sess;
6494 auto ust_ctl_context_fields =
6495 lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(raw_context_fields);
6496
6497 lttng::urcu::read_lock_guard read_lock_guard;
6498
6499 /* Lookup application. If not found, there is a code flow error. */
6500 app = find_app_by_notify_sock(sock);
6501 if (!app) {
6502 DBG("Application socket %d is being torn down. Abort event notify", sock);
6503 return -1;
6504 }
6505
6506 /* Lookup channel by UST object descriptor. */
6507 ua_chan = find_channel_by_objd(app, cobjd);
6508 if (!ua_chan) {
6509 DBG("Application channel is being torn down. Abort event notify");
6510 return 0;
6511 }
6512
6513 LTTNG_ASSERT(ua_chan->session);
6514 ua_sess = ua_chan->session;
6515
6516 /* Get right session registry depending on the session buffer type. */
6517 auto locked_registry_session = get_locked_session_registry(ua_sess);
6518 if (!locked_registry_session) {
6519 DBG("Application session is being torn down. Abort event notify");
6520 return 0;
6521 };
6522
6523 /* Depending on the buffer type, a different channel key is used. */
6524 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6525 chan_reg_key = ua_chan->tracing_channel_id;
6526 } else {
6527 chan_reg_key = ua_chan->key;
6528 }
6529
6530 auto& ust_reg_chan = locked_registry_session->channel(chan_reg_key);
6531
6532 /* Channel id is set during the object creation. */
6533 chan_id = ust_reg_chan.id;
6534
6535 /*
6536 * The application returns the typing information of the channel's
6537 * context fields. In per-PID buffering mode, this is the first and only
6538 * time we get this information. It is our chance to finalize the
6539	 * initialization of the channel and serialize its layout description
6540 * to the trace's metadata.
6541 *
6542 * However, in per-UID buffering mode, every application will provide
6543 * this information (redundantly). The first time will allow us to
6544 * complete the initialization. The following times, we simply validate
6545 * that all apps provide the same typing for the context fields as a
6546 * sanity check.
6547 */
6548 try {
6549 auto app_context_fields = lsu::create_trace_fields_from_ust_ctl_fields(
6550 *locked_registry_session,
6551 ust_ctl_context_fields.get(),
6552 context_field_count,
6553 lst::field_location::root::EVENT_RECORD_COMMON_CONTEXT,
6554 lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS);
6555
6556 if (!ust_reg_chan.is_registered()) {
6557 lst::type::cuptr event_context = app_context_fields.size() ?
6558 lttng::make_unique<lst::structure_type>(
6559 0, std::move(app_context_fields)) :
6560 nullptr;
6561
6562 ust_reg_chan.event_context(std::move(event_context));
6563 } else {
6564 /*
6565 * Validate that the context fields match between
6566			 * the registry and the incoming application.
6567 */
6568 bool context_fields_match;
6569 const auto *previous_event_context = ust_reg_chan.event_context();
6570
6571 if (!previous_event_context) {
6572 context_fields_match = app_context_fields.size() == 0;
6573 } else {
6574 const lst::structure_type app_event_context_struct(
6575 0, std::move(app_context_fields));
6576
6577 context_fields_match = *previous_event_context ==
6578 app_event_context_struct;
6579 }
6580
6581 if (!context_fields_match) {
6582				ERR("Rejecting application channel registration due to context field mismatch: pid = %d, sock = %d",
6583 app->pid,
6584 app->sock);
6585 ret_code = -EINVAL;
6586 goto reply;
6587 }
6588 }
6589 } catch (std::exception& ex) {
6590 ERR("Failed to handle application context: %s", ex.what());
6591 ret_code = -EINVAL;
6592 goto reply;
6593 }
6594
6595reply:
6596 DBG3("UST app replying to register channel key %" PRIu64 " with id %u, ret = %d",
6597 chan_reg_key,
6598 chan_id,
6599 ret_code);
6600
6601 ret = lttng_ust_ctl_reply_register_channel(
6602 sock,
6603 chan_id,
6604 ust_reg_chan.header_type_ == lst::stream_class::header_type::COMPACT ?
6605 LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT :
6606 LTTNG_UST_CTL_CHANNEL_HEADER_LARGE,
6607 ret_code);
6608 if (ret < 0) {
6609 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6610 DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
6611 app->pid,
6612 app->sock);
6613 } else if (ret == -EAGAIN) {
6614 WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
6615 app->pid,
6616 app->sock);
6617 } else {
6618 ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
6619 ret,
6620 app->pid,
6621 app->sock);
6622 }
6623
6624 return ret;
6625 }
6626
6627 /* This channel registry's registration is completed. */
6628 ust_reg_chan.set_as_registered();
6629
6630 return ret;
6631}
6632
6633/*
6634 * Add event to the UST channel registry. When the event is added to the
6635 * registry, the metadata is also created. Once done, this replies to the
6636 * application with the appropriate error code.
6637 *
6638 * The session UST registry lock is acquired in the function.
6639 *
6640 * On success 0 is returned else a negative value.
6641 */
6642static int add_event_ust_registry(int sock,
6643 int sobjd,
6644 int cobjd,
6645 const char *name,
6646 char *raw_signature,
6647 size_t nr_fields,
6648 struct lttng_ust_ctl_field *raw_fields,
6649 int loglevel_value,
6650 char *raw_model_emf_uri)
6651{
6652 int ret, ret_code;
6653 uint32_t event_id = 0;
6654 uint64_t chan_reg_key;
6655 struct ust_app *app;
6656 struct ust_app_channel *ua_chan;
6657 struct ust_app_session *ua_sess;
6658 lttng::urcu::read_lock_guard rcu_lock;
6659 auto signature = lttng::make_unique_wrapper<char, lttng::free>(raw_signature);
6660 auto fields = lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(raw_fields);
6661 auto model_emf_uri = lttng::make_unique_wrapper<char, lttng::free>(raw_model_emf_uri);
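	/*
	 * The unique-pointer wrappers above take ownership of the raw buffers
	 * received from the application so they are freed on every return path
	 * of this function.
	 */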
6662
6663 /* Lookup application. If not found, there is a code flow error. */
6664 app = find_app_by_notify_sock(sock);
6665 if (!app) {
6666 DBG("Application socket %d is being torn down. Abort event notify", sock);
6667 return -1;
6668 }
6669
6670 /* Lookup channel by UST object descriptor. */
6671 ua_chan = find_channel_by_objd(app, cobjd);
6672 if (!ua_chan) {
6673 DBG("Application channel is being torn down. Abort event notify");
6674 return 0;
6675 }
6676
6677 LTTNG_ASSERT(ua_chan->session);
6678 ua_sess = ua_chan->session;
6679
6680 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6681 chan_reg_key = ua_chan->tracing_channel_id;
6682 } else {
6683 chan_reg_key = ua_chan->key;
6684 }
6685
6686 {
6687 auto locked_registry = get_locked_session_registry(ua_sess);
6688 if (locked_registry) {
6689 /*
6690 * From this point on, this call acquires the ownership of the signature,
6691			 * fields and model_emf_uri, meaning any required frees are done inside it.
6692			 * These three variables MUST NOT be read or written after this point.
6693 */
6694 try {
6695 auto& channel = locked_registry->channel(chan_reg_key);
6696
6697 /* event_id is set on success. */
6698 channel.add_event(
6699 sobjd,
6700 cobjd,
6701 name,
6702 signature.get(),
6703 lsu::create_trace_fields_from_ust_ctl_fields(
6704 *locked_registry,
6705 fields.get(),
6706 nr_fields,
6707 lst::field_location::root::EVENT_RECORD_PAYLOAD,
6708 lsu::ctl_field_quirks::
6709 UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS),
6710 loglevel_value,
6711 model_emf_uri.get() ?
6712 nonstd::optional<std::string>(model_emf_uri.get()) :
6713 nonstd::nullopt,
6714 ua_sess->buffer_type,
6715 *app,
6716 event_id);
6717 ret_code = 0;
6718 } catch (const std::exception& ex) {
6719 ERR("Failed to add event `%s` to registry session: %s",
6720 name,
6721 ex.what());
6722 /* Inform the application of the error; don't return directly. */
6723 ret_code = -EINVAL;
6724 }
6725 } else {
6726 DBG("Application session is being torn down. Abort event notify");
6727 return 0;
6728 }
6729 }
6730
6731 /*
6732 * The return value is returned to ustctl so in case of an error, the
6733 * application can be notified. In case of an error, it's important not to
6734 * return a negative error or else the application will get closed.
6735 */
6736 ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
6737 if (ret < 0) {
6738 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6739 DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
6740 app->pid,
6741 app->sock);
6742 } else if (ret == -EAGAIN) {
6743 WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
6744 app->pid,
6745 app->sock);
6746 } else {
6747 ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
6748 ret,
6749 app->pid,
6750 app->sock);
6751 }
6752		 * No need to wipe the created event since the application socket will
6753		 * get closed on error, hence cleaning up everything by itself.
6754 * get close on error hence cleaning up everything by itself.
6755 */
6756 return ret;
6757 }
6758
6759 DBG3("UST registry event %s with id %" PRId32 " added successfully", name, event_id);
6760 return ret;
6761}
6762
6763/*
6764 * Add enum to the UST session registry. Once done, this replies to the
6765 * application with the appropriate error code.
6766 *
6767 * The session UST registry lock is acquired within this function.
6768 *
6769 * On success 0 is returned else a negative value.
6770 */
6771static int add_enum_ust_registry(int sock,
6772 int sobjd,
6773 const char *name,
6774 struct lttng_ust_ctl_enum_entry *raw_entries,
6775 size_t nr_entries)
6776{
6777 int ret = 0;
6778 struct ust_app *app;
6779 struct ust_app_session *ua_sess;
6780 uint64_t enum_id = -1ULL;
6781 lttng::urcu::read_lock_guard read_lock_guard;
6782 auto entries = lttng::make_unique_wrapper<struct lttng_ust_ctl_enum_entry, lttng::free>(
6783 raw_entries);
6784
6785 /* Lookup application. If not found, there is a code flow error. */
6786 app = find_app_by_notify_sock(sock);
6787 if (!app) {
6788		/* Return an error since the application could not be found. */
6789 DBG("Application socket %d is being torn down. Aborting enum registration", sock);
6790 return -1;
6791 }
6792
6793 /* Lookup session by UST object descriptor. */
6794 ua_sess = find_session_by_objd(app, sobjd);
6795 if (!ua_sess) {
6796		/* Return 0 since this is not an error. */
6797 DBG("Application session is being torn down (session not found). Aborting enum registration.");
6798 return 0;
6799 }
6800
6801 auto locked_registry = get_locked_session_registry(ua_sess);
6802 if (!locked_registry) {
6803 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6804 return 0;
6805 }
6806
6807 /*
6808 * From this point on, the callee acquires the ownership of
6809	 * entries. The variable entries MUST NOT be read or written after this
6810	 * call.
6811 */
6812 int application_reply_code;
6813 try {
6814 locked_registry->create_or_find_enum(
6815 sobjd, name, entries.release(), nr_entries, &enum_id);
6816 application_reply_code = 0;
6817 } catch (const std::exception& ex) {
6818 ERR("%s: %s",
6819 fmt::format(
6820 "Failed to create or find enumeration provided by application: app = {}, enumeration name = {}",
6821 *app,
6822 name)
6823 .c_str(),
6824 ex.what());
6825 application_reply_code = -1;
6826 }
6827
6828 /*
6829 * The return value is returned to ustctl so in case of an error, the
6830 * application can be notified. In case of an error, it's important not to
6831 * return a negative error or else the application will get closed.
6832 */
6833 ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, application_reply_code);
6834 if (ret < 0) {
6835 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6836 DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
6837 app->pid,
6838 app->sock);
6839 } else if (ret == -EAGAIN) {
6840 WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
6841 app->pid,
6842 app->sock);
6843 } else {
6844 ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
6845 ret,
6846 app->pid,
6847 app->sock);
6848 }
6849 /*
6850		 * No need to wipe the created enum since the application socket will
6851		 * get closed on error, hence cleaning up everything by itself.
6852 */
6853 return ret;
6854 }
6855
6856 DBG3("UST registry enum %s added successfully or already found", name);
6857 return 0;
6858}
6859
6860/*
6861 * Handle application notification through the given notify socket.
6862 *
6863 * Return 0 on success or else a negative value.
6864 */
6865int ust_app_recv_notify(int sock)
6866{
6867 int ret;
6868 enum lttng_ust_ctl_notify_cmd cmd;
6869
6870 DBG3("UST app receiving notify from sock %d", sock);
6871
6872 ret = lttng_ust_ctl_recv_notify(sock, &cmd);
6873 if (ret < 0) {
6874 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6875 DBG3("UST app recv notify failed. Application died: sock = %d", sock);
6876 } else if (ret == -EAGAIN) {
6877 WARN("UST app recv notify failed. Communication time out: sock = %d", sock);
6878 } else {
6879 ERR("UST app recv notify failed with ret %d: sock = %d", ret, sock);
6880 }
6881 goto error;
6882 }
6883
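	/*
	 * Three notification commands can be received from an application:
	 * event registration, channel registration and enumeration
	 * registration. Each handler replies to the application through the
	 * same notify socket.
	 */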
6884 switch (cmd) {
6885 case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
6886 {
6887 int sobjd, cobjd, loglevel_value;
6888 char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
6889 size_t nr_fields;
6890 struct lttng_ust_ctl_field *fields;
6891
6892 DBG2("UST app ustctl register event received");
6893
6894 ret = lttng_ust_ctl_recv_register_event(sock,
6895 &sobjd,
6896 &cobjd,
6897 name,
6898 &loglevel_value,
6899 &sig,
6900 &nr_fields,
6901 &fields,
6902 &model_emf_uri);
6903 if (ret < 0) {
6904 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6905 DBG3("UST app recv event failed. Application died: sock = %d",
6906 sock);
6907 } else if (ret == -EAGAIN) {
6908 WARN("UST app recv event failed. Communication time out: sock = %d",
6909 sock);
6910 } else {
6911 ERR("UST app recv event failed with ret %d: sock = %d", ret, sock);
6912 }
6913 goto error;
6914 }
6915
6916 {
6917 lttng::urcu::read_lock_guard rcu_lock;
6918 const struct ust_app *app = find_app_by_notify_sock(sock);
6919 if (!app) {
6920 DBG("Application socket %d is being torn down. Abort event notify",
6921 sock);
6922 ret = -1;
6923 goto error;
6924 }
6925 }
6926
6927 if ((!fields && nr_fields > 0) || (fields && nr_fields == 0)) {
6928 ERR("Invalid return value from lttng_ust_ctl_recv_register_event: fields = %p, nr_fields = %zu",
6929 fields,
6930 nr_fields);
6931 ret = -1;
6932 free(fields);
6933 goto error;
6934 }
6935
6936 /*
6937 * Add event to the UST registry coming from the notify socket. This
6938		 * call will free the sig, fields and model_emf_uri if needed. This
6939		 * code path loses ownership of these variables and transfers them
6940		 * to that function.
6941 */
6942 ret = add_event_ust_registry(sock,
6943 sobjd,
6944 cobjd,
6945 name,
6946 sig,
6947 nr_fields,
6948 fields,
6949 loglevel_value,
6950 model_emf_uri);
6951 if (ret < 0) {
6952 goto error;
6953 }
6954
6955 break;
6956 }
6957 case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
6958 {
6959 int sobjd, cobjd;
6960 size_t field_count;
6961 struct lttng_ust_ctl_field *context_fields;
6962
6963 DBG2("UST app ustctl register channel received");
6964
6965 ret = lttng_ust_ctl_recv_register_channel(
6966 sock, &sobjd, &cobjd, &field_count, &context_fields);
6967 if (ret < 0) {
6968 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6969 DBG3("UST app recv channel failed. Application died: sock = %d",
6970 sock);
6971 } else if (ret == -EAGAIN) {
6972 WARN("UST app recv channel failed. Communication time out: sock = %d",
6973 sock);
6974 } else {
6975 ERR("UST app recv channel failed with ret %d: sock = %d",
6976 ret,
6977 sock);
6978 }
6979 goto error;
6980 }
6981
6982 /*
6983		 * Ownership of the fields is transferred to this function call, meaning
6984		 * that they will be freed if needed. After this, it is invalid to access
6985		 * the fields or clean them up.
6986 */
6987 ret = handle_app_register_channel_notification(
6988 sock, cobjd, context_fields, field_count);
6989 if (ret < 0) {
6990 goto error;
6991 }
6992
6993 break;
6994 }
6995 case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
6996 {
6997 int sobjd;
6998 char name[LTTNG_UST_ABI_SYM_NAME_LEN];
6999 size_t nr_entries;
7000 struct lttng_ust_ctl_enum_entry *entries;
7001
7002 DBG2("UST app ustctl register enum received");
7003
7004 ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name, &entries, &nr_entries);
7005 if (ret < 0) {
7006 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
7007 DBG3("UST app recv enum failed. Application died: sock = %d", sock);
7008 } else if (ret == -EAGAIN) {
7009 WARN("UST app recv enum failed. Communication time out: sock = %d",
7010 sock);
7011 } else {
7012 ERR("UST app recv enum failed with ret %d: sock = %d", ret, sock);
7013 }
7014 goto error;
7015 }
7016
7017 /* Callee assumes ownership of entries. */
7018 ret = add_enum_ust_registry(sock, sobjd, name, entries, nr_entries);
7019 if (ret < 0) {
7020 goto error;
7021 }
7022
7023 break;
7024 }
7025 default:
7026 /* Should NEVER happen. */
7027 abort();
7028 }
7029
7030error:
7031 return ret;
7032}
7033
7034/*
7035 * Once the notify socket hangs up, this is called. First, it tries to find the
7036 * corresponding application. On failure, the call_rcu to close the socket is
7037 * executed. If an application is found, it tries to delete it from the notify
7038 * socket hash table. Whatever the result, it proceeds to the call_rcu.
7039 *
7040 * Note that an object needs to be allocated here; on ENOMEM failure, the
7041 * call_rcu is not done but the rest of the cleanup is.
7042 */
7043void ust_app_notify_sock_unregister(int sock)
7044{
7045 int err_enomem = 0;
7046 struct lttng_ht_iter iter;
7047 struct ust_app *app;
7048 struct ust_app_notify_sock_obj *obj;
7049
7050 LTTNG_ASSERT(sock >= 0);
7051
7052 rcu_read_lock();
7053
7054 obj = zmalloc<ust_app_notify_sock_obj>();
7055 if (!obj) {
7056 /*
7057 * An ENOMEM is kind of uncool. If this strikes we continue the
7058 * procedure but the call_rcu will not be called. In this case, we
7059 * accept the fd leak rather than possibly creating an unsynchronized
7060 * state between threads.
7061 *
7062 * TODO: The notify object should be created once the notify socket is
7063		 * registered and stored independently from the ust app object. The
7064 * tricky part is to synchronize the teardown of the application and
7065 * this notify object. Let's keep that in mind so we can avoid this
7066 * kind of shenanigans with ENOMEM in the teardown path.
7067 */
7068 err_enomem = 1;
7069 } else {
7070 obj->fd = sock;
7071 }
7072
7073 DBG("UST app notify socket unregister %d", sock);
7074
7075 /*
7076 * Lookup application by notify socket. If this fails, this means that the
7077 * hash table delete has already been done by the application
7078 * unregistration process so we can safely close the notify socket in a
7079 * call RCU.
7080 */
7081 app = find_app_by_notify_sock(sock);
7082 if (!app) {
7083 goto close_socket;
7084 }
7085
7086 iter.iter.node = &app->notify_sock_n.node;
7087
7088 /*
7089 * Whatever happens here either we fail or succeed, in both cases we have
7090 * to close the socket after a grace period to continue to the call RCU
7091	 * here. If the deletion is successful, the application is no longer
7092	 * visible to other threads; if it fails, it means that it was already
7093	 * deleted from the hash table, so either way we just have to close the
7094 * socket.
7095 */
7096 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
7097
7098close_socket:
7099 rcu_read_unlock();
7100
7101 /*
7102	 * Close the socket after a grace period to avoid the socket being reused
7103	 * before the application object is freed, creating a potential race
7104	 * between threads trying to add a unique entry in the global hash table.
7105 */
7106 if (!err_enomem) {
7107 call_rcu(&obj->head, close_notify_sock_rcu);
7108 }
7109}
7110
7111/*
7112 * Destroy a ust app data structure and free its memory.
7113 */
7114void ust_app_destroy(struct ust_app *app)
7115{
7116 if (!app) {
7117 return;
7118 }
7119
7120 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
7121}
7122
7123/*
7124 * Take a snapshot for a given UST session. The snapshot is sent to the given
7125 * output.
7126 *
7127 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
7128 */
7129enum lttng_error_code ust_app_snapshot_record(const struct ltt_ust_session *usess,
7130 const struct consumer_output *output,
7131 uint64_t nb_packets_per_stream)
7132{
7133 int ret = 0;
7134 enum lttng_error_code status = LTTNG_OK;
7135 struct lttng_ht_iter iter;
7136 struct ust_app *app;
7137 char *trace_path = nullptr;
7138
7139 LTTNG_ASSERT(usess);
7140 LTTNG_ASSERT(output);
7141
7142 rcu_read_lock();
7143
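	/*
	 * Per-UID buffers are snapshotted through the buffer registries (one
	 * per uid/bitness combination), while per-PID buffers are snapshotted
	 * by walking every registered application owning a session. In both
	 * cases the data channels are captured first, followed by the metadata
	 * channel.
	 */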
7144 switch (usess->buffer_type) {
7145 case LTTNG_BUFFER_PER_UID:
7146 {
7147 struct buffer_reg_uid *reg;
7148
7149 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
7150 struct buffer_reg_channel *buf_reg_chan;
7151 struct consumer_socket *socket;
7152 char pathname[PATH_MAX];
7153 size_t consumer_path_offset = 0;
7154
7155 if (!reg->registry->reg.ust->_metadata_key) {
7156 /* Skip since no metadata is present */
7157 continue;
7158 }
7159
7160			/* Get consumer socket to use to push the metadata. */
7161 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7162 usess->consumer);
7163 if (!socket) {
7164 status = LTTNG_ERR_INVALID;
7165 goto error;
7166 }
7167
7168 memset(pathname, 0, sizeof(pathname));
7169 ret = snprintf(pathname,
7170 sizeof(pathname),
7171 DEFAULT_UST_TRACE_UID_PATH,
7172 reg->uid,
7173 reg->bits_per_long);
7174 if (ret < 0) {
7175 PERROR("snprintf snapshot path");
7176 status = LTTNG_ERR_INVALID;
7177 goto error;
7178 }
7179			/* Free the path allocated on the previous iteration. */
7180 free(trace_path);
7181 trace_path = setup_channel_trace_path(
7182 usess->consumer, pathname, &consumer_path_offset);
7183 if (!trace_path) {
7184 status = LTTNG_ERR_INVALID;
7185 goto error;
7186 }
7187 /* Add the UST default trace dir to path. */
7188 cds_lfht_for_each_entry (
7189 reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
7190 status =
7191 consumer_snapshot_channel(socket,
7192 buf_reg_chan->consumer_key,
7193 output,
7194 0,
7195 &trace_path[consumer_path_offset],
7196 nb_packets_per_stream);
7197 if (status != LTTNG_OK) {
7198 goto error;
7199 }
7200 }
7201 status = consumer_snapshot_channel(socket,
7202 reg->registry->reg.ust->_metadata_key,
7203 output,
7204 1,
7205 &trace_path[consumer_path_offset],
7206 0);
7207 if (status != LTTNG_OK) {
7208 goto error;
7209 }
7210 }
7211 break;
7212 }
7213 case LTTNG_BUFFER_PER_PID:
7214 {
7215 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7216 struct consumer_socket *socket;
7217 struct lttng_ht_iter chan_iter;
7218 struct ust_app_channel *ua_chan;
7219 struct ust_app_session *ua_sess;
7220 lsu::registry_session *registry;
7221 char pathname[PATH_MAX];
7222 size_t consumer_path_offset = 0;
7223
7224 ua_sess = lookup_session_by_app(usess, app);
7225 if (!ua_sess) {
7226 /* Session not associated with this app. */
7227 continue;
7228 }
7229
7230 /* Get the right consumer socket for the application. */
7231 socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, output);
7232 if (!socket) {
7233 status = LTTNG_ERR_INVALID;
7234 goto error;
7235 }
7236
7237 /* Add the UST default trace dir to path. */
7238 memset(pathname, 0, sizeof(pathname));
7239 ret = snprintf(pathname, sizeof(pathname), "%s", ua_sess->path);
7240 if (ret < 0) {
7241 status = LTTNG_ERR_INVALID;
7242 PERROR("snprintf snapshot path");
7243 goto error;
7244 }
7245			/* Free the path allocated on the previous iteration. */
7246 free(trace_path);
7247 trace_path = setup_channel_trace_path(
7248 usess->consumer, pathname, &consumer_path_offset);
7249 if (!trace_path) {
7250 status = LTTNG_ERR_INVALID;
7251 goto error;
7252 }
7253 cds_lfht_for_each_entry (
7254 ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
7255 status =
7256 consumer_snapshot_channel(socket,
7257 ua_chan->key,
7258 output,
7259 0,
7260 &trace_path[consumer_path_offset],
7261 nb_packets_per_stream);
7262 switch (status) {
7263 case LTTNG_OK:
7264 break;
7265 case LTTNG_ERR_CHAN_NOT_FOUND:
7266 continue;
7267 default:
7268 goto error;
7269 }
7270 }
7271
7272 registry = get_session_registry(ua_sess);
7273 if (!registry) {
7274 DBG("Application session is being torn down. Skip application.");
7275 continue;
7276 }
7277 status = consumer_snapshot_channel(socket,
7278 registry->_metadata_key,
7279 output,
7280 1,
7281 &trace_path[consumer_path_offset],
7282 0);
7283 switch (status) {
7284 case LTTNG_OK:
7285 break;
7286 case LTTNG_ERR_CHAN_NOT_FOUND:
7287 continue;
7288 default:
7289 goto error;
7290 }
7291 }
7292 break;
7293 }
7294 default:
7295 abort();
7296 break;
7297 }
7298
7299error:
7300 free(trace_path);
7301 rcu_read_unlock();
7302 return status;
7303}
7304
7305/*
7306 * Return the size taken by one more packet per stream.
7307 */
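/*
 * For example (hypothetical numbers), a channel with 4 sub-buffers of 256 kiB
 * and 8 streams contributes 8 * 256 kiB = 2 MiB per additional packet, and
 * stops contributing once cur_nr_packets reaches its sub-buffer count, since
 * no more packets can be captured for it.
 */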
7308uint64_t ust_app_get_size_one_more_packet_per_stream(const struct ltt_ust_session *usess,
7309 uint64_t cur_nr_packets)
7310{
7311 uint64_t tot_size = 0;
7312 struct ust_app *app;
7313 struct lttng_ht_iter iter;
7314
7315 LTTNG_ASSERT(usess);
7316
7317 switch (usess->buffer_type) {
7318 case LTTNG_BUFFER_PER_UID:
7319 {
7320 struct buffer_reg_uid *reg;
7321
7322 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
7323 struct buffer_reg_channel *buf_reg_chan;
7324
7325 rcu_read_lock();
7326 cds_lfht_for_each_entry (
7327 reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
7328 if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
7329 /*
7330					 * Don't take the channel into account if we
7331					 * have already grabbed all of its packets.
7332 */
7333 continue;
7334 }
7335 tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
7336 }
7337 rcu_read_unlock();
7338 }
7339 break;
7340 }
7341 case LTTNG_BUFFER_PER_PID:
7342 {
7343 rcu_read_lock();
7344 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7345 struct ust_app_channel *ua_chan;
7346 struct ust_app_session *ua_sess;
7347 struct lttng_ht_iter chan_iter;
7348
7349 ua_sess = lookup_session_by_app(usess, app);
7350 if (!ua_sess) {
7351 /* Session not associated with this app. */
7352 continue;
7353 }
7354
7355 cds_lfht_for_each_entry (
7356 ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
7357 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
7358 /*
7359					 * Don't take the channel into account if we
7360					 * have already grabbed all of its packets.
7361 */
7362 continue;
7363 }
7364 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
7365 }
7366 }
7367 rcu_read_unlock();
7368 break;
7369 }
7370 default:
7371 abort();
7372 break;
7373 }
7374
7375 return tot_size;
7376}
7377
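/*
 * Retrieve the runtime statistics of a per-UID channel through the buffer
 * registry: channels in overwrite mode report lost packets, while channels
 * in discard mode report discarded events.
 */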
7378int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
7379 struct cds_list_head *buffer_reg_uid_list,
7380 struct consumer_output *consumer,
7381 uint64_t uchan_id,
7382 int overwrite,
7383 uint64_t *discarded,
7384 uint64_t *lost)
7385{
7386 int ret;
7387 uint64_t consumer_chan_key;
7388
7389 *discarded = 0;
7390 *lost = 0;
7391
7392 ret = buffer_reg_uid_consumer_channel_key(
7393 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
7394 if (ret < 0) {
7395 /* Not found */
7396 ret = 0;
7397 goto end;
7398 }
7399
7400 if (overwrite) {
7401 ret = consumer_get_lost_packets(ust_session_id, consumer_chan_key, consumer, lost);
7402 } else {
7403 ret = consumer_get_discarded_events(
7404 ust_session_id, consumer_chan_key, consumer, discarded);
7405 }
7406
7407end:
7408 return ret;
7409}
7410
7411int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
7412 struct ltt_ust_channel *uchan,
7413 struct consumer_output *consumer,
7414 int overwrite,
7415 uint64_t *discarded,
7416 uint64_t *lost)
7417{
7418 int ret = 0;
7419 struct lttng_ht_iter iter;
7420 struct lttng_ht_node_str *ua_chan_node;
7421 struct ust_app *app;
7422 struct ust_app_session *ua_sess;
7423 struct ust_app_channel *ua_chan;
7424
7425 *discarded = 0;
7426 *lost = 0;
7427
7428 rcu_read_lock();
7429 /*
7430	 * Iterate over every registered application. Sum counters for
7431	 * all applications containing the requested session and channel.
7432 */
7433 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7434 struct lttng_ht_iter uiter;
7435
7436 ua_sess = lookup_session_by_app(usess, app);
7437 if (ua_sess == nullptr) {
7438 continue;
7439 }
7440
7441 /* Get channel */
7442 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
7443 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
7444 /* If the session is found for the app, the channel must be there */
7445 LTTNG_ASSERT(ua_chan_node);
7446
7447 ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
7448
7449 if (overwrite) {
7450 uint64_t _lost;
7451
7452 ret = consumer_get_lost_packets(usess->id, ua_chan->key, consumer, &_lost);
7453 if (ret < 0) {
7454 break;
7455 }
7456 (*lost) += _lost;
7457 } else {
7458 uint64_t _discarded;
7459
7460 ret = consumer_get_discarded_events(
7461 usess->id, ua_chan->key, consumer, &_discarded);
7462 if (ret < 0) {
7463 break;
7464 }
7465 (*discarded) += _discarded;
7466 }
7467 }
7468
7469 rcu_read_unlock();
7470 return ret;
7471}
7472
7473static int ust_app_regenerate_statedump(struct ltt_ust_session *usess, struct ust_app *app)
7474{
7475 int ret = 0;
7476 struct ust_app_session *ua_sess;
7477
7478	DBG("Regenerating the statedump for ust app pid %d", app->pid);
7479
7480 rcu_read_lock();
7481
7482 ua_sess = lookup_session_by_app(usess, app);
7483 if (ua_sess == nullptr) {
7484		/* The session is being torn down. Ignore and continue. */
7485 goto end;
7486 }
7487
7488 pthread_mutex_lock(&ua_sess->lock);
7489
7490 if (ua_sess->deleted) {
7491 goto end_unlock;
7492 }
7493
7494 pthread_mutex_lock(&app->sock_lock);
7495 ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
7496 pthread_mutex_unlock(&app->sock_lock);
7497
7498end_unlock:
7499 pthread_mutex_unlock(&ua_sess->lock);
7500
7501end:
7502 rcu_read_unlock();
7503 health_code_update();
7504 return ret;
7505}
7506
7507/*
7508 * Regenerate the statedump for each app in the session.
7509 */
7510int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
7511{
7512 int ret = 0;
7513 struct lttng_ht_iter iter;
7514 struct ust_app *app;
7515
7516	DBG("Regenerating the statedump for all UST apps");
7517
7518 rcu_read_lock();
7519
7520 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7521 if (!app->compatible) {
7522 continue;
7523 }
7524
7525 ret = ust_app_regenerate_statedump(usess, app);
7526 if (ret < 0) {
7527 /* Continue to the next app even on error */
7528 continue;
7529 }
7530 }
7531
7532 rcu_read_unlock();
7533
7534 return 0;
7535}
7536
7537/*
7538 * Rotate all the channels of a session.
7539 *
7540 * Return LTTNG_OK on success or else an LTTng error code.
7541 */
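/*
 * Note on ordering: for each buffer registry (per-UID) or application session
 * (per-PID), the data channels are rotated first; any pending metadata is
 * then pushed to the consumer before the metadata channel itself is rotated.
 */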
7542enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
7543{
7544 int ret;
7545 enum lttng_error_code cmd_ret = LTTNG_OK;
7546 struct lttng_ht_iter iter;
7547 struct ust_app *app;
7548 struct ltt_ust_session *usess = session->ust_session;
7549
7550 LTTNG_ASSERT(usess);
7551
7552 rcu_read_lock();
7553
7554 switch (usess->buffer_type) {
7555 case LTTNG_BUFFER_PER_UID:
7556 {
7557 struct buffer_reg_uid *reg;
7558
7559 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
7560 struct buffer_reg_channel *buf_reg_chan;
7561 struct consumer_socket *socket;
7562
7563			/* Get consumer socket to use to push the metadata. */
7564 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7565 usess->consumer);
7566 if (!socket) {
7567 cmd_ret = LTTNG_ERR_INVALID;
7568 goto error;
7569 }
7570
7571 /* Rotate the data channels. */
7572 cds_lfht_for_each_entry (
7573 reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
7574 ret = consumer_rotate_channel(socket,
7575 buf_reg_chan->consumer_key,
7576 usess->consumer,
7577 /* is_metadata_channel */ false);
7578 if (ret < 0) {
7579 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7580 goto error;
7581 }
7582 }
7583
7584 /*
7585 * The metadata channel might not be present.
7586 *
7587 * Consumer stream allocation can be done
7588 * asynchronously and can fail on intermediary
7589			 * operations (e.g. adding a context) and lead to data
7590 * channels created with no metadata channel.
7591 */
7592 if (!reg->registry->reg.ust->_metadata_key) {
7593 /* Skip since no metadata is present. */
7594 continue;
7595 }
7596
7597 {
7598 auto locked_registry = reg->registry->reg.ust->lock();
7599 (void) push_metadata(locked_registry, usess->consumer);
7600 }
7601
7602 ret = consumer_rotate_channel(socket,
7603 reg->registry->reg.ust->_metadata_key,
7604 usess->consumer,
7605 /* is_metadata_channel */ true);
7606 if (ret < 0) {
7607 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7608 goto error;
7609 }
7610 }
7611 break;
7612 }
7613 case LTTNG_BUFFER_PER_PID:
7614 {
7615 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7616 struct consumer_socket *socket;
7617 struct lttng_ht_iter chan_iter;
7618 struct ust_app_channel *ua_chan;
7619 struct ust_app_session *ua_sess;
7620 lsu::registry_session *registry;
7621
7622 ua_sess = lookup_session_by_app(usess, app);
7623 if (!ua_sess) {
7624 /* Session not associated with this app. */
7625 continue;
7626 }
7627
7628 /* Get the right consumer socket for the application. */
7629 socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
7630 usess->consumer);
7631 if (!socket) {
7632 cmd_ret = LTTNG_ERR_INVALID;
7633 goto error;
7634 }
7635
7636 registry = get_session_registry(ua_sess);
7637 if (!registry) {
7638 DBG("Application session is being torn down. Skip application.");
7639 continue;
7640 }
7641
7642 /* Rotate the data channels. */
7643 cds_lfht_for_each_entry (
7644 ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
7645 ret = consumer_rotate_channel(socket,
7646 ua_chan->key,
7647 ua_sess->consumer,
7648 /* is_metadata_channel */ false);
7649 if (ret < 0) {
7650 /* Per-PID buffer and application going away. */
7651 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7652 continue;
7653 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7654 goto error;
7655 }
7656 }
7657
7658 /* Rotate the metadata channel. */
7659 {
7660 auto locked_registry = registry->lock();
7661
7662 (void) push_metadata(locked_registry, usess->consumer);
7663 }
7664 ret = consumer_rotate_channel(socket,
7665 registry->_metadata_key,
7666 ua_sess->consumer,
7667 /* is_metadata_channel */ true);
7668 if (ret < 0) {
7669 /* Per-PID buffer and application going away. */
7670 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7671 continue;
7672 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7673 goto error;
7674 }
7675 }
7676 break;
7677 }
7678 default:
7679 abort();
7680 break;
7681 }
7682
7683 cmd_ret = LTTNG_OK;
7684
7685error:
7686 rcu_read_unlock();
7687 return cmd_ret;
7688}
7689
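/*
 * Create, in the session's current trace chunk, the index subdirectories
 * (which implicitly create the channel paths) for every buffer registry
 * (per-UID buffering) or application session (per-PID buffering).
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */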
7690enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust_session *usess)
7691{
7692 enum lttng_error_code ret = LTTNG_OK;
7693 struct lttng_ht_iter iter;
7694 enum lttng_trace_chunk_status chunk_status;
7695 char *pathname_index;
7696 int fmt_ret;
7697
7698 LTTNG_ASSERT(usess->current_trace_chunk);
7699 rcu_read_lock();
7700
7701 switch (usess->buffer_type) {
7702 case LTTNG_BUFFER_PER_UID:
7703 {
7704 struct buffer_reg_uid *reg;
7705
7706 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
7707 fmt_ret = asprintf(&pathname_index,
7708 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH
7709 "/" DEFAULT_INDEX_DIR,
7710 reg->uid,
7711 reg->bits_per_long);
7712 if (fmt_ret < 0) {
7713 ERR("Failed to format channel index directory");
7714 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7715 goto error;
7716 }
7717
7718 /*
7719 * Create the index subdirectory which will take care
7720 * of implicitly creating the channel's path.
7721 */
7722 chunk_status = lttng_trace_chunk_create_subdirectory(
7723 usess->current_trace_chunk, pathname_index);
7724 free(pathname_index);
7725 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7726 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7727 goto error;
7728 }
7729 }
7730 break;
7731 }
7732 case LTTNG_BUFFER_PER_PID:
7733 {
7734 struct ust_app *app;
7735
7736 /*
7737		 * Create the top-level ust/ directory in case no apps are running.
7738 */
7739 chunk_status = lttng_trace_chunk_create_subdirectory(usess->current_trace_chunk,
7740 DEFAULT_UST_TRACE_DIR);
7741 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7742 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7743 goto error;
7744 }
7745
7746 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7747 struct ust_app_session *ua_sess;
7748 lsu::registry_session *registry;
7749
7750 ua_sess = lookup_session_by_app(usess, app);
7751 if (!ua_sess) {
7752 /* Session not associated with this app. */
7753 continue;
7754 }
7755
7756 registry = get_session_registry(ua_sess);
7757 if (!registry) {
7758 DBG("Application session is being torn down. Skip application.");
7759 continue;
7760 }
7761
7762 fmt_ret = asprintf(&pathname_index,
7763 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
7764 ua_sess->path);
7765 if (fmt_ret < 0) {
7766 ERR("Failed to format channel index directory");
7767 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7768 goto error;
7769 }
7770 /*
7771 * Create the index subdirectory which will take care
7772 * of implicitly creating the channel's path.
7773 */
7774 chunk_status = lttng_trace_chunk_create_subdirectory(
7775 usess->current_trace_chunk, pathname_index);
7776 free(pathname_index);
7777 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7778 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7779 goto error;
7780 }
7781 }
7782 break;
7783 }
7784 default:
7785 abort();
7786 }
7787
7788 ret = LTTNG_OK;
7789error:
7790 rcu_read_unlock();
7791 return ret;
7792}
7793
7794/*
7795 * Clear all the channels of a session.
7796 *
7797 * Return LTTNG_OK on success or else an LTTng error code.
7798 */
7799enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7800{
7801 int ret;
7802 enum lttng_error_code cmd_ret = LTTNG_OK;
7803 struct lttng_ht_iter iter;
7804 struct ust_app *app;
7805 struct ltt_ust_session *usess = session->ust_session;
7806
7807 LTTNG_ASSERT(usess);
7808
7809 rcu_read_lock();
7810
7811 if (usess->active) {
7812 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7813 cmd_ret = LTTNG_ERR_FATAL;
7814 goto end;
7815 }
7816
7817 switch (usess->buffer_type) {
7818 case LTTNG_BUFFER_PER_UID:
7819 {
7820 struct buffer_reg_uid *reg;
7821
7822 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
7823 struct buffer_reg_channel *buf_reg_chan;
7824 struct consumer_socket *socket;
7825
7826			/* Get the consumer socket to use to push the metadata. */
7827 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7828 usess->consumer);
7829 if (!socket) {
7830 cmd_ret = LTTNG_ERR_INVALID;
7831 goto error_socket;
7832 }
7833
7834 /* Clear the data channels. */
7835 cds_lfht_for_each_entry (
7836 reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
7837 ret = consumer_clear_channel(socket, buf_reg_chan->consumer_key);
7838 if (ret < 0) {
7839 goto error;
7840 }
7841 }
7842
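			/*
			 * Push any metadata still held by the registry to the
			 * consumer before the metadata channel is cleared below.
			 */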
7843 {
7844 auto locked_registry = reg->registry->reg.ust->lock();
7845 (void) push_metadata(locked_registry, usess->consumer);
7846 }
7847
7848 /*
7849 * Clear the metadata channel.
7850			 * The metadata channel is not cleared per se, but we still need
7851			 * to perform a rotation operation on it behind the scenes.
7852 */
7853 ret = consumer_clear_channel(socket, reg->registry->reg.ust->_metadata_key);
7854 if (ret < 0) {
7855 goto error;
7856 }
7857 }
7858 break;
7859 }
7860 case LTTNG_BUFFER_PER_PID:
7861 {
7862 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7863 struct consumer_socket *socket;
7864 struct lttng_ht_iter chan_iter;
7865 struct ust_app_channel *ua_chan;
7866 struct ust_app_session *ua_sess;
7867 lsu::registry_session *registry;
7868
7869 ua_sess = lookup_session_by_app(usess, app);
7870 if (!ua_sess) {
7871 /* Session not associated with this app. */
7872 continue;
7873 }
7874
7875 /* Get the right consumer socket for the application. */
7876 socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
7877 usess->consumer);
7878 if (!socket) {
7879 cmd_ret = LTTNG_ERR_INVALID;
7880 goto error_socket;
7881 }
7882
7883 registry = get_session_registry(ua_sess);
7884 if (!registry) {
7885 DBG("Application session is being torn down. Skip application.");
7886 continue;
7887 }
7888
7889 /* Clear the data channels. */
7890 cds_lfht_for_each_entry (
7891 ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
7892 ret = consumer_clear_channel(socket, ua_chan->key);
7893 if (ret < 0) {
7894 /* Per-PID buffer and application going away. */
7895 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7896 continue;
7897 }
7898 goto error;
7899 }
7900 }
7901
7902 {
7903 auto locked_registry = registry->lock();
7904 (void) push_metadata(locked_registry, usess->consumer);
7905 }
7906
7907 /*
7908 * Clear the metadata channel.
7909			 * The metadata channel is not cleared per se, but we still need
7910			 * to perform a rotation operation on it behind the scenes.
7911 */
7912 ret = consumer_clear_channel(socket, registry->_metadata_key);
7913 if (ret < 0) {
7914 /* Per-PID buffer and application going away. */
7915 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7916 continue;
7917 }
7918 goto error;
7919 }
7920 }
7921 break;
7922 }
7923 default:
7924 abort();
7925 break;
7926 }
7927
7928 cmd_ret = LTTNG_OK;
7929 goto end;
7930
7931error:
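	/* Translate the error reported by the consumer daemon into an LTTng error code. */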
7932 switch (-ret) {
7933 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7934 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7935 break;
7936 default:
7937 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7938 }
7939
7940error_socket:
7941end:
7942 rcu_read_unlock();
7943 return cmd_ret;
7944}
7945
7946/*
7947 * This function skips the metadata channel as the begin/end timestamps of a
7948 * metadata packet are useless.
7949 *
7950 * Moreover, opening a packet after a "clear" will cause problems for live
7951 * sessions as it will introduce padding that was not part of the first trace
7952 * chunk. The relay daemon expects the metadata stream contents of
7953 * successive trace chunks to be strict supersets of one another.
7954 *
7955 * For example, flushing a packet at the beginning of the metadata stream of
7956 * a trace chunk resulting from a "clear" session command will cause the
7957 * size of the metadata stream of the new trace chunk to not match the size of
7958 * the metadata stream of the original chunk. This will confuse the relay
7959 * daemon as the same "offset" in a metadata stream will no longer point
7960 * to the same content.
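 *
 * Return LTTNG_OK on success or else an LTTng error code.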
7961 */
7962enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7963{
7964 enum lttng_error_code ret = LTTNG_OK;
7965 struct lttng_ht_iter iter;
7966 struct ltt_ust_session *usess = session->ust_session;
7967
7968 LTTNG_ASSERT(usess);
7969
7970 rcu_read_lock();
7971
7972 switch (usess->buffer_type) {
7973 case LTTNG_BUFFER_PER_UID:
7974 {
7975 struct buffer_reg_uid *reg;
7976
7977 cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
7978 struct buffer_reg_channel *buf_reg_chan;
7979 struct consumer_socket *socket;
7980
7981 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7982 usess->consumer);
7983 if (!socket) {
7984 ret = LTTNG_ERR_FATAL;
7985 goto error;
7986 }
7987
7988 cds_lfht_for_each_entry (
7989 reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
7990 const int open_ret = consumer_open_channel_packets(
7991 socket, buf_reg_chan->consumer_key);
7992
7993 if (open_ret < 0) {
7994 ret = LTTNG_ERR_UNK;
7995 goto error;
7996 }
7997 }
7998 }
7999 break;
8000 }
8001 case LTTNG_BUFFER_PER_PID:
8002 {
8003 struct ust_app *app;
8004
8005 cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
8006 struct consumer_socket *socket;
8007 struct lttng_ht_iter chan_iter;
8008 struct ust_app_channel *ua_chan;
8009 struct ust_app_session *ua_sess;
8010 lsu::registry_session *registry;
8011
8012 ua_sess = lookup_session_by_app(usess, app);
8013 if (!ua_sess) {
8014 /* Session not associated with this app. */
8015 continue;
8016 }
8017
8018 /* Get the right consumer socket for the application. */
8019 socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
8020 usess->consumer);
8021 if (!socket) {
8022 ret = LTTNG_ERR_FATAL;
8023 goto error;
8024 }
8025
8026 registry = get_session_registry(ua_sess);
8027 if (!registry) {
8028 DBG("Application session is being torn down. Skip application.");
8029 continue;
8030 }
8031
8032 cds_lfht_for_each_entry (
8033 ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) {
8034 const int open_ret =
8035 consumer_open_channel_packets(socket, ua_chan->key);
8036
8037 if (open_ret < 0) {
8038 /*
8039 * Per-PID buffer and application going
8040 * away.
8041 */
8042 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
8043 continue;
8044 }
8045
8046 ret = LTTNG_ERR_UNK;
8047 goto error;
8048 }
8049 }
8050 }
8051 break;
8052 }
8053 default:
8054 abort();
8055 break;
8056 }
8057
8058error:
8059 rcu_read_unlock();
8060 return ret;
8061}
8062
8063lsu::ctl_field_quirks ust_app::ctl_field_quirks() const
8064{
8065 /*
8066 * Application contexts are expressed as variants. LTTng-UST announces
8067 * those by registering an enumeration named `..._tag`. It then registers a
8068 * variant as part of the event context that contains the various possible
8069 * types.
8070 *
8071 * Unfortunately, the names used in the enumeration and variant don't
8072 * match: the enumeration names are all prefixed with an underscore while
8073 * the variant type tag fields aren't.
8074 *
8075	 * While the CTF 1.8.3 specification mentions that underscores *should*
8076	 * (not *must*) be removed by CTF readers, Babeltrace 1.x (and possibly
8077	 * others) expects a perfect match between the names used by tags and
8078	 * variants.
8079 *
8080 * When the UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS quirk is enabled,
8081 * the variant's fields are modified to match the mappings of its tag.
8082 *
8083	 * Starting with ABI version 10.x, the variant field and tag mapping
8084	 * names match correctly, making this quirk unnecessary.
8085 */
8086 return v_major <= 9 ? lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS :
8087 lsu::ctl_field_quirks::NONE;
8088}
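
/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * one way a caller might consult the quirk reported above. The helper name is
 * hypothetical, and the quirks enum may be meant to be combined as a flag set,
 * in which case a bitwise test would replace the direct comparison.
 */
#if 0
static bool use_legacy_variant_tag_names(const ust_app& app)
{
	/* True when the legacy (<= 9.x) underscore-prefixed naming quirk applies. */
	return app.ctl_field_quirks() ==
		lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS;
}
#endif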