Clean-up: sessiond: prepend `the_` to global variable names
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9 #define _LGPL_SOURCE
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <inttypes.h>
13 #include <pthread.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/mman.h>
18 #include <sys/stat.h>
19 #include <sys/types.h>
20 #include <unistd.h>
21 #include <urcu/compiler.h>
22 #include <signal.h>
23
24 #include <common/bytecode/bytecode.h>
25 #include <common/compat/errno.h>
26 #include <common/common.h>
27 #include <common/hashtable/utils.h>
28 #include <lttng/event-rule/event-rule.h>
29 #include <lttng/event-rule/event-rule-internal.h>
30 #include <lttng/event-rule/tracepoint.h>
31 #include <lttng/condition/condition.h>
32 #include <lttng/condition/on-event-internal.h>
33 #include <lttng/condition/on-event.h>
34 #include <lttng/trigger/trigger-internal.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
36
37 #include "buffer-registry.h"
38 #include "condition-internal.h"
39 #include "fd-limit.h"
40 #include "health-sessiond.h"
41 #include "ust-app.h"
42 #include "ust-consumer.h"
43 #include "lttng-ust-ctl.h"
44 #include "lttng-ust-error.h"
45 #include "utils.h"
46 #include "session.h"
47 #include "lttng-sessiond.h"
48 #include "notification-thread-commands.h"
49 #include "rotate.h"
50 #include "event.h"
51 #include "event-notifier-error-accounting.h"
52
53
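/*
 * Registered applications. These hash tables are indexed by PID, by
 * command socket and by notify socket, respectively.
 */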
54 struct lttng_ht *ust_app_ht;
55 struct lttng_ht *ust_app_ht_by_sock;
56 struct lttng_ht *ust_app_ht_by_notify_sock;
57
58 static
59 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
60
61 /* Next available channel key. Access under next_channel_key_lock. */
62 static uint64_t _next_channel_key;
63 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
64
65 /* Next available session ID. Access under next_session_id_lock. */
66 static uint64_t _next_session_id;
67 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
68
69 /*
70 * Return the incremented value of next_channel_key.
71 */
72 static uint64_t get_next_channel_key(void)
73 {
74 uint64_t ret;
75
76 pthread_mutex_lock(&next_channel_key_lock);
77 ret = ++_next_channel_key;
78 pthread_mutex_unlock(&next_channel_key_lock);
79 return ret;
80 }
81
82 /*
83 * Return the atomically incremented value of next_session_id.
84 */
85 static uint64_t get_next_session_id(void)
86 {
87 uint64_t ret;
88
89 pthread_mutex_lock(&next_session_id_lock);
90 ret = ++_next_session_id;
91 pthread_mutex_unlock(&next_session_id_lock);
92 return ret;
93 }
94
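/*
 * Copy the channel attributes from the LTTng-UST ABI representation to the
 * ustctl consumer channel attribute layout.
 */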
95 static void copy_channel_attr_to_ustctl(
96 struct ustctl_consumer_channel_attr *attr,
97 struct lttng_ust_abi_channel_attr *uattr)
98 {
99 /* Copy channel attributes since the layout is different. */
100 attr->subbuf_size = uattr->subbuf_size;
101 attr->num_subbuf = uattr->num_subbuf;
102 attr->overwrite = uattr->overwrite;
103 attr->switch_timer_interval = uattr->switch_timer_interval;
104 attr->read_timer_interval = uattr->read_timer_interval;
105 attr->output = uattr->output;
106 attr->blocking_timeout = uattr->u.s.blocking_timeout;
107 }
108
109 /*
110 * Match function for the hash table lookup.
111 *
112 * It matches a UST app event based on four attributes: the event
113 * name, the filter bytecode, the loglevel and the exclusions.
114 */
115 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
116 {
117 struct ust_app_event *event;
118 const struct ust_app_ht_key *key;
119 int ev_loglevel_value;
120
121 assert(node);
122 assert(_key);
123
124 event = caa_container_of(node, struct ust_app_event, node.node);
125 key = _key;
126 ev_loglevel_value = event->attr.loglevel;
127
128 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
129
130 /* Event name */
131 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
132 goto no_match;
133 }
134
135 /* Event loglevel. */
136 if (ev_loglevel_value != key->loglevel_type) {
137 if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
138 && key->loglevel_type == 0 &&
139 ev_loglevel_value == -1) {
140 /*
141 * Match is accepted. This is because on event creation, the
142 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
143 * -1 are accepted for this loglevel type since 0 is the one set by
144 * the API when receiving an enable event.
145 */
146 } else {
147 goto no_match;
148 }
149 }
150
151 /* One of the filters is NULL, fail. */
152 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
153 goto no_match;
154 }
155
156 if (key->filter && event->filter) {
157 /* Both filters exist, check length followed by the bytecode. */
158 if (event->filter->len != key->filter->len ||
159 memcmp(event->filter->data, key->filter->data,
160 event->filter->len) != 0) {
161 goto no_match;
162 }
163 }
164
165 /* One of the exclusions is NULL, fail. */
166 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
167 goto no_match;
168 }
169
170 if (key->exclusion && event->exclusion) {
171 /* Both exclusions exist, check count followed by the names. */
172 if (event->exclusion->count != key->exclusion->count ||
173 memcmp(event->exclusion->names, key->exclusion->names,
174 event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
175 goto no_match;
176 }
177 }
178
179
180 /* Match. */
181 return 1;
182
183 no_match:
184 return 0;
185 }
186
187 /*
188 * Unique add of an ust app event in the given ht. This uses the custom
189 * ht_match_ust_app_event match function and the event name as hash.
190 */
191 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
192 struct ust_app_event *event)
193 {
194 struct cds_lfht_node *node_ptr;
195 struct ust_app_ht_key key;
196 struct lttng_ht *ht;
197
198 assert(ua_chan);
199 assert(ua_chan->events);
200 assert(event);
201
202 ht = ua_chan->events;
203 key.name = event->attr.name;
204 key.filter = event->filter;
205 key.loglevel_type = event->attr.loglevel;
206 key.exclusion = event->exclusion;
207
208 node_ptr = cds_lfht_add_unique(ht->ht,
209 ht->hash_fct(event->node.key, lttng_ht_seed),
210 ht_match_ust_app_event, &key, &event->node.node);
211 assert(node_ptr == &event->node.node);
212 }
213
214 /*
215 * Close the notify socket from the given RCU head object. This MUST be called
216 * through a call_rcu().
217 */
218 static void close_notify_sock_rcu(struct rcu_head *head)
219 {
220 int ret;
221 struct ust_app_notify_sock_obj *obj =
222 caa_container_of(head, struct ust_app_notify_sock_obj, head);
223
224 /* Must have a valid fd here. */
225 assert(obj->fd >= 0);
226
227 ret = close(obj->fd);
228 if (ret) {
229 ERR("close notify sock %d RCU", obj->fd);
230 }
231 lttng_fd_put(LTTNG_FD_APPS, 1);
232
233 free(obj);
234 }
235
236 /*
237 * Return the session registry according to the buffer type of the given
238 * session.
239 *
240 * A registry per UID object MUST exist before calling this function;
241 * NULL is returned if it is not found. The RCU read side lock must be acquired.
242 */
243 static struct ust_registry_session *get_session_registry(
244 struct ust_app_session *ua_sess)
245 {
246 struct ust_registry_session *registry = NULL;
247
248 assert(ua_sess);
249
250 switch (ua_sess->buffer_type) {
251 case LTTNG_BUFFER_PER_PID:
252 {
253 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
254 if (!reg_pid) {
255 goto error;
256 }
257 registry = reg_pid->registry->reg.ust;
258 break;
259 }
260 case LTTNG_BUFFER_PER_UID:
261 {
262 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
263 ua_sess->tracing_id, ua_sess->bits_per_long,
264 lttng_credentials_get_uid(&ua_sess->real_credentials));
265 if (!reg_uid) {
266 goto error;
267 }
268 registry = reg_uid->registry->reg.ust;
269 break;
270 }
271 default:
272 assert(0);
273 };
274
275 error:
276 return registry;
277 }
278
279 /*
280 * Delete ust context safely. RCU read lock must be held before calling
281 * this function.
282 */
283 static
284 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
285 struct ust_app *app)
286 {
287 int ret;
288
289 assert(ua_ctx);
290
291 if (ua_ctx->obj) {
292 pthread_mutex_lock(&app->sock_lock);
293 ret = ustctl_release_object(sock, ua_ctx->obj);
294 pthread_mutex_unlock(&app->sock_lock);
295 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
296 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
297 sock, ua_ctx->obj->handle, ret);
298 }
299 free(ua_ctx->obj);
300 }
301 free(ua_ctx);
302 }
303
304 /*
305 * Delete ust app event safely. RCU read lock must be held before calling
306 * this function.
307 */
308 static
309 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
310 struct ust_app *app)
311 {
312 int ret;
313
314 assert(ua_event);
315
316 free(ua_event->filter);
317 if (ua_event->exclusion != NULL)
318 free(ua_event->exclusion);
319 if (ua_event->obj != NULL) {
320 pthread_mutex_lock(&app->sock_lock);
321 ret = ustctl_release_object(sock, ua_event->obj);
322 pthread_mutex_unlock(&app->sock_lock);
323 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
324 ERR("UST app sock %d release event obj failed with ret %d",
325 sock, ret);
326 }
327 free(ua_event->obj);
328 }
329 free(ua_event);
330 }
331
332 /*
333 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
334 * through a call_rcu().
335 */
336 static
337 void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
338 {
339 struct ust_app_event_notifier_rule *obj = caa_container_of(
340 head, struct ust_app_event_notifier_rule, rcu_head);
341
342 free(obj);
343 }
344
345 /*
346 * Delete ust app event notifier rule safely.
347 */
348 static void delete_ust_app_event_notifier_rule(int sock,
349 struct ust_app_event_notifier_rule *ua_event_notifier_rule,
350 struct ust_app *app)
351 {
352 int ret;
353
354 assert(ua_event_notifier_rule);
355
356 if (ua_event_notifier_rule->exclusion != NULL) {
357 free(ua_event_notifier_rule->exclusion);
358 }
359
360 if (ua_event_notifier_rule->obj != NULL) {
361 pthread_mutex_lock(&app->sock_lock);
362 ret = ustctl_release_object(sock, ua_event_notifier_rule->obj);
363 pthread_mutex_unlock(&app->sock_lock);
364 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
365 ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
366 app->name, (int) app->ppid, ret);
367 }
368
369 free(ua_event_notifier_rule->obj);
370 }
371
372 lttng_trigger_put(ua_event_notifier_rule->trigger);
373 call_rcu(&ua_event_notifier_rule->rcu_head,
374 free_ust_app_event_notifier_rule_rcu);
375 }
376
377 /*
378 * Release ust data object of the given stream.
379 *
380 * Return 0 on success or else a negative value.
381 */
382 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
383 struct ust_app *app)
384 {
385 int ret = 0;
386
387 assert(stream);
388
389 if (stream->obj) {
390 pthread_mutex_lock(&app->sock_lock);
391 ret = ustctl_release_object(sock, stream->obj);
392 pthread_mutex_unlock(&app->sock_lock);
393 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
394 ERR("UST app sock %d release stream obj failed with ret %d",
395 sock, ret);
396 }
397 lttng_fd_put(LTTNG_FD_APPS, 2);
398 free(stream->obj);
399 }
400
401 return ret;
402 }
403
404 /*
405 * Delete ust app stream safely. RCU read lock must be held before calling
406 * this function.
407 */
408 static
409 void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
410 struct ust_app *app)
411 {
412 assert(stream);
413
414 (void) release_ust_app_stream(sock, stream, app);
415 free(stream);
416 }
417
418 /*
419 * We need to execute ht_destroy outside of RCU read-side critical
420 * section and outside of call_rcu thread, so we postpone its execution
421 * using ht_cleanup_push. It is simpler than changing the semantics of
422 * the many callers of delete_ust_app_channel().
423 */
424 static
425 void delete_ust_app_channel_rcu(struct rcu_head *head)
426 {
427 struct ust_app_channel *ua_chan =
428 caa_container_of(head, struct ust_app_channel, rcu_head);
429
430 ht_cleanup_push(ua_chan->ctx);
431 ht_cleanup_push(ua_chan->events);
432 free(ua_chan);
433 }
434
435 /*
436 * Extract the lost packets or discarded events counters when the channel is
437 * being deleted and store the values in the parent channel so we can
438 * access them from 'lttng list' and at stop/destroy.
439 *
440 * The session list lock must be held by the caller.
441 */
442 static
443 void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
444 {
445 uint64_t discarded = 0, lost = 0;
446 struct ltt_session *session;
447 struct ltt_ust_channel *uchan;
448
449 if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
450 return;
451 }
452
453 rcu_read_lock();
454 session = session_find_by_id(ua_chan->session->tracing_id);
455 if (!session || !session->ust_session) {
456 /*
457 * Not finding the session is not an error because there are
458 * multiple ways the channels can be torn down.
459 *
460 * 1) The session daemon can initiate the destruction of the
461 * ust app session after receiving a destroy command or
462 * during its shutdown/teardown.
463 * 2) The application, since we are in per-pid tracing, is
464 * unregistering and tearing down its ust app session.
465 *
466 * Both paths are protected by the session list lock which
467 * ensures that the accounting of lost packets and discarded
468 * events is done exactly once. The session is then unpublished
469 * from the session list, resulting in this condition.
470 */
471 goto end;
472 }
473
474 if (ua_chan->attr.overwrite) {
475 consumer_get_lost_packets(ua_chan->session->tracing_id,
476 ua_chan->key, session->ust_session->consumer,
477 &lost);
478 } else {
479 consumer_get_discarded_events(ua_chan->session->tracing_id,
480 ua_chan->key, session->ust_session->consumer,
481 &discarded);
482 }
483 uchan = trace_ust_find_channel_by_name(
484 session->ust_session->domain_global.channels,
485 ua_chan->name);
486 if (!uchan) {
487 ERR("Missing UST channel to store discarded counters");
488 goto end;
489 }
490
491 uchan->per_pid_closed_app_discarded += discarded;
492 uchan->per_pid_closed_app_lost += lost;
493
494 end:
495 rcu_read_unlock();
496 if (session) {
497 session_put(session);
498 }
499 }
500
501 /*
502 * Delete ust app channel safely. RCU read lock must be held before calling
503 * this function.
504 *
505 * The session list lock must be held by the caller.
506 */
507 static
508 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
509 struct ust_app *app)
510 {
511 int ret;
512 struct lttng_ht_iter iter;
513 struct ust_app_event *ua_event;
514 struct ust_app_ctx *ua_ctx;
515 struct ust_app_stream *stream, *stmp;
516 struct ust_registry_session *registry;
517
518 assert(ua_chan);
519
520 DBG3("UST app deleting channel %s", ua_chan->name);
521
522 /* Wipe stream */
523 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
524 cds_list_del(&stream->list);
525 delete_ust_app_stream(sock, stream, app);
526 }
527
528 /* Wipe context */
529 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
530 cds_list_del(&ua_ctx->list);
531 ret = lttng_ht_del(ua_chan->ctx, &iter);
532 assert(!ret);
533 delete_ust_app_ctx(sock, ua_ctx, app);
534 }
535
536 /* Wipe events */
537 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
538 node.node) {
539 ret = lttng_ht_del(ua_chan->events, &iter);
540 assert(!ret);
541 delete_ust_app_event(sock, ua_event, app);
542 }
543
544 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
545 /* Wipe and free registry from session registry. */
546 registry = get_session_registry(ua_chan->session);
547 if (registry) {
548 ust_registry_channel_del_free(registry, ua_chan->key,
549 sock >= 0);
550 }
551 /*
552 * A negative socket can be used by the caller when
553 * cleaning-up a ua_chan in an error path. Skip the
554 * accounting in this case.
555 */
556 if (sock >= 0) {
557 save_per_pid_lost_discarded_counters(ua_chan);
558 }
559 }
560
561 if (ua_chan->obj != NULL) {
562 /* Remove channel from application UST object descriptor. */
563 iter.iter.node = &ua_chan->ust_objd_node.node;
564 ret = lttng_ht_del(app->ust_objd, &iter);
565 assert(!ret);
566 pthread_mutex_lock(&app->sock_lock);
567 ret = ustctl_release_object(sock, ua_chan->obj);
568 pthread_mutex_unlock(&app->sock_lock);
569 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
570 ERR("UST app sock %d release channel obj failed with ret %d",
571 sock, ret);
572 }
573 lttng_fd_put(LTTNG_FD_APPS, 1);
574 free(ua_chan->obj);
575 }
576 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
577 }
578
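/*
 * Complete the registration of an application by sending the "register done"
 * command on its command socket, under the protection of its socket lock.
 */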
579 int ust_app_register_done(struct ust_app *app)
580 {
581 int ret;
582
583 pthread_mutex_lock(&app->sock_lock);
584 ret = ustctl_register_done(app->sock);
585 pthread_mutex_unlock(&app->sock_lock);
586 return ret;
587 }
588
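/*
 * Release a UST object on behalf of the given application while holding its
 * socket lock. When no application is provided, a socket value of -1 is
 * passed down and no socket lock is taken.
 */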
589 int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
590 {
591 int ret, sock;
592
593 if (app) {
594 pthread_mutex_lock(&app->sock_lock);
595 sock = app->sock;
596 } else {
597 sock = -1;
598 }
599 ret = ustctl_release_object(sock, data);
600 if (app) {
601 pthread_mutex_unlock(&app->sock_lock);
602 }
603 return ret;
604 }
605
606 /*
607 * Push metadata to consumer socket.
608 *
609 * RCU read-side lock must be held to guarantee existence of socket.
610 * Must be called with the ust app session lock held.
611 * Must be called with the registry lock held.
612 *
613 * On success, return the length of metadata pushed or else a negative value.
614 * A return value of -EPIPE means we could not send the metadata,
615 * but it can be caused by recoverable errors (e.g. the application has
616 * terminated concurrently).
617 */
618 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
619 struct consumer_socket *socket, int send_zero_data)
620 {
621 int ret;
622 char *metadata_str = NULL;
623 size_t len, offset, new_metadata_len_sent;
624 ssize_t ret_val;
625 uint64_t metadata_key, metadata_version;
626
627 assert(registry);
628 assert(socket);
629
630 metadata_key = registry->metadata_key;
631
632 /*
633 * Means that no metadata was assigned to the session. This can
634 * happen if no start has been done previously.
635 */
636 if (!metadata_key) {
637 return 0;
638 }
639
640 offset = registry->metadata_len_sent;
641 len = registry->metadata_len - registry->metadata_len_sent;
642 new_metadata_len_sent = registry->metadata_len;
643 metadata_version = registry->metadata_version;
644 if (len == 0) {
645 DBG3("No metadata to push for metadata key %" PRIu64,
646 registry->metadata_key);
647 ret_val = len;
648 if (send_zero_data) {
649 DBG("No metadata to push");
650 goto push_data;
651 }
652 goto end;
653 }
654
655 /* Allocate only what we have to send. */
656 metadata_str = zmalloc(len);
657 if (!metadata_str) {
658 PERROR("zmalloc ust app metadata string");
659 ret_val = -ENOMEM;
660 goto error;
661 }
662 /* Copy what we haven't sent out. */
663 memcpy(metadata_str, registry->metadata + offset, len);
664
665 push_data:
666 pthread_mutex_unlock(&registry->lock);
667 /*
668 * We need to unlock the registry while we push metadata to
669 * break a circular dependency between the consumerd metadata
670 * lock and the sessiond registry lock. Indeed, pushing metadata
671 * to the consumerd awaits that it gets pushed all the way to
672 * relayd, but doing so requires grabbing the metadata lock. If
673 * a concurrent metadata request is being performed by
674 * consumerd, this can try to grab the registry lock on the
675 * sessiond while holding the metadata lock on the consumer
676 * daemon. Those push and pull schemes are performed on two
677 * different bidirectional communication sockets.
678 */
679 ret = consumer_push_metadata(socket, metadata_key,
680 metadata_str, len, offset, metadata_version);
681 pthread_mutex_lock(&registry->lock);
682 if (ret < 0) {
683 /*
684 * There is an acceptable race here between the registry
685 * metadata key assignment and the creation on the
686 * consumer. The session daemon can concurrently push
687 * metadata for this registry while being created on the
688 * consumer since the metadata key of the registry is
689 * assigned *before* it is setup to avoid the consumer
690 * to ask for metadata that could possibly be not found
691 * in the session daemon.
692 *
693 * The metadata will get pushed either by the session
694 * being stopped or the consumer requesting metadata if
695 * that race is triggered.
696 */
697 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
698 ret = 0;
699 } else {
700 ERR("Error pushing metadata to consumer");
701 }
702 ret_val = ret;
703 goto error_push;
704 } else {
705 /*
706 * Metadata may have been concurrently pushed, since
707 * we're not holding the registry lock while pushing to
708 * consumer. This is handled by the fact that we send
709 * the metadata content, size, and the offset at which
710 * that metadata belongs. This may arrive out of order
711 * on the consumer side, and the consumer is able to
712 * deal with overlapping fragments. The consumer
713 * supports overlapping fragments, which must be
714 * contiguous starting from offset 0. We keep the
715 * largest metadata_len_sent value of the concurrent
716 * send.
717 */
718 registry->metadata_len_sent =
719 max_t(size_t, registry->metadata_len_sent,
720 new_metadata_len_sent);
721 }
722 free(metadata_str);
723 return len;
724
725 end:
726 error:
727 if (ret_val) {
728 /*
729 * On error, flag the registry that the metadata is
730 * closed. We were unable to push anything and this
731 * means that either the consumer is not responding or
732 * the metadata cache has been destroyed on the
733 * consumer.
734 */
735 registry->metadata_closed = 1;
736 }
737 error_push:
738 free(metadata_str);
739 return ret_val;
740 }
741
742 /*
743 * For a given application and session, push metadata to consumer.
744 * The consumer socket used to send the metadata is looked up from the
745 * given consumer output according to the bitness recorded in the
746 * registry.
747 * RCU read-side lock must be held while calling this function,
748 * therefore ensuring existence of the registry. It also ensures existence
749 * of the socket throughout this function.
750 *
751 * Return 0 on success else a negative error.
752 * A return value of -EPIPE means we could not send the metadata,
753 * but it can be caused by recoverable errors (e.g. the application has
754 * terminated concurrently).
755 */
756 static int push_metadata(struct ust_registry_session *registry,
757 struct consumer_output *consumer)
758 {
759 int ret_val;
760 ssize_t ret;
761 struct consumer_socket *socket;
762
763 assert(registry);
764 assert(consumer);
765
766 pthread_mutex_lock(&registry->lock);
767 if (registry->metadata_closed) {
768 ret_val = -EPIPE;
769 goto error;
770 }
771
772 /* Get consumer socket to use to push the metadata. */
773 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
774 consumer);
775 if (!socket) {
776 ret_val = -1;
777 goto error;
778 }
779
780 ret = ust_app_push_metadata(registry, socket, 0);
781 if (ret < 0) {
782 ret_val = ret;
783 goto error;
784 }
785 pthread_mutex_unlock(&registry->lock);
786 return 0;
787
788 error:
789 pthread_mutex_unlock(&registry->lock);
790 return ret_val;
791 }
792
793 /*
794 * Send to the consumer a close metadata command for the given session. Once
795 * done, the metadata channel is deleted and the session metadata pointer is
796 * nullified. The session lock MUST be held unless the application is
797 * in the destroy path.
798 *
799 * Do not hold the registry lock while communicating with the consumerd, because
800 * doing so causes inter-process deadlocks between consumerd and sessiond with
801 * the metadata request notification.
802 *
803 * Return 0 on success else a negative value.
804 */
805 static int close_metadata(struct ust_registry_session *registry,
806 struct consumer_output *consumer)
807 {
808 int ret;
809 struct consumer_socket *socket;
810 uint64_t metadata_key;
811 bool registry_was_already_closed;
812
813 assert(registry);
814 assert(consumer);
815
816 rcu_read_lock();
817
818 pthread_mutex_lock(&registry->lock);
819 metadata_key = registry->metadata_key;
820 registry_was_already_closed = registry->metadata_closed;
821 if (metadata_key != 0) {
822 /*
823 * Mark the metadata as closed. Even if the close fails, the consumer
824 * is either not responding or not found, so a second close
825 * should NOT be emitted for this registry.
826 */
827 registry->metadata_closed = 1;
828 }
829 pthread_mutex_unlock(&registry->lock);
830
831 if (metadata_key == 0 || registry_was_already_closed) {
832 ret = 0;
833 goto end;
834 }
835
836 /* Get consumer socket to use to push the metadata. */
837 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
838 consumer);
839 if (!socket) {
840 ret = -1;
841 goto end;
842 }
843
844 ret = consumer_close_metadata(socket, metadata_key);
845 if (ret < 0) {
846 goto end;
847 }
848
849 end:
850 rcu_read_unlock();
851 return ret;
852 }
853
854 /*
855 * We need to execute ht_destroy outside of RCU read-side critical
856 * section and outside of call_rcu thread, so we postpone its execution
857 * using ht_cleanup_push. It is simpler than changing the semantics of
858 * the many callers of delete_ust_app_session().
859 */
860 static
861 void delete_ust_app_session_rcu(struct rcu_head *head)
862 {
863 struct ust_app_session *ua_sess =
864 caa_container_of(head, struct ust_app_session, rcu_head);
865
866 ht_cleanup_push(ua_sess->channels);
867 free(ua_sess);
868 }
869
870 /*
871 * Delete ust app session safely. RCU read lock must be held before calling
872 * this function.
873 *
874 * The session list lock must be held by the caller.
875 */
876 static
877 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
878 struct ust_app *app)
879 {
880 int ret;
881 struct lttng_ht_iter iter;
882 struct ust_app_channel *ua_chan;
883 struct ust_registry_session *registry;
884
885 assert(ua_sess);
886
887 pthread_mutex_lock(&ua_sess->lock);
888
889 assert(!ua_sess->deleted);
890 ua_sess->deleted = true;
891
892 registry = get_session_registry(ua_sess);
893 /* Registry can be null on error path during initialization. */
894 if (registry) {
895 /* Push metadata for application before freeing the application. */
896 (void) push_metadata(registry, ua_sess->consumer);
897
898 /*
899 * Don't ask to close metadata for global per-UID buffers. In that
900 * case, close the metadata only when the trace session is destroyed.
901 * Also, the previous metadata push could have flagged the registry as
902 * closed, so don't send a close command if it is already closed.
903 */
904 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
905 /* And ask to close it for this session registry. */
906 (void) close_metadata(registry, ua_sess->consumer);
907 }
908 }
909
910 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
911 node.node) {
912 ret = lttng_ht_del(ua_sess->channels, &iter);
913 assert(!ret);
914 delete_ust_app_channel(sock, ua_chan, app);
915 }
916
917 /* In the per-PID buffer case, the registry is tied to this session and is destroyed with it. */
918 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
919 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
920 if (reg_pid) {
921 /*
922 * Registry can be null on error path during
923 * initialization.
924 */
925 buffer_reg_pid_remove(reg_pid);
926 buffer_reg_pid_destroy(reg_pid);
927 }
928 }
929
930 if (ua_sess->handle != -1) {
931 pthread_mutex_lock(&app->sock_lock);
932 ret = ustctl_release_handle(sock, ua_sess->handle);
933 pthread_mutex_unlock(&app->sock_lock);
934 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
935 ERR("UST app sock %d release session handle failed with ret %d",
936 sock, ret);
937 }
938 /* Remove session from application UST object descriptor. */
939 iter.iter.node = &ua_sess->ust_objd_node.node;
940 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
941 assert(!ret);
942 }
943
944 pthread_mutex_unlock(&ua_sess->lock);
945
946 consumer_output_put(ua_sess->consumer);
947
948 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
949 }
950
951 /*
952 * Delete a traceable application structure from the global list. Never call
953 * this function outside of a call_rcu call.
954 *
955 * RCU read side lock should _NOT_ be held when calling this function.
956 */
957 static
958 void delete_ust_app(struct ust_app *app)
959 {
960 int ret, sock;
961 struct ust_app_session *ua_sess, *tmp_ua_sess;
962 struct lttng_ht_iter iter;
963 struct ust_app_event_notifier_rule *event_notifier_rule;
964 bool event_notifier_write_fd_is_open;
965
966 /*
967 * The session list lock must be held during this function to guarantee
968 * the existence of ua_sess.
969 */
970 session_lock_list();
971 /* Delete ust app sessions info */
972 sock = app->sock;
973 app->sock = -1;
974
975 /* Wipe sessions */
976 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
977 teardown_node) {
978 /* Free every object in the session and the session. */
979 rcu_read_lock();
980 delete_ust_app_session(sock, ua_sess, app);
981 rcu_read_unlock();
982 }
983
984 /* Remove the event notifier rules associated with this app. */
985 rcu_read_lock();
986 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
987 &iter.iter, event_notifier_rule, node.node) {
988 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
989 assert(!ret);
990
991 delete_ust_app_event_notifier_rule(
992 app->sock, event_notifier_rule, app);
993 }
994
995 rcu_read_unlock();
996
997 ht_cleanup_push(app->sessions);
998 ht_cleanup_push(app->ust_sessions_objd);
999 ht_cleanup_push(app->ust_objd);
1000 ht_cleanup_push(app->token_to_event_notifier_rule_ht);
1001
1002 /*
1003 * This could be NULL if the event notifier setup failed (e.g the app
1004 * was killed or the tracer does not support this feature).
1005 */
1006 if (app->event_notifier_group.object) {
1007 enum lttng_error_code ret_code;
1008 enum event_notifier_error_accounting_status status;
1009
1010 const int event_notifier_read_fd = lttng_pipe_get_readfd(
1011 app->event_notifier_group.event_pipe);
1012
1013 ret_code = notification_thread_command_remove_tracer_event_source(
1014 the_notification_thread_handle,
1015 event_notifier_read_fd);
1016 if (ret_code != LTTNG_OK) {
1017 ERR("Failed to remove application tracer event source from notification thread");
1018 }
1019
1020 status = event_notifier_error_accounting_unregister_app(app);
1021 if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
1022 ERR("Error unregistering app from event notifier error accounting");
1023 }
1024
1025 ustctl_release_object(sock, app->event_notifier_group.object);
1026 free(app->event_notifier_group.object);
1027 }
1028
1029 event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
1030 app->event_notifier_group.event_pipe);
1031 lttng_pipe_destroy(app->event_notifier_group.event_pipe);
1032 /*
1033 * Release the file descriptors reserved for the event notifier pipe.
1034 * The app could be destroyed before the write end of the pipe could be
1035 * passed to the application (and closed). In that case, both file
1036 * descriptors must be released.
1037 */
1038 lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);
1039
1040 /*
1041 * Wait until we have deleted the application from the sock hash table
1042 * before closing this socket, otherwise an application could re-use the
1043 * socket ID and race with the teardown, using the same hash table entry.
1044 *
1045 * It's OK to leave the close in call_rcu. We want it to stay unique for
1046 * all RCU readers that could run concurrently with unregister app,
1047 * therefore we _need_ to only close that socket after a grace period. So
1048 * it should stay in this RCU callback.
1049 *
1050 * This close() is a very important step of the synchronization model so
1051 * every modification to this function must be carefully reviewed.
1052 */
1053 ret = close(sock);
1054 if (ret) {
1055 PERROR("close");
1056 }
1057 lttng_fd_put(LTTNG_FD_APPS, 1);
1058
1059 DBG2("UST app pid %d deleted", app->pid);
1060 free(app);
1061 session_unlock_list();
1062 }
1063
1064 /*
1065 * URCU intermediate call to delete an UST app.
1066 */
1067 static
1068 void delete_ust_app_rcu(struct rcu_head *head)
1069 {
1070 struct lttng_ht_node_ulong *node =
1071 caa_container_of(head, struct lttng_ht_node_ulong, head);
1072 struct ust_app *app =
1073 caa_container_of(node, struct ust_app, pid_n);
1074
1075 DBG3("Call RCU deleting app PID %d", app->pid);
1076 delete_ust_app(app);
1077 }
1078
1079 /*
1080 * Delete the session from the application ht and delete the data structure by
1081 * freeing every object inside and releasing them.
1082 *
1083 * The session list lock must be held by the caller.
1084 */
1085 static void destroy_app_session(struct ust_app *app,
1086 struct ust_app_session *ua_sess)
1087 {
1088 int ret;
1089 struct lttng_ht_iter iter;
1090
1091 assert(app);
1092 assert(ua_sess);
1093
1094 iter.iter.node = &ua_sess->node.node;
1095 ret = lttng_ht_del(app->sessions, &iter);
1096 if (ret) {
1097 /* Already scheduled for teardown. */
1098 goto end;
1099 }
1100
1101 /* Once deleted, free the data structure. */
1102 delete_ust_app_session(app->sock, ua_sess, app);
1103
1104 end:
1105 return;
1106 }
1107
1108 /*
1109 * Alloc new UST app session.
1110 */
1111 static
1112 struct ust_app_session *alloc_ust_app_session(void)
1113 {
1114 struct ust_app_session *ua_sess;
1115
1116 /* Init most of the default values by allocating and zeroing. */
1117 ua_sess = zmalloc(sizeof(struct ust_app_session));
1118 if (ua_sess == NULL) {
1119 PERROR("malloc");
1120 goto error_free;
1121 }
1122
1123 ua_sess->handle = -1;
1124 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1125 ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
1126 pthread_mutex_init(&ua_sess->lock, NULL);
1127
1128 return ua_sess;
1129
1130 error_free:
1131 return NULL;
1132 }
1133
1134 /*
1135 * Alloc new UST app channel.
1136 */
1137 static
1138 struct ust_app_channel *alloc_ust_app_channel(const char *name,
1139 struct ust_app_session *ua_sess,
1140 struct lttng_ust_abi_channel_attr *attr)
1141 {
1142 struct ust_app_channel *ua_chan;
1143
1144 /* Init most of the default values by allocating and zeroing. */
1145 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1146 if (ua_chan == NULL) {
1147 PERROR("malloc");
1148 goto error;
1149 }
1150
1151 /* Setup channel name */
1152 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1153 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1154
1155 ua_chan->enabled = 1;
1156 ua_chan->handle = -1;
1157 ua_chan->session = ua_sess;
1158 ua_chan->key = get_next_channel_key();
1159 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1160 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1161 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1162
1163 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1164 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1165
1166 /* Copy attributes */
1167 if (attr) {
1168 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1169 ua_chan->attr.subbuf_size = attr->subbuf_size;
1170 ua_chan->attr.num_subbuf = attr->num_subbuf;
1171 ua_chan->attr.overwrite = attr->overwrite;
1172 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1173 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1174 ua_chan->attr.output = attr->output;
1175 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
1176 }
1177 /* By default, the channel is a per cpu channel. */
1178 ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
1179
1180 DBG3("UST app channel %s allocated", ua_chan->name);
1181
1182 return ua_chan;
1183
1184 error:
1185 return NULL;
1186 }
1187
1188 /*
1189 * Allocate and initialize a UST app stream.
1190 *
1191 * Return newly allocated stream pointer or NULL on error.
1192 */
1193 struct ust_app_stream *ust_app_alloc_stream(void)
1194 {
1195 struct ust_app_stream *stream = NULL;
1196
1197 stream = zmalloc(sizeof(*stream));
1198 if (stream == NULL) {
1199 PERROR("zmalloc ust app stream");
1200 goto error;
1201 }
1202
1203 /* Zero could be a valid value for a handle so set it to -1. */
1204 stream->handle = -1;
1205
1206 error:
1207 return stream;
1208 }
1209
1210 /*
1211 * Alloc new UST app event.
1212 */
1213 static
1214 struct ust_app_event *alloc_ust_app_event(char *name,
1215 struct lttng_ust_abi_event *attr)
1216 {
1217 struct ust_app_event *ua_event;
1218
1219 /* Init most of the default values by allocating and zeroing. */
1220 ua_event = zmalloc(sizeof(struct ust_app_event));
1221 if (ua_event == NULL) {
1222 PERROR("Failed to allocate ust_app_event structure");
1223 goto error;
1224 }
1225
1226 ua_event->enabled = 1;
1227 strncpy(ua_event->name, name, sizeof(ua_event->name));
1228 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1229 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1230
1231 /* Copy attributes */
1232 if (attr) {
1233 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1234 }
1235
1236 DBG3("UST app event %s allocated", ua_event->name);
1237
1238 return ua_event;
1239
1240 error:
1241 return NULL;
1242 }
1243
1244 /*
1245 * Allocate a new UST app event notifier rule.
1246 */
1247 static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
1248 struct lttng_trigger *trigger)
1249 {
1250 enum lttng_event_rule_generate_exclusions_status
1251 generate_exclusion_status;
1252 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
1253 struct lttng_condition *condition = NULL;
1254 const struct lttng_event_rule *event_rule = NULL;
1255
1256 ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
1257 if (ua_event_notifier_rule == NULL) {
1258 PERROR("Failed to allocate ust_app_event_notifier_rule structure");
1259 goto error;
1260 }
1261
1262 ua_event_notifier_rule->enabled = 1;
1263 ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
1264 lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
1265 ua_event_notifier_rule->token);
1266
1267 condition = lttng_trigger_get_condition(trigger);
1268 assert(condition);
1269 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);
1270
1271 assert(LTTNG_CONDITION_STATUS_OK == lttng_condition_on_event_get_rule(condition, &event_rule));
1272 assert(event_rule);
1273
1274 /* Acquire the event notifier's reference to the trigger. */
1275 lttng_trigger_get(trigger);
1276
1277 ua_event_notifier_rule->trigger = trigger;
1278 ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1279 generate_exclusion_status = lttng_event_rule_generate_exclusions(
1280 event_rule, &ua_event_notifier_rule->exclusion);
1281 switch (generate_exclusion_status) {
1282 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
1283 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
1284 break;
1285 default:
1286 /* Error occurred. */
1287 ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
1288 goto error_put_trigger;
1289 }
1290
1291 DBG3("UST app event notifier rule allocated: token = %" PRIu64,
1292 ua_event_notifier_rule->token);
1293
1294 return ua_event_notifier_rule;
1295
1296 error_put_trigger:
1297 lttng_trigger_put(trigger);
1298 error:
1299 free(ua_event_notifier_rule);
1300 return NULL;
1301 }
1302
1303 /*
1304 * Alloc new UST app context.
1305 */
1306 static
1307 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1308 {
1309 struct ust_app_ctx *ua_ctx;
1310
1311 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1312 if (ua_ctx == NULL) {
1313 goto error;
1314 }
1315
1316 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1317
1318 if (uctx) {
1319 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1320 if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
1321 char *provider_name = NULL, *ctx_name = NULL;
1322
1323 provider_name = strdup(uctx->u.app_ctx.provider_name);
1324 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1325 if (!provider_name || !ctx_name) {
1326 free(provider_name);
1327 free(ctx_name);
1328 goto error;
1329 }
1330
1331 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1332 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1333 }
1334 }
1335
1336 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1337 return ua_ctx;
1338 error:
1339 free(ua_ctx);
1340 return NULL;
1341 }
1342
1343 /*
1344 * Create a liblttng-ust filter bytecode from given bytecode.
1345 *
1346 * Return allocated filter or NULL on error.
1347 */
1348 static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
1349 const struct lttng_bytecode *orig_f)
1350 {
1351 struct lttng_ust_abi_filter_bytecode *filter = NULL;
1352
1353 /* Copy filter bytecode. */
1354 filter = zmalloc(sizeof(*filter) + orig_f->len);
1355 if (!filter) {
1356 PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1357 goto error;
1358 }
1359
1360 assert(sizeof(struct lttng_bytecode) ==
1361 sizeof(struct lttng_ust_abi_filter_bytecode));
1362 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1363 error:
1364 return filter;
1365 }
1366
1367 /*
1368 * Create a liblttng-ust capture bytecode from given bytecode.
1369 *
1370 * Return allocated filter or NULL on error.
1371 */
1372 static struct lttng_ust_abi_capture_bytecode *
1373 create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1374 {
1375 struct lttng_ust_abi_capture_bytecode *capture = NULL;
1376
1377 /* Copy capture bytecode. */
1378 capture = zmalloc(sizeof(*capture) + orig_f->len);
1379 if (!capture) {
1380 PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1381 goto error;
1382 }
1383
1384 assert(sizeof(struct lttng_bytecode) ==
1385 sizeof(struct lttng_ust_abi_capture_bytecode));
1386 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
1387 error:
1388 return capture;
1389 }
1390
1391 /*
1392 * Find an ust_app using the sock and return it. RCU read side lock must be
1393 * held before calling this helper function.
1394 */
1395 struct ust_app *ust_app_find_by_sock(int sock)
1396 {
1397 struct lttng_ht_node_ulong *node;
1398 struct lttng_ht_iter iter;
1399
1400 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1401 node = lttng_ht_iter_get_node_ulong(&iter);
1402 if (node == NULL) {
1403 DBG2("UST app find by sock %d not found", sock);
1404 goto error;
1405 }
1406
1407 return caa_container_of(node, struct ust_app, sock_n);
1408
1409 error:
1410 return NULL;
1411 }
1412
1413 /*
1414 * Find an ust_app using the notify sock and return it. RCU read side lock must
1415 * be held before calling this helper function.
1416 */
1417 static struct ust_app *find_app_by_notify_sock(int sock)
1418 {
1419 struct lttng_ht_node_ulong *node;
1420 struct lttng_ht_iter iter;
1421
1422 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1423 &iter);
1424 node = lttng_ht_iter_get_node_ulong(&iter);
1425 if (node == NULL) {
1426 DBG2("UST app find by notify sock %d not found", sock);
1427 goto error;
1428 }
1429
1430 return caa_container_of(node, struct ust_app, notify_sock_n);
1431
1432 error:
1433 return NULL;
1434 }
1435
1436 /*
1437 * Look up a UST app event based on the event name, filter bytecode,
1438 * loglevel and exclusions.
1439 *
1440 * Return a ust_app_event object or NULL if not found.
1441 */
1442 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1443 const char *name, const struct lttng_bytecode *filter,
1444 int loglevel_value,
1445 const struct lttng_event_exclusion *exclusion)
1446 {
1447 struct lttng_ht_iter iter;
1448 struct lttng_ht_node_str *node;
1449 struct ust_app_event *event = NULL;
1450 struct ust_app_ht_key key;
1451
1452 assert(name);
1453 assert(ht);
1454
1455 /* Setup key for event lookup. */
1456 key.name = name;
1457 key.filter = filter;
1458 key.loglevel_type = loglevel_value;
1459 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1460 key.exclusion = exclusion;
1461
1462 /* Lookup using the event name as hash and a custom match fct. */
1463 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1464 ht_match_ust_app_event, &key, &iter.iter);
1465 node = lttng_ht_iter_get_node_str(&iter);
1466 if (node == NULL) {
1467 goto end;
1468 }
1469
1470 event = caa_container_of(node, struct ust_app_event, node);
1471
1472 end:
1473 return event;
1474 }
1475
1476 /*
1477 * Look-up an event notifier rule based on its token id.
1478 *
1479 * Must be called with the RCU read lock held.
1480 * Return an ust_app_event_notifier_rule object or NULL on error.
1481 */
1482 static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
1483 struct lttng_ht *ht, uint64_t token)
1484 {
1485 struct lttng_ht_iter iter;
1486 struct lttng_ht_node_u64 *node;
1487 struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
1488
1489 assert(ht);
1490
1491 lttng_ht_lookup(ht, &token, &iter);
1492 node = lttng_ht_iter_get_node_u64(&iter);
1493 if (node == NULL) {
1494 DBG2("UST app event notifier rule token not found: token = %" PRIu64,
1495 token);
1496 goto end;
1497 }
1498
1499 event_notifier_rule = caa_container_of(
1500 node, struct ust_app_event_notifier_rule, node);
1501 end:
1502 return event_notifier_rule;
1503 }
1504
1505 /*
1506 * Create the channel context on the tracer.
1507 *
1508 * Called with UST app session lock held.
1509 */
1510 static
1511 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1512 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1513 {
1514 int ret;
1515
1516 health_code_update();
1517
1518 pthread_mutex_lock(&app->sock_lock);
1519 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1520 ua_chan->obj, &ua_ctx->obj);
1521 pthread_mutex_unlock(&app->sock_lock);
1522 if (ret < 0) {
1523 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1524 ERR("UST app create channel context failed for app (pid: %d) "
1525 "with ret %d", app->pid, ret);
1526 } else {
1527 /*
1528 * This is normal behavior, an application can die during the
1529 * creation process. Don't report an error so the execution can
1530 * continue normally.
1531 */
1532 ret = 0;
1533 DBG3("UST app add context failed. Application is dead.");
1534 }
1535 goto error;
1536 }
1537
1538 ua_ctx->handle = ua_ctx->obj->handle;
1539
1540 DBG2("UST app context handle %d created successfully for channel %s",
1541 ua_ctx->handle, ua_chan->name);
1542
1543 error:
1544 health_code_update();
1545 return ret;
1546 }
1547
1548 /*
1549 * Set the filter on the tracer.
1550 */
1551 static int set_ust_object_filter(struct ust_app *app,
1552 const struct lttng_bytecode *bytecode,
1553 struct lttng_ust_abi_object_data *ust_object)
1554 {
1555 int ret;
1556 struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
1557
1558 health_code_update();
1559
1560 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
1561 if (!ust_bytecode) {
1562 ret = -LTTNG_ERR_NOMEM;
1563 goto error;
1564 }
1565 pthread_mutex_lock(&app->sock_lock);
1566 ret = ustctl_set_filter(app->sock, ust_bytecode,
1567 ust_object);
1568 pthread_mutex_unlock(&app->sock_lock);
1569 if (ret < 0) {
1570 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1571 ERR("UST app set object filter failed: object = %p of app pid = %d, ret = %d",
1572 ust_object, app->pid, ret);
1573 } else {
1574 /*
1575 * This is normal behavior, an application can die during the
1576 * creation process. Don't report an error so the execution can
1577 * continue normally.
1578 */
1579 ret = 0;
1580 DBG3("Failed to set UST app object filter. Application is dead.");
1581 }
1582 goto error;
1583 }
1584
1585 DBG2("UST filter successfully set: object = %p", ust_object);
1586
1587 error:
1588 health_code_update();
1589 free(ust_bytecode);
1590 return ret;
1591 }
1592
1593 /*
1594 * Set a capture bytecode for the passed object.
1595 * The sequence number enforces the ordering at runtime and on reception of
1596 * the captured payloads.
1597 */
1598 static int set_ust_capture(struct ust_app *app,
1599 const struct lttng_bytecode *bytecode,
1600 unsigned int capture_seqnum,
1601 struct lttng_ust_abi_object_data *ust_object)
1602 {
1603 int ret;
1604 struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
1605
1606 health_code_update();
1607
1608 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
1609 if (!ust_bytecode) {
1610 ret = -LTTNG_ERR_NOMEM;
1611 goto error;
1612 }
1613
1614 /*
1615 * Set the sequence number to ensure the capture of fields is ordered.
1616 */
1617 ust_bytecode->seqnum = capture_seqnum;
1618
1619 pthread_mutex_lock(&app->sock_lock);
1620 ret = ustctl_set_capture(app->sock, ust_bytecode,
1621 ust_object);
1622 pthread_mutex_unlock(&app->sock_lock);
1623 if (ret < 0) {
1624 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1625 ERR("UST app set object capture failed: object = %p of app pid = %d, ret = %d",
1626 ust_object, app->pid, ret);
1627 } else {
1628 /*
1629 * This is normal behavior, an application can die during the
1630 * creation process. Don't report an error so the execution can
1631 * continue normally.
1632 */
1633 ret = 0;
1634 DBG3("Failed to set UST app object capture. Application is dead.");
1635 }
1636
1637 goto error;
1638 }
1639
1640 DBG2("UST capture successfully set: object = %p", ust_object);
1641
1642 error:
1643 health_code_update();
1644 free(ust_bytecode);
1645 return ret;
1646 }
1647
1648 static
1649 struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
1650 const struct lttng_event_exclusion *exclusion)
1651 {
1652 struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
1653 size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
1654 LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
1655
1656 ust_exclusion = zmalloc(exclusion_alloc_size);
1657 if (!ust_exclusion) {
1658 PERROR("malloc");
1659 goto end;
1660 }
1661
1662 assert(sizeof(struct lttng_event_exclusion) ==
1663 sizeof(struct lttng_ust_abi_event_exclusion));
1664 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1665 end:
1666 return ust_exclusion;
1667 }
1668
1669 /*
1670 * Set event exclusions on the tracer.
1671 */
1672 static int set_ust_object_exclusions(struct ust_app *app,
1673 const struct lttng_event_exclusion *exclusions,
1674 struct lttng_ust_abi_object_data *ust_object)
1675 {
1676 int ret;
1677 struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
1678
1679 assert(exclusions && exclusions->count > 0);
1680
1681 health_code_update();
1682
1683 ust_exclusions = create_ust_exclusion_from_exclusion(
1684 exclusions);
1685 if (!ust_exclusions) {
1686 ret = -LTTNG_ERR_NOMEM;
1687 goto error;
1688 }
1689 pthread_mutex_lock(&app->sock_lock);
1690 ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
1691 pthread_mutex_unlock(&app->sock_lock);
1692 if (ret < 0) {
1693 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1694 ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
1695 "with ret %d", ust_object, app->pid, ret);
1696 } else {
1697 /*
1698 * This is normal behavior, an application can die during the
1699 * creation process. Don't report an error so the execution can
1700 * continue normally.
1701 */
1702 ret = 0;
1703 DBG3("Failed to set UST app object exclusions. Application is dead.");
1704 }
1705 goto error;
1706 }
1707
1708 DBG2("UST exclusions set successfully for object %p", ust_object);
1709
1710 error:
1711 health_code_update();
1712 free(ust_exclusions);
1713 return ret;
1714 }
1715
1716 /*
1717 * Disable the specified object on the UST tracer for the UST session.
1718 */
1719 static int disable_ust_object(struct ust_app *app,
1720 struct lttng_ust_abi_object_data *object)
1721 {
1722 int ret;
1723
1724 health_code_update();
1725
1726 pthread_mutex_lock(&app->sock_lock);
1727 ret = ustctl_disable(app->sock, object);
1728 pthread_mutex_unlock(&app->sock_lock);
1729 if (ret < 0) {
1730 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1731 ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
1732 object, app->pid, ret);
1733 } else {
1734 /*
1735 * This is normal behavior, an application can die during the
1736 * creation process. Don't report an error so the execution can
1737 * continue normally.
1738 */
1739 ret = 0;
1740 DBG3("Failed to disable UST app object. Application is dead.");
1741 }
1742 goto error;
1743 }
1744
1745 DBG2("UST app object %p disabled successfully for app (pid: %d)",
1746 object, app->pid);
1747
1748 error:
1749 health_code_update();
1750 return ret;
1751 }
1752
1753 /*
1754 * Disable the specified channel on the UST tracer for the UST session.
1755 */
1756 static int disable_ust_channel(struct ust_app *app,
1757 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1758 {
1759 int ret;
1760
1761 health_code_update();
1762
1763 pthread_mutex_lock(&app->sock_lock);
1764 ret = ustctl_disable(app->sock, ua_chan->obj);
1765 pthread_mutex_unlock(&app->sock_lock);
1766 if (ret < 0) {
1767 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1768 ERR("UST app channel %s disable failed for app (pid: %d) "
1769 "and session handle %d with ret %d",
1770 ua_chan->name, app->pid, ua_sess->handle, ret);
1771 } else {
1772 /*
1773 * This is normal behavior, an application can die during the
1774 * creation process. Don't report an error so the execution can
1775 * continue normally.
1776 */
1777 ret = 0;
1778 DBG3("UST app disable channel failed. Application is dead.");
1779 }
1780 goto error;
1781 }
1782
1783 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1784 ua_chan->name, app->pid);
1785
1786 error:
1787 health_code_update();
1788 return ret;
1789 }
1790
1791 /*
1792 * Enable the specified channel on the UST tracer for the UST session.
1793 */
1794 static int enable_ust_channel(struct ust_app *app,
1795 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1796 {
1797 int ret;
1798
1799 health_code_update();
1800
1801 pthread_mutex_lock(&app->sock_lock);
1802 ret = ustctl_enable(app->sock, ua_chan->obj);
1803 pthread_mutex_unlock(&app->sock_lock);
1804 if (ret < 0) {
1805 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1806 ERR("UST app channel %s enable failed for app (pid: %d) "
1807 "and session handle %d with ret %d",
1808 ua_chan->name, app->pid, ua_sess->handle, ret);
1809 } else {
1810 /*
1811 * This is normal behavior, an application can die during the
1812 * creation process. Don't report an error so the execution can
1813 * continue normally.
1814 */
1815 ret = 0;
1816 DBG3("UST app enable channel failed. Application is dead.");
1817 }
1818 goto error;
1819 }
1820
1821 ua_chan->enabled = 1;
1822
1823 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1824 ua_chan->name, app->pid);
1825
1826 error:
1827 health_code_update();
1828 return ret;
1829 }
1830
1831 /*
1832 * Enable the specified object on the UST tracer for the UST session.
1833 */
1834 static int enable_ust_object(
1835 struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
1836 {
1837 int ret;
1838
1839 health_code_update();
1840
1841 pthread_mutex_lock(&app->sock_lock);
1842 ret = ustctl_enable(app->sock, ust_object);
1843 pthread_mutex_unlock(&app->sock_lock);
1844 if (ret < 0) {
1845 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1846 ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
1847 ust_object, app->pid, ret);
1848 } else {
1849 /*
1850 * This is normal behavior, an application can die during the
1851 * creation process. Don't report an error so the execution can
1852 * continue normally.
1853 */
1854 ret = 0;
1855 DBG3("Failed to enable UST app object. Application is dead.");
1856 }
1857 goto error;
1858 }
1859
1860 DBG2("UST app object %p enabled successfully for app (pid: %d)",
1861 ust_object, app->pid);
1862
1863 error:
1864 health_code_update();
1865 return ret;
1866 }
1867
1868 /*
1869 * Send channel and stream buffer to application.
1870 *
1871 * Return 0 on success. On error, a negative value is returned.
1872 */
1873 static int send_channel_pid_to_ust(struct ust_app *app,
1874 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1875 {
1876 int ret;
1877 struct ust_app_stream *stream, *stmp;
1878
1879 assert(app);
1880 assert(ua_sess);
1881 assert(ua_chan);
1882
1883 health_code_update();
1884
1885 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1886 app->sock);
1887
1888 /* Send channel to the application. */
1889 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1890 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1891 ret = -ENOTCONN; /* Caused by app exiting. */
1892 goto error;
1893 } else if (ret < 0) {
1894 goto error;
1895 }
1896
1897 health_code_update();
1898
1899 /* Send all streams to application. */
1900 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1901 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1902 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1903 ret = -ENOTCONN; /* Caused by app exiting. */
1904 goto error;
1905 } else if (ret < 0) {
1906 goto error;
1907 }
1908 /* We don't need the stream anymore once sent to the tracer. */
1909 cds_list_del(&stream->list);
1910 delete_ust_app_stream(-1, stream, app);
1911 }
1912 	/* Flag the channel as sent to the application. */
1913 ua_chan->is_sent = 1;
1914
1915 error:
1916 health_code_update();
1917 return ret;
1918 }
1919
1920 /*
1921 * Create the specified event onto the UST tracer for a UST session.
1922 *
1923 * Should be called with session mutex held.
1924 */
1925 static
1926 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1927 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1928 {
1929 int ret = 0;
1930
1931 health_code_update();
1932
1933 /* Create UST event on tracer */
1934 pthread_mutex_lock(&app->sock_lock);
1935 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1936 &ua_event->obj);
1937 pthread_mutex_unlock(&app->sock_lock);
1938 if (ret < 0) {
1939 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1941 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1942 ua_event->attr.name, app->pid, ret);
1943 } else {
1944 /*
1945 * This is normal behavior, an application can die during the
1946 * creation process. Don't report an error so the execution can
1947 * continue normally.
1948 */
1949 ret = 0;
1950 DBG3("UST app create event failed. Application is dead.");
1951 }
1952 goto error;
1953 }
1954
1955 ua_event->handle = ua_event->obj->handle;
1956
1957 DBG2("UST app event %s created successfully for pid:%d object: %p",
1958 ua_event->attr.name, app->pid, ua_event->obj);
1959
1960 health_code_update();
1961
1962 /* Set filter if one is present. */
1963 if (ua_event->filter) {
1964 ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
1965 if (ret < 0) {
1966 goto error;
1967 }
1968 }
1969
1970 /* Set exclusions for the event */
1971 if (ua_event->exclusion) {
1972 ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
1973 if (ret < 0) {
1974 goto error;
1975 }
1976 }
1977
1978 	/* Events are created disabled on the tracer; enable it now if needed. */
1979 if (ua_event->enabled) {
1980 /*
1981 * We now need to explicitly enable the event, since it
1982 * is now disabled at creation.
1983 */
1984 ret = enable_ust_object(app, ua_event->obj);
1985 if (ret < 0) {
1986 /*
1987 * If we hit an EPERM, something is wrong with our enable call. If
1988 * we get an EEXIST, there is a problem on the tracer side since we
1989 * just created it.
1990 */
1991 switch (ret) {
1992 case -LTTNG_UST_ERR_PERM:
1993 /* Code flow problem */
1994 assert(0);
1995 case -LTTNG_UST_ERR_EXIST:
1996 /* It's OK for our use case. */
1997 ret = 0;
1998 break;
1999 default:
2000 break;
2001 }
2002 goto error;
2003 }
2004 }
2005
2006 error:
2007 health_code_update();
2008 return ret;
2009 }
2010
2011 static int init_ust_event_notifier_from_event_rule(
2012 const struct lttng_event_rule *rule,
2013 struct lttng_ust_abi_event_notifier *event_notifier)
2014 {
2015 enum lttng_event_rule_status status;
2016 enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2017 int loglevel = -1, ret = 0;
2018 const char *pattern;
2019
2020 	/* For now, only LTTNG_EVENT_RULE_TYPE_TRACEPOINT is supported. */
2021 assert(lttng_event_rule_get_type(rule) ==
2022 LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2023
2024 memset(event_notifier, 0, sizeof(*event_notifier));
2025
2026 if (lttng_event_rule_targets_agent_domain(rule)) {
2027 /*
2028 * Special event for agents
2029 * The actual meat of the event is in the filter that will be
2030 * attached later on.
2031 * Set the default values for the agent event.
2032 */
2033 pattern = event_get_default_agent_ust_name(
2034 lttng_event_rule_get_domain_type(rule));
2035 loglevel = 0;
2036 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2037 } else {
2038 const struct lttng_log_level_rule *log_level_rule;
2039
2040 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
2041 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
2042 /* At this point, this is a fatal error. */
2043 abort();
2044 }
2045
2046 status = lttng_event_rule_tracepoint_get_log_level_rule(
2047 rule, &log_level_rule);
2048 if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
2049 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2050 } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
2051 enum lttng_log_level_rule_status llr_status;
2052
2053 switch (lttng_log_level_rule_get_type(log_level_rule)) {
2054 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
2055 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
2056 llr_status = lttng_log_level_rule_exactly_get_level(
2057 log_level_rule, &loglevel);
2058 break;
2059 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
2060 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
2061 llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
2062 log_level_rule, &loglevel);
2063 break;
2064 default:
2065 abort();
2066 }
2067
2068 assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
2069 } else {
2070 /* At this point this is a fatal error. */
2071 abort();
2072 }
2073 }
2074
2075 event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
2076 ret = lttng_strncpy(event_notifier->event.name, pattern,
2077 LTTNG_UST_ABI_SYM_NAME_LEN - 1);
2078 if (ret) {
2079 ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
2080 pattern);
2081 goto end;
2082 }
2083
2084 event_notifier->event.loglevel_type = ust_loglevel_type;
2085 event_notifier->event.loglevel = loglevel;
2086 end:
2087 return ret;
2088 }
2089
2090 /*
2091 * Create the specified event notifier against the user space tracer of a
2092 * given application.
2093 */
2094 static int create_ust_event_notifier(struct ust_app *app,
2095 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2096 {
2097 int ret = 0;
2098 enum lttng_condition_status condition_status;
2099 const struct lttng_condition *condition = NULL;
2100 struct lttng_ust_abi_event_notifier event_notifier;
2101 const struct lttng_event_rule *event_rule = NULL;
2102 unsigned int capture_bytecode_count = 0, i;
2103 enum lttng_condition_status cond_status;
2104
2105 health_code_update();
2106 assert(app->event_notifier_group.object);
2107
2108 condition = lttng_trigger_get_const_condition(
2109 ua_event_notifier_rule->trigger);
2110 assert(condition);
2111 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);
2112
2113 condition_status = lttng_condition_on_event_get_rule(
2114 condition, &event_rule);
2115 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
2116
2117 assert(event_rule);
2118 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2119
2120 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
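	/*
	 * The token identifies the originating trigger; the error counter index
	 * is this notifier's slot in the event notifier group's error counter,
	 * used to account for notifications that could not be delivered.
	 */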
2121 event_notifier.event.token = ua_event_notifier_rule->token;
2122 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
2123
2124 /* Create UST event notifier against the tracer. */
2125 pthread_mutex_lock(&app->sock_lock);
2126 ret = ustctl_create_event_notifier(app->sock, &event_notifier,
2127 app->event_notifier_group.object,
2128 &ua_event_notifier_rule->obj);
2129 pthread_mutex_unlock(&app->sock_lock);
2130 if (ret < 0) {
2131 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2132 ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
2133 event_notifier.event.name, app->name,
2134 app->ppid, ret);
2135 } else {
2136 /*
2137 * This is normal behavior, an application can die
2138 * during the creation process. Don't report an error so
2139 * the execution can continue normally.
2140 */
2141 ret = 0;
2142 DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
2143 app->name, app->ppid);
2144 }
2145
2146 goto error;
2147 }
2148
2149 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2150
2151 DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
2152 event_notifier.event.name, app->name, app->ppid,
2153 ua_event_notifier_rule->obj);
2154
2155 health_code_update();
2156
2157 /* Set filter if one is present. */
2158 if (ua_event_notifier_rule->filter) {
2159 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2160 ua_event_notifier_rule->obj);
2161 if (ret < 0) {
2162 goto error;
2163 }
2164 }
2165
2166 /* Set exclusions for the event. */
2167 if (ua_event_notifier_rule->exclusion) {
2168 ret = set_ust_object_exclusions(app,
2169 ua_event_notifier_rule->exclusion,
2170 ua_event_notifier_rule->obj);
2171 if (ret < 0) {
2172 goto error;
2173 }
2174 }
2175
2176 /* Set the capture bytecodes. */
2177 cond_status = lttng_condition_on_event_get_capture_descriptor_count(
2178 condition, &capture_bytecode_count);
2179 assert(cond_status == LTTNG_CONDITION_STATUS_OK);
2180
2181 for (i = 0; i < capture_bytecode_count; i++) {
2182 const struct lttng_bytecode *capture_bytecode =
2183 lttng_condition_on_event_get_capture_bytecode_at_index(
2184 condition, i);
2185
2186 ret = set_ust_capture(app, capture_bytecode, i,
2187 ua_event_notifier_rule->obj);
2188 if (ret < 0) {
2189 goto error;
2190 }
2191 }
2192
2193 /*
2194 * We now need to explicitly enable the event, since it
2195 * is disabled at creation.
2196 */
2197 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2198 if (ret < 0) {
2199 /*
2200 * If we hit an EPERM, something is wrong with our enable call.
2201 * If we get an EEXIST, there is a problem on the tracer side
2202 * since we just created it.
2203 */
2204 switch (ret) {
2205 case -LTTNG_UST_ERR_PERM:
2206 /* Code flow problem. */
2207 abort();
2208 case -LTTNG_UST_ERR_EXIST:
2209 /* It's OK for our use case. */
2210 ret = 0;
2211 break;
2212 default:
2213 break;
2214 }
2215
2216 goto error;
2217 }
2218
2219 ua_event_notifier_rule->enabled = true;
2220
2221 error:
2222 health_code_update();
2223 return ret;
2224 }
2225
2226 /*
2227  * Copy data between a UST app event and an LTT event.
2228 */
2229 static void shadow_copy_event(struct ust_app_event *ua_event,
2230 struct ltt_ust_event *uevent)
2231 {
2232 size_t exclusion_alloc_size;
2233
2234 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2235 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2236
2237 ua_event->enabled = uevent->enabled;
2238
2239 /* Copy event attributes */
2240 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2241
2242 /* Copy filter bytecode */
2243 if (uevent->filter) {
2244 ua_event->filter = lttng_bytecode_copy(uevent->filter);
2245 		/* Filter might be NULL here in case of ENOMEM. */
2246 }
2247
2248 /* Copy exclusion data */
2249 if (uevent->exclusion) {
2250 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2251 LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
2252 ua_event->exclusion = zmalloc(exclusion_alloc_size);
2253 if (ua_event->exclusion == NULL) {
2254 PERROR("malloc");
2255 } else {
2256 memcpy(ua_event->exclusion, uevent->exclusion,
2257 exclusion_alloc_size);
2258 }
2259 }
2260 }
2261
2262 /*
2263  * Copy data between a UST app channel and an LTT channel.
2264 */
2265 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2266 struct ltt_ust_channel *uchan)
2267 {
2268 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2269
2270 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2271 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2272
2273 ua_chan->tracefile_size = uchan->tracefile_size;
2274 ua_chan->tracefile_count = uchan->tracefile_count;
2275
2276 /* Copy event attributes since the layout is different. */
2277 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2278 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2279 ua_chan->attr.overwrite = uchan->attr.overwrite;
2280 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2281 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2282 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2283 ua_chan->attr.output = uchan->attr.output;
2284 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2285
2286 /*
2287 * Note that the attribute channel type is not set since the channel on the
2288 * tracing registry side does not have this information.
2289 */
2290
2291 ua_chan->enabled = uchan->enabled;
2292 ua_chan->tracing_channel_id = uchan->id;
2293
2294 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2295 }
2296
2297 /*
2298 * Copy data between a UST app session and a regular LTT session.
2299 */
2300 static void shadow_copy_session(struct ust_app_session *ua_sess,
2301 struct ltt_ust_session *usess, struct ust_app *app)
2302 {
2303 struct tm *timeinfo;
2304 char datetime[16];
2305 int ret;
2306 char tmp_shm_path[PATH_MAX];
2307
2308 timeinfo = localtime(&app->registration_time);
2309 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
2310
2311 DBG2("Shadow copy of session handle %d", ua_sess->handle);
2312
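	/*
	 * tracing_id refers to the ltt_ust_session's id; 'id' is a unique
	 * identifier for this app session, used notably as the per-PID buffer
	 * registry key.
	 */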
2313 ua_sess->tracing_id = usess->id;
2314 ua_sess->id = get_next_session_id();
2315 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2316 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2317 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2318 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
2319 ua_sess->buffer_type = usess->buffer_type;
2320 ua_sess->bits_per_long = app->bits_per_long;
2321
2322 	/* Only one consumer object per session is possible. */
2323 consumer_output_get(usess->consumer);
2324 ua_sess->consumer = usess->consumer;
2325
2326 ua_sess->output_traces = usess->output_traces;
2327 ua_sess->live_timer_interval = usess->live_timer_interval;
2328 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2329 &usess->metadata_attr);
2330
2331 switch (ua_sess->buffer_type) {
2332 case LTTNG_BUFFER_PER_PID:
2333 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2334 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
2335 datetime);
2336 break;
2337 case LTTNG_BUFFER_PER_UID:
2338 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2339 DEFAULT_UST_TRACE_UID_PATH,
2340 lttng_credentials_get_uid(&ua_sess->real_credentials),
2341 app->bits_per_long);
2342 break;
2343 default:
2344 assert(0);
2345 goto error;
2346 }
2347 if (ret < 0) {
2348 		PERROR("snprintf UST shadow copy session");
2349 assert(0);
2350 goto error;
2351 }
2352
2353 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2354 sizeof(ua_sess->root_shm_path));
2355 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
2356 strncpy(ua_sess->shm_path, usess->shm_path,
2357 sizeof(ua_sess->shm_path));
2358 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2359 if (ua_sess->shm_path[0]) {
2360 switch (ua_sess->buffer_type) {
2361 case LTTNG_BUFFER_PER_PID:
2362 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2363 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2364 app->name, app->pid, datetime);
2365 break;
2366 case LTTNG_BUFFER_PER_UID:
2367 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2368 "/" DEFAULT_UST_TRACE_UID_PATH,
2369 app->uid, app->bits_per_long);
2370 break;
2371 default:
2372 assert(0);
2373 goto error;
2374 }
2375 if (ret < 0) {
2376 			PERROR("snprintf UST shadow copy session");
2377 assert(0);
2378 goto error;
2379 }
2380 strncat(ua_sess->shm_path, tmp_shm_path,
2381 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2382 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2383 }
2384 return;
2385
2386 error:
2387 consumer_output_put(ua_sess->consumer);
2388 }
2389
2390 /*
2391  * Session lookup wrapper.
2392 */
2393 static
2394 void __lookup_session_by_app(const struct ltt_ust_session *usess,
2395 struct ust_app *app, struct lttng_ht_iter *iter)
2396 {
2397 /* Get right UST app session from app */
2398 lttng_ht_lookup(app->sessions, &usess->id, iter);
2399 }
2400
2401 /*
2402 * Return ust app session from the app session hashtable using the UST session
2403 * id.
2404 */
2405 static struct ust_app_session *lookup_session_by_app(
2406 const struct ltt_ust_session *usess, struct ust_app *app)
2407 {
2408 struct lttng_ht_iter iter;
2409 struct lttng_ht_node_u64 *node;
2410
2411 __lookup_session_by_app(usess, app, &iter);
2412 node = lttng_ht_iter_get_node_u64(&iter);
2413 if (node == NULL) {
2414 goto error;
2415 }
2416
2417 return caa_container_of(node, struct ust_app_session, node);
2418
2419 error:
2420 return NULL;
2421 }
2422
2423 /*
2424 * Setup buffer registry per PID for the given session and application. If none
2425 * is found, a new one is created, added to the global registry and
2426 * initialized. If regp is valid, it's set with the newly created object.
2427 *
2428 * Return 0 on success or else a negative value.
2429 */
2430 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2431 struct ust_app *app, struct buffer_reg_pid **regp)
2432 {
2433 int ret = 0;
2434 struct buffer_reg_pid *reg_pid;
2435
2436 assert(ua_sess);
2437 assert(app);
2438
2439 rcu_read_lock();
2440
2441 reg_pid = buffer_reg_pid_find(ua_sess->id);
2442 if (!reg_pid) {
2443 /*
2444 * This is the create channel path meaning that if there is NO
2445 * registry available, we have to create one for this session.
2446 */
2447 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2448 ua_sess->root_shm_path, ua_sess->shm_path);
2449 if (ret < 0) {
2450 goto error;
2451 }
2452 } else {
2453 goto end;
2454 }
2455
2456 /* Initialize registry. */
2457 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2458 app->bits_per_long, app->uint8_t_alignment,
2459 app->uint16_t_alignment, app->uint32_t_alignment,
2460 app->uint64_t_alignment, app->long_alignment,
2461 app->byte_order, app->version.major, app->version.minor,
2462 reg_pid->root_shm_path, reg_pid->shm_path,
2463 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2464 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2465 ua_sess->tracing_id,
2466 app->uid);
2467 if (ret < 0) {
2468 /*
2469 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2470 * destroy the buffer registry, because it is always expected
2471 * that if the buffer registry can be found, its ust registry is
2472 * non-NULL.
2473 */
2474 buffer_reg_pid_destroy(reg_pid);
2475 goto error;
2476 }
2477
2478 buffer_reg_pid_add(reg_pid);
2479
2480 DBG3("UST app buffer registry per PID created successfully");
2481
2482 end:
2483 if (regp) {
2484 *regp = reg_pid;
2485 }
2486 error:
2487 rcu_read_unlock();
2488 return ret;
2489 }
2490
2491 /*
2492 * Setup buffer registry per UID for the given session and application. If none
2493 * is found, a new one is created, added to the global registry and
2494 * initialized. If regp is valid, it's set with the newly created object.
2495 *
2496 * Return 0 on success or else a negative value.
2497 */
2498 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2499 struct ust_app_session *ua_sess,
2500 struct ust_app *app, struct buffer_reg_uid **regp)
2501 {
2502 int ret = 0;
2503 struct buffer_reg_uid *reg_uid;
2504
2505 assert(usess);
2506 assert(app);
2507
2508 rcu_read_lock();
2509
2510 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2511 if (!reg_uid) {
2512 /*
2513 * This is the create channel path meaning that if there is NO
2514 * registry available, we have to create one for this session.
2515 */
2516 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2517 LTTNG_DOMAIN_UST, &reg_uid,
2518 ua_sess->root_shm_path, ua_sess->shm_path);
2519 if (ret < 0) {
2520 goto error;
2521 }
2522 } else {
2523 goto end;
2524 }
2525
2526 /* Initialize registry. */
2527 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2528 app->bits_per_long, app->uint8_t_alignment,
2529 app->uint16_t_alignment, app->uint32_t_alignment,
2530 app->uint64_t_alignment, app->long_alignment,
2531 app->byte_order, app->version.major,
2532 app->version.minor, reg_uid->root_shm_path,
2533 reg_uid->shm_path, usess->uid, usess->gid,
2534 ua_sess->tracing_id, app->uid);
2535 if (ret < 0) {
2536 /*
2537 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2538 * destroy the buffer registry, because it is always expected
2539 * that if the buffer registry can be found, its ust registry is
2540 * non-NULL.
2541 */
2542 buffer_reg_uid_destroy(reg_uid, NULL);
2543 goto error;
2544 }
2545 /* Add node to teardown list of the session. */
2546 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2547
2548 buffer_reg_uid_add(reg_uid);
2549
2550 DBG3("UST app buffer registry per UID created successfully");
2551 end:
2552 if (regp) {
2553 *regp = reg_uid;
2554 }
2555 error:
2556 rcu_read_unlock();
2557 return ret;
2558 }
2559
2560 /*
2561 * Create a session on the tracer side for the given app.
2562 *
2563 * On success, ua_sess_ptr is populated with the session pointer or else left
2564 * untouched. If the session was created, is_created is set to 1. On error,
2565 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2566 * be NULL.
2567 *
2568  * Returns 0 on success, or a negative code: either -ENOMEM or -ENOTCONN,
2569  * the default code when ustctl_create_session fails.
2570 */
2571 static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2572 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2573 int *is_created)
2574 {
2575 int ret, created = 0;
2576 struct ust_app_session *ua_sess;
2577
2578 assert(usess);
2579 assert(app);
2580 assert(ua_sess_ptr);
2581
2582 health_code_update();
2583
2584 ua_sess = lookup_session_by_app(usess, app);
2585 if (ua_sess == NULL) {
2586 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2587 app->pid, usess->id);
2588 ua_sess = alloc_ust_app_session();
2589 if (ua_sess == NULL) {
2590 			/* Only malloc can fail, so something is really wrong. */
2591 ret = -ENOMEM;
2592 goto error;
2593 }
2594 shadow_copy_session(ua_sess, usess, app);
2595 created = 1;
2596 }
2597
2598 switch (usess->buffer_type) {
2599 case LTTNG_BUFFER_PER_PID:
2600 /* Init local registry. */
2601 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2602 if (ret < 0) {
2603 delete_ust_app_session(-1, ua_sess, app);
2604 goto error;
2605 }
2606 break;
2607 case LTTNG_BUFFER_PER_UID:
2608 /* Look for a global registry. If none exists, create one. */
2609 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2610 if (ret < 0) {
2611 delete_ust_app_session(-1, ua_sess, app);
2612 goto error;
2613 }
2614 break;
2615 default:
2616 assert(0);
2617 ret = -EINVAL;
2618 goto error;
2619 }
2620
2621 health_code_update();
2622
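	/* A handle of -1 means the session has not yet been created on the tracer side. */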
2623 if (ua_sess->handle == -1) {
2624 pthread_mutex_lock(&app->sock_lock);
2625 ret = ustctl_create_session(app->sock);
2626 pthread_mutex_unlock(&app->sock_lock);
2627 if (ret < 0) {
2628 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2629 ERR("Creating session for app pid %d with ret %d",
2630 app->pid, ret);
2631 } else {
2632 DBG("UST app creating session failed. Application is dead");
2633 /*
2634 * This is normal behavior, an application can die during the
2635 * creation process. Don't report an error so the execution can
2636 * continue normally. This will get flagged ENOTCONN and the
2637 * caller will handle it.
2638 */
2639 ret = 0;
2640 }
2641 delete_ust_app_session(-1, ua_sess, app);
2642 if (ret != -ENOMEM) {
2643 /*
2644 				 * The tracer is probably gone or hit an internal error, so
2645 				 * behave as if it will soon unregister or is no longer usable.
2646 */
2647 ret = -ENOTCONN;
2648 }
2649 goto error;
2650 }
2651
2652 ua_sess->handle = ret;
2653
2654 /* Add ust app session to app's HT */
2655 lttng_ht_node_init_u64(&ua_sess->node,
2656 ua_sess->tracing_id);
2657 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
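		/* Also index the session by its tracer-assigned object descriptor (handle). */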
2658 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2659 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2660 &ua_sess->ust_objd_node);
2661
2662 DBG2("UST app session created successfully with handle %d", ret);
2663 }
2664
2665 *ua_sess_ptr = ua_sess;
2666 if (is_created) {
2667 *is_created = created;
2668 }
2669
2670 /* Everything went well. */
2671 ret = 0;
2672
2673 error:
2674 health_code_update();
2675 return ret;
2676 }
2677
2678 /*
2679 * Match function for a hash table lookup of ust_app_ctx.
2680 *
2681  * It matches a ust app context based on the context type and, for perf
2682  * counters and application contexts, their names.
2683 */
2684 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2685 {
2686 struct ust_app_ctx *ctx;
2687 const struct lttng_ust_context_attr *key;
2688
2689 assert(node);
2690 assert(_key);
2691
2692 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2693 key = _key;
2694
2695 /* Context type */
2696 if (ctx->ctx.ctx != key->ctx) {
2697 goto no_match;
2698 }
2699
2700 switch(key->ctx) {
2701 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
2702 if (strncmp(key->u.perf_counter.name,
2703 ctx->ctx.u.perf_counter.name,
2704 sizeof(key->u.perf_counter.name))) {
2705 goto no_match;
2706 }
2707 break;
2708 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
2709 if (strcmp(key->u.app_ctx.provider_name,
2710 ctx->ctx.u.app_ctx.provider_name) ||
2711 strcmp(key->u.app_ctx.ctx_name,
2712 ctx->ctx.u.app_ctx.ctx_name)) {
2713 goto no_match;
2714 }
2715 break;
2716 default:
2717 break;
2718 }
2719
2720 /* Match. */
2721 return 1;
2722
2723 no_match:
2724 return 0;
2725 }
2726
2727 /*
2728  * Look up a ust app context from an lttng_ust_context.
2729 *
2730 * Must be called while holding RCU read side lock.
2731 * Return an ust_app_ctx object or NULL on error.
2732 */
2733 static
2734 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2735 struct lttng_ust_context_attr *uctx)
2736 {
2737 struct lttng_ht_iter iter;
2738 struct lttng_ht_node_ulong *node;
2739 struct ust_app_ctx *app_ctx = NULL;
2740
2741 assert(uctx);
2742 assert(ht);
2743
2744 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2745 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2746 ht_match_ust_app_ctx, uctx, &iter.iter);
2747 node = lttng_ht_iter_get_node_ulong(&iter);
2748 if (!node) {
2749 goto end;
2750 }
2751
2752 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2753
2754 end:
2755 return app_ctx;
2756 }
2757
2758 /*
2759 * Create a context for the channel on the tracer.
2760 *
2761 * Called with UST app session lock held and a RCU read side lock.
2762 */
2763 static
2764 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2765 struct lttng_ust_context_attr *uctx,
2766 struct ust_app *app)
2767 {
2768 int ret = 0;
2769 struct ust_app_ctx *ua_ctx;
2770
2771 DBG2("UST app adding context to channel %s", ua_chan->name);
2772
2773 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2774 if (ua_ctx) {
2775 ret = -EEXIST;
2776 goto error;
2777 }
2778
2779 ua_ctx = alloc_ust_app_ctx(uctx);
2780 if (ua_ctx == NULL) {
2781 /* malloc failed */
2782 ret = -ENOMEM;
2783 goto error;
2784 }
2785
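	/*
	 * The context node is hashed by context type; ht_match_ust_app_ctx()
	 * further discriminates perf counter and application contexts by name.
	 */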
2786 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2787 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2788 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2789
2790 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2791 if (ret < 0) {
2792 goto error;
2793 }
2794
2795 error:
2796 return ret;
2797 }
2798
2799 /*
2800 * Enable on the tracer side a ust app event for the session and channel.
2801 *
2802 * Called with UST app session lock held.
2803 */
2804 static
2805 int enable_ust_app_event(struct ust_app_session *ua_sess,
2806 struct ust_app_event *ua_event, struct ust_app *app)
2807 {
2808 int ret;
2809
2810 ret = enable_ust_object(app, ua_event->obj);
2811 if (ret < 0) {
2812 goto error;
2813 }
2814
2815 ua_event->enabled = 1;
2816
2817 error:
2818 return ret;
2819 }
2820
2821 /*
2822 * Disable on the tracer side a ust app event for the session and channel.
2823 */
2824 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2825 struct ust_app_event *ua_event, struct ust_app *app)
2826 {
2827 int ret;
2828
2829 ret = disable_ust_object(app, ua_event->obj);
2830 if (ret < 0) {
2831 goto error;
2832 }
2833
2834 ua_event->enabled = 0;
2835
2836 error:
2837 return ret;
2838 }
2839
2840 /*
2841 * Lookup ust app channel for session and disable it on the tracer side.
2842 */
2843 static
2844 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2845 struct ust_app_channel *ua_chan, struct ust_app *app)
2846 {
2847 int ret;
2848
2849 ret = disable_ust_channel(app, ua_sess, ua_chan);
2850 if (ret < 0) {
2851 goto error;
2852 }
2853
2854 ua_chan->enabled = 0;
2855
2856 error:
2857 return ret;
2858 }
2859
2860 /*
2861 * Lookup ust app channel for session and enable it on the tracer side. This
2862 * MUST be called with a RCU read side lock acquired.
2863 */
2864 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2865 struct ltt_ust_channel *uchan, struct ust_app *app)
2866 {
2867 int ret = 0;
2868 struct lttng_ht_iter iter;
2869 struct lttng_ht_node_str *ua_chan_node;
2870 struct ust_app_channel *ua_chan;
2871
2872 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2873 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2874 if (ua_chan_node == NULL) {
2875 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2876 uchan->name, ua_sess->tracing_id);
2877 goto error;
2878 }
2879
2880 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2881
2882 ret = enable_ust_channel(app, ua_sess, ua_chan);
2883 if (ret < 0) {
2884 goto error;
2885 }
2886
2887 error:
2888 return ret;
2889 }
2890
2891 /*
2892 * Ask the consumer to create a channel and get it if successful.
2893 *
2894 * Called with UST app session lock held.
2895 *
2896 * Return 0 on success or else a negative value.
2897 */
2898 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2899 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2900 int bitness, struct ust_registry_session *registry,
2901 uint64_t trace_archive_id)
2902 {
2903 int ret;
2904 unsigned int nb_fd = 0;
2905 struct consumer_socket *socket;
2906
2907 assert(usess);
2908 assert(ua_sess);
2909 assert(ua_chan);
2910 assert(registry);
2911
2912 rcu_read_lock();
2913 health_code_update();
2914
2915 /* Get the right consumer socket for the application. */
2916 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2917 if (!socket) {
2918 ret = -EINVAL;
2919 goto error;
2920 }
2921
2922 health_code_update();
2923
2924 /* Need one fd for the channel. */
2925 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2926 if (ret < 0) {
2927 ERR("Exhausted number of available FD upon create channel");
2928 goto error;
2929 }
2930
2931 /*
2932 	 * Ask the consumer to create the channel. The consumer will return the
2933 	 * number of streams we have to expect.
2934 */
2935 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2936 registry, usess->current_trace_chunk);
2937 if (ret < 0) {
2938 goto error_ask;
2939 }
2940
2941 /*
2942 	 * Compute the number of fds needed before receiving them. It must be 2 per
2943 * stream (2 being the default value here).
2944 */
2945 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2946
2947 	/* Reserve the number of file descriptors we need. */
2948 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2949 if (ret < 0) {
2950 ERR("Exhausted number of available FD upon create channel");
2951 goto error_fd_get_stream;
2952 }
2953
2954 health_code_update();
2955
2956 /*
2957 	 * Now get the channel from the consumer. This call will populate the stream
2958 * list of that channel and set the ust objects.
2959 */
2960 if (usess->consumer->enabled) {
2961 ret = ust_consumer_get_channel(socket, ua_chan);
2962 if (ret < 0) {
2963 goto error_destroy;
2964 }
2965 }
2966
2967 rcu_read_unlock();
2968 return 0;
2969
2970 error_destroy:
2971 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2972 error_fd_get_stream:
2973 /*
2974 * Initiate a destroy channel on the consumer since we had an error
2975 * handling it on our side. The return value is of no importance since we
2976 * already have a ret value set by the previous error that we need to
2977 * return.
2978 */
2979 (void) ust_consumer_destroy_channel(socket, ua_chan);
2980 error_ask:
2981 lttng_fd_put(LTTNG_FD_APPS, 1);
2982 error:
2983 health_code_update();
2984 rcu_read_unlock();
2985 return ret;
2986 }
2987
2988 /*
2989 * Duplicate the ust data object of the ust app stream and save it in the
2990 * buffer registry stream.
2991 *
2992 * Return 0 on success or else a negative value.
2993 */
2994 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2995 struct ust_app_stream *stream)
2996 {
2997 int ret;
2998
2999 assert(reg_stream);
3000 assert(stream);
3001
3002 	/* Reserve the number of file descriptors we need. */
3003 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3004 if (ret < 0) {
3005 ERR("Exhausted number of available FD upon duplicate stream");
3006 goto error;
3007 }
3008
3009 /* Duplicate object for stream once the original is in the registry. */
3010 ret = ustctl_duplicate_ust_object_data(&stream->obj,
3011 reg_stream->obj.ust);
3012 if (ret < 0) {
3013 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3014 reg_stream->obj.ust, stream->obj, ret);
3015 lttng_fd_put(LTTNG_FD_APPS, 2);
3016 goto error;
3017 }
3018 stream->handle = stream->obj->handle;
3019
3020 error:
3021 return ret;
3022 }
3023
3024 /*
3025  * Duplicate the ust data object of the ust app channel and save it in the
3026 * buffer registry channel.
3027 *
3028 * Return 0 on success or else a negative value.
3029 */
3030 static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
3031 struct ust_app_channel *ua_chan)
3032 {
3033 int ret;
3034
3035 assert(buf_reg_chan);
3036 assert(ua_chan);
3037
3038 	/* Need one fd for the channel. */
3039 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3040 if (ret < 0) {
3041 ERR("Exhausted number of available FD upon duplicate channel");
3042 goto error_fd_get;
3043 }
3044
3045 /* Duplicate object for stream once the original is in the registry. */
3046 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
3047 if (ret < 0) {
3048 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3049 buf_reg_chan->obj.ust, ua_chan->obj, ret);
3050 goto error;
3051 }
3052 ua_chan->handle = ua_chan->obj->handle;
3053
3054 return 0;
3055
3056 error:
3057 lttng_fd_put(LTTNG_FD_APPS, 1);
3058 error_fd_get:
3059 return ret;
3060 }
3061
3062 /*
3063 * For a given channel buffer registry, setup all streams of the given ust
3064 * application channel.
3065 *
3066 * Return 0 on success or else a negative value.
3067 */
3068 static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
3069 struct ust_app_channel *ua_chan,
3070 struct ust_app *app)
3071 {
3072 int ret = 0;
3073 struct ust_app_stream *stream, *stmp;
3074
3075 assert(buf_reg_chan);
3076 assert(ua_chan);
3077
3078 DBG2("UST app setup buffer registry stream");
3079
3080 	/* Move the stream objects from the app channel to the buffer registry. */
3081 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
3082 struct buffer_reg_stream *reg_stream;
3083
3084 ret = buffer_reg_stream_create(&reg_stream);
3085 if (ret < 0) {
3086 goto error;
3087 }
3088
3089 /*
3090 * Keep original pointer and nullify it in the stream so the delete
3091 * stream call does not release the object.
3092 */
3093 reg_stream->obj.ust = stream->obj;
3094 stream->obj = NULL;
3095 buffer_reg_stream_add(reg_stream, buf_reg_chan);
3096
3097 /* We don't need the streams anymore. */
3098 cds_list_del(&stream->list);
3099 delete_ust_app_stream(-1, stream, app);
3100 }
3101
3102 error:
3103 return ret;
3104 }
3105
3106 /*
3107 * Create a buffer registry channel for the given session registry and
3108 * application channel object. If regp pointer is valid, it's set with the
3109  * created object. Important: the created object is NOT added to the session
3110 * registry hash table.
3111 *
3112 * Return 0 on success else a negative value.
3113 */
3114 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3115 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3116 {
3117 int ret;
3118 struct buffer_reg_channel *buf_reg_chan = NULL;
3119
3120 assert(reg_sess);
3121 assert(ua_chan);
3122
3123 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3124
3125 /* Create buffer registry channel. */
3126 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
3127 if (ret < 0) {
3128 goto error_create;
3129 }
3130 assert(buf_reg_chan);
3131 buf_reg_chan->consumer_key = ua_chan->key;
3132 buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3133 buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3134
3135 /* Create and add a channel registry to session. */
3136 ret = ust_registry_channel_add(reg_sess->reg.ust,
3137 ua_chan->tracing_channel_id);
3138 if (ret < 0) {
3139 goto error;
3140 }
3141 buffer_reg_channel_add(reg_sess, buf_reg_chan);
3142
3143 if (regp) {
3144 *regp = buf_reg_chan;
3145 }
3146
3147 return 0;
3148
3149 error:
3150 /* Safe because the registry channel object was not added to any HT. */
3151 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3152 error_create:
3153 return ret;
3154 }
3155
3156 /*
3157 * Setup buffer registry channel for the given session registry and application
3158 * channel object. If regp pointer is valid, it's set with the created object.
3159 *
3160 * Return 0 on success else a negative value.
3161 */
3162 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3163 struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
3164 struct ust_app *app)
3165 {
3166 int ret;
3167
3168 assert(reg_sess);
3169 assert(buf_reg_chan);
3170 assert(ua_chan);
3171 assert(ua_chan->obj);
3172
3173 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3174
3175 /* Setup all streams for the registry. */
3176 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
3177 if (ret < 0) {
3178 goto error;
3179 }
3180
3181 buf_reg_chan->obj.ust = ua_chan->obj;
3182 ua_chan->obj = NULL;
3183
3184 return 0;
3185
3186 error:
3187 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3188 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3189 return ret;
3190 }
3191
3192 /*
3193 * Send buffer registry channel to the application.
3194 *
3195 * Return 0 on success else a negative value.
3196 */
3197 static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
3198 struct ust_app *app, struct ust_app_session *ua_sess,
3199 struct ust_app_channel *ua_chan)
3200 {
3201 int ret;
3202 struct buffer_reg_stream *reg_stream;
3203
3204 assert(buf_reg_chan);
3205 assert(app);
3206 assert(ua_sess);
3207 assert(ua_chan);
3208
3209 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3210
3211 ret = duplicate_channel_object(buf_reg_chan, ua_chan);
3212 if (ret < 0) {
3213 goto error;
3214 }
3215
3216 /* Send channel to the application. */
3217 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
3218 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3219 ret = -ENOTCONN; /* Caused by app exiting. */
3220 goto error;
3221 } else if (ret < 0) {
3222 goto error;
3223 }
3224
3225 health_code_update();
3226
3227 /* Send all streams to application. */
3228 pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
3229